Diffstat (limited to 'arch')
1212 files changed, 26063 insertions, 33043 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index af14a567b493..958be0531eb9 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -106,6 +106,12 @@ config STATIC_KEYS_SELFTEST help Boot time self-test of the branch patching code. +config STATIC_CALL_SELFTEST + bool "Static call selftest" + depends on HAVE_STATIC_CALL + help + Boot time self-test of the call patching code. + config OPTPROBES def_bool y depends on KPROBES && HAVE_OPTPROBES @@ -414,6 +420,13 @@ config MMU_GATHER_NO_GATHER bool depends on MMU_GATHER_TABLE_FREE +config ARCH_WANT_IRQS_OFF_ACTIVATE_MM + bool + help + Temporary select until all architectures can be converted to have + irqs disabled over activate_mm. Architectures that do IPI based TLB + shootdowns should enable this. + config ARCH_HAVE_NMI_SAFE_CMPXCHG bool @@ -444,10 +457,23 @@ config ARCH_WANT_OLD_COMPAT_IPC select ARCH_WANT_COMPAT_IPC_PARSE_VERSION bool +config HAVE_ARCH_SECCOMP + bool + help + An arch should select this symbol to support seccomp mode 1 (the fixed + syscall policy), and must provide an overrides for __NR_seccomp_sigreturn, + and compat syscalls if the asm-generic/seccomp.h defaults need adjustment: + - __NR_seccomp_read_32 + - __NR_seccomp_write_32 + - __NR_seccomp_exit_32 + - __NR_seccomp_sigreturn_32 + config HAVE_ARCH_SECCOMP_FILTER bool + select HAVE_ARCH_SECCOMP help An arch should select this symbol if it provides all of these things: + - all the requirements for HAVE_ARCH_SECCOMP - syscall_get_arch() - syscall_get_arguments() - syscall_rollback() @@ -458,6 +484,23 @@ config HAVE_ARCH_SECCOMP_FILTER results in the system call being skipped immediately. - seccomp syscall wired up +config SECCOMP + prompt "Enable seccomp to safely execute untrusted bytecode" + def_bool y + depends on HAVE_ARCH_SECCOMP + help + This kernel feature is useful for number crunching applications + that may need to handle untrusted bytecode during their + execution. By using pipes or other transports made available + to the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in their + own address space using seccomp. Once seccomp is enabled via + prctl(PR_SET_SECCOMP) or the seccomp() syscall, it cannot be + disabled and the task is only allowed to execute a few safe + syscalls defined by each seccomp mode. + + If unsure, say Y. 
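[Editor's note, not part of the patch: the new SECCOMP help text above describes mode 1 (strict) as set up via prctl(PR_SET_SECCOMP) or the seccomp() syscall. A minimal userspace sketch of that behaviour, for context only; all calls used (prctl, PR_SET_SECCOMP, SECCOMP_MODE_STRICT, SYS_exit) are standard Linux interfaces.]

#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "now restricted to read/write/exit/sigreturn\n";

	/* Enter seccomp mode 1; this cannot be undone for the lifetime of the task. */
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0)
		return 1;	/* e.g. kernel built without CONFIG_SECCOMP */

	write(STDOUT_FILENO, msg, sizeof(msg) - 1);	/* write() is still permitted */

	/*
	 * Use the raw exit syscall: glibc's _exit() issues exit_group(),
	 * which is not on the strict-mode whitelist and would get the
	 * task killed with SIGKILL instead of exiting cleanly.
	 */
	syscall(SYS_exit, 0);
	return 0;	/* not reached */
}
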
+ config SECCOMP_FILTER def_bool y depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET @@ -975,6 +1018,13 @@ config HAVE_SPARSE_SYSCALL_NR config ARCH_HAS_VDSO_DATA bool +config HAVE_STATIC_CALL + bool + +config HAVE_STATIC_CALL_INLINE + bool + depends on HAVE_STATIC_CALL + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h index 0eac81624d01..99d631e146b2 100644 --- a/arch/alpha/include/asm/checksum.h +++ b/arch/alpha/include/asm/checksum.h @@ -42,9 +42,10 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); * better 64-bit) boundary */ #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER -__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp); +#define _HAVE_ARCH_CSUM_AND_COPY +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); -__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); /* diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 81037907268d..d84b19aa8e9d 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -11,7 +11,7 @@ #include <linux/export.h> #include <linux/scatterlist.h> #include <linux/log2.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/iommu-helper.h> #include <asm/io.h> @@ -141,12 +141,7 @@ iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena, unsigned long boundary_size; base = arena->dma_base >> PAGE_SHIFT; - if (dev) { - boundary_size = dma_get_seg_boundary(dev) + 1; - boundary_size >>= PAGE_SHIFT; - } else { - boundary_size = 1UL << (32 - PAGE_SHIFT); - } + boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT); /* Search forward for the first mask-aligned sequence of N free ptes */ ptes = arena->ptes; @@ -957,5 +952,7 @@ const struct dma_map_ops alpha_pci_ops = { .dma_supported = alpha_pci_supported, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, }; EXPORT_SYMBOL(alpha_pci_ops); diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index ec8bed9e7b75..ee7b01bb7346 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -479,3 +479,4 @@ 547 common openat2 sys_openat2 548 common pidfd_getfd sys_pidfd_getfd 549 common faccessat2 sys_faccessat2 +550 common process_madvise sys_process_madvise diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index bc6f727278fd..5b78d640725d 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -72,6 +72,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index af1dad74e933..dc68efbe9367 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -39,12 +39,11 @@ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define insqh(x,y,z) \ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) - -#define __get_user_u(x,ptr) \ +#define __get_word(insn,x,ptr) \ ({ \ long __guu_err; \ __asm__ __volatile__( \ - "1: ldq_u %0,%2\n" \ + "1: "#insn" %0,%2\n" \ "2:\n" \ EXC(1b,2b,%0,%1) \ : "=r"(x), "=r"(__guu_err) \ @@ -52,19 +51,6 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" 
(z):"r" (x),"r" (y)) __guu_err; \ }) -#define __put_user_u(x,ptr) \ -({ \ - long __puu_err; \ - __asm__ __volatile__( \ - "1: stq_u %2,%1\n" \ - "2:\n" \ - EXC(1b,2b,$31,%0) \ - : "=r"(__puu_err) \ - : "m"(__m(addr)), "rJ"(x), "0"(0)); \ - __puu_err; \ -}) - - static inline unsigned short from64to16(unsigned long x) { /* Using extract instructions is a bit more efficient @@ -95,15 +81,15 @@ static inline unsigned short from64to16(unsigned long x) */ static inline unsigned long csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, - long len, unsigned long checksum, - int *errp) + long len) { + unsigned long checksum = ~0U; unsigned long carry = 0; - int err = 0; while (len >= 0) { unsigned long word; - err |= __get_user(word, src); + if (__get_word(ldq, word, src)) + return 0; checksum += carry; src++; checksum += word; @@ -116,7 +102,8 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, checksum += carry; if (len) { unsigned long word, tmp; - err |= __get_user(word, src); + if (__get_word(ldq, word, src)) + return 0; tmp = *dst; mskql(word, len, word); checksum += word; @@ -125,7 +112,6 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, *dst = word | tmp; checksum += carry; } - if (err && errp) *errp = err; return checksum; } @@ -137,20 +123,21 @@ static inline unsigned long csum_partial_cfu_dest_aligned(const unsigned long __user *src, unsigned long *dst, unsigned long soff, - long len, unsigned long checksum, - int *errp) + long len) { unsigned long first; unsigned long word, carry; unsigned long lastsrc = 7+len+(unsigned long)src; - int err = 0; + unsigned long checksum = ~0U; - err |= __get_user_u(first,src); + if (__get_word(ldq_u, first,src)) + return 0; carry = 0; while (len >= 0) { unsigned long second; - err |= __get_user_u(second, src+1); + if (__get_word(ldq_u, second, src+1)) + return 0; extql(first, soff, word); len -= 8; src++; @@ -168,7 +155,8 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src, if (len) { unsigned long tmp; unsigned long second; - err |= __get_user_u(second, lastsrc); + if (__get_word(ldq_u, second, lastsrc)) + return 0; tmp = *dst; extql(first, soff, word); extqh(second, soff, first); @@ -180,7 +168,6 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src, *dst = word | tmp; checksum += carry; } - if (err && errp) *errp = err; return checksum; } @@ -191,18 +178,18 @@ static inline unsigned long csum_partial_cfu_src_aligned(const unsigned long __user *src, unsigned long *dst, unsigned long doff, - long len, unsigned long checksum, - unsigned long partial_dest, - int *errp) + long len, + unsigned long partial_dest) { unsigned long carry = 0; unsigned long word; unsigned long second_dest; - int err = 0; + unsigned long checksum = ~0U; mskql(partial_dest, doff, partial_dest); while (len >= 0) { - err |= __get_user(word, src); + if (__get_word(ldq, word, src)) + return 0; len -= 8; insql(word, doff, second_dest); checksum += carry; @@ -216,7 +203,8 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src, len += 8; if (len) { checksum += carry; - err |= __get_user(word, src); + if (__get_word(ldq, word, src)) + return 0; mskql(word, len, word); len -= 8; checksum += word; @@ -237,7 +225,6 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src, stq_u(partial_dest | second_dest, dst); out: checksum += carry; - if (err && errp) *errp = err; return checksum; } @@ -249,23 +236,23 @@ static inline unsigned long csum_partial_cfu_unaligned(const 
unsigned long __user * src, unsigned long * dst, unsigned long soff, unsigned long doff, - long len, unsigned long checksum, - unsigned long partial_dest, - int *errp) + long len, unsigned long partial_dest) { unsigned long carry = 0; unsigned long first; unsigned long lastsrc; - int err = 0; + unsigned long checksum = ~0U; - err |= __get_user_u(first, src); + if (__get_word(ldq_u, first, src)) + return 0; lastsrc = 7+len+(unsigned long)src; mskql(partial_dest, doff, partial_dest); while (len >= 0) { unsigned long second, word; unsigned long second_dest; - err |= __get_user_u(second, src+1); + if (__get_word(ldq_u, second, src+1)) + return 0; extql(first, soff, word); checksum += carry; len -= 8; @@ -286,7 +273,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src, unsigned long second, word; unsigned long second_dest; - err |= __get_user_u(second, lastsrc); + if (__get_word(ldq_u, second, lastsrc)) + return 0; extql(first, soff, word); extqh(second, soff, first); word |= first; @@ -307,7 +295,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src, unsigned long second, word; unsigned long second_dest; - err |= __get_user_u(second, lastsrc); + if (__get_word(ldq_u, second, lastsrc)) + return 0; extql(first, soff, word); extqh(second, soff, first); word |= first; @@ -320,66 +309,55 @@ csum_partial_cfu_unaligned(const unsigned long __user * src, stq_u(partial_dest | word | second_dest, dst); checksum += carry; } - if (err && errp) *errp = err; return checksum; } -__wsum -csum_and_copy_from_user(const void __user *src, void *dst, int len, - __wsum sum, int *errp) +static __wsum __csum_and_copy(const void __user *src, void *dst, int len) { - unsigned long checksum = (__force u32) sum; unsigned long soff = 7 & (unsigned long) src; unsigned long doff = 7 & (unsigned long) dst; - - if (len) { - if (!access_ok(src, len)) { - if (errp) *errp = -EFAULT; - memset(dst, 0, len); - return sum; - } - if (!doff) { - if (!soff) - checksum = csum_partial_cfu_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - len-8, checksum, errp); - else - checksum = csum_partial_cfu_dest_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - soff, len-8, checksum, errp); - } else { - unsigned long partial_dest; - ldq_u(partial_dest, dst); - if (!soff) - checksum = csum_partial_cfu_src_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - doff, len-8, checksum, - partial_dest, errp); - else - checksum = csum_partial_cfu_unaligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - soff, doff, len-8, checksum, - partial_dest, errp); - } - checksum = from64to16 (checksum); + unsigned long checksum; + + if (!doff) { + if (!soff) + checksum = csum_partial_cfu_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); + else + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, + soff, len-8); + } else { + unsigned long partial_dest; + ldq_u(partial_dest, dst); + if (!soff) + checksum = csum_partial_cfu_src_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, + doff, len-8, partial_dest); + else + checksum = csum_partial_cfu_unaligned( + (const unsigned long __user *) src, + (unsigned long *) dst, + soff, doff, len-8, partial_dest); } - return (__force __wsum)checksum; + return (__force __wsum)from64to16 (checksum); +} + +__wsum +csum_and_copy_from_user(const void __user *src, void *dst, int len) +{ + if (!access_ok(src, len)) + return 0; + 
return __csum_and_copy(src, dst, len); } EXPORT_SYMBOL(csum_and_copy_from_user); __wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len) { - __wsum checksum; - mm_segment_t oldfs = get_fs(); - set_fs(KERNEL_DS); - checksum = csum_and_copy_from_user((__force const void __user *)src, - dst, len, sum, NULL); - set_fs(oldfs); - return checksum; + return __csum_and_copy((__force const void __user *)src, + dst, len); } EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index ba00c4e1e1c2..19f8f2367d6d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -96,8 +96,6 @@ menu "ARC Platform/SoC/Board" source "arch/arc/plat-tb10x/Kconfig" source "arch/arc/plat-axs10x/Kconfig" -#New platform adds here -source "arch/arc/plat-eznps/Kconfig" source "arch/arc/plat-hsdk/Kconfig" endmenu diff --git a/arch/arc/Makefile b/arch/arc/Makefile index d00f8b8afd08..0c6bf0d1df7a 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -94,13 +94,8 @@ core-y += arch/arc/boot/dts/ core-y += arch/arc/plat-sim/ core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/ core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/ -core-$(CONFIG_ARC_PLAT_EZNPS) += arch/arc/plat-eznps/ core-$(CONFIG_ARC_SOC_HSDK) += arch/arc/plat-hsdk/ -ifdef CONFIG_ARC_PLAT_EZNPS -KBUILD_CPPFLAGS += -I$(srctree)/arch/arc/plat-eznps/include -endif - drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/ libs-y += arch/arc/lib/ $(LIBGCC) diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi index 79ec27c043c1..2a151607b080 100644 --- a/arch/arc/boot/dts/axc001.dtsi +++ b/arch/arc/boot/dts/axc001.dtsi @@ -91,7 +91,7 @@ * avoid duplicating the MB dtsi file given that IRQ from * this intc to cpu intc are different for axs101 and axs103 */ - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0x0 0xe0012000 0x0 0x200 >; diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index ac8e1b463a70..cd1edcf4f95e 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi @@ -129,7 +129,7 @@ * avoid duplicating the MB dtsi file given that IRQ from * this intc to cpu intc are different for axs101 and axs103 */ - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0x0 0xe0012000 0x0 0x200 >; diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 9da21e7fd246..70779386ca79 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi @@ -135,7 +135,7 @@ * avoid duplicating the MB dtsi file given that IRQ from * this intc to cpu intc are different for axs101 and axs103 */ - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0x0 0xe0012000 0x0 0x200 >; diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts deleted file mode 100644 index a7e2e8d8ff06..000000000000 --- a/arch/arc/boot/dts/eznps.dts +++ /dev/null @@ -1,84 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright(c) 2015 EZchip Technologies. 
- */ - -/dts-v1/; - -/ { - compatible = "ezchip,arc-nps"; - #address-cells = <1>; - #size-cells = <1>; - interrupt-parent = <&intc>; - present-cpus = "0-1,16-17"; - possible-cpus = "0-4095"; - - aliases { - ethernet0 = &gmac0; - }; - - chosen { - bootargs = "earlycon=uart8250,mmio32be,0xf7209000,115200n8 console=ttyS0,115200n8"; - }; - - memory { - device_type = "memory"; - reg = <0x80000000 0x20000000>; /* 512M */ - }; - - clocks { - sysclk: sysclk { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <83333333>; - }; - }; - - soc { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - - /* child and parent address space 1:1 mapped */ - ranges; - - intc: interrupt-controller { - compatible = "ezchip,nps400-ic"; - interrupt-controller; - #interrupt-cells = <1>; - }; - - timer0: timer_clkevt { - compatible = "snps,arc-timer"; - interrupts = <3>; - clocks = <&sysclk>; - }; - - timer1: timer_clksrc { - compatible = "ezchip,nps400-timer"; - clocks = <&sysclk>; - clock-names="sysclk"; - }; - - uart@f7209000 { - compatible = "snps,dw-apb-uart"; - device_type = "serial"; - reg = <0xf7209000 0x100>; - interrupts = <6>; - clocks = <&sysclk>; - clock-names="baudclk"; - baud = <115200>; - reg-shift = <2>; - reg-io-width = <4>; - native-endian; - }; - - gmac0: ethernet@f7470000 { - compatible = "ezchip,nps-mgt-enet"; - reg = <0xf7470000 0x1940>; - interrupts = <7>; - /* Filled in by U-Boot */ - mac-address = [ 00 C0 00 F0 04 03 ]; - }; - }; -}; diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi index f8be7ba8dad4..c21d0eb07bf6 100644 --- a/arch/arc/boot/dts/vdk_axc003.dtsi +++ b/arch/arc/boot/dts/vdk_axc003.dtsi @@ -46,7 +46,7 @@ }; - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0xe0012000 0x200 >; diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi index 0afa3e53a4e3..4d348853ac7c 100644 --- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi +++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi @@ -54,7 +54,7 @@ }; - mb_intc: dw-apb-ictl@e0012000 { + mb_intc: interrupt-controller@e0012000 { #interrupt-cells = <1>; compatible = "snps,dw-apb-ictl"; reg = < 0xe0012000 0x200 >; diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig deleted file mode 100644 index f7a978dfdf1d..000000000000 --- a/arch/arc/configs/nps_defconfig +++ /dev/null @@ -1,80 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -# CONFIG_SWAP is not set -CONFIG_SYSVIPC=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y -# CONFIG_EPOLL is not set -# CONFIG_SIGNALFD is not set -# CONFIG_TIMERFD is not set -# CONFIG_EVENTFD is not set -# CONFIG_AIO is not set -CONFIG_EMBEDDED=y -CONFIG_PERF_EVENTS=y -# CONFIG_COMPAT_BRK is not set -CONFIG_ISA_ARCOMPACT=y -CONFIG_KPROBES=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_ARC_PLAT_EZNPS=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4096 -CONFIG_ARC_CACHE_LINE_SHIFT=5 -# CONFIG_ARC_CACHE_PAGES is not set -# CONFIG_ARC_HAS_LLSC is not set -CONFIG_ARC_KVADDR_SIZE=402 -CONFIG_ARC_EMUL_UNALIGNED=y -CONFIG_PREEMPT=y -CONFIG_NET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_DIAG is not set -# 
CONFIG_IPV6 is not set -# CONFIG_WIRELESS is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=1 -CONFIG_BLK_DEV_RAM_SIZE=2048 -CONFIG_NETDEVICES=y -CONFIG_NETCONSOLE=y -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV is not set -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=1 -CONFIG_SERIAL_8250_RUNTIME_UARTS=1 -CONFIG_SERIAL_8250_DW=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -# CONFIG_USB_SUPPORT is not set -# CONFIG_DNOTIFY is not set -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -# CONFIG_MISC_FILESYSTEMS is not set -CONFIG_NFS_FS=y -CONFIG_NFS_V3_ACL=y -CONFIG_ROOT_NFS=y -CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_ENABLE_DEFAULT_TRACERS=y diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index c614857eb209..5afc79c9b2f5 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -14,8 +14,6 @@ #include <asm/barrier.h> #include <asm/smp.h> -#ifndef CONFIG_ARC_PLAT_EZNPS - #define atomic_read(v) READ_ONCE((v)->counter) #ifdef CONFIG_ARC_HAS_LLSC @@ -45,7 +43,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ \ /* \ * Explicit full memory barrier needed before/after as \ - * LLOCK/SCOND thmeselves don't provide any such semantics \ + * LLOCK/SCOND themselves don't provide any such semantics \ */ \ smp_mb(); \ \ @@ -71,7 +69,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ \ /* \ * Explicit full memory barrier needed before/after as \ - * LLOCK/SCOND thmeselves don't provide any such semantics \ + * LLOCK/SCOND themselves don't provide any such semantics \ */ \ smp_mb(); \ \ @@ -195,108 +193,6 @@ ATOMIC_OPS(andnot, &= ~, bic) ATOMIC_OPS(or, |=, or) ATOMIC_OPS(xor, ^=, xor) -#else /* CONFIG_ARC_PLAT_EZNPS */ - -static inline int atomic_read(const atomic_t *v) -{ - int temp; - - __asm__ __volatile__( - " ld.di %0, [%1]" - : "=r"(temp) - : "r"(&v->counter) - : "memory"); - return temp; -} - -static inline void atomic_set(atomic_t *v, int i) -{ - __asm__ __volatile__( - " st.di %0,[%1]" - : - : "r"(i), "r"(&v->counter) - : "memory"); -} - -#define ATOMIC_OP(op, c_op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ -{ \ - __asm__ __volatile__( \ - " mov r2, %0\n" \ - " mov r3, %1\n" \ - " .word %2\n" \ - : \ - : "r"(i), "r"(&v->counter), "i"(asm_op) \ - : "r2", "r3", "memory"); \ -} \ - -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ -{ \ - unsigned int temp = i; \ - \ - /* Explicit full memory barrier needed before/after */ \ - smp_mb(); \ - \ - __asm__ __volatile__( \ - " mov r2, %0\n" \ - " mov r3, %1\n" \ - " .word %2\n" \ - " mov %0, r2" \ - : "+r"(temp) \ - : "r"(&v->counter), "i"(asm_op) \ - : "r2", "r3", "memory"); \ - \ - smp_mb(); \ - \ - temp c_op i; \ - \ - return temp; \ -} - -#define ATOMIC_FETCH_OP(op, c_op, asm_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ -{ \ - unsigned int temp = i; \ - \ - /* Explicit full memory barrier needed before/after */ \ - smp_mb(); \ - \ - __asm__ __volatile__( \ - " mov r2, %0\n" \ - " mov r3, %1\n" 
\ - " .word %2\n" \ - " mov %0, r2" \ - : "+r"(temp) \ - : "r"(&v->counter), "i"(asm_op) \ - : "r2", "r3", "memory"); \ - \ - smp_mb(); \ - \ - return temp; \ -} - -#define ATOMIC_OPS(op, c_op, asm_op) \ - ATOMIC_OP(op, c_op, asm_op) \ - ATOMIC_OP_RETURN(op, c_op, asm_op) \ - ATOMIC_FETCH_OP(op, c_op, asm_op) - -ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3) -#define atomic_sub(i, v) atomic_add(-(i), (v)) -#define atomic_sub_return(i, v) atomic_add_return(-(i), (v)) -#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v)) - -#undef ATOMIC_OPS -#define ATOMIC_OPS(op, c_op, asm_op) \ - ATOMIC_OP(op, c_op, asm_op) \ - ATOMIC_FETCH_OP(op, c_op, asm_op) - -ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3) -ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3) -ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3) - -#endif /* CONFIG_ARC_PLAT_EZNPS */ - #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h index 7823811e7cf5..4637de9e02fa 100644 --- a/arch/arc/include/asm/barrier.h +++ b/arch/arc/include/asm/barrier.h @@ -27,7 +27,7 @@ #define rmb() asm volatile("dmb 1\n" : : : "memory") #define wmb() asm volatile("dmb 2\n" : : : "memory") -#elif !defined(CONFIG_ARC_PLAT_EZNPS) /* CONFIG_ISA_ARCOMPACT */ +#else /* * ARCompact based cores (ARC700) only have SYNC instruction which is super @@ -37,13 +37,6 @@ #define mb() asm volatile("sync\n" : : : "memory") -#else /* CONFIG_ARC_PLAT_EZNPS */ - -#include <plat/ctop.h> - -#define mb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory") -#define rmb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RD) : "memory") - #endif #include <asm-generic/barrier.h> diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 50eb3f64a77c..c6606f4d20d6 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -85,7 +85,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long * return (old & (1 << nr)) != 0; \ } -#elif !defined(CONFIG_ARC_PLAT_EZNPS) +#else /* !CONFIG_ARC_HAS_LLSC */ /* * Non hardware assisted Atomic-R-M-W @@ -136,55 +136,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long * return (old & (1UL << (nr & 0x1f))) != 0; \ } -#else /* CONFIG_ARC_PLAT_EZNPS */ - -#define BIT_OP(op, c_op, asm_op) \ -static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ -{ \ - m += nr >> 5; \ - \ - nr = (1UL << (nr & 0x1f)); \ - if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \ - nr = ~nr; \ - \ - __asm__ __volatile__( \ - " mov r2, %0\n" \ - " mov r3, %1\n" \ - " .word %2\n" \ - : \ - : "r"(nr), "r"(m), "i"(asm_op) \ - : "r2", "r3", "memory"); \ -} - -#define TEST_N_BIT_OP(op, c_op, asm_op) \ -static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ -{ \ - unsigned long old; \ - \ - m += nr >> 5; \ - \ - nr = old = (1UL << (nr & 0x1f)); \ - if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \ - old = ~old; \ - \ - /* Explicit full memory barrier needed before/after */ \ - smp_mb(); \ - \ - __asm__ __volatile__( \ - " mov r2, %0\n" \ - " mov r3, %1\n" \ - " .word %2\n" \ - " mov %0, r2" \ - : "+r"(old) \ - : "r"(m), "i"(asm_op) \ - : "r2", "r3", "memory"); \ - \ - smp_mb(); \ - \ - return (old & nr) != 0; \ -} - -#endif /* CONFIG_ARC_PLAT_EZNPS */ +#endif /*************************************** * Non atomic variants @@ -226,15 +178,9 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long /* __test_and_set_bit(), 
__test_and_clear_bit(), __test_and_change_bit() */\ __TEST_N_BIT_OP(op, c_op, asm_op) -#ifndef CONFIG_ARC_PLAT_EZNPS BIT_OPS(set, |, bset) BIT_OPS(clear, & ~, bclr) BIT_OPS(change, ^, bxor) -#else -BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3) -BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3) -BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3) -#endif /* * This routine doesn't need to be atomic. diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index c11398160240..9b87e162e539 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -20,7 +20,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) /* * Explicit full memory barrier needed before/after as - * LLOCK/SCOND thmeselves don't provide any such semantics + * LLOCK/SCOND themselves don't provide any such semantics */ smp_mb(); @@ -41,7 +41,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) return prev; } -#elif !defined(CONFIG_ARC_PLAT_EZNPS) +#else /* !CONFIG_ARC_HAS_LLSC */ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) @@ -61,33 +61,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) return prev; } -#else /* CONFIG_ARC_PLAT_EZNPS */ - -static inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) -{ - /* - * Explicit full memory barrier needed before/after - */ - smp_mb(); - - write_aux_reg(CTOP_AUX_GPA1, expected); - - __asm__ __volatile__( - " mov r2, %0\n" - " mov r3, %1\n" - " .word %2\n" - " mov %0, r2" - : "+r"(new) - : "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3) - : "r2", "r3", "memory"); - - smp_mb(); - - return new; -} - -#endif /* CONFIG_ARC_HAS_LLSC */ +#endif #define cmpxchg(ptr, o, n) ({ \ (typeof(*(ptr)))__cmpxchg((ptr), \ @@ -104,8 +78,6 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) -#ifndef CONFIG_ARC_PLAT_EZNPS - /* * xchg (reg with memory) based on "Native atomic" EX insn */ @@ -168,44 +140,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, #endif -#else /* CONFIG_ARC_PLAT_EZNPS */ - -static inline unsigned long __xchg(unsigned long val, volatile void *ptr, - int size) -{ - extern unsigned long __xchg_bad_pointer(void); - - switch (size) { - case 4: - /* - * Explicit full memory barrier needed before/after - */ - smp_mb(); - - __asm__ __volatile__( - " mov r2, %0\n" - " mov r3, %1\n" - " .word %2\n" - " mov %0, r2\n" - : "+r"(val) - : "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3) - : "r2", "r3", "memory"); - - smp_mb(); - - return val; - } - return __xchg_bad_pointer(); -} - -#define xchg(ptr, with) ({ \ - (typeof(*(ptr)))__xchg((unsigned long)(with), \ - (ptr), \ - sizeof(*(ptr))); \ -}) - -#endif /* CONFIG_ARC_PLAT_EZNPS */ - /* * "atomic" variant of xchg() * REQ: It needs to follow the same serialization rules as other atomic_xxx() diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h index c3aa775878dc..6dbf5cecc8cc 100644 --- a/arch/arc/include/asm/entry-compact.h +++ b/arch/arc/include/asm/entry-compact.h @@ -33,10 +33,6 @@ #include <asm/irqflags-compact.h> #include <asm/thread_info.h> /* For THREAD_SIZE */ -#ifdef CONFIG_ARC_PLAT_EZNPS -#include <plat/ctop.h> -#endif - /*-------------------------------------------------------------- * Switch to Kernel Mode stack if SP points to User Mode stack * @@ -189,12 +185,6 @@ PUSHAX lp_start PUSHAX 
erbta -#ifdef CONFIG_ARC_PLAT_EZNPS - .word CTOP_INST_SCHD_RW - PUSHAX CTOP_AUX_GPA1 - PUSHAX CTOP_AUX_EFLAGS -#endif - lr r10, [ecr] st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */ .endm @@ -211,11 +201,6 @@ * by hardware and that is not good. *-------------------------------------------------------------*/ .macro EXCEPTION_EPILOGUE -#ifdef CONFIG_ARC_PLAT_EZNPS - .word CTOP_INST_SCHD_RW - POPAX CTOP_AUX_EFLAGS - POPAX CTOP_AUX_GPA1 -#endif POPAX erbta POPAX lp_start @@ -278,11 +263,6 @@ PUSHAX lp_start PUSHAX bta_l\LVL\() -#ifdef CONFIG_ARC_PLAT_EZNPS - .word CTOP_INST_SCHD_RW - PUSHAX CTOP_AUX_GPA1 - PUSHAX CTOP_AUX_EFLAGS -#endif .endm /*-------------------------------------------------------------- @@ -295,11 +275,6 @@ * by hardware and that is not good. *-------------------------------------------------------------*/ .macro INTERRUPT_EPILOGUE LVL -#ifdef CONFIG_ARC_PLAT_EZNPS - .word CTOP_INST_SCHD_RW - POPAX CTOP_AUX_EFLAGS - POPAX CTOP_AUX_GPA1 -#endif POPAX bta_l\LVL\() POPAX lp_start @@ -327,13 +302,11 @@ bic \reg, sp, (THREAD_SIZE - 1) .endm -#ifndef CONFIG_ARC_PLAT_EZNPS /* Get CPU-ID of this core */ .macro GET_CPU_ID reg lr \reg, [identity] lsr \reg, \reg, 8 bmsk \reg, \reg, 7 .endm -#endif #endif /* __ASM_ARC_ENTRY_COMPACT_H */ diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 0fcea5bad343..e4031ecd3c8c 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -17,13 +17,6 @@ #include <asm/dsp.h> #include <asm/fpu.h> -#ifdef CONFIG_ARC_PLAT_EZNPS -struct eznps_dp { - unsigned int eflags; - unsigned int gpa1; -}; -#endif - /* Arch specific stuff which needs to be saved per task. * However these items are not so important so as to earn a place in * struct thread_info @@ -38,9 +31,6 @@ struct thread_struct { #ifdef CONFIG_ARC_FPU_SAVE_RESTORE struct arc_fpu fpu; #endif -#ifdef CONFIG_ARC_PLAT_EZNPS - struct eznps_dp dp; -#endif }; #define INIT_THREAD { \ @@ -60,17 +50,8 @@ struct task_struct; * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise * get optimised away by gcc */ -#ifndef CONFIG_EZNPS_MTM_EXT - #define cpu_relax() barrier() -#else - -#define cpu_relax() \ - __asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory") - -#endif - #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) @@ -118,25 +99,7 @@ extern unsigned int get_wchan(struct task_struct *p); #define USER_KERNEL_GUTTER (VMALLOC_START - TASK_SIZE) -#ifdef CONFIG_ARC_PLAT_EZNPS -/* NPS architecture defines special window of 129M in user address space for - * special memory areas, when accessing this window the MMU do not use TLB. - * Instead MMU direct the access to: - * 0x57f00000:0x57ffffff -- 1M of closely coupled memory (aka CMEM) - * 0x58000000:0x5fffffff -- 16 huge pages, 8M each, with fixed map (aka FMTs) - * - * CMEM - is the fastest memory we got and its size is 16K. - * FMT - is used to map either to internal/external memory. - * Internal memory is the second fast memory and its size is 16M - * External memory is the biggest memory (16G) and also the slowest. - * - * STACK_TOP need to be PMD align (21bit) that is why we supply 0x57e00000. 
- */ -#define STACK_TOP 0x57e00000 -#else #define STACK_TOP TASK_SIZE -#endif - #define STACK_TOP_MAX STACK_TOP /* This decides where the kernel will search for a free chunk of vm diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 2fdb87addadc..4c3c9be5bd16 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -16,11 +16,6 @@ #ifdef CONFIG_ISA_ARCOMPACT struct pt_regs { -#ifdef CONFIG_ARC_PLAT_EZNPS - unsigned long eflags; /* Extended FLAGS */ - unsigned long gpa1; /* General Purpose Aux */ -#endif - /* Real registers */ unsigned long bta; /* bta_l1, bta_l2, erbta */ diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h index 61a97fe70b86..01f85478170d 100644 --- a/arch/arc/include/asm/setup.h +++ b/arch/arc/include/asm/setup.h @@ -9,11 +9,7 @@ #include <linux/types.h> #include <uapi/asm/setup.h> -#ifdef CONFIG_ARC_PLAT_EZNPS -#define COMMAND_LINE_SIZE 2048 -#else #define COMMAND_LINE_SIZE 256 -#endif /* * Data structure to map a ID to string diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index 94bbed88e3fc..192871608925 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -232,15 +232,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ex %0, [%1] \n" -#ifdef CONFIG_EZNPS_MTM_EXT - " .word %3 \n" -#endif " breq %0, %2, 1b \n" : "+&r" (val) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) -#ifdef CONFIG_EZNPS_MTM_EXT - , "i"(CTOP_INST_SCHD_RW) -#endif : "memory"); smp_mb(); diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h index 4a3d67989d19..1f85de8288b1 100644 --- a/arch/arc/include/asm/switch_to.h +++ b/arch/arc/include/asm/switch_to.h @@ -12,19 +12,10 @@ #include <asm/dsp-impl.h> #include <asm/fpu.h> -#ifdef CONFIG_ARC_PLAT_EZNPS -extern void dp_save_restore(struct task_struct *p, struct task_struct *n); -#define ARC_EZNPS_DP_PREV(p, n) dp_save_restore(p, n) -#else -#define ARC_EZNPS_DP_PREV(p, n) - -#endif /* !CONFIG_ARC_PLAT_EZNPS */ - struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n); #define switch_to(prev, next, last) \ do { \ - ARC_EZNPS_DP_PREV(prev, next); \ dsp_save_restore(prev, next); \ fpu_save_restore(prev, next); \ last = __switch_to(prev, next);\ diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c index e172c3333a84..1a76f2d6f694 100644 --- a/arch/arc/kernel/ctx_sw.c +++ b/arch/arc/kernel/ctx_sw.c @@ -14,9 +14,6 @@ #include <asm/asm-offsets.h> #include <linux/sched.h> #include <linux/sched/debug.h> -#ifdef CONFIG_ARC_PLAT_EZNPS -#include <plat/ctop.h> -#endif #define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4) @@ -68,16 +65,9 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) #ifndef CONFIG_SMP "st %2, [@_current_task] \n\t" #else -#ifdef CONFIG_ARC_PLAT_EZNPS - "lr r24, [%4] \n\t" -#ifndef CONFIG_EZNPS_MTM_EXT - "lsr r24, r24, 4 \n\t" -#endif -#else "lr r24, [identity] \n\t" "lsr r24, r24, 8 \n\t" "bmsk r24, r24, 7 \n\t" -#endif "add2 r24, @_current_task, r24 \n\t" "st %2, [r24] \n\t" #endif @@ -115,9 +105,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) : "=r"(tmp) : "n"(KSP_WORD_OFF), "r"(next), "r"(prev) -#ifdef CONFIG_ARC_PLAT_EZNPS - , "i"(CTOP_AUX_LOGIC_GLOBAL_ID) -#endif : "blink" ); diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c index fa86d13df5ed..721d465f1580 100644 --- a/arch/arc/kernel/devtree.c +++ b/arch/arc/kernel/devtree.c @@ 
-29,8 +29,6 @@ static void __init arc_set_early_base_baud(unsigned long dt_root) else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp") || of_flat_dt_is_compatible(dt_root, "snps,hsdk")) arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x & HSDK) */ - else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps")) - arc_base_baud = 800000000; /* Fixed 800MHz clk (NPS) */ else arc_base_baud = 50000000; /* Fixed default 50MHz */ } diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index 7d3efe83cba7..cabef45f11df 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -388,6 +388,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, { ri->ret_addr = (kprobe_opcode_t *) regs->blink; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->blink = (unsigned long)&kretprobe_trampoline; @@ -396,58 +397,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) { - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - regs->ret = orig_ret_address; - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + regs->ret = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); /* By returning a non zero value, we are telling the kprobe handler * that we don't want the post_handler to run diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index efeba1fe7252..37f724ad5e39 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -116,17 +116,6 @@ void arch_cpu_idle(void) :"I"(arg)); /* can't be "r" has to be embedded const */ } -#elif defined(CONFIG_EZNPS_MTM_EXT) /* ARC700 variant in NPS */ - -void arch_cpu_idle(void) -{ - /* only the calling HW thread needs to sleep */ - __asm__ __volatile__( - ".word %0 \n" - : - :"i"(CTOP_INST_HWSCHD_WFT_IE12)); -} - #else /* ARC700 */ void arch_cpu_idle(void) @@ -278,10 +267,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) */ regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS; -#ifdef CONFIG_EZNPS_MTM_EXT - regs->eflags = 0; -#endif - fpu_init_task(regs); /* bogus seed values for debugging */ diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index eca35e02ce06..52906d314537 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -226,7 +226,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) } if (!cpu_online(cpu)) { - pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu); + pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu); return -1; } diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index 54139a6f469b..33ce59d91461 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -122,6 +122,7 @@ SECTIONS _end = . ; STABS_DEBUG + ELF_DETAILS DISCARDS .arcextmap 0 : { diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index e947572a521e..517988e60cfc 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -3,7 +3,7 @@ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) */ -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <asm/cache.h> #include <asm/cacheflush.h> diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index 31f54bdd95f2..062fae46c3f8 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S @@ -281,13 +281,6 @@ ex_saved_reg1: .macro COMMIT_ENTRY_TO_MMU #if (CONFIG_ARC_MMU_VER < 4) -#ifdef CONFIG_EZNPS_MTM_EXT - /* verify if entry for this vaddr+ASID already exists */ - sr TLBProbe, [ARC_REG_TLBCOMMAND] - lr r0, [ARC_REG_TLBINDEX] - bbit0 r0, 31, 88f -#endif - /* Get free TLB slot: Set = computed from vaddr, way = random */ sr TLBGetIndex, [ARC_REG_TLBCOMMAND] diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig deleted file mode 100644 index a645bca5899a..000000000000 --- a/arch/arc/plat-eznps/Kconfig +++ /dev/null @@ -1,58 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# For a description of the syntax of this configuration file, -# see Documentation/kbuild/kconfig-language.rst. 
-# - -menuconfig ARC_PLAT_EZNPS - bool "\"EZchip\" ARC dev platform" - depends on ISA_ARCOMPACT - select CPU_BIG_ENDIAN - select CLKSRC_NPS if !PHYS_ADDR_T_64BIT - select EZNPS_GIC - select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET - help - Support for EZchip development platforms, - based on ARC700 cores. - We handle few flavors: - - Hardware Emulator AKA HE which is FPGA based chassis - - Simulator based on MetaWare nSIM - - NPS400 chip based on ASIC - -config EZNPS_MTM_EXT - bool "ARC-EZchip MTM Extensions" - select CPUMASK_OFFSTACK - depends on ARC_PLAT_EZNPS && SMP - default y - help - Here we add new hierarchy for CPUs topology. - We got: - Core - Thread - At the new thread level each CPU represent one HW thread. - At highest hierarchy each core contain 16 threads, - any of them seem like CPU from Linux point of view. - All threads within same core share the execution unit of the - core and HW scheduler round robin between them. - -config EZNPS_MEM_ERROR_ALIGN - bool "ARC-EZchip Memory error as an exception" - depends on EZNPS_MTM_EXT - default n - help - On the real chip of the NPS, user memory errors are handled - as a machine check exception, which is fatal, whereas on - simulator platform for NPS, is handled as a Level 2 interrupt - (just a stock ARC700) which is recoverable. This option makes - simulator behave like hardware. - -config EZNPS_SHARED_AUX_REGS - bool "ARC-EZchip Shared Auxiliary Registers Per Core" - depends on ARC_PLAT_EZNPS - default y - help - On the real chip of the NPS, auxiliary registers are shared between - all the cpus of the core, whereas on simulator platform for NPS, - each cpu has a different set of auxiliary registers. Configuration - should be unset if auxiliary registers are not shared between the cpus - of the core, so there will be a need to initialize them per cpu. diff --git a/arch/arc/plat-eznps/Makefile b/arch/arc/plat-eznps/Makefile deleted file mode 100644 index ebb9723002cf..000000000000 --- a/arch/arc/plat-eznps/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the linux kernel. -# - -obj-y := entry.o platform.o ctop.o -obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_EZNPS_MTM_EXT) += mtm.o diff --git a/arch/arc/plat-eznps/ctop.c b/arch/arc/plat-eznps/ctop.c deleted file mode 100644 index b398e6e838a9..000000000000 --- a/arch/arc/plat-eznps/ctop.c +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright(c) 2015 EZchip Technologies. - */ - -#include <linux/sched.h> -#include <asm/processor.h> -#include <plat/ctop.h> - -void dp_save_restore(struct task_struct *prev, struct task_struct *next) -{ - struct eznps_dp *prev_task_dp = &prev->thread.dp; - struct eznps_dp *next_task_dp = &next->thread.dp; - - /* Here we save all Data Plane related auxiliary registers */ - prev_task_dp->eflags = read_aux_reg(CTOP_AUX_EFLAGS); - write_aux_reg(CTOP_AUX_EFLAGS, next_task_dp->eflags); - - prev_task_dp->gpa1 = read_aux_reg(CTOP_AUX_GPA1); - write_aux_reg(CTOP_AUX_GPA1, next_task_dp->gpa1); -} diff --git a/arch/arc/plat-eznps/entry.S b/arch/arc/plat-eznps/entry.S deleted file mode 100644 index 3f18c0108e72..000000000000 --- a/arch/arc/plat-eznps/entry.S +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/******************************************************************************* - - EZNPS CPU startup Code - Copyright(c) 2012 EZchip Technologies. 
- - -*******************************************************************************/ -#include <linux/linkage.h> -#include <asm/entry.h> -#include <asm/cache.h> -#include <plat/ctop.h> - - .cpu A7 - - .section .init.text, "ax",@progbits - .align 1024 ; HW requierment for restart first PC - -ENTRY(res_service) -#if defined(CONFIG_EZNPS_MTM_EXT) && defined(CONFIG_EZNPS_SHARED_AUX_REGS) - ; There is no work for HW thread id != 0 - lr r3, [CTOP_AUX_THREAD_ID] - cmp r3, 0 - jne stext -#endif - -#ifdef CONFIG_ARC_HAS_DCACHE - ; With no cache coherency mechanism D$ need to be used very carefully. - ; Address space: - ; 0G-2G: We disable CONFIG_ARC_CACHE_PAGES. - ; 2G-3G: We disable D$ by setting this bit. - ; 3G-4G: D$ is disabled by architecture. - ; FMT are huge pages for user application reside at 0-2G. - ; Only FMT left as one who can use D$ where each such page got - ; disable/enable bit for cachability. - ; Programmer will use FMT pages for private data so cache coherency - ; would not be a problem. - ; First thing we invalidate D$ - sr 1, [ARC_REG_DC_IVDC] - sr HW_COMPLY_KRN_NOT_D_CACHED, [CTOP_AUX_HW_COMPLY] -#endif - -#ifdef CONFIG_SMP - ; We set logical cpuid to be used by GET_CPUID - ; We do not use physical cpuid since we want ids to be continious when - ; it comes to cpus on the same quad cluster. - ; This is useful for applications that used shared resources of a quad - ; cluster such SRAMS. - lr r3, [CTOP_AUX_CORE_ID] - sr r3, [CTOP_AUX_LOGIC_CORE_ID] - lr r3, [CTOP_AUX_CLUSTER_ID] - ; Set logical is acheived by swap of 2 middle bits of cluster id (4 bit) - ; r3 is used since we use short instruction and we need q-class reg - .short CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST - .word CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM - sr r3, [CTOP_AUX_LOGIC_CLUSTER_ID] -#endif - - j stext -END(res_service) diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h deleted file mode 100644 index 77712c5ffe84..000000000000 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ /dev/null @@ -1,208 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright(c) 2015 EZchip Technologies. 
- */ - -#ifndef _PLAT_EZNPS_CTOP_H -#define _PLAT_EZNPS_CTOP_H - -#ifndef CONFIG_ARC_PLAT_EZNPS -#error "Incorrect ctop.h include" -#endif - -#include <linux/bits.h> -#include <linux/types.h> -#include <soc/nps/common.h> - -/* core auxiliary registers */ -#ifdef __ASSEMBLY__ -#define CTOP_AUX_BASE (-0x800) -#else -#define CTOP_AUX_BASE 0xFFFFF800 -#endif - -#define CTOP_AUX_GLOBAL_ID (CTOP_AUX_BASE + 0x000) -#define CTOP_AUX_CLUSTER_ID (CTOP_AUX_BASE + 0x004) -#define CTOP_AUX_CORE_ID (CTOP_AUX_BASE + 0x008) -#define CTOP_AUX_THREAD_ID (CTOP_AUX_BASE + 0x00C) -#define CTOP_AUX_LOGIC_GLOBAL_ID (CTOP_AUX_BASE + 0x010) -#define CTOP_AUX_LOGIC_CLUSTER_ID (CTOP_AUX_BASE + 0x014) -#define CTOP_AUX_LOGIC_CORE_ID (CTOP_AUX_BASE + 0x018) -#define CTOP_AUX_MT_CTRL (CTOP_AUX_BASE + 0x020) -#define CTOP_AUX_HW_COMPLY (CTOP_AUX_BASE + 0x024) -#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) -#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) -#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) -#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) -#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) - -/* EZchip core instructions */ -#define CTOP_INST_HWSCHD_WFT_IE12 0x3E6F7344 -#define CTOP_INST_HWSCHD_OFF_R4 0x3C6F00BF -#define CTOP_INST_HWSCHD_RESTORE_R4 0x3E6F7103 -#define CTOP_INST_SCHD_RW 0x3E6F7004 -#define CTOP_INST_SCHD_RD 0x3E6F7084 -#define CTOP_INST_ASRI_0_R3 0x3B56003E -#define CTOP_INST_XEX_DI_R2_R2_R3 0x4A664C00 -#define CTOP_INST_EXC_DI_R2_R2_R3 0x4A664C01 -#define CTOP_INST_AADD_DI_R2_R2_R3 0x4A664C02 -#define CTOP_INST_AAND_DI_R2_R2_R3 0x4A664C04 -#define CTOP_INST_AOR_DI_R2_R2_R3 0x4A664C05 -#define CTOP_INST_AXOR_DI_R2_R2_R3 0x4A664C06 - -/* Do not use D$ for address in 2G-3G */ -#define HW_COMPLY_KRN_NOT_D_CACHED BIT(28) - -#define NPS_MSU_EN_CFG 0x80 -#define NPS_CRG_BLKID 0x480 -#define NPS_CRG_SYNC_BIT BIT(0) -#define NPS_GIM_BLKID 0x5C0 - -/* GIM registers and fields*/ -#define NPS_GIM_UART_LINE BIT(7) -#define NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE BIT(10) -#define NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE BIT(11) -#define NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE BIT(25) -#define NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE BIT(26) - -#ifndef __ASSEMBLY__ -/* Functional registers definition */ -struct nps_host_reg_mtm_cfg { - union { - struct { - u32 gen:1, gdis:1, clk_gate_dis:1, asb:1, - __reserved:9, nat:3, ten:16; - }; - u32 value; - }; -}; - -struct nps_host_reg_mtm_cpu_cfg { - union { - struct { - u32 csa:22, dmsid:6, __reserved:3, cs:1; - }; - u32 value; - }; -}; - -struct nps_host_reg_thr_init { - union { - struct { - u32 str:1, __reserved:27, thr_id:4; - }; - u32 value; - }; -}; - -struct nps_host_reg_thr_init_sts { - union { - struct { - u32 bsy:1, err:1, __reserved:26, thr_id:4; - }; - u32 value; - }; -}; - -struct nps_host_reg_msu_en_cfg { - union { - struct { - u32 __reserved1:11, - rtc_en:1, ipc_en:1, gim_1_en:1, - gim_0_en:1, ipi_en:1, buff_e_rls_bmuw:1, - buff_e_alc_bmuw:1, buff_i_rls_bmuw:1, buff_i_alc_bmuw:1, - buff_e_rls_bmue:1, buff_e_alc_bmue:1, buff_i_rls_bmue:1, - buff_i_alc_bmue:1, __reserved2:1, buff_e_pre_en:1, - buff_i_pre_en:1, pmuw_ja_en:1, pmue_ja_en:1, - pmuw_nj_en:1, pmue_nj_en:1, msu_en:1; - }; - u32 value; - }; -}; - -struct nps_host_reg_gim_p_int_dst { - union { - struct { - u32 int_out_en:1, __reserved1:4, - is:1, intm:2, __reserved2:4, - nid:4, __reserved3:4, cid:4, - __reserved4:4, tid:4; - }; - u32 value; - }; -}; - -/* AUX registers definition */ -struct nps_host_reg_aux_dpc { - union { - struct { - u32 ien:1, men:1, hen:1, reserved:29; - }; - u32 value; - }; -}; - -struct nps_host_reg_aux_udmc { 
- union { - struct { - u32 dcp:1, cme:1, __reserved:19, nat:3, - __reserved2:5, dcas:3; - }; - u32 value; - }; -}; - -struct nps_host_reg_aux_mt_ctrl { - union { - struct { - u32 mten:1, hsen:1, scd:1, sten:1, - st_cnt:8, __reserved:8, - hs_cnt:8, __reserved1:4; - }; - u32 value; - }; -}; - -struct nps_host_reg_aux_hw_comply { - union { - struct { - u32 me:1, le:1, te:1, knc:1, __reserved:28; - }; - u32 value; - }; -}; - -struct nps_host_reg_aux_lpc { - union { - struct { - u32 mep:1, __reserved:31; - }; - u32 value; - }; -}; - -/* CRG registers */ -#define REG_GEN_PURP_0 nps_host_reg_non_cl(NPS_CRG_BLKID, 0x1BF) - -/* GIM registers */ -#define REG_GIM_P_INT_EN_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x100) -#define REG_GIM_P_INT_POL_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x110) -#define REG_GIM_P_INT_SENS_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x114) -#define REG_GIM_P_INT_BLK_0 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x118) -#define REG_GIM_P_INT_DST_10 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x13A) -#define REG_GIM_P_INT_DST_11 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x13B) -#define REG_GIM_P_INT_DST_25 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x149) -#define REG_GIM_P_INT_DST_26 nps_host_reg_non_cl(NPS_GIM_BLKID, 0x14A) - -#else - -.macro GET_CPU_ID reg - lr \reg, [CTOP_AUX_LOGIC_GLOBAL_ID] -#ifndef CONFIG_EZNPS_MTM_EXT - lsr \reg, \reg, 4 -#endif -.endm - -#endif /* __ASSEMBLY__ */ - -#endif /* _PLAT_EZNPS_CTOP_H */ diff --git a/arch/arc/plat-eznps/include/plat/mtm.h b/arch/arc/plat-eznps/include/plat/mtm.h deleted file mode 100644 index 7c55becc891b..000000000000 --- a/arch/arc/plat-eznps/include/plat/mtm.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright(c) 2015 EZchip Technologies. - */ - -#ifndef _PLAT_EZNPS_MTM_H -#define _PLAT_EZNPS_MTM_H - -#include <plat/ctop.h> - -static inline void *nps_mtm_reg_addr(u32 cpu, u32 reg) -{ - struct global_id gid; - u32 core, blkid; - - gid.value = cpu; - core = gid.core; - blkid = (((core & 0x0C) << 2) | (core & 0x03)); - - return nps_host_reg(cpu, blkid, reg); -} - -#ifdef CONFIG_EZNPS_MTM_EXT -#define NPS_CPU_TO_THREAD_NUM(cpu) \ - ({ struct global_id gid; gid.value = cpu; gid.thread; }) - -/* MTM registers */ -#define MTM_CFG(cpu) nps_mtm_reg_addr(cpu, 0x81) -#define MTM_THR_INIT(cpu) nps_mtm_reg_addr(cpu, 0x92) -#define MTM_THR_INIT_STS(cpu) nps_mtm_reg_addr(cpu, 0x93) - -#define get_thread(map) map.thread -#define eznps_max_cpus 4096 -#define eznps_cpus_per_cluster 256 - -void mtm_enable_core(unsigned int cpu); -int mtm_enable_thread(int cpu); -#else /* !CONFIG_EZNPS_MTM_EXT */ - -#define get_thread(map) 0 -#define eznps_max_cpus 256 -#define eznps_cpus_per_cluster 16 -#define mtm_enable_core(cpu) -#define mtm_enable_thread(cpu) 1 -#define NPS_CPU_TO_THREAD_NUM(cpu) 0 - -#endif /* CONFIG_EZNPS_MTM_EXT */ - -#endif /* _PLAT_EZNPS_MTM_H */ diff --git a/arch/arc/plat-eznps/include/plat/smp.h b/arch/arc/plat-eznps/include/plat/smp.h deleted file mode 100644 index e433f118bdca..000000000000 --- a/arch/arc/plat-eznps/include/plat/smp.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright(c) 2015 EZchip Technologies. 
- */ - -#ifndef __PLAT_EZNPS_SMP_H -#define __PLAT_EZNPS_SMP_H - -#ifdef CONFIG_SMP - -extern void res_service(void); - -#endif /* CONFIG_SMP */ - -#endif diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c deleted file mode 100644 index 3dcf5a9e2976..000000000000 --- a/arch/arc/plat-eznps/mtm.c +++ /dev/null @@ -1,166 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright(c) 2015 EZchip Technologies. - */ - -#include <linux/smp.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/io.h> -#include <linux/log2.h> -#include <asm/arcregs.h> -#include <plat/mtm.h> -#include <plat/smp.h> - -#define MT_HS_CNT_MIN 0x01 -#define MT_HS_CNT_MAX 0xFF -#define MT_CTRL_ST_CNT 0xF -#define NPS_NUM_HW_THREADS 0x10 - -static int mtm_hs_ctr = MT_HS_CNT_MAX; - -#ifdef CONFIG_EZNPS_MEM_ERROR_ALIGN -int do_memory_error(unsigned long address, struct pt_regs *regs) -{ - die("Invalid Mem Access", regs, address); - - return 1; -} -#endif - -static void mtm_init_nat(int cpu) -{ - struct nps_host_reg_mtm_cfg mtm_cfg; - struct nps_host_reg_aux_udmc udmc; - int log_nat, nat = 0, i, t; - - /* Iterate core threads and update nat */ - for (i = 0, t = cpu; i < NPS_NUM_HW_THREADS; i++, t++) - nat += test_bit(t, cpumask_bits(cpu_possible_mask)); - - log_nat = ilog2(nat); - - udmc.value = read_aux_reg(CTOP_AUX_UDMC); - udmc.nat = log_nat; - write_aux_reg(CTOP_AUX_UDMC, udmc.value); - - mtm_cfg.value = ioread32be(MTM_CFG(cpu)); - mtm_cfg.nat = log_nat; - iowrite32be(mtm_cfg.value, MTM_CFG(cpu)); -} - -static void mtm_init_thread(int cpu) -{ - int i, tries = 5; - struct nps_host_reg_thr_init thr_init; - struct nps_host_reg_thr_init_sts thr_init_sts; - - /* Set thread init register */ - thr_init.value = 0; - iowrite32be(thr_init.value, MTM_THR_INIT(cpu)); - thr_init.thr_id = NPS_CPU_TO_THREAD_NUM(cpu); - thr_init.str = 1; - iowrite32be(thr_init.value, MTM_THR_INIT(cpu)); - - /* Poll till thread init is done */ - for (i = 0; i < tries; i++) { - thr_init_sts.value = ioread32be(MTM_THR_INIT_STS(cpu)); - if (thr_init_sts.thr_id == thr_init.thr_id) { - if (thr_init_sts.bsy) - continue; - else if (thr_init_sts.err) - pr_warn("Failed to thread init cpu %u\n", cpu); - break; - } - - pr_warn("Wrong thread id in thread init for cpu %u\n", cpu); - break; - } - - if (i == tries) - pr_warn("Got thread init timeout for cpu %u\n", cpu); -} - -int mtm_enable_thread(int cpu) -{ - struct nps_host_reg_mtm_cfg mtm_cfg; - - if (NPS_CPU_TO_THREAD_NUM(cpu) == 0) - return 1; - - /* Enable thread in mtm */ - mtm_cfg.value = ioread32be(MTM_CFG(cpu)); - mtm_cfg.ten |= (1 << (NPS_CPU_TO_THREAD_NUM(cpu))); - iowrite32be(mtm_cfg.value, MTM_CFG(cpu)); - - return 0; -} - -void mtm_enable_core(unsigned int cpu) -{ - int i; - struct nps_host_reg_aux_mt_ctrl mt_ctrl; - struct nps_host_reg_mtm_cfg mtm_cfg; - struct nps_host_reg_aux_dpc dpc; - - /* - * Initializing dpc register in each CPU. - * Overwriting the init value of the DPC - * register so that CMEM and FMT virtual address - * spaces are accessible, and Data Plane HW - * facilities are enabled. 
- */ - dpc.ien = 1; - dpc.men = 1; - write_aux_reg(CTOP_AUX_DPC, dpc.value); - - if (NPS_CPU_TO_THREAD_NUM(cpu) != 0) - return; - - /* Initialize Number of Active Threads */ - mtm_init_nat(cpu); - - /* Initialize mtm_cfg */ - mtm_cfg.value = ioread32be(MTM_CFG(cpu)); - mtm_cfg.ten = 1; - iowrite32be(mtm_cfg.value, MTM_CFG(cpu)); - - /* Initialize all other threads in core */ - for (i = 1; i < NPS_NUM_HW_THREADS; i++) - mtm_init_thread(cpu + i); - - - /* Enable HW schedule, stall counter, mtm */ - mt_ctrl.value = 0; - mt_ctrl.hsen = 1; - mt_ctrl.hs_cnt = mtm_hs_ctr; - mt_ctrl.mten = 1; - write_aux_reg(CTOP_AUX_MT_CTRL, mt_ctrl.value); - - /* - * HW scheduling mechanism will start working - * Only after call to instruction "schd.rw". - * cpu_relax() calls "schd.rw" instruction. - */ - cpu_relax(); -} - -/* Verify and set the value of the mtm hs counter */ -static int __init set_mtm_hs_ctr(char *ctr_str) -{ - int hs_ctr; - int ret; - - ret = kstrtoint(ctr_str, 0, &hs_ctr); - - if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) { - pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n", - hs_ctr, MT_HS_CNT_MIN, MT_HS_CNT_MAX); - return -EINVAL; - } - - mtm_hs_ctr = hs_ctr; - - return 0; -} -early_param("nps_mtm_hs_ctr", set_mtm_hs_ctr); diff --git a/arch/arc/plat-eznps/platform.c b/arch/arc/plat-eznps/platform.c deleted file mode 100644 index 6de2fe840043..000000000000 --- a/arch/arc/plat-eznps/platform.c +++ /dev/null @@ -1,91 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright(c) 2015 EZchip Technologies. - */ - -#include <linux/init.h> -#include <linux/io.h> -#include <asm/mach_desc.h> -#include <plat/mtm.h> - -static void __init eznps_configure_msu(void) -{ - int cpu; - struct nps_host_reg_msu_en_cfg msu_en_cfg = {.value = 0}; - - msu_en_cfg.msu_en = 1; - msu_en_cfg.ipi_en = 1; - msu_en_cfg.gim_0_en = 1; - msu_en_cfg.gim_1_en = 1; - - /* enable IPI and GIM messages on all clusters */ - for (cpu = 0 ; cpu < eznps_max_cpus; cpu += eznps_cpus_per_cluster) - iowrite32be(msu_en_cfg.value, - nps_host_reg(cpu, NPS_MSU_BLKID, NPS_MSU_EN_CFG)); -} - -static void __init eznps_configure_gim(void) -{ - u32 reg_value; - u32 gim_int_lines; - struct nps_host_reg_gim_p_int_dst gim_p_int_dst = {.value = 0}; - - gim_int_lines = NPS_GIM_UART_LINE; - gim_int_lines |= NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE; - gim_int_lines |= NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE; - gim_int_lines |= NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE; - gim_int_lines |= NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE; - - /* - * IRQ polarity - * low or high level - * negative or positive edge - */ - reg_value = ioread32be(REG_GIM_P_INT_POL_0); - reg_value &= ~gim_int_lines; - iowrite32be(reg_value, REG_GIM_P_INT_POL_0); - - /* IRQ type level or edge */ - reg_value = ioread32be(REG_GIM_P_INT_SENS_0); - reg_value |= NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE; - reg_value |= NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE; - iowrite32be(reg_value, REG_GIM_P_INT_SENS_0); - - /* - * GIM interrupt select type for - * dbg_lan TX and RX interrupts - * should be type 1 - * type 0 = IRQ line 6 - * type 1 = IRQ line 7 - */ - gim_p_int_dst.is = 1; - iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_10); - iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_11); - iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_25); - iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_26); - - /* - * CTOP IRQ lines should be defined - * as blocking in GIM - */ - iowrite32be(gim_int_lines, REG_GIM_P_INT_BLK_0); - - /* enable CTOP IRQ lines in GIM */ - iowrite32be(gim_int_lines, 
REG_GIM_P_INT_EN_0); -} - -static void __init eznps_early_init(void) -{ - eznps_configure_msu(); - eznps_configure_gim(); -} - -static const char *eznps_compat[] __initconst = { - "ezchip,arc-nps", - NULL, -}; - -MACHINE_START(NPS, "nps") - .dt_compat = eznps_compat, - .init_early = eznps_early_init, -MACHINE_END diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c deleted file mode 100644 index f119cb7de2ae..000000000000 --- a/arch/arc/plat-eznps/smp.c +++ /dev/null @@ -1,138 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright(c) 2015 EZchip Technologies. - */ - -#include <linux/smp.h> -#include <linux/of_fdt.h> -#include <linux/io.h> -#include <linux/irqdomain.h> -#include <asm/irq.h> -#include <plat/ctop.h> -#include <plat/smp.h> -#include <plat/mtm.h> - -#define NPS_DEFAULT_MSID 0x34 -#define NPS_MTM_CPU_CFG 0x90 - -static char smp_cpuinfo_buf[128] = {"Extn [EZNPS-SMP]\t: On\n"}; - -/* Get cpu map from device tree */ -static int __init eznps_get_map(const char *name, struct cpumask *cpumask) -{ - unsigned long dt_root = of_get_flat_dt_root(); - const char *buf; - - buf = of_get_flat_dt_prop(dt_root, name, NULL); - if (!buf) - return 1; - - cpulist_parse(buf, cpumask); - - return 0; -} - -/* Update board cpu maps */ -static void __init eznps_init_cpumasks(void) -{ - struct cpumask cpumask; - - if (eznps_get_map("present-cpus", &cpumask)) { - pr_err("Failed to get present-cpus from dtb"); - return; - } - init_cpu_present(&cpumask); - - if (eznps_get_map("possible-cpus", &cpumask)) { - pr_err("Failed to get possible-cpus from dtb"); - return; - } - init_cpu_possible(&cpumask); -} - -static void eznps_init_core(unsigned int cpu) -{ - u32 sync_value; - struct nps_host_reg_aux_hw_comply hw_comply; - struct nps_host_reg_aux_lpc lpc; - - if (NPS_CPU_TO_THREAD_NUM(cpu) != 0) - return; - - hw_comply.value = read_aux_reg(CTOP_AUX_HW_COMPLY); - hw_comply.me = 1; - hw_comply.le = 1; - hw_comply.te = 1; - write_aux_reg(CTOP_AUX_HW_COMPLY, hw_comply.value); - - /* Enable MMU clock */ - lpc.mep = 1; - write_aux_reg(CTOP_AUX_LPC, lpc.value); - - /* Boot CPU only */ - if (!cpu) { - /* Write to general purpose register in CRG */ - sync_value = ioread32be(REG_GEN_PURP_0); - sync_value |= NPS_CRG_SYNC_BIT; - iowrite32be(sync_value, REG_GEN_PURP_0); - } -} - -/* - * Master kick starting another CPU - */ -static void __init eznps_smp_wakeup_cpu(int cpu, unsigned long pc) -{ - struct nps_host_reg_mtm_cpu_cfg cpu_cfg; - - if (mtm_enable_thread(cpu) == 0) - return; - - /* set PC, dmsid, and start CPU */ - cpu_cfg.value = (u32)res_service; - cpu_cfg.dmsid = NPS_DEFAULT_MSID; - cpu_cfg.cs = 1; - iowrite32be(cpu_cfg.value, nps_mtm_reg_addr(cpu, NPS_MTM_CPU_CFG)); -} - -static void eznps_ipi_send(int cpu) -{ - struct global_id gid; - struct { - union { - struct { - u32 num:8, cluster:8, core:8, thread:8; - }; - u32 value; - }; - } ipi; - - gid.value = cpu; - ipi.thread = get_thread(gid); - ipi.core = gid.core; - ipi.cluster = nps_cluster_logic_to_phys(gid.cluster); - ipi.num = NPS_IPI_IRQ; - - __asm__ __volatile__( - " mov r3, %0\n" - " .word %1\n" - : - : "r"(ipi.value), "i"(CTOP_INST_ASRI_0_R3) - : "r3"); -} - -static void eznps_init_per_cpu(int cpu) -{ - smp_ipi_irq_setup(cpu, NPS_IPI_IRQ); - - eznps_init_core(cpu); - mtm_enable_core(cpu); -} - -struct plat_smp_ops plat_smp_ops = { - .info = smp_cpuinfo_buf, - .init_early_smp = eznps_init_cpumasks, - .cpu_kick = eznps_smp_wakeup_cpu, - .ipi_send = eznps_ipi_send, - .init_per_cpu = eznps_init_per_cpu, -}; diff --git 
a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index ce8101834518..6b5c54576f54 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK select ARC_HAS_ACCL_REGS select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK + select RESET_CONTROLLER select RESET_HSDK select HAVE_PCI diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e00d94b16658..3996b6572c3a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -49,6 +49,7 @@ config ARM select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI select GENERIC_CLOCKEVENTS_BROADCAST if SMP + select GENERIC_IRQ_IPI if SMP select GENERIC_CPU_AUTOPROBE select GENERIC_EARLY_IOREMAP select GENERIC_IDLE_POLL_SETUP @@ -67,6 +68,7 @@ config ARM select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_MMAP_RND_BITS if MMU + select HAVE_ARCH_SECCOMP select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK @@ -83,7 +85,7 @@ config ARM select HAVE_FAST_GUP if ARM_LPAE select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG - select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000) + select HAVE_FUNCTION_TRACER if !XIP_KERNEL select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) select HAVE_IDE if PCI || ISA || PCMCIA @@ -1617,20 +1619,6 @@ config UACCESS_WITH_MEMCPY However, if the CPU data cache is using a write-allocate mode, this option is unlikely to provide any performance gain. -config SECCOMP - bool - prompt "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - config PARAVIRT bool "Enable paravirtualization code" help diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 80000a66a4e3..87912e5c2256 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1546,6 +1546,17 @@ config DEBUG_SIRFSOC_UART bool depends on ARCH_SIRF +config DEBUG_UART_FLOW_CONTROL + bool "Enable flow control (CTS) for the debug UART" + depends on DEBUG_LL + default y if ARCH_EBSA110 || DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC + help + Some UART ports are connected to terminals that will use modem + control signals to indicate whether they are ready to receive text. + In practice this means that the terminal is asserting the special + control signal CTS (Clear To Send). If your debug UART supports + this and your debug terminal will require it, enable this option. + config DEBUG_LL_INCLUDE string default "debug/sa1100.S" if DEBUG_SA1100 @@ -1893,11 +1904,6 @@ config DEBUG_UART_8250_PALMCHIP except for having a different register layout. Say Y here if the debug UART is of this type. 
-config DEBUG_UART_8250_FLOW_CONTROL - bool "Enable flow control for 8250 UART" - depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250 - default y if ARCH_EBSA110 || DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC - config DEBUG_UNCOMPRESS bool "Enable decompressor debugging via DEBUG_LL output" depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 4e877354515f..c4301437ca72 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -16,6 +16,10 @@ LDFLAGS_vmlinux += --be8 KBUILD_LDFLAGS_MODULE += --be8 endif +# We never want expected sections to be placed heuristically by the +# linker. All sections should be explicitly named in the linker script. +LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) + ifeq ($(CONFIG_ARM_MODULE_PLTS),y) KBUILD_LDS_MODULE += $(srctree)/arch/arm/kernel/module.lds endif @@ -139,6 +143,9 @@ head-y := arch/arm/kernel/head$(MMUEXT).o # Text offset. This list is sorted numerically by address in order to # provide a means to avoid/resolve conflicts in multi-arch kernels. +# Note: the 32kB below this value is reserved for use by the kernel +# during boot, and this offset is critical to the functioning of +# kexec-tools. textofs-y := 0x00008000 # We don't want the htc bootloader to corrupt kernel during resume textofs-$(CONFIG_PM_H1940) := 0x00108000 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index b1147b7f2c8d..47f001ca5499 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -7,11 +7,11 @@ OBJS = -AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) HEAD = head.o OBJS += misc.o decompress.o ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y) OBJS += debug.o +AFLAGS_head.o += -DDEBUG endif FONTC = $(srctree)/lib/fonts/font_acorn_8x8.c @@ -68,7 +68,12 @@ ZTEXTADDR := 0 ZBSSADDR := ALIGN(8) endif +MALLOC_SIZE := 65536 + +AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) -DMALLOC_SIZE=$(MALLOC_SIZE) CPPFLAGS_vmlinux.lds := -DTEXT_START="$(ZTEXTADDR)" -DBSS_START="$(ZBSSADDR)" +CPPFLAGS_vmlinux.lds += -DTEXT_OFFSET="$(TEXT_OFFSET)" +CPPFLAGS_vmlinux.lds += -DMALLOC_SIZE="$(MALLOC_SIZE)" compress-$(CONFIG_KERNEL_GZIP) = gzip compress-$(CONFIG_KERNEL_LZO) = lzo @@ -123,6 +128,8 @@ endif LDFLAGS_vmlinux += --no-undefined # Delete all temporary local symbols LDFLAGS_vmlinux += -X +# Report orphan sections +LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) # Next argument is a linker script LDFLAGS_vmlinux += -T diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S index 6bf2917a4621..fac40a717fcf 100644 --- a/arch/arm/boot/compressed/debug.S +++ b/arch/arm/boot/compressed/debug.S @@ -8,7 +8,10 @@ ENTRY(putc) addruart r1, r2, r3 - waituart r3, r1 +#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL + waituartcts r3, r1 +#endif + waituarttxrdy r3, r1 senduart r0, r1 busyuart r3, r1 mov pc, lr diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 434a16982e34..2e04ec5b5446 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -28,19 +28,19 @@ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) .macro loadsp, rb, tmp1, tmp2 .endm - .macro writeb, ch, rb + .macro writeb, ch, rb, tmp mcr p14, 0, \ch, c0, c5, 0 .endm #elif defined(CONFIG_CPU_XSCALE) .macro loadsp, rb, tmp1, tmp2 .endm - .macro writeb, ch, rb + .macro writeb, ch, rb, tmp mcr p14, 0, \ch, c8, c0, 0 .endm #else .macro loadsp, rb, tmp1, tmp2 .endm - .macro writeb, ch, rb + .macro 
writeb, ch, rb, tmp mcr p14, 0, \ch, c1, c0, 0 .endm #endif @@ -49,8 +49,13 @@ #include CONFIG_DEBUG_LL_INCLUDE - .macro writeb, ch, rb + .macro writeb, ch, rb, tmp +#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL + waituartcts \tmp, \rb +#endif + waituarttxrdy \tmp, \rb senduart \ch, \rb + busyuart \tmp, \rb .endm #if defined(CONFIG_ARCH_SA1100) @@ -81,42 +86,11 @@ bl phex .endm - .macro debug_reloc_start -#ifdef DEBUG - kputc #'\n' - kphex r6, 8 /* processor id */ - kputc #':' - kphex r7, 8 /* architecture id */ -#ifdef CONFIG_CPU_CP15 - kputc #':' - mrc p15, 0, r0, c1, c0 - kphex r0, 8 /* control reg */ -#endif - kputc #'\n' - kphex r5, 8 /* decompressed kernel start */ - kputc #'-' - kphex r9, 8 /* decompressed kernel end */ - kputc #'>' - kphex r4, 8 /* kernel execution address */ - kputc #'\n' -#endif - .endm - - .macro debug_reloc_end -#ifdef DEBUG - kphex r5, 8 /* end of kernel */ - kputc #'\n' - mov r0, r4 - bl memdump /* dump 256 bytes at start of kernel */ -#endif - .endm - /* * Debug kernel copy by printing the memory addresses involved */ .macro dbgkc, begin, end, cbegin, cend #ifdef DEBUG - kputc #'\n' kputc #'C' kputc #':' kputc #'0' @@ -136,7 +110,28 @@ kputc #'x' kphex \cend, 8 /* End of kernel copy */ kputc #'\n' - kputc #'\r' +#endif + .endm + + /* + * Debug print of the final appended DTB location + */ + .macro dbgadtb, begin, end +#ifdef DEBUG + kputc #'D' + kputc #'T' + kputc #'B' + kputc #':' + kputc #'0' + kputc #'x' + kphex \begin, 8 /* Start of appended DTB */ + kputc #' ' + kputc #'(' + kputc #'0' + kputc #'x' + kphex \end, 8 /* End of appended DTB */ + kputc #')' + kputc #'\n' #endif .endm @@ -303,7 +298,7 @@ restart: adr r0, LC1 #ifndef CONFIG_ZBOOT_ROM /* malloc space is above the relocated stack (64k max) */ - add r10, sp, #0x10000 + add r10, sp, #MALLOC_SIZE #else /* * With ZBOOT_ROM the bss/stack is non relocatable, @@ -357,6 +352,7 @@ restart: adr r0, LC1 mov r5, r5, ror #8 eor r5, r5, r1, lsr #8 #endif + dbgadtb r6, r5 /* 50% DTB growth should be good enough */ add r5, r5, r5, lsr #1 /* preserve 64-bit alignment */ @@ -614,7 +610,7 @@ not_relocated: mov r0, #0 */ mov r0, r4 mov r1, sp @ malloc space above stack - add r2, sp, #0x10000 @ 64k max + add r2, sp, #MALLOC_SIZE @ 64k max mov r3, r7 bl decompress_kernel @@ -1356,7 +1352,7 @@ puts: loadsp r3, r2, r1 1: ldrb r2, [r0], #1 teq r2, #0 moveq pc, lr -2: writeb r2, r3 +2: writeb r2, r3, r1 mov r1, #0x00020000 3: subs r1, r1, #1 bne 3b diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S index 09ac33f52814..1bcb68ac4b01 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.S +++ b/arch/arm/boot/compressed/vmlinux.lds.S @@ -2,6 +2,7 @@ /* * Copyright (C) 2000 Russell King */ +#include <asm/vmlinux.lds.h> #ifdef CONFIG_CPU_ENDIAN_BE8 #define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \ @@ -17,8 +18,11 @@ ENTRY(_start) SECTIONS { /DISCARD/ : { + COMMON_DISCARDS *(.ARM.exidx*) *(.ARM.extab*) + *(.note.*) + *(.rel.*) /* * Discard any r/w data - this produces a link error if we have any, * which is required for PIC decompression. 
Local data generates @@ -36,16 +40,16 @@ SECTIONS *(.start) *(.text) *(.text.*) - *(.gnu.warning) - *(.glue_7t) - *(.glue_7) + ARM_STUBS_TEXT } .table : ALIGN(4) { _table_start = .; - LONG(ZIMAGE_MAGIC(4)) + LONG(ZIMAGE_MAGIC(6)) LONG(ZIMAGE_MAGIC(0x5a534c4b)) LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start)) LONG(ZIMAGE_MAGIC(_kernel_bss_size)) + LONG(ZIMAGE_MAGIC(TEXT_OFFSET)) + LONG(ZIMAGE_MAGIC(MALLOC_SIZE)) LONG(0) _table_end = .; } @@ -128,12 +132,10 @@ SECTIONS PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data)); PROVIDE(__pecoff_end = ALIGN(512)); - .stab 0 : { *(.stab) } - .stabstr 0 : { *(.stabstr) } - .stab.excl 0 : { *(.stab.excl) } - .stab.exclstr 0 : { *(.stab.exclstr) } - .stab.index 0 : { *(.stab.index) } - .stab.indexstr 0 : { *(.stab.indexstr) } - .comment 0 : { *(.comment) } + STABS_DEBUG + DWARF_DEBUG + ARM_DETAILS + + ARM_ASSERTS } ASSERT(_edata_real == _edata, "error: zImage file size is incorrect"); diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts index ca15ff8fea18..eae28b82c7fd 100644 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts @@ -563,6 +563,12 @@ atmel,pins = <AT91_PIOD 18 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; }; }; + + usb0 { + pinctrl_usba_vbus: usba_vbus { + atmel,pins = <AT91_PIOB 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; + }; + }; }; /* pinctrl */ &pmc { @@ -666,6 +672,13 @@ }; }; +&usb0 { + atmel,vbus-gpio = <&pioB 16 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usba_vbus>; + status = "okay"; +}; + &usb1 { num-ports = <3>; atmel,vbus-gpio = <0 diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts index 222d7825e1ab..e94244a215af 100644 --- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts +++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts @@ -4,6 +4,8 @@ #include "bcm2835-rpi.dtsi" #include "bcm283x-rpi-usb-peripheral.dtsi" +#include <dt-bindings/reset/raspberrypi,firmware-reset.h> + / { compatible = "raspberrypi,4-model-b", "brcm,bcm2711"; model = "Raspberry Pi 4 Model B"; @@ -88,6 +90,11 @@ ""; status = "okay"; }; + + reset: reset { + compatible = "raspberrypi,firmware-reset"; + #reset-cells = <1>; + }; }; &gpio { @@ -207,6 +214,21 @@ }; }; +&pcie0 { + pci@1,0 { + #address-cells = <3>; + #size-cells = <2>; + ranges; + + reg = <0 0 0 0 0>; + + usb@1,0 { + reg = <0x10000 0 0 0 0>; + resets = <&reset RASPBERRYPI_FIRMWARE_RESET_ID_USB>; + }; + }; +}; + /* uart0 communicates with the BT module */ &uart0 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/r8a7742.dtsi b/arch/arm/boot/dts/r8a7742.dtsi index 9743b4242801..0240d017c90d 100644 --- a/arch/arm/boot/dts/r8a7742.dtsi +++ b/arch/arm/boot/dts/r8a7742.dtsi @@ -386,6 +386,54 @@ #thermal-sensor-cells = <0>; }; + ipmmu_sy0: iommu@e6280000 { + compatible = "renesas,ipmmu-r8a7742", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6280000 0 0x1000>; + interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>; + #iommu-cells = <1>; + status = "disabled"; + }; + + ipmmu_sy1: iommu@e6290000 { + compatible = "renesas,ipmmu-r8a7742", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6290000 0 0x1000>; + interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>; + #iommu-cells = <1>; + status = "disabled"; + }; + + ipmmu_ds: iommu@e6740000 { + compatible = "renesas,ipmmu-r8a7742", + "renesas,ipmmu-vmsa"; + reg = <0 0xe6740000 0 0x1000>; + interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>; + #iommu-cells = <1>; + status = "disabled"; + }; + + ipmmu_mp: 
iommu@ec680000 { + compatible = "renesas,ipmmu-r8a7742", + "renesas,ipmmu-vmsa"; + reg = <0 0xec680000 0 0x1000>; + interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; + #iommu-cells = <1>; + status = "disabled"; + }; + + ipmmu_mx: iommu@fe951000 { + compatible = "renesas,ipmmu-r8a7742", + "renesas,ipmmu-vmsa"; + reg = <0 0xfe951000 0 0x1000>; + interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>; + #iommu-cells = <1>; + status = "disabled"; + }; + icram0: sram@e63a0000 { compatible = "mmio-sram"; reg = <0 0xe63a0000 0 0x12000>; diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi index d10843da4a85..42f76212d472 100644 --- a/arch/arm/boot/dts/sam9x60.dtsi +++ b/arch/arm/boot/dts/sam9x60.dtsi @@ -69,6 +69,20 @@ #size-cells = <1>; ranges; + usb0: gadget@500000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "microchip,sam9x60-udc"; + reg = <0x00500000 0x100000 + 0xf803c000 0x400>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH 2>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 23>, <&pmc PMC_TYPE_CORE PMC_UTMI>; + clock-names = "pclk", "hclk"; + assigned-clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>; + assigned-clock-rates = <480000000>; + status = "disabled"; + }; + usb1: ohci@600000 { compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00600000 0x100000>; diff --git a/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi b/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi index dce85d39480d..6f3e8c5fc5f0 100644 --- a/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi +++ b/arch/arm/boot/dts/tegra20-cpu-opp-microvolt.dtsi @@ -26,14 +26,6 @@ opp-microvolt = <800000 800000 1125000>; }; - opp@456000000,800,2,2 { - opp-microvolt = <800000 800000 1125000>; - }; - - opp@456000000,800,3,2 { - opp-microvolt = <800000 800000 1125000>; - }; - opp@456000000,825 { opp-microvolt = <825000 825000 1125000>; }; @@ -46,10 +38,6 @@ opp-microvolt = <800000 800000 1125000>; }; - opp@608000000,800,3,2 { - opp-microvolt = <800000 800000 1125000>; - }; - opp@608000000,825 { opp-microvolt = <825000 825000 1125000>; }; @@ -78,18 +66,6 @@ opp-microvolt = <875000 875000 1125000>; }; - opp@760000000,875,1,1 { - opp-microvolt = <875000 875000 1125000>; - }; - - opp@760000000,875,0,2 { - opp-microvolt = <875000 875000 1125000>; - }; - - opp@760000000,875,1,2 { - opp-microvolt = <875000 875000 1125000>; - }; - opp@760000000,900 { opp-microvolt = <900000 900000 1125000>; }; @@ -134,14 +110,6 @@ opp-microvolt = <950000 950000 1125000>; }; - opp@912000000,950,0,2 { - opp-microvolt = <950000 950000 1125000>; - }; - - opp@912000000,950,2,2 { - opp-microvolt = <950000 950000 1125000>; - }; - opp@912000000,1000 { opp-microvolt = <1000000 1000000 1125000>; }; @@ -170,10 +138,6 @@ opp-microvolt = <1000000 1000000 1125000>; }; - opp@1000000000,1000,0,2 { - opp-microvolt = <1000000 1000000 1125000>; - }; - opp@1000000000,1025 { opp-microvolt = <1025000 1025000 1125000>; }; diff --git a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi index 9b8fedb57a1b..702a635e88e7 100644 --- a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi +++ b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi @@ -37,19 +37,8 @@ opp@456000000,800 { clock-latency-ns = <400000>; - opp-supported-hw = <0x03 0x0006>; - opp-hz = /bits/ 64 <456000000>; - }; - - opp@456000000,800,2,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x04 0x0004>; - opp-hz = /bits/ 64 <456000000>; - }; - - opp@456000000,800,3,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x08 0x0004>; + opp-supported-hw = <0x03 0x0006>, 
<0x04 0x0004>, + <0x08 0x0004>; opp-hz = /bits/ 64 <456000000>; }; @@ -67,13 +56,7 @@ opp@608000000,800 { clock-latency-ns = <400000>; - opp-supported-hw = <0x04 0x0006>; - opp-hz = /bits/ 64 <608000000>; - }; - - opp@608000000,800,3,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x08 0x0004>; + opp-supported-hw = <0x04 0x0006>, <0x08 0x0004>; opp-hz = /bits/ 64 <608000000>; }; @@ -115,25 +98,8 @@ opp@760000000,875 { clock-latency-ns = <400000>; - opp-supported-hw = <0x04 0x0001>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,875,1,1 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x02 0x0002>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,875,0,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x01 0x0004>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,875,1,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x02 0x0004>; + opp-supported-hw = <0x04 0x0001>, <0x02 0x0002>, + <0x01 0x0004>, <0x02 0x0004>; opp-hz = /bits/ 64 <760000000>; }; @@ -199,19 +165,8 @@ opp@912000000,950 { clock-latency-ns = <400000>; - opp-supported-hw = <0x02 0x0006>; - opp-hz = /bits/ 64 <912000000>; - }; - - opp@912000000,950,0,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x01 0x0004>; - opp-hz = /bits/ 64 <912000000>; - }; - - opp@912000000,950,2,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x04 0x0004>; + opp-supported-hw = <0x02 0x0006>, <0x01 0x0004>, + <0x04 0x0004>; opp-hz = /bits/ 64 <912000000>; }; @@ -253,13 +208,7 @@ opp@1000000000,1000 { clock-latency-ns = <400000>; - opp-supported-hw = <0x02 0x0006>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,1000,0,2 { - clock-latency-ns = <400000>; - opp-supported-hw = <0x01 0x0004>; + opp-supported-hw = <0x02 0x0006>, <0x01 0x0004>; opp-hz = /bits/ 64 <1000000000>; }; diff --git a/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi b/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi index d682f7437146..1be715d2a442 100644 --- a/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi +++ b/arch/arm/boot/dts/tegra30-cpu-opp-microvolt.dtsi @@ -74,22 +74,6 @@ opp-microvolt = <850000 850000 1250000>; }; - opp@475000000,850,0,1 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@475000000,850,0,4 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@475000000,850,0,7 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@475000000,850,0,8 { - opp-microvolt = <850000 850000 1250000>; - }; - opp@608000000,850 { opp-microvolt = <850000 850000 1250000>; }; @@ -106,62 +90,6 @@ opp-microvolt = <850000 850000 1250000>; }; - opp@640000000,850,1,1 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,2,1 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,3,1 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,1,4 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,2,4 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,3,4 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,1,7 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,2,7 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,3,7 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,4,7 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,1,8 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,2,8 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@640000000,850,3,8 { - opp-microvolt = <850000 850000 
1250000>; - }; - - opp@640000000,850,4,8 { - opp-microvolt = <850000 850000 1250000>; - }; - opp@640000000,900 { opp-microvolt = <900000 900000 1250000>; }; @@ -170,94 +98,10 @@ opp-microvolt = <850000 850000 1250000>; }; - opp@760000000,850,3,1 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,3,2 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,3,3 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,3,4 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,3,7 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,4,7 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,3,8 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,4,8 { - opp-microvolt = <850000 850000 1250000>; - }; - - opp@760000000,850,0,10 { - opp-microvolt = <850000 850000 1250000>; - }; - opp@760000000,900 { opp-microvolt = <900000 900000 1250000>; }; - opp@760000000,900,1,1 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,2,1 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,1,2 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,2,2 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,1,3 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,2,3 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,1,4 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,2,4 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,1,7 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,2,7 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,1,8 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@760000000,900,2,8 { - opp-microvolt = <900000 900000 1250000>; - }; - opp@760000000,912 { opp-microvolt = <912000 912000 1250000>; }; @@ -282,90 +126,10 @@ opp-microvolt = <900000 900000 1250000>; }; - opp@860000000,900,2,1 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,3,1 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,2,2 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,3,2 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,2,3 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,3,3 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,2,4 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,3,4 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,2,7 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,3,7 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,4,7 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,2,8 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,3,8 { - opp-microvolt = <900000 900000 1250000>; - }; - - opp@860000000,900,4,8 { - opp-microvolt = <900000 900000 1250000>; - }; - opp@860000000,975 { opp-microvolt = <975000 975000 1250000>; }; - opp@860000000,975,1,1 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@860000000,975,1,2 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@860000000,975,1,3 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@860000000,975,1,4 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@860000000,975,1,7 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@860000000,975,1,8 { - 
opp-microvolt = <975000 975000 1250000>; - }; - opp@860000000,1000 { opp-microvolt = <1000000 1000000 1250000>; }; @@ -382,62 +146,6 @@ opp-microvolt = <975000 975000 1250000>; }; - opp@1000000000,975,2,1 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,3,1 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,2,2 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,3,2 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,2,3 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,3,3 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,2,4 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,3,4 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,2,7 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,3,7 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,4,7 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,2,8 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,3,8 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1000000000,975,4,8 { - opp-microvolt = <975000 975000 1250000>; - }; - opp@1000000000,1000 { opp-microvolt = <1000000 1000000 1250000>; }; @@ -454,66 +162,10 @@ opp-microvolt = <975000 975000 1250000>; }; - opp@1100000000,975,3,1 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1100000000,975,3,2 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1100000000,975,3,3 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1100000000,975,3,4 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1100000000,975,3,7 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1100000000,975,4,7 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1100000000,975,3,8 { - opp-microvolt = <975000 975000 1250000>; - }; - - opp@1100000000,975,4,8 { - opp-microvolt = <975000 975000 1250000>; - }; - opp@1100000000,1000 { opp-microvolt = <1000000 1000000 1250000>; }; - opp@1100000000,1000,2,1 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1100000000,1000,2,2 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1100000000,1000,2,3 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1100000000,1000,2,4 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1100000000,1000,2,7 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1100000000,1000,2,8 { - opp-microvolt = <1000000 1000000 1250000>; - }; - opp@1100000000,1025 { opp-microvolt = <1025000 1025000 1250000>; }; @@ -534,66 +186,10 @@ opp-microvolt = <1000000 1000000 1250000>; }; - opp@1200000000,1000,3,1 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1200000000,1000,3,2 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1200000000,1000,3,3 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1200000000,1000,3,4 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1200000000,1000,3,7 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1200000000,1000,4,7 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1200000000,1000,3,8 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1200000000,1000,4,8 { - opp-microvolt = <1000000 1000000 1250000>; - }; - opp@1200000000,1025 { opp-microvolt = <1025000 1025000 1250000>; }; - opp@1200000000,1025,2,1 { - opp-microvolt = <1025000 1025000 1250000>; - }; - - opp@1200000000,1025,2,2 { - opp-microvolt = <1025000 1025000 1250000>; - 
}; - - opp@1200000000,1025,2,3 { - opp-microvolt = <1025000 1025000 1250000>; - }; - - opp@1200000000,1025,2,4 { - opp-microvolt = <1025000 1025000 1250000>; - }; - - opp@1200000000,1025,2,7 { - opp-microvolt = <1025000 1025000 1250000>; - }; - - opp@1200000000,1025,2,8 { - opp-microvolt = <1025000 1025000 1250000>; - }; - opp@1200000000,1050 { opp-microvolt = <1050000 1050000 1250000>; }; @@ -610,90 +206,18 @@ opp-microvolt = <1000000 1000000 1250000>; }; - opp@1300000000,1000,4,7 { - opp-microvolt = <1000000 1000000 1250000>; - }; - - opp@1300000000,1000,4,8 { - opp-microvolt = <1000000 1000000 1250000>; - }; - opp@1300000000,1025 { opp-microvolt = <1025000 1025000 1250000>; }; - opp@1300000000,1025,3,1 { - opp-microvolt = <1025000 1025000 1250000>; - }; - - opp@1300000000,1025,3,7 { - opp-microvolt = <1025000 1025000 1250000>; - }; - - opp@1300000000,1025,3,8 { - opp-microvolt = <1025000 1025000 1250000>; - }; - opp@1300000000,1050 { opp-microvolt = <1050000 1050000 1250000>; }; - opp@1300000000,1050,2,1 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,3,2 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,3,3 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,3,4 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,3,5 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,3,6 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,2,7 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,2,8 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,3,12 { - opp-microvolt = <1050000 1050000 1250000>; - }; - - opp@1300000000,1050,3,13 { - opp-microvolt = <1050000 1050000 1250000>; - }; - opp@1300000000,1075 { opp-microvolt = <1075000 1075000 1250000>; }; - opp@1300000000,1075,2,2 { - opp-microvolt = <1075000 1075000 1250000>; - }; - - opp@1300000000,1075,2,3 { - opp-microvolt = <1075000 1075000 1250000>; - }; - - opp@1300000000,1075,2,4 { - opp-microvolt = <1075000 1075000 1250000>; - }; - opp@1300000000,1100 { opp-microvolt = <1100000 1100000 1250000>; }; @@ -722,10 +246,6 @@ opp-microvolt = <1150000 1150000 1250000>; }; - opp@1400000000,1150,2,4 { - opp-microvolt = <1150000 1150000 1250000>; - }; - opp@1400000000,1175 { opp-microvolt = <1175000 1175000 1250000>; }; @@ -738,42 +258,10 @@ opp-microvolt = <1125000 1125000 1250000>; }; - opp@1500000000,1125,4,5 { - opp-microvolt = <1125000 1125000 1250000>; - }; - - opp@1500000000,1125,4,6 { - opp-microvolt = <1125000 1125000 1250000>; - }; - - opp@1500000000,1125,4,12 { - opp-microvolt = <1125000 1125000 1250000>; - }; - - opp@1500000000,1125,4,13 { - opp-microvolt = <1125000 1125000 1250000>; - }; - opp@1500000000,1150 { opp-microvolt = <1150000 1150000 1250000>; }; - opp@1500000000,1150,3,5 { - opp-microvolt = <1150000 1150000 1250000>; - }; - - opp@1500000000,1150,3,6 { - opp-microvolt = <1150000 1150000 1250000>; - }; - - opp@1500000000,1150,3,12 { - opp-microvolt = <1150000 1150000 1250000>; - }; - - opp@1500000000,1150,3,13 { - opp-microvolt = <1150000 1150000 1250000>; - }; - opp@1500000000,1200 { opp-microvolt = <1200000 1200000 1250000>; }; diff --git a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi index 8e434f6713cd..0f7135006d19 100644 --- a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi +++ b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi @@ -109,31 +109,9 @@ opp@475000000,850 { clock-latency-ns = <100000>; - opp-supported-hw = 
<0x0F 0x0001>; - opp-hz = /bits/ 64 <475000000>; - }; - - opp@475000000,850,0,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0002>; - opp-hz = /bits/ 64 <475000000>; - }; - - opp@475000000,850,0,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0010>; - opp-hz = /bits/ 64 <475000000>; - }; - - opp@475000000,850,0,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0080>; - opp-hz = /bits/ 64 <475000000>; - }; - - opp@475000000,850,0,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0100>; + opp-supported-hw = <0x0F 0x0001>, <0x01 0x0002>, + <0x01 0x0010>, <0x01 0x0080>, + <0x01 0x0100>; opp-hz = /bits/ 64 <475000000>; }; @@ -157,91 +135,14 @@ opp@640000000,850 { clock-latency-ns = <100000>; - opp-supported-hw = <0x0F 0x0001>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,1,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0002>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,2,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0002>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,3,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0002>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,1,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0010>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,3,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,1,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0080>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,2,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0080>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,3,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0080>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,4,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0080>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,1,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0100>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,2,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0100>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,3,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0100>; - opp-hz = /bits/ 64 <640000000>; - }; - - opp@640000000,850,4,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0100>; + opp-supported-hw = <0x0F 0x0001>, <0x02 0x0002>, + <0x04 0x0002>, <0x08 0x0002>, + <0x02 0x0010>, <0x04 0x0010>, + <0x08 0x0010>, <0x02 0x0080>, + <0x04 0x0080>, <0x08 0x0080>, + <0x10 0x0080>, <0x02 0x0100>, + <0x04 0x0100>, <0x08 0x0100>, + <0x10 0x0100>; opp-hz = /bits/ 64 <640000000>; }; @@ -253,139 +154,23 @@ opp@760000000,850 { clock-latency-ns = <100000>; - opp-supported-hw = <0x1E 0x3461>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,3,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0002>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,3,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0004>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,3,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0008>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,3,4 { - 
clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,3,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0080>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,4,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0080>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,3,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0100>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,4,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0100>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,850,0,10 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0400>; + opp-supported-hw = <0x1E 0x3461>, <0x08 0x0002>, + <0x08 0x0004>, <0x08 0x0008>, + <0x08 0x0010>, <0x08 0x0080>, + <0x10 0x0080>, <0x08 0x0100>, + <0x10 0x0100>, <0x01 0x0400>; opp-hz = /bits/ 64 <760000000>; }; opp@760000000,900 { clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0001>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,1,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0002>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,2,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0002>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,1,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0004>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,2,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0004>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,1,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0008>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,2,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0008>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,1,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0010>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,1,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0080>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,2,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0080>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,1,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0100>; - opp-hz = /bits/ 64 <760000000>; - }; - - opp@760000000,900,2,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0100>; + opp-supported-hw = <0x01 0x0001>, <0x02 0x0002>, + <0x04 0x0002>, <0x02 0x0004>, + <0x04 0x0004>, <0x02 0x0008>, + <0x04 0x0008>, <0x02 0x0010>, + <0x04 0x0010>, <0x02 0x0080>, + <0x04 0x0080>, <0x02 0x0100>, + <0x04 0x0100>; opp-hz = /bits/ 64 <760000000>; }; @@ -421,133 +206,23 @@ opp@860000000,900 { clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0001>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,2,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0002>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,3,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0002>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,2,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0004>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,3,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0004>; - 
opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,2,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0008>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,3,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0008>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,3,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,2,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0080>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,3,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0080>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,4,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0080>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,2,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0100>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,3,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0100>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,900,4,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0100>; + opp-supported-hw = <0x02 0x0001>, <0x04 0x0002>, + <0x08 0x0002>, <0x04 0x0004>, + <0x08 0x0004>, <0x04 0x0008>, + <0x08 0x0008>, <0x04 0x0010>, + <0x08 0x0010>, <0x04 0x0080>, + <0x08 0x0080>, <0x10 0x0080>, + <0x04 0x0100>, <0x08 0x0100>, + <0x10 0x0100>; opp-hz = /bits/ 64 <860000000>; }; opp@860000000,975 { clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0001>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,975,1,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0002>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,975,1,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0004>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,975,1,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0008>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,975,1,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0010>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,975,1,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0080>; - opp-hz = /bits/ 64 <860000000>; - }; - - opp@860000000,975,1,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0100>; + opp-supported-hw = <0x01 0x0001>, <0x02 0x0002>, + <0x02 0x0004>, <0x02 0x0008>, + <0x02 0x0010>, <0x02 0x0080>, + <0x02 0x0100>; opp-hz = /bits/ 64 <860000000>; }; @@ -571,91 +246,14 @@ opp@1000000000,975 { clock-latency-ns = <100000>; - opp-supported-hw = <0x03 0x0001>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,2,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0002>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,3,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0002>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,2,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0004>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,3,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0004>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,2,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0008>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,3,3 { - 
clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0008>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,3,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,2,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0080>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,3,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0080>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,4,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0080>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,2,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0100>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,3,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0100>; - opp-hz = /bits/ 64 <1000000000>; - }; - - opp@1000000000,975,4,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0100>; + opp-supported-hw = <0x03 0x0001>, <0x04 0x0002>, + <0x08 0x0002>, <0x04 0x0004>, + <0x08 0x0004>, <0x04 0x0008>, + <0x08 0x0008>, <0x04 0x0010>, + <0x08 0x0010>, <0x04 0x0080>, + <0x08 0x0080>, <0x10 0x0080>, + <0x04 0x0100>, <0x08 0x0100>, + <0x10 0x0100>; opp-hz = /bits/ 64 <1000000000>; }; @@ -679,97 +277,20 @@ opp@1100000000,975 { clock-latency-ns = <100000>; - opp-supported-hw = <0x06 0x0001>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,3,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0002>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,3,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0004>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,3,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0008>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,3,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,3,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0080>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,4,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0080>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,3,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0100>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,975,4,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0100>; + opp-supported-hw = <0x06 0x0001>, <0x08 0x0002>, + <0x08 0x0004>, <0x08 0x0008>, + <0x08 0x0010>, <0x08 0x0080>, + <0x10 0x0080>, <0x08 0x0100>, + <0x10 0x0100>; opp-hz = /bits/ 64 <1100000000>; }; opp@1100000000,1000 { clock-latency-ns = <100000>; - opp-supported-hw = <0x01 0x0001>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,1000,2,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0002>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,1000,2,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0004>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,1000,2,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0008>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,1000,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,1000,2,7 { - 
clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0080>; - opp-hz = /bits/ 64 <1100000000>; - }; - - opp@1100000000,1000,2,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0100>; + opp-supported-hw = <0x01 0x0001>, <0x04 0x0002>, + <0x04 0x0004>, <0x04 0x0008>, + <0x04 0x0010>, <0x04 0x0080>, + <0x04 0x0100>; opp-hz = /bits/ 64 <1100000000>; }; @@ -799,97 +320,20 @@ opp@1200000000,1000 { clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0001>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,3,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0002>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,3,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0004>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,3,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0008>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,3,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,3,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0080>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,4,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0080>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,3,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0100>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1000,4,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0100>; + opp-supported-hw = <0x04 0x0001>, <0x08 0x0002>, + <0x08 0x0004>, <0x08 0x0008>, + <0x08 0x0010>, <0x08 0x0080>, + <0x10 0x0080>, <0x08 0x0100>, + <0x10 0x0100>; opp-hz = /bits/ 64 <1200000000>; }; opp@1200000000,1025 { clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0001>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1025,2,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0002>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1025,2,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0004>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1025,2,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0008>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1025,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1025,2,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0080>; - opp-hz = /bits/ 64 <1200000000>; - }; - - opp@1200000000,1025,2,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0100>; + opp-supported-hw = <0x02 0x0001>, <0x04 0x0002>, + <0x04 0x0004>, <0x04 0x0008>, + <0x04 0x0010>, <0x04 0x0080>, + <0x04 0x0100>; opp-hz = /bits/ 64 <1200000000>; }; @@ -913,133 +357,33 @@ opp@1300000000,1000 { clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0001>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1000,4,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0080>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1000,4,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0100>; + opp-supported-hw = <0x08 0x0001>, <0x10 0x0080>, + <0x10 0x0100>; opp-hz = /bits/ 64 <1300000000>; }; opp@1300000000,1025 { clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0001>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1025,3,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 
0x0002>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1025,3,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0080>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1025,3,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0100>; + opp-supported-hw = <0x04 0x0001>, <0x08 0x0002>, + <0x08 0x0080>, <0x08 0x0100>; opp-hz = /bits/ 64 <1300000000>; }; opp@1300000000,1050 { clock-latency-ns = <100000>; - opp-supported-hw = <0x12 0x3061>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,2,1 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0002>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,3,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0004>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,3,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0008>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,3,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,3,5 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0020>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,3,6 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0040>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,2,7 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0080>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,2,8 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0100>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,3,12 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x1000>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1050,3,13 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x2000>; + opp-supported-hw = <0x12 0x3061>, <0x04 0x0002>, + <0x08 0x0004>, <0x08 0x0008>, + <0x08 0x0010>, <0x08 0x0020>, + <0x08 0x0040>, <0x04 0x0080>, + <0x04 0x0100>, <0x08 0x1000>, + <0x08 0x2000>; opp-hz = /bits/ 64 <1300000000>; }; opp@1300000000,1075 { clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x0182>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1075,2,2 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0004>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1075,2,3 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0008>; - opp-hz = /bits/ 64 <1300000000>; - }; - - opp@1300000000,1075,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; + opp-supported-hw = <0x02 0x0182>, <0x04 0x0004>, + <0x04 0x0008>, <0x04 0x0010>; opp-hz = /bits/ 64 <1300000000>; }; @@ -1081,13 +425,7 @@ opp@1400000000,1150 { clock-latency-ns = <100000>; - opp-supported-hw = <0x02 0x000C>; - opp-hz = /bits/ 64 <1400000000>; - }; - - opp@1400000000,1150,2,4 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; + opp-supported-hw = <0x02 0x000C>, <0x04 0x0010>; opp-hz = /bits/ 64 <1400000000>; }; @@ -1105,61 +443,17 @@ opp@1500000000,1125 { clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0010>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1125,4,5 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0020>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1125,4,6 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x0040>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1125,4,12 { - clock-latency-ns = <100000>; - opp-supported-hw = 
<0x10 0x1000>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1125,4,13 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x10 0x2000>; + opp-supported-hw = <0x08 0x0010>, <0x10 0x0020>, + <0x10 0x0040>, <0x10 0x1000>, + <0x10 0x2000>; opp-hz = /bits/ 64 <1500000000>; }; opp@1500000000,1150 { clock-latency-ns = <100000>; - opp-supported-hw = <0x04 0x0010>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1150,3,5 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0020>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1150,3,6 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x0040>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1150,3,12 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x1000>; - opp-hz = /bits/ 64 <1500000000>; - }; - - opp@1500000000,1150,3,13 { - clock-latency-ns = <100000>; - opp-supported-hw = <0x08 0x2000>; + opp-supported-hw = <0x04 0x0010>, <0x08 0x0020>, + <0x08 0x0040>, <0x08 0x1000>, + <0x08 0x2000>; opp-hz = /bits/ 64 <1500000000>; }; diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index f4b719bde763..7996c04393d5 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -24,7 +24,8 @@ #include <linux/slab.h> #include <linux/page-flags.h> #include <linux/device.h> -#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> +#include <linux/dma-map-ops.h> #include <linux/dmapool.h> #include <linux/list.h> #include <linux/scatterlist.h> diff --git a/arch/arm/crypto/aes-neonbs-core.S b/arch/arm/crypto/aes-neonbs-core.S index cfaed4e67535..7d0cc7f226a5 100644 --- a/arch/arm/crypto/aes-neonbs-core.S +++ b/arch/arm/crypto/aes-neonbs-core.S @@ -77,11 +77,6 @@ vldr \out\()h, \sym + 8 .endm - .macro __adr, reg, lbl - adr \reg, \lbl -THUMB( orr \reg, \reg, #1 ) - .endm - .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 veor \b2, \b2, \b1 veor \b5, \b5, \b6 @@ -629,11 +624,11 @@ ENDPROC(aesbs_decrypt8) push {r4-r6, lr} ldr r5, [sp, #16] // number of blocks -99: __adr ip, 0f +99: adr ip, 0f and lr, r5, #7 cmp r5, #8 sub ip, ip, lr, lsl #2 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 vld1.8 {q0}, [r1]! vld1.8 {q1}, [r1]! @@ -648,11 +643,11 @@ ENDPROC(aesbs_decrypt8) mov rounds, r3 bl \do8 - __adr ip, 1f + adr ip, 1f and lr, r5, #7 cmp r5, #8 sub ip, ip, lr, lsl #2 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 vst1.8 {\o0}, [r0]! vst1.8 {\o1}, [r0]! @@ -689,12 +684,12 @@ ENTRY(aesbs_cbc_decrypt) push {r4-r6, lr} ldm ip, {r5-r6} // load args 4-5 -99: __adr ip, 0f +99: adr ip, 0f and lr, r5, #7 cmp r5, #8 sub ip, ip, lr, lsl #2 mov lr, r1 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 vld1.8 {q0}, [lr]! vld1.8 {q1}, [lr]! @@ -718,11 +713,11 @@ ENTRY(aesbs_cbc_decrypt) vmov q14, q8 vmov q15, q8 - __adr ip, 1f + adr ip, 1f and lr, r5, #7 cmp r5, #8 sub ip, ip, lr, lsl #2 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 vld1.8 {q9}, [r1]! vld1.8 {q10}, [r1]! @@ -733,9 +728,9 @@ ENTRY(aesbs_cbc_decrypt) vld1.8 {q15}, [r1]! W(nop) -1: __adr ip, 2f +1: adr ip, 2f sub ip, ip, lr, lsl #3 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 veor q0, q0, q8 vst1.8 {q0}, [r0]! 
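The aes-neonbs-core.S hunks above and below swap the __adr/bxlt pair for a plain adr plus "movlt pc, ip": when fewer than eight blocks remain, the block count is scaled into an instruction offset and written straight to pc, so execution lands part-way into an unrolled run of NEON loads/stores and only the remaining blocks are touched. A minimal C sketch of that "computed goto if blocks < 8" dispatch idea follows; it is illustrative only, not the kernel's code, and copy_tail_blocks/AES_BLOCK are invented names for the example.

#include <stdint.h>
#include <string.h>

#define AES_BLOCK 16	/* bytes per AES block (assumed for this sketch) */

/*
 * Hypothetical helper, not from the kernel: handle a tail of 0..8 blocks by
 * entering an unrolled sequence part-way through, the C analogue of the
 * assembly's "computed goto if blocks < 8" (adr + sub + movlt pc, ip).
 */
static void copy_tail_blocks(uint8_t *dst, const uint8_t *src, unsigned int blocks)
{
	switch (blocks < 8 ? blocks : 8) {
	case 8: memcpy(dst + 7 * AES_BLOCK, src + 7 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 7: memcpy(dst + 6 * AES_BLOCK, src + 6 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 6: memcpy(dst + 5 * AES_BLOCK, src + 5 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 5: memcpy(dst + 4 * AES_BLOCK, src + 4 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 4: memcpy(dst + 3 * AES_BLOCK, src + 3 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 3: memcpy(dst + 2 * AES_BLOCK, src + 2 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 2: memcpy(dst + 1 * AES_BLOCK, src + 1 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 1: memcpy(dst + 0 * AES_BLOCK, src + 0 * AES_BLOCK, AES_BLOCK); /* fall through */
	case 0: break;
	}
}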
@@ -804,13 +799,13 @@ ENTRY(aesbs_ctr_encrypt) vmov q6, q0 vmov q7, q0 - __adr ip, 0f + adr ip, 0f sub lr, r5, #1 and lr, lr, #7 cmp r5, #8 sub ip, ip, lr, lsl #5 sub ip, ip, lr, lsl #2 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 next_ctr q1 next_ctr q2 @@ -824,13 +819,13 @@ ENTRY(aesbs_ctr_encrypt) mov rounds, r3 bl aesbs_encrypt8 - __adr ip, 1f + adr ip, 1f and lr, r5, #7 cmp r5, #8 movgt r4, #0 ldrle r4, [sp, #40] // load final in the last round sub ip, ip, lr, lsl #2 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 vld1.8 {q8}, [r1]! vld1.8 {q9}, [r1]! @@ -843,10 +838,10 @@ ENTRY(aesbs_ctr_encrypt) 1: bne 2f vld1.8 {q15}, [r1]! -2: __adr ip, 3f +2: adr ip, 3f cmp r5, #8 sub ip, ip, lr, lsl #3 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 veor q0, q0, q8 vst1.8 {q0}, [r0]! @@ -900,12 +895,12 @@ __xts_prepare8: vshr.u64 d30, d31, #7 vmov q12, q14 - __adr ip, 0f + adr ip, 0f and r4, r6, #7 cmp r6, #8 sub ip, ip, r4, lsl #5 mov r4, sp - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 vld1.8 {q0}, [r1]! next_tweak q12, q14, q15, q13 @@ -961,8 +956,7 @@ ENDPROC(__xts_prepare8) push {r4-r8, lr} mov r5, sp // preserve sp ldrd r6, r7, [sp, #24] // get blocks and iv args - ldr r8, [sp, #32] // reorder final tweak? - rsb r8, r8, #1 + rsb r8, ip, #1 sub ip, sp, #128 // make room for 8x tweak bic ip, ip, #0xf // align sp to 16 bytes mov sp, ip @@ -973,12 +967,12 @@ ENDPROC(__xts_prepare8) mov rounds, r3 bl \do8 - __adr ip, 0f + adr ip, 0f and lr, r6, #7 cmp r6, #8 sub ip, ip, lr, lsl #2 mov r4, sp - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 vld1.8 {q8}, [r4, :128]! vld1.8 {q9}, [r4, :128]! @@ -989,9 +983,9 @@ ENDPROC(__xts_prepare8) vld1.8 {q14}, [r4, :128]! vld1.8 {q15}, [r4, :128] -0: __adr ip, 1f +0: adr ip, 1f sub ip, ip, lr, lsl #3 - bxlt ip // computed goto if blocks < 8 + movlt pc, ip // computed goto if blocks < 8 veor \o0, \o0, q8 vst1.8 {\o0}, [r0]! @@ -1018,9 +1012,11 @@ ENDPROC(__xts_prepare8) .endm ENTRY(aesbs_xts_encrypt) + mov ip, #0 // never reorder final tweak __xts_crypt aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5 ENDPROC(aesbs_xts_encrypt) ENTRY(aesbs_xts_decrypt) + ldr ip, [sp, #8] // reorder final tweak? 
__xts_crypt aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5 ENDPROC(aesbs_xts_decrypt) diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index e6fd32919c81..bda8bf17631e 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -8,7 +8,6 @@ #include <asm/neon.h> #include <asm/simd.h> #include <crypto/aes.h> -#include <crypto/cbc.h> #include <crypto/ctr.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> @@ -49,7 +48,7 @@ struct aesbs_ctx { struct aesbs_cbc_ctx { struct aesbs_ctx key; - struct crypto_cipher *enc_tfm; + struct crypto_skcipher *enc_tfm; }; struct aesbs_xts_ctx { @@ -140,19 +139,23 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key, kernel_neon_end(); memzero_explicit(&rk, sizeof(rk)); - return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len); + return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len); } -static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) +static int cbc_encrypt(struct skcipher_request *req) { + struct skcipher_request *subreq = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src); -} + skcipher_request_set_tfm(subreq, ctx->enc_tfm); + skcipher_request_set_callback(subreq, + skcipher_request_flags(req), + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); -static int cbc_encrypt(struct skcipher_request *req) -{ - return crypto_cbc_encrypt_walk(req, cbc_encrypt_one); + return crypto_skcipher_encrypt(subreq); } static int cbc_decrypt(struct skcipher_request *req) @@ -183,20 +186,27 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static int cbc_init(struct crypto_tfm *tfm) +static int cbc_init(struct crypto_skcipher *tfm) { - struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); + unsigned int reqsize; - ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0); + ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(ctx->enc_tfm)) + return PTR_ERR(ctx->enc_tfm); - return PTR_ERR_OR_ZERO(ctx->enc_tfm); + reqsize = sizeof(struct skcipher_request); + reqsize += crypto_skcipher_reqsize(ctx->enc_tfm); + crypto_skcipher_set_reqsize(tfm, reqsize); + + return 0; } -static void cbc_exit(struct crypto_tfm *tfm) +static void cbc_exit(struct crypto_skcipher *tfm) { - struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_cipher(ctx->enc_tfm); + crypto_free_skcipher(ctx->enc_tfm); } static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key, @@ -304,9 +314,9 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, return aesbs_setkey(tfm, in_key, key_len); } -static int xts_init(struct crypto_tfm *tfm) +static int xts_init(struct crypto_skcipher *tfm) { - struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); + struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0); if (IS_ERR(ctx->cts_tfm)) @@ -319,9 +329,9 @@ static int xts_init(struct crypto_tfm *tfm) return PTR_ERR_OR_ZERO(ctx->tweak_tfm); } -static void xts_exit(struct crypto_tfm *tfm) +static void xts_exit(struct crypto_skcipher *tfm) { - struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); + struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 
crypto_free_cipher(ctx->tweak_tfm); crypto_free_cipher(ctx->cts_tfm); @@ -432,8 +442,6 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx), .base.cra_module = THIS_MODULE, .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_init = cbc_init, - .base.cra_exit = cbc_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -442,6 +450,8 @@ static struct skcipher_alg aes_algs[] = { { .setkey = aesbs_cbc_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, + .init = cbc_init, + .exit = cbc_exit, }, { .base.cra_name = "__ctr(aes)", .base.cra_driver_name = "__ctr-aes-neonbs", @@ -483,8 +493,6 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_ctxsize = sizeof(struct aesbs_xts_ctx), .base.cra_module = THIS_MODULE, .base.cra_flags = CRYPTO_ALG_INTERNAL, - .base.cra_init = xts_init, - .base.cra_exit = xts_exit, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, @@ -493,6 +501,8 @@ static struct skcipher_alg aes_algs[] = { { .setkey = aesbs_xts_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, + .init = xts_init, + .exit = xts_exit, } }; static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; diff --git a/arch/arm/crypto/curve25519-glue.c b/arch/arm/crypto/curve25519-glue.c index 776ae07e0469..31eb75b6002f 100644 --- a/arch/arm/crypto/curve25519-glue.c +++ b/arch/arm/crypto/curve25519-glue.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/jump_label.h> +#include <linux/scatterlist.h> #include <crypto/curve25519.h> asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE], diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c index 13cfef4ae22e..3023c1acfa19 100644 --- a/arch/arm/crypto/poly1305-glue.c +++ b/arch/arm/crypto/poly1305-glue.c @@ -20,6 +20,7 @@ void poly1305_init_arm(void *state, const u8 *key); void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit); +void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit); void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce); void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index 9f96ff48e4a8..f3a2b54efd4e 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -175,7 +175,6 @@ $code=<<___; #else .syntax unified # ifdef __thumb2__ -# define adrl adr .thumb # else .code 32 @@ -471,7 +470,8 @@ sha256_block_data_order_neon: stmdb sp!,{r4-r12,lr} sub $H,sp,#16*4+16 - adrl $Ktbl,K256 + adr $Ktbl,.Lsha256_block_data_order + sub $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256 bic $H,$H,#15 @ align for 128-bit stores mov $t2,sp mov sp,$H @ alloca diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index ea04b2ab0c33..6363014a50d7 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -56,7 +56,6 @@ #else .syntax unified # ifdef __thumb2__ -# define adrl adr .thumb # else .code 32 @@ -1885,7 +1884,8 @@ sha256_block_data_order_neon: stmdb sp!,{r4-r12,lr} sub r11,sp,#16*4+16 - adrl r14,K256 + adr r14,.Lsha256_block_data_order + sub r14,r14,#.Lsha256_block_data_order-K256 bic r11,r11,#15 @ align for 128-bit stores mov r12,sp mov sp,r11 @ alloca diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index 69df68981acd..2fc3516912fa 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ 
b/arch/arm/crypto/sha512-armv4.pl @@ -212,7 +212,6 @@ $code=<<___; #else .syntax unified # ifdef __thumb2__ -# define adrl adr .thumb # else .code 32 @@ -602,7 +601,8 @@ sha512_block_data_order_neon: dmb @ errata #451034 on early Cortex A8 add $len,$inp,$len,lsl#7 @ len to point at the end of inp VFP_ABI_PUSH - adrl $Ktbl,K512 + adr $Ktbl,.Lsha512_block_data_order + sub $Ktbl,$Ktbl,.Lsha512_block_data_order-K512 vldmia $ctx,{$A-$H} @ load context .Loop_neon: ___ diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index cb147db5cbfe..03014624f2ab 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -79,7 +79,6 @@ #else .syntax unified # ifdef __thumb2__ -# define adrl adr .thumb # else .code 32 @@ -543,7 +542,8 @@ sha512_block_data_order_neon: dmb @ errata #451034 on early Cortex A8 add r2,r1,r2,lsl#7 @ len to point at the end of inp VFP_ABI_PUSH - adrl r3,K512 + adr r3,.Lsha512_block_data_order + sub r3,r3,.Lsha512_block_data_order-K512 vldmia r0,{d16-d23} @ load context .Loop_neon: vshr.u64 d24,d20,#14 @ 0 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h index ed6073fee338..f0f54aef3724 100644 --- a/arch/arm/include/asm/checksum.h +++ b/arch/arm/include/asm/checksum.h @@ -35,23 +35,20 @@ __wsum csum_partial(const void *buff, int len, __wsum sum); */ __wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); +csum_partial_copy_nocheck(const void *src, void *dst, int len); __wsum -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); +csum_partial_copy_from_user(const void __user *src, void *dst, int len); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define _HAVE_ARCH_CSUM_AND_COPY static inline -__wsum csum_and_copy_from_user (const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len) { - if (access_ok(src, len)) - return csum_partial_copy_from_user(src, dst, len, sum, err_ptr); + if (!access_ok(src, len)) + return 0; - if (len) - *err_ptr = -EFAULT; - - return sum; + return csum_partial_copy_from_user(src, dst, len); } /* diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h deleted file mode 100644 index d785187a6f8a..000000000000 --- a/arch/arm/include/asm/dma-contiguous.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASMARM_DMA_CONTIGUOUS_H -#define ASMARM_DMA_CONTIGUOUS_H - -#ifdef __KERNEL__ -#ifdef CONFIG_DMA_CMA - -#include <linux/types.h> - -void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size); - -#endif -#endif - -#endif diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h index 7c3001a6a775..77fcb7ee5ec9 100644 --- a/arch/arm/include/asm/dma-direct.h +++ b/arch/arm/include/asm/dma-direct.h @@ -2,13 +2,44 @@ #ifndef ASM_ARM_DMA_DIRECT_H #define ASM_ARM_DMA_DIRECT_H 1 -static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +#include <asm/memory.h> + +/* + * dma_to_pfn/pfn_to_dma/virt_to_dma are architecture private + * functions used internally by the DMA-mapping API to provide DMA + * addresses. They must not be used by drivers. 
+ */ +static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) +{ + if (dev && dev->dma_range_map) + pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn))); + return (dma_addr_t)__pfn_to_bus(pfn); +} + +static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) +{ + unsigned long pfn = __bus_to_pfn(addr); + + if (dev && dev->dma_range_map) + pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn))); + return pfn; +} + +static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) +{ + if (dev) + return pfn_to_dma(dev, virt_to_pfn(addr)); + + return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); +} + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { unsigned int offset = paddr & ~PAGE_MASK; return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; } -static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr) +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) { unsigned int offset = dev_addr & ~PAGE_MASK; return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 86405cc81385..fe9ef6f79e9c 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -6,7 +6,6 @@ #include <linux/mm_types.h> #include <linux/scatterlist.h> -#include <linux/dma-debug.h> #include <linux/kref.h> struct dma_iommu_mapping { diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bdd80ddbca34..77082246a5e1 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -6,9 +6,6 @@ #include <linux/mm_types.h> #include <linux/scatterlist.h> -#include <linux/dma-debug.h> - -#include <asm/memory.h> #include <xen/xen.h> #include <asm/xen/hypervisor.h> @@ -23,74 +20,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return NULL; } -#ifdef __arch_page_to_dma -#error Please update to __arch_pfn_to_dma -#endif - -/* - * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private - * functions used internally by the DMA-mapping API to provide DMA - * addresses. They must not be used by drivers. 
- */ -#ifndef __arch_pfn_to_dma -static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) -{ - if (dev) - pfn -= dev->dma_pfn_offset; - return (dma_addr_t)__pfn_to_bus(pfn); -} - -static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) -{ - unsigned long pfn = __bus_to_pfn(addr); - - if (dev) - pfn += dev->dma_pfn_offset; - - return pfn; -} - -static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) -{ - if (dev) { - unsigned long pfn = dma_to_pfn(dev, addr); - - return phys_to_virt(__pfn_to_phys(pfn)); - } - - return (void *)__bus_to_virt((unsigned long)addr); -} - -static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) -{ - if (dev) - return pfn_to_dma(dev, virt_to_pfn(addr)); - - return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); -} - -#else -static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) -{ - return __arch_pfn_to_dma(dev, pfn); -} - -static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) -{ - return __arch_dma_to_pfn(dev, addr); -} - -static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) -{ - return __arch_dma_to_virt(dev, addr); -} - -static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) -{ - return __arch_virt_to_dma(dev, addr); -} -#endif - /** * arm_dma_alloc - allocate consistent memory for DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h index 5dcf3c6011b7..3ee4f4381985 100644 --- a/arch/arm/include/asm/efi.h +++ b/arch/arm/include/asm/efi.h @@ -66,25 +66,24 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) #define MAX_UNCOMP_KERNEL_SIZE SZ_32M /* - * The kernel zImage should preferably be located between 32 MB and 128 MB - * from the base of DRAM. The min address leaves space for a maximal size - * uncompressed image, and the max address is due to how the zImage decompressor - * picks a destination address. + * phys-to-virt patching requires that the physical to virtual offset fits + * into the immediate field of an add/sub instruction, which comes down to the + * 24 least significant bits being zero, and so the offset should be a multiple + * of 16 MB. Since PAGE_OFFSET itself is a multiple of 16 MB, the physical + * base should be aligned to 16 MB as well. 
*/ -#define ZIMAGE_OFFSET_LIMIT SZ_128M -#define MIN_ZIMAGE_OFFSET MAX_UNCOMP_KERNEL_SIZE +#define EFI_PHYS_ALIGN SZ_16M -/* on ARM, the FDT should be located in the first 128 MB of RAM */ -static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base) +/* on ARM, the FDT should be located in a lowmem region */ +static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr) { - return dram_base + ZIMAGE_OFFSET_LIMIT; + return round_down(image_addr, EFI_PHYS_ALIGN) + SZ_512M; } /* on ARM, the initrd should be loaded in a lowmem region */ -static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, - unsigned long image_addr) +static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) { - return dram_base + SZ_512M; + return round_down(image_addr, EFI_PHYS_ALIGN) + SZ_512M; } struct efi_arm_entry_state { diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index 7a88f160b1fb..b95848ed2bc7 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -6,29 +6,12 @@ #include <linux/threads.h> #include <asm/irq.h> -/* number of IPIS _not_ including IPI_CPU_BACKTRACE */ -#define NR_IPI 7 - typedef struct { unsigned int __softirq_pending; -#ifdef CONFIG_SMP - unsigned int ipi_irqs[NR_IPI]; -#endif } ____cacheline_aligned irq_cpustat_t; #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ -#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++ -#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member) - -#ifdef CONFIG_SMP -u64 smp_irq_stat_cpu(unsigned int cpu); -#else -#define smp_irq_stat_cpu(cpu) 0 -#endif - -#define arch_irq_stat_cpu smp_irq_stat_cpu - #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index a91f21e3c5b5..0ca55a607d0a 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -39,11 +39,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs); */ extern void smp_init_cpus(void); - /* - * Provide a function to raise an IPI cross call on CPUs in callmap. 
+ * Register IPI interrupts with the arch SMP code */ -extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); +extern void set_smp_ipi_range(int ipi_base, int nr_ipi); /* * Called from platform specific assembly code, this is the diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 9415222b49ad..b8cbe03ad260 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -59,6 +59,7 @@ __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) #ifdef CONFIG_ARM_LPAE struct page *page = virt_to_page(pmdp); + pgtable_pmd_page_dtor(page); tlb_remove_table(tlb, page); #endif } diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index e0593cf095d0..470299ee2fba 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -7,8 +7,13 @@ #include <linux/cpumask.h> #include <linux/arch_topology.h> +/* big.LITTLE switcher is incompatible with frequency invariance */ +#ifndef CONFIG_BL_SWITCHER /* Replace task scheduler's default frequency-invariant accounting */ +#define arch_set_freq_scale topology_set_freq_scale #define arch_scale_freq_capacity topology_get_freq_scale +#define arch_scale_freq_invariant topology_scale_freq_invariant +#endif /* Replace task scheduler's default cpu-invariant accounting */ #define arch_scale_cpu_capacity topology_get_cpu_scale diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h index 381a8e105fa5..4a91428c324d 100644 --- a/arch/arm/kernel/vmlinux.lds.h +++ b/arch/arm/include/asm/vmlinux.lds.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include <asm-generic/vmlinux.lds.h> #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) @@ -49,8 +50,29 @@ EXIT_CALL \ ARM_MMU_DISCARD(*(.text.fixup)) \ ARM_MMU_DISCARD(*(__ex_table)) \ - *(.discard) \ - *(.discard.*) + COMMON_DISCARDS + +/* + * Sections that should stay zero sized, which is safer to explicitly + * check instead of blindly discarding. + */ +#define ARM_ASSERTS \ + .plt : { \ + *(.iplt) *(.rel.iplt) *(.iplt) *(.igot.plt) \ + } \ + ASSERT(SIZEOF(.plt) == 0, \ + "Unexpected run-time procedure linkages detected!") + +#define ARM_DETAILS \ + ELF_DETAILS \ + .ARM.attributes 0 : { *(.ARM.attributes) } + +#define ARM_STUBS_TEXT \ + *(.gnu.warning) \ + *(.glue_7) \ + *(.glue_7t) \ + *(.vfp11_veneer) \ + *(.v4_bx) #define ARM_TEXT \ IDMAP_TEXT \ @@ -64,9 +86,7 @@ CPUIDLE_TEXT \ LOCK_TEXT \ KPROBES_TEXT \ - *(.gnu.warning) \ - *(.glue_7) \ - *(.glue_7t) \ + ARM_STUBS_TEXT \ . 
= ALIGN(4); \ *(.got) /* Global offset table */ \ ARM_CPU_KEEP(PROC_INFO) diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 31bbc803cecb..dc7f6e91aafa 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -1 +1,6 @@ #include <xen/arm/page.h> + +static inline bool xen_kernel_unmapped_at_usr(void) +{ + return false; +} diff --git a/arch/arm/include/debug/8250.S b/arch/arm/include/debug/8250.S index e4a036f082c2..e3692a37cede 100644 --- a/arch/arm/include/debug/8250.S +++ b/arch/arm/include/debug/8250.S @@ -45,10 +45,11 @@ bne 1002b .endm - .macro waituart,rd,rx -#ifdef CONFIG_DEBUG_UART_8250_FLOW_CONTROL + .macro waituarttxrdy,rd,rx + .endm + + .macro waituartcts,rd,rx 1001: load \rd, [\rx, #UART_MSR << UART_SHIFT] tst \rd, #UART_MSR_CTS beq 1001b -#endif .endm diff --git a/arch/arm/include/debug/asm9260.S b/arch/arm/include/debug/asm9260.S index 0da1eb625331..5a0ce145c44a 100644 --- a/arch/arm/include/debug/asm9260.S +++ b/arch/arm/include/debug/asm9260.S @@ -11,7 +11,10 @@ ldr \rv, = CONFIG_DEBUG_UART_VIRT .endm - .macro waituart,rd,rx + .macro waituarttxrdy,rd,rx + .endm + + .macro waituartcts,rd,rx .endm .macro senduart,rd,rx diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index 6c91cbaaa20b..17722824e2f2 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S @@ -19,12 +19,15 @@ strb \rd, [\rx, #(AT91_DBGU_THR)] @ Write to Transmitter Holding Register .endm - .macro waituart,rd,rx + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #(AT91_DBGU_SR)] @ Read Status Register tst \rd, #AT91_DBGU_TXRDY @ DBGU_TXRDY = 1 when ready to transmit beq 1001b .endm + .macro waituartcts,rd,rx + .endm + .macro busyuart,rd,rx 1001: ldr \rd, [\rx, #(AT91_DBGU_SR)] @ Read Status Register tst \rd, #AT91_DBGU_TXEMPTY @ DBGU_TXEMPTY = 1 when transmission complete diff --git a/arch/arm/include/debug/bcm63xx.S b/arch/arm/include/debug/bcm63xx.S index 06a896227396..da65abb6738d 100644 --- a/arch/arm/include/debug/bcm63xx.S +++ b/arch/arm/include/debug/bcm63xx.S @@ -17,12 +17,15 @@ strb \rd, [\rx, #UART_FIFO_REG] .endm - .macro waituart, rd, rx + .macro waituarttxrdy, rd, rx 1001: ldr \rd, [\rx, #UART_IR_REG] tst \rd, #(1 << UART_IR_TXEMPTY) beq 1001b .endm + .macro waituartcts, rd, rx + .endm + .macro busyuart, rd, rx 1002: ldr \rd, [\rx, #UART_IR_REG] tst \rd, #(1 << UART_IR_TXTRESH) diff --git a/arch/arm/include/debug/brcmstb.S b/arch/arm/include/debug/brcmstb.S index 132a20c4a676..7ffe66993029 100644 --- a/arch/arm/include/debug/brcmstb.S +++ b/arch/arm/include/debug/brcmstb.S @@ -142,7 +142,10 @@ ARM_BE8( rev \rd, \rd ) bne 1002b .endm - .macro waituart,rd,rx + .macro waituarttxrdy,rd,rx + .endm + + .macro waituartcts,rd,rx .endm /* diff --git a/arch/arm/include/debug/clps711x.S b/arch/arm/include/debug/clps711x.S index 774a67ac3877..a983d12a6515 100644 --- a/arch/arm/include/debug/clps711x.S +++ b/arch/arm/include/debug/clps711x.S @@ -20,7 +20,10 @@ ldr \rp, =CLPS711X_UART_PADDR .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx .endm .macro senduart,rd,rx diff --git a/arch/arm/include/debug/dc21285.S b/arch/arm/include/debug/dc21285.S index d7e8c71706ab..4ec0e5e31704 100644 --- a/arch/arm/include/debug/dc21285.S +++ b/arch/arm/include/debug/dc21285.S @@ -34,5 +34,8 @@ bne 1001b .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx .endm diff --git a/arch/arm/include/debug/digicolor.S 
b/arch/arm/include/debug/digicolor.S index 256f5f4da275..443674cad76a 100644 --- a/arch/arm/include/debug/digicolor.S +++ b/arch/arm/include/debug/digicolor.S @@ -21,7 +21,10 @@ strb \rd, [\rx, #UA0_EMI_REC] .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx .endm .macro busyuart,rd,rx diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S index 5ed5028306f4..b0083d6e31e8 100644 --- a/arch/arm/include/debug/efm32.S +++ b/arch/arm/include/debug/efm32.S @@ -29,7 +29,10 @@ strb \rd, [\rx, #UARTn_TXDATA] .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #UARTn_STATUS] tst \rd, #UARTn_STATUS_TXBL beq 1001b diff --git a/arch/arm/include/debug/icedcc.S b/arch/arm/include/debug/icedcc.S index 74a0dd036a17..d5e65da8a687 100644 --- a/arch/arm/include/debug/icedcc.S +++ b/arch/arm/include/debug/icedcc.S @@ -23,7 +23,10 @@ beq 1001b .endm - .macro waituart, rd, rx + .macro waituartcts, rd, rx + .endm + + .macro waituarttxrdy, rd, rx mov \rd, #0x2000000 1001: subs \rd, \rd, #1 @@ -47,7 +50,10 @@ beq 1001b .endm - .macro waituart, rd, rx + .macro waituartcts, rd, rx + .endm + + .macro waituarttxrdy, rd, rx mov \rd, #0x10000000 1001: subs \rd, \rd, #1 @@ -72,7 +78,10 @@ .endm - .macro waituart, rd, rx + .macro waituartcts, rd, rx + .endm + + .macro waituarttxrdy, rd, rx mov \rd, #0x2000000 1001: subs \rd, \rd, #1 diff --git a/arch/arm/include/debug/imx.S b/arch/arm/include/debug/imx.S index 1c1b9d1da4c8..bb7b9550580c 100644 --- a/arch/arm/include/debug/imx.S +++ b/arch/arm/include/debug/imx.S @@ -35,7 +35,10 @@ str \rd, [\rx, #0x40] @ TXDATA .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx .endm .macro busyuart,rd,rx diff --git a/arch/arm/include/debug/meson.S b/arch/arm/include/debug/meson.S index 1e501a0054ae..7b60e4401225 100644 --- a/arch/arm/include/debug/meson.S +++ b/arch/arm/include/debug/meson.S @@ -25,7 +25,10 @@ beq 1002b .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #MESON_AO_UART_STATUS] tst \rd, #MESON_AO_UART_TX_FIFO_FULL bne 1001b diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S index 9405b71461da..530edc74f9a3 100644 --- a/arch/arm/include/debug/msm.S +++ b/arch/arm/include/debug/msm.S @@ -17,7 +17,10 @@ ARM_BE8(rev \rd, \rd ) str \rd, [\rx, #0x70] .endm - .macro waituart, rd, rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy, rd, rx @ check for TX_EMT in UARTDM_SR ldr \rd, [\rx, #0x08] ARM_BE8(rev \rd, \rd ) diff --git a/arch/arm/include/debug/omap2plus.S b/arch/arm/include/debug/omap2plus.S index b5696a33ba0f..0680be6c79d3 100644 --- a/arch/arm/include/debug/omap2plus.S +++ b/arch/arm/include/debug/omap2plus.S @@ -75,5 +75,8 @@ omap_uart_lsr: .word 0 bne 1001b .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx .endm diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S index a2a553afe7b8..0c7bfa4c10db 100644 --- a/arch/arm/include/debug/pl01x.S +++ b/arch/arm/include/debug/pl01x.S @@ -26,7 +26,10 @@ strb \rd, [\rx, #UART01x_DR] .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #UART01x_FR] ARM_BE8( rev \rd, \rd ) tst \rd, #UART01x_FR_TXFF diff --git a/arch/arm/include/debug/renesas-scif.S b/arch/arm/include/debug/renesas-scif.S index 25f06663a9a4..8e433e981bbe 100644 
--- a/arch/arm/include/debug/renesas-scif.S +++ b/arch/arm/include/debug/renesas-scif.S @@ -33,7 +33,10 @@ ldr \rv, =SCIF_VIRT .endm - .macro waituart, rd, rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy, rd, rx 1001: ldrh \rd, [\rx, #FSR] tst \rd, #TDFE beq 1001b diff --git a/arch/arm/include/debug/sa1100.S b/arch/arm/include/debug/sa1100.S index 6109e6058e5b..7968ea52df3d 100644 --- a/arch/arm/include/debug/sa1100.S +++ b/arch/arm/include/debug/sa1100.S @@ -51,7 +51,10 @@ str \rd, [\rx, #UTDR] .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #UTSR1] tst \rd, #UTSR1_TNF beq 1001b diff --git a/arch/arm/include/debug/samsung.S b/arch/arm/include/debug/samsung.S index 69201d7fb48f..ab474d564a90 100644 --- a/arch/arm/include/debug/samsung.S +++ b/arch/arm/include/debug/samsung.S @@ -69,7 +69,10 @@ ARM_BE8(rev \rd, \rd) 1002: @ exit busyuart .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx ldr \rd, [\rx, # S3C2410_UFCON] ARM_BE8(rev \rd, \rd) tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled? diff --git a/arch/arm/include/debug/sirf.S b/arch/arm/include/debug/sirf.S index e73e4de0a015..3612c7b9cbe7 100644 --- a/arch/arm/include/debug/sirf.S +++ b/arch/arm/include/debug/sirf.S @@ -29,7 +29,10 @@ .macro busyuart,rd,rx .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #SIRF_LLUART_TXFIFO_STATUS] tst \rd, #SIRF_LLUART_TXFIFO_EMPTY beq 1001b diff --git a/arch/arm/include/debug/sti.S b/arch/arm/include/debug/sti.S index 6b42c91f217d..72d052511890 100644 --- a/arch/arm/include/debug/sti.S +++ b/arch/arm/include/debug/sti.S @@ -45,7 +45,10 @@ strb \rd, [\rx, #ASC_TX_BUF_OFF] .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #ASC_STA_OFF] tst \rd, #ASC_STA_TX_FULL bne 1001b diff --git a/arch/arm/include/debug/stm32.S b/arch/arm/include/debug/stm32.S index f3c4a37210ed..b6d9df30e37d 100644 --- a/arch/arm/include/debug/stm32.S +++ b/arch/arm/include/debug/stm32.S @@ -27,7 +27,10 @@ strb \rd, [\rx, #STM32_USART_TDR_OFF] .endm -.macro waituart,rd,rx +.macro waituartcts,rd,rx +.endm + +.macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #(STM32_USART_SR_OFF)] @ Read Status Register tst \rd, #STM32_USART_TXE @ TXE = 1 = tx empty beq 1001b diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S index 2148d0f88591..98daa7f48314 100644 --- a/arch/arm/include/debug/tegra.S +++ b/arch/arm/include/debug/tegra.S @@ -178,15 +178,16 @@ 1002: .endm - .macro waituart, rd, rx -#ifdef FLOW_CONTROL + .macro waituartcts, rd, rx cmp \rx, #0 beq 1002f 1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT] tst \rd, #UART_MSR_CTS beq 1001b 1002: -#endif + .endm + + .macro waituarttxrdy,rd,rx .endm /* diff --git a/arch/arm/include/debug/vf.S b/arch/arm/include/debug/vf.S index 854d9bd82770..035bcbf117ab 100644 --- a/arch/arm/include/debug/vf.S +++ b/arch/arm/include/debug/vf.S @@ -29,5 +29,8 @@ beq 1001b @ wait until transmit done .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx .endm diff --git a/arch/arm/include/debug/vt8500.S b/arch/arm/include/debug/vt8500.S index 8dc1df2d91b8..d01094fdbc8c 100644 --- a/arch/arm/include/debug/vt8500.S +++ b/arch/arm/include/debug/vt8500.S @@ -28,7 +28,10 @@ bne 1001b .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx .endm #endif diff 
--git a/arch/arm/include/debug/zynq.S b/arch/arm/include/debug/zynq.S index 58d77c972fd6..5d42cc35ecf3 100644 --- a/arch/arm/include/debug/zynq.S +++ b/arch/arm/include/debug/zynq.S @@ -33,7 +33,10 @@ strb \rd, [\rx, #UART_FIFO_OFFSET] @ TXDATA .endm - .macro waituart,rd,rx + .macro waituartcts,rd,rx + .endm + + .macro waituarttxrdy,rd,rx 1001: ldr \rd, [\rx, #UART_SR_OFFSET] ARM_BE8( rev \rd, \rd ) tst \rd, #UART_SR_TXEMPTY diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index e112072b579d..d92f44bdf438 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -89,11 +89,18 @@ ENTRY(printascii) 2: teq r1, #'\n' bne 3f mov r1, #'\r' - waituart r2, r3 +#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL + waituartcts r2, r3 +#endif + waituarttxrdy r2, r3 senduart r1, r3 busyuart r2, r3 mov r1, #'\n' -3: waituart r2, r3 +3: +#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL + waituartcts r2, r3 +#endif + waituarttxrdy r2, r3 senduart r1, r3 busyuart r2, r3 b 1b diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 7a4853b1213a..08660ae9dcbc 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -683,6 +683,40 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +/* + * Arm32 hardware does not always report a watchpoint hit address that matches + * one of the watchpoints set. It can also report an address "near" the + * watchpoint if a single instruction accesses both watched and unwatched + * addresses. There is no straightforward way, short of disassembling the + * offending instruction, to map that address back to the watchpoint. This + * function computes the distance of the memory access from the watchpoint as a + * heuristic for the likelihood that a given access triggered the watchpoint. + * + * See this same function in the arm64 platform code, which has the same + * problem. + * + * The function returns the distance of the address from the bytes watched by + * the watchpoint. In case of an exact match, it returns 0. + */ +static u32 get_distance_from_watchpoint(unsigned long addr, u32 val, + struct arch_hw_breakpoint_ctrl *ctrl) +{ + u32 wp_low, wp_high; + u32 lens, lene; + + lens = __ffs(ctrl->len); + lene = __fls(ctrl->len); + + wp_low = val + lens; + wp_high = val + lene; + if (addr < wp_low) + return wp_low - addr; + else if (addr > wp_high) + return addr - wp_high; + else + return 0; +} + static int watchpoint_fault_on_uaccess(struct pt_regs *regs, struct arch_hw_breakpoint *info) { @@ -692,23 +726,25 @@ static int watchpoint_fault_on_uaccess(struct pt_regs *regs, static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - int i, access; - u32 val, ctrl_reg, alignment_mask; + int i, access, closest_match = 0; + u32 min_dist = -1, dist; + u32 val, ctrl_reg; struct perf_event *wp, **slots; struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); + /* + * Find all watchpoints that match the reported address. If no exact + * match is found, attribute the hit to the closest watchpoint. + */ + rcu_read_lock(); for (i = 0; i < core_num_wrps; ++i) { - rcu_read_lock(); - wp = slots[i]; - if (wp == NULL) - goto unlock; + continue; - info = counter_arch_bp(wp); /* * The DFAR is an unknown value on debug architectures prior * to 7.1.
Since we only allow a single watchpoint on these @@ -717,33 +753,31 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, */ if (debug_arch < ARM_DEBUG_ARCH_V7_1) { BUG_ON(i > 0); + info = counter_arch_bp(wp); info->trigger = wp->attr.bp_addr; } else { - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) - alignment_mask = 0x7; - else - alignment_mask = 0x3; - - /* Check if the watchpoint value matches. */ - val = read_wb_reg(ARM_BASE_WVR + i); - if (val != (addr & ~alignment_mask)) - goto unlock; - - /* Possible match, check the byte address select. */ - ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); - decode_ctrl_reg(ctrl_reg, &ctrl); - if (!((1 << (addr & alignment_mask)) & ctrl.len)) - goto unlock; - /* Check that the access type matches. */ if (debug_exception_updates_fsr()) { access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : HW_BREAKPOINT_R; if (!(access & hw_breakpoint_type(wp))) - goto unlock; + continue; } + val = read_wb_reg(ARM_BASE_WVR + i); + ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); + decode_ctrl_reg(ctrl_reg, &ctrl); + dist = get_distance_from_watchpoint(addr, val, &ctrl); + if (dist < min_dist) { + min_dist = dist; + closest_match = i; + } + /* Is this an exact match? */ + if (dist != 0) + continue; + /* We have a winner. */ + info = counter_arch_bp(wp); info->trigger = addr; } @@ -765,13 +799,23 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, * we can single-step over the watchpoint trigger. */ if (!is_default_overflow_handler(wp)) - goto unlock; - + continue; step: enable_single_step(wp, instruction_pointer(regs)); -unlock: - rcu_read_unlock(); } + + if (min_dist > 0 && min_dist != -1) { + /* No exact match found. */ + wp = slots[closest_match]; + info = counter_arch_bp(wp); + info->trigger = addr; + pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + perf_bp_event(wp, regs); + if (is_default_overflow_handler(wp)) + enable_single_step(wp, instruction_pointer(regs)); + } + + rcu_read_unlock(); } static void watchpoint_single_step_handler(unsigned long pc) diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index ee514034c0a1..698b6f636156 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -18,7 +18,6 @@ * IRQ's are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. */ -#include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/interrupt.h> diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index d8e18cdd96d3..3f65d0ac9f63 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -843,20 +843,26 @@ early_param("mem", early_mem); static void __init request_standard_resources(const struct machine_desc *mdesc) { - struct memblock_region *region; + phys_addr_t start, end, res_end; struct resource *res; + u64 i; kernel_code.start = virt_to_phys(_text); kernel_code.end = virt_to_phys(__init_begin - 1); kernel_data.start = virt_to_phys(_sdata); kernel_data.end = virt_to_phys(_end - 1); - for_each_memblock(memory, region) { - phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); - phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; + for_each_mem_range(i, &start, &end) { unsigned long boot_alias_start; /* + * In memblock, end points to the first byte after the + * range while in resources, end points to the last byte in + * the range.
+ */ + res_end = end - 1; + + /* * Some systems have a special memory alias which is only * used for booting. We need to advertise this region to * kexec-tools so they know where bootable RAM is located. @@ -869,7 +875,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) __func__, sizeof(*res)); res->name = "System RAM (boot alias)"; res->start = boot_alias_start; - res->end = phys_to_idmap(end); + res->end = phys_to_idmap(res_end); res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); } @@ -880,7 +886,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) sizeof(*res)); res->name = "System RAM"; res->start = start; - res->end = end; + res->end = res_end; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5d9da61eff62..48099c6e1e4a 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -26,6 +26,7 @@ #include <linux/completion.h> #include <linux/cpufreq.h> #include <linux/irq_work.h> +#include <linux/kernel_stat.h> #include <linux/atomic.h> #include <asm/bugs.h> @@ -65,18 +66,26 @@ enum ipi_msg_type { IPI_CPU_STOP, IPI_IRQ_WORK, IPI_COMPLETION, + NR_IPI, /* * CPU_BACKTRACE is special and not included in NR_IPI * or tracable with trace_ipi_* */ - IPI_CPU_BACKTRACE, + IPI_CPU_BACKTRACE = NR_IPI, /* * SGI8-15 can be reserved by secure firmware, and thus may * not be usable by the kernel. Please keep the above limited * to at most 8 entries. */ + MAX_IPI }; +static int ipi_irq_base __read_mostly; +static int nr_ipi __read_mostly = NR_IPI; +static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly; + +static void ipi_setup(int cpu); + static DECLARE_COMPLETION(cpu_running); static struct smp_operations smp_ops __ro_after_init; @@ -226,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu) return cpu != 0; } +static void ipi_teardown(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + disable_percpu_irq(ipi_irq_base + i); +} + /* * __cpu_disable runs on the processor to be shutdown. */ @@ -247,6 +267,7 @@ int __cpu_disable(void) * and we must not schedule until we're ready to give up the cpu. 
*/ set_cpu_online(cpu, false); + ipi_teardown(cpu); /* * OK - migrate IRQs away from this CPU @@ -422,6 +443,8 @@ asmlinkage void secondary_start_kernel(void) notify_cpu_starting(cpu); + ipi_setup(cpu); + calibrate_delay(); smp_store_cpu_info(cpu); @@ -500,14 +523,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } } -static void (*__smp_cross_call)(const struct cpumask *, unsigned int); - -void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) -{ - if (!__smp_cross_call) - __smp_cross_call = fn; -} - static const char *ipi_types[NR_IPI] __tracepoint_string = { #define S(x,s) [x] = s S(IPI_WAKEUP, "CPU wakeup interrupts"), @@ -519,38 +534,28 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = { S(IPI_COMPLETION, "completion interrupts"), }; -static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) -{ - trace_ipi_raise_rcuidle(target, ipi_types[ipinr]); - __smp_cross_call(target, ipinr); -} +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); void show_ipi_list(struct seq_file *p, int prec) { unsigned int cpu, i; for (i = 0; i < NR_IPI; i++) { + unsigned int irq; + + if (!ipi_desc[i]) + continue; + + irq = irq_desc_get_irq(ipi_desc[i]); seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); for_each_online_cpu(cpu) - seq_printf(p, "%10u ", - __get_irq_stat(cpu, ipi_irqs[i])); + seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); seq_printf(p, " %s\n", ipi_types[i]); } } -u64 smp_irq_stat_cpu(unsigned int cpu) -{ - u64 sum = 0; - int i; - - for (i = 0; i < NR_IPI; i++) - sum += __get_irq_stat(cpu, ipi_irqs[i]); - - return sum; -} - void arch_send_call_function_ipi_mask(const struct cpumask *mask) { smp_cross_call(mask, IPI_CALL_FUNC); @@ -627,15 +632,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) handle_IPI(ipinr, regs); } -void handle_IPI(int ipinr, struct pt_regs *regs) +static void do_handle_IPI(int ipinr) { unsigned int cpu = smp_processor_id(); - struct pt_regs *old_regs = set_irq_regs(regs); - if ((unsigned)ipinr < NR_IPI) { + if ((unsigned)ipinr < NR_IPI) trace_ipi_entry_rcuidle(ipi_types[ipinr]); - __inc_irq_stat(cpu, ipi_irqs[ipinr]); - } switch (ipinr) { case IPI_WAKEUP: @@ -643,9 +645,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs) #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: - irq_enter(); tick_receive_broadcast(); - irq_exit(); break; #endif @@ -654,36 +654,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; case IPI_CALL_FUNC: - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); break; case IPI_CPU_STOP: - irq_enter(); ipi_cpu_stop(cpu); - irq_exit(); break; #ifdef CONFIG_IRQ_WORK case IPI_IRQ_WORK: - irq_enter(); irq_work_run(); - irq_exit(); break; #endif case IPI_COMPLETION: - irq_enter(); ipi_complete(cpu); - irq_exit(); break; case IPI_CPU_BACKTRACE: printk_nmi_enter(); - irq_enter(); - nmi_cpu_backtrace(regs); - irq_exit(); + nmi_cpu_backtrace(get_irq_regs()); printk_nmi_exit(); break; @@ -695,9 +685,67 @@ void handle_IPI(int ipinr, struct pt_regs *regs) if ((unsigned)ipinr < NR_IPI) trace_ipi_exit_rcuidle(ipi_types[ipinr]); +} + +/* Legacy version, should go away once all irqchips have been converted */ +void handle_IPI(int ipinr, struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + irq_enter(); + do_handle_IPI(ipinr); + irq_exit(); + set_irq_regs(old_regs); } +static irqreturn_t ipi_handler(int irq, void *data) +{ + do_handle_IPI(irq - ipi_irq_base); + return IRQ_HANDLED; +} + +static 
void smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + trace_ipi_raise_rcuidle(target, ipi_types[ipinr]); + __ipi_send_mask(ipi_desc[ipinr], target); +} + +static void ipi_setup(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + enable_percpu_irq(ipi_irq_base + i, 0); +} + +void __init set_smp_ipi_range(int ipi_base, int n) +{ + int i; + + WARN_ON(n < MAX_IPI); + nr_ipi = min(n, MAX_IPI); + + for (i = 0; i < nr_ipi; i++) { + int err; + + err = request_percpu_irq(ipi_base + i, ipi_handler, + "IPI", &irq_stat); + WARN_ON(err); + + ipi_desc[i] = irq_to_desc(ipi_base + i); + irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); + } + + ipi_irq_base = ipi_base; + + /* Setup the boot CPU immediately */ + ipi_setup(smp_processor_id()); +} + void smp_send_reschedule(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); @@ -805,7 +853,7 @@ core_initcall(register_cpufreq_notifier); static void raise_nmi(cpumask_t *mask) { - __smp_cross_call(mask, IPI_CPU_BACKTRACE); + __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask); } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index b5adaf744630..ef0058de432b 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -178,15 +178,6 @@ static inline void update_cpu_capacity(unsigned int cpuid) {} #endif /* - * The current assumption is that we can power gate each core independently. - * This will be superseded by DT binding once available. - */ -const struct cpumask *cpu_corepower_mask(int cpu) -{ - return &cpu_topology[cpu].thread_sibling; -} - -/* * store_cpu_topology is called at boot when only one cpu is running * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, * which prevents simultaneous write access to cpu_topology array @@ -241,20 +232,6 @@ topology_populated: update_siblings_masks(cpuid); } -static inline int cpu_corepower_flags(void) -{ - return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; -} - -static struct sched_domain_topology_level arm_topology[] = { -#ifdef CONFIG_SCHED_MC - { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) }, - { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, -#endif - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; - /* * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array @@ -265,7 +242,4 @@ void __init init_cpu_topology(void) smp_wmb(); parse_dt_topology(); - - /* Set scheduler topology descriptor */ - set_sched_topology(arm_topology); } diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 6d2be994ae58..50136828f5b5 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -9,15 +9,13 @@ #include <linux/sizes.h> -#include <asm-generic/vmlinux.lds.h> +#include <asm/vmlinux.lds.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/mpu.h> #include <asm/page.h> -#include "vmlinux.lds.h" - OUTPUT_ARCH(arm) ENTRY(stext) @@ -152,6 +150,10 @@ SECTIONS _end = .; STABS_DEBUG + DWARF_DEBUG + ARM_DETAILS + + ARM_ASSERTS } /* diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 7f24bc08403e..5f4922e858d0 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -9,15 +9,13 @@ #else #include <linux/pgtable.h> -#include <asm-generic/vmlinux.lds.h> +#include 
<asm/vmlinux.lds.h> #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/mpu.h> #include <asm/page.h> -#include "vmlinux.lds.h" - OUTPUT_ARCH(arm) ENTRY(stext) @@ -151,6 +149,10 @@ SECTIONS _end = .; STABS_DEBUG + DWARF_DEBUG + ARM_DETAILS + + ARM_ASSERTS } #ifdef CONFIG_STRICT_KERNEL_RWX diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S index 184d97254a7a..1ca6aadd649c 100644 --- a/arch/arm/lib/csumpartialcopy.S +++ b/arch/arm/lib/csumpartialcopy.S @@ -9,8 +9,8 @@ .text -/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum) - * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum +/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len) + * Params : r0 = src, r1 = dst, r2 = len * Returns : r0 = new checksum */ diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S index 0b706a39a677..0fd5c10e90a7 100644 --- a/arch/arm/lib/csumpartialcopygeneric.S +++ b/arch/arm/lib/csumpartialcopygeneric.S @@ -86,6 +86,7 @@ sum .req r3 FN_ENTRY save_regs + mov sum, #-1 cmp len, #8 @ Ensure that we have at least blo .Lless8 @ 8 bytes to copy. diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 6bd3a93eaa3c..6928781e6bee 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -62,9 +62,9 @@ /* * unsigned int - * csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr) - * r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr - * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT + * csum_partial_copy_from_user(const char *src, char *dst, int len) + * r0 = src, r1 = dst, r2 = len + * Returns : r0 = checksum or 0 */ #define FN_ENTRY ENTRY(csum_partial_copy_from_user) @@ -73,25 +73,11 @@ #include "csumpartialcopygeneric.S" /* - * FIXME: minor buglet here - * We don't return the checksum for the data present in the buffer. To do - * so properly, we would have to add in whatever registers were loaded before - * the fault, which, with the current asm above is not predictable. + * We report fault by returning 0 csum - impossible in normal case, since + * we start with 0xffffffff for initial sum. 
*/ .pushsection .text.fixup,"ax" .align 4 -9001: mov r4, #-EFAULT -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - ldr r5, [sp, #9*4] @ *err_ptr -#else - ldr r5, [sp, #8*4] @ *err_ptr -#endif - str r4, [r5] - ldmia sp, {r1, r2} @ retrieve dst, len - add r2, r2, r1 - mov r0, #0 @ zero the buffer -9002: teq r2, r1 - strbne r0, [r1], #1 - bne 9002b +9001: mov r0, #0 load_regs .popsection diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index 1076886938b6..a20ba12d876c 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -306,7 +306,7 @@ static struct davinci_nand_pdata da830_evm_nand_pdata = { .core_chipsel = 1, .parts = da830_evm_nand_partitions, .nr_parts = ARRAY_SIZE(da830_evm_nand_partitions), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .ecc_bits = 4, .bbt_options = NAND_BBT_USE_FLASH, .bbt_td = &da830_evm_nand_bbt_main_descr, diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 6751292e5f8f..428012687a80 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -239,7 +239,7 @@ static struct davinci_nand_pdata da850_evm_nandflash_data = { .core_chipsel = 1, .parts = da850_evm_nandflash_partition, .nr_parts = ARRAY_SIZE(da850_evm_nandflash_partition), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .ecc_bits = 4, .bbt_options = NAND_BBT_USE_FLASH, .timing = &da850_evm_nandflash_timing, diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 5113273fda69..3c5a9e3c128a 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -82,7 +82,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .mask_chipsel = BIT(14), .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .bbt_options = NAND_BBT_USE_FLASH, .ecc_bits = 4, }; diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index b9e9950dd300..e475b2113e70 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c @@ -76,7 +76,8 @@ static struct davinci_nand_pdata davinci_nand_data = { .mask_chipsel = BIT(14), .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), - .ecc_mode = NAND_ECC_HW_SYNDROME, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, + .ecc_placement = NAND_ECC_PLACEMENT_INTERLEAVED, .ecc_bits = 4, .bbt_options = NAND_BBT_USE_FLASH, }; diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 2328b15ac067..bdf31eb77620 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -146,7 +146,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .mask_chipsel = BIT(14), .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .bbt_options = NAND_BBT_USE_FLASH, .ecc_bits = 4, }; diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index a5d3708fedf6..bcb3c4070945 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -162,7 +162,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = { .core_chipsel = 0, 
.parts = davinci_evm_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, .timing = &davinci_evm_nandflash_timing, diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index dd7d60f4139a..8319a6067a68 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -91,7 +91,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .mask_ale = 0x40000, .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .ecc_bits = 1, .options = 0, }; diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 3382b93d9a2a..5205008c8061 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -432,7 +432,7 @@ static struct davinci_nand_pdata mityomapl138_nandflash_data = { .core_chipsel = 1, .parts = mityomapl138_nandflash_partition, .nr_parts = ARRAY_SIZE(mityomapl138_nandflash_partition), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .bbt_options = NAND_BBT_USE_FLASH, .options = NAND_BUSWIDTH_16, .ecc_bits = 1, /* 4 bit mode is not supported with 16 bit NAND */ diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 6cf46bbc7e1d..b4843f68bb57 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -90,7 +90,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = { .core_chipsel = 0, .parts = davinci_ntosd2_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, }; diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index 6c79039002c9..88df8011a4e6 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -206,7 +206,7 @@ static struct davinci_nand_pdata omapl138_hawk_nandflash_data = { .core_chipsel = 1, .parts = omapl138_hawk_nandflash_partition, .nr_parts = ARRAY_SIZE(omapl138_hawk_nandflash_partition), - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, .ecc_bits = 4, .bbt_options = NAND_BBT_USE_FLASH, .options = NAND_BUSWIDTH_16, diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index feb206bdf6e1..bb368938fc49 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -10,7 +10,7 @@ #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/clkdev.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/dmaengine.h> #include <linux/init.h> #include <linux/io.h> @@ -884,6 +884,7 @@ early_param("rproc_mem", early_rproc_mem); void __init da8xx_rproc_reserve_cma(void) { + struct cma *cma; int ret; if (!rproc_base || !rproc_size) { @@ -897,13 +898,16 @@ void __init da8xx_rproc_reserve_cma(void) pr_info("%s: reserving 0x%lx @ 0x%lx...\n", __func__, rproc_size, (unsigned long)rproc_base); - ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0); - if (ret) - pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret); - else - 
rproc_mem_inited = true; + ret = dma_contiguous_reserve_area(rproc_size, rproc_base, 0, &cma, + true); + if (ret) { + pr_err("%s: dma_contiguous_reserve_area failed %d\n", + __func__, ret); + return; + } + da8xx_dsp.dev.cma_area = cma; + rproc_mem_inited = true; } - #else void __init da8xx_rproc_reserve_cma(void) diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 56bf29523c65..db607955a7e4 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -5,7 +5,7 @@ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/clocksource.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/input.h> #include <linux/io.h> #include <linux/irqchip.h> diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index 3da4c0920198..a329e50928b6 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c @@ -16,7 +16,7 @@ #include <linux/input.h> #include <linux/gpio.h> #include <linux/delay.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/leds.h> #include <linux/platform_data/asoc-mx27vis.h> #include <sound/tlv320aic32x4.h> diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index 96845a4eaf57..7f780ad2d459 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -4,7 +4,7 @@ */ #include <linux/delay.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/gfp.h> #include <linux/gpio.h> #include <linux/init.h> diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 184262d660ba..000f672a94c9 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -29,6 +29,7 @@ #include <linux/sched_clock.h> #include <linux/irqchip/irq-ixp4xx.h> #include <linux/platform_data/timer-ixp4xx.h> +#include <linux/dma-map-ops.h> #include <mach/udc.h> #include <mach/hardware.h> #include <mach/io.h> diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index 638808c4e122..09a65c2dfd73 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -8,6 +8,7 @@ */ #include <linux/io.h> #include <linux/of.h> +#include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/of_platform.h> #include <linux/of_address.h> @@ -24,8 +25,7 @@ #include "keystone.h" -static unsigned long keystone_dma_pfn_offset __read_mostly; - +#ifdef CONFIG_ARM_LPAE static int keystone_platform_notifier(struct notifier_block *nb, unsigned long event, void *data) { @@ -38,9 +38,12 @@ static int keystone_platform_notifier(struct notifier_block *nb, return NOTIFY_BAD; if (!dev->of_node) { - dev->dma_pfn_offset = keystone_dma_pfn_offset; - dev_err(dev, "set dma_pfn_offset%08lx\n", - dev->dma_pfn_offset); + int ret = dma_direct_set_offset(dev, KEYSTONE_HIGH_PHYS_START, + KEYSTONE_LOW_PHYS_START, + KEYSTONE_HIGH_PHYS_SIZE); + dev_err(dev, "set dma_offset%08llx%s\n", + KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START, + ret ? 
" failed" : ""); } return NOTIFY_OK; } @@ -48,14 +51,14 @@ static int keystone_platform_notifier(struct notifier_block *nb, static struct notifier_block platform_nb = { .notifier_call = keystone_platform_notifier, }; +#endif /* CONFIG_ARM_LPAE */ static void __init keystone_init(void) { - if (PHYS_OFFSET >= KEYSTONE_HIGH_PHYS_START) { - keystone_dma_pfn_offset = PFN_DOWN(KEYSTONE_HIGH_PHYS_START - - KEYSTONE_LOW_PHYS_START); +#ifdef CONFIG_ARM_LPAE + if (PHYS_OFFSET >= KEYSTONE_HIGH_PHYS_START) bus_register_notifier(&platform_bus_type, &platform_nb); - } +#endif keystone_pm_runtime_init(); } diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 8f8748a0c84f..49e3c8d20c2f 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -25,7 +25,7 @@ #include <linux/of_address.h> #include <linux/io.h> #include <linux/smp.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/mbus.h> diff --git a/arch/arm/mach-omap1/include/mach/memory.h b/arch/arm/mach-omap1/include/mach/memory.h index 1142560e0078..36bc0000cb6a 100644 --- a/arch/arm/mach-omap1/include/mach/memory.h +++ b/arch/arm/mach-omap1/include/mach/memory.h @@ -14,42 +14,11 @@ * OMAP-1510 bus address is translated into a Local Bus address if the * OMAP bus type is lbus. We do the address translation based on the * device overriding the defaults used in the dma-mapping API. - * Note that the is_lbus_device() test is not very efficient on 1510 - * because of the strncmp(). */ -#if defined(CONFIG_ARCH_OMAP15XX) && !defined(__ASSEMBLER__) /* * OMAP-1510 Local Bus address offset */ #define OMAP1510_LB_OFFSET UL(0x30000000) -#define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET) -#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET) -#define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0)) - -#define __arch_pfn_to_dma(dev, pfn) \ - ({ dma_addr_t __dma = __pfn_to_phys(pfn); \ - if (is_lbus_device(dev)) \ - __dma = __dma - PHYS_OFFSET + OMAP1510_LB_OFFSET; \ - __dma; }) - -#define __arch_dma_to_pfn(dev, addr) \ - ({ dma_addr_t __dma = addr; \ - if (is_lbus_device(dev)) \ - __dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \ - __phys_to_pfn(__dma); \ - }) - -#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \ - lbus_to_virt(addr) : \ - __phys_to_virt(addr)); }) - -#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \ - (dma_addr_t) (is_lbus_device(dev) ? 
\ - virt_to_lbus(__addr) : \ - __virt_to_phys(__addr)); }) - -#endif /* CONFIG_ARCH_OMAP15XX */ - #endif diff --git a/arch/arm/mach-omap1/usb.c b/arch/arm/mach-omap1/usb.c index d8e9bbda8f7b..ba8566204ea9 100644 --- a/arch/arm/mach-omap1/usb.c +++ b/arch/arm/mach-omap1/usb.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> +#include <linux/dma-mapping.h> #include <linux/io.h> #include <asm/irq.h> @@ -542,6 +543,25 @@ bad: /* ULPD_APLL_CTRL */ #define APLL_NDPLL_SWITCH (1 << 0) +static int omap_1510_usb_ohci_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct device *dev = data; + + if (event != BUS_NOTIFY_ADD_DEVICE) + return NOTIFY_DONE; + + if (strncmp(dev_name(dev), "ohci", 4) == 0 && + dma_direct_set_offset(dev, PHYS_OFFSET, OMAP1510_LB_OFFSET, + (u64)-1)) + WARN_ONCE(1, "failed to set DMA offset\n"); + return NOTIFY_OK; +} + +static struct notifier_block omap_1510_usb_ohci_nb = { + .notifier_call = omap_1510_usb_ohci_notifier, +}; + static void __init omap_1510_usb_init(struct omap_usb_config *config) { unsigned int val; @@ -600,6 +620,8 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config) if (config->register_host) { int status; + bus_register_notifier(&platform_bus_type, + &omap_1510_usb_ohci_nb); ohci_device.dev.platform_data = config; status = platform_device_register(&ohci_device); if (status) diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 3d2c108e911e..431709725d02 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -369,6 +369,15 @@ static struct pxaficp_platform_data tosa_ficp_platform_data = { /* * Tosa AC IN */ +static struct gpiod_lookup_table tosa_power_gpiod_table = { + .dev_id = "gpio-charger", + .table = { + GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_AC_IN, + NULL, GPIO_ACTIVE_LOW), + { }, + }, +}; + static char *tosa_ac_supplied_to[] = { "main-battery", "backup-battery", @@ -378,8 +387,6 @@ static char *tosa_ac_supplied_to[] = { static struct gpio_charger_platform_data tosa_power_data = { .name = "charger", .type = POWER_SUPPLY_TYPE_MAINS, - .gpio = TOSA_GPIO_AC_IN, - .gpio_active_low = 1, .supplied_to = tosa_ac_supplied_to, .num_supplicants = ARRAY_SIZE(tosa_ac_supplied_to), }; @@ -951,6 +958,7 @@ static void __init tosa_init(void) clk_add_alias("CLK_CK3P6MI", tc6393xb_device.name, "GPIO11_CLK", NULL); gpiod_add_lookup_table(&tosa_udc_gpiod_table); + gpiod_add_lookup_table(&tosa_power_gpiod_table); platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/arch/arm/mach-s3c24xx/common-smdk.c b/arch/arm/mach-s3c24xx/common-smdk.c index 75064dfaceb1..121646ad1bb1 100644 --- a/arch/arm/mach-s3c24xx/common-smdk.c +++ b/arch/arm/mach-s3c24xx/common-smdk.c @@ -191,7 +191,7 @@ static struct s3c2410_platform_nand smdk_nand_info = { .twrph1 = 20, .nr_sets = ARRAY_SIZE(smdk_nand_sets), .sets = smdk_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; /* devices we initialise */ diff --git a/arch/arm/mach-s3c24xx/mach-anubis.c b/arch/arm/mach-s3c24xx/mach-anubis.c index 072966dcad78..28326241e360 100644 --- a/arch/arm/mach-s3c24xx/mach-anubis.c +++ b/arch/arm/mach-s3c24xx/mach-anubis.c @@ -218,7 +218,7 @@ static struct s3c2410_platform_nand __initdata anubis_nand_info = { .nr_sets = ARRAY_SIZE(anubis_nand_sets), .sets = anubis_nand_sets, .select_chip = anubis_nand_select, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; /* IDE channels */ diff --git 
a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c index 58c5ef3cf1d7..04dedebdb57c 100644 --- a/arch/arm/mach-s3c24xx/mach-at2440evb.c +++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c @@ -109,7 +109,7 @@ static struct s3c2410_platform_nand __initdata at2440evb_nand_info = { .twrph1 = 40, .nr_sets = ARRAY_SIZE(at2440evb_nand_sets), .sets = at2440evb_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; /* DM9000AEP 10/100 ethernet controller */ diff --git a/arch/arm/mach-s3c24xx/mach-bast.c b/arch/arm/mach-s3c24xx/mach-bast.c index a7c3955ae8f6..6465eab0ab3a 100644 --- a/arch/arm/mach-s3c24xx/mach-bast.c +++ b/arch/arm/mach-s3c24xx/mach-bast.c @@ -294,7 +294,7 @@ static struct s3c2410_platform_nand __initdata bast_nand_info = { .nr_sets = ARRAY_SIZE(bast_nand_sets), .sets = bast_nand_sets, .select_chip = bast_nand_select, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; /* DM9000 */ diff --git a/arch/arm/mach-s3c24xx/mach-gta02.c b/arch/arm/mach-s3c24xx/mach-gta02.c index 594901f3b8e5..732748170751 100644 --- a/arch/arm/mach-s3c24xx/mach-gta02.c +++ b/arch/arm/mach-s3c24xx/mach-gta02.c @@ -15,6 +15,7 @@ #include <linux/delay.h> #include <linux/timer.h> #include <linux/init.h> +#include <linux/gpio/machine.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/workqueue.h> @@ -416,7 +417,7 @@ static struct s3c2410_platform_nand __initdata gta02_nand_info = { .twrph1 = 15, .nr_sets = ARRAY_SIZE(gta02_nand_sets), .sets = gta02_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; @@ -474,6 +475,20 @@ static struct platform_device gta02_buttons_device = { }, }; +static struct gpiod_lookup_table gta02_audio_gpio_table = { + .dev_id = "neo1973-audio", + .table = { + GPIO_LOOKUP("GPIOJ", 2, "amp-shut", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("GPIOJ", 1, "hp", GPIO_ACTIVE_HIGH), + { }, + }, +}; + +static struct platform_device gta02_audio = { + .name = "neo1973-audio", + .id = -1, +}; + static void __init gta02_map_io(void) { s3c24xx_init_io(gta02_iodesc, ARRAY_SIZE(gta02_iodesc)); @@ -498,6 +513,7 @@ static struct platform_device *gta02_devices[] __initdata = { >a02_buttons_device, &s3c_device_adc, &s3c_device_ts, + >a02_audio, }; static void gta02_poweroff(void) @@ -524,6 +540,7 @@ static void __init gta02_machine_init(void) i2c_register_board_info(0, gta02_i2c_devs, ARRAY_SIZE(gta02_i2c_devs)); + gpiod_add_lookup_table(>a02_audio_gpio_table); platform_add_devices(gta02_devices, ARRAY_SIZE(gta02_devices)); pm_power_off = gta02_poweroff; diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c index f4710052843a..ecb84029e15c 100644 --- a/arch/arm/mach-s3c24xx/mach-h1940.c +++ b/arch/arm/mach-s3c24xx/mach-h1940.c @@ -475,6 +475,22 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = { }, }; +static struct gpiod_lookup_table h1940_audio_gpio_table = { + .dev_id = "h1940-audio", + .table = { + GPIO_LOOKUP("H1940_LATCH", + H1940_LATCH_AUDIO_POWER - H1940_LATCH_GPIO(0), + "speaker-power", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("GPIOG", 4, "hp", GPIO_ACTIVE_HIGH), + { }, + }, +}; + +static struct platform_device h1940_audio = { + .name = "h1940-audio", + .id = -1, +}; + static struct pwm_lookup h1940_pwm_lookup[] = { PWM_LOOKUP("samsung-pwm", 0, "pwm-backlight", NULL, 36296, PWM_POLARITY_NORMAL), @@ -651,6 +667,7 @@ static struct platform_device *h1940_devices[] __initdata = { &s3c_device_ts, &power_supply, &h1940_battery, + &h1940_audio, }; 
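
The audio lookup tables added above let the ASoC machine drivers request their GPIOs by connection ID instead of hard-coded GPIO numbers. A minimal sketch of the consumer side, assuming a probe routine with a platform device `pdev` (the con_ids "hp" and "speaker-power" come from the h1940 table above; everything else here is illustrative and not taken from this series):

	struct gpio_desc *hp, *spk;

	/* Resolved against the "h1940-audio" lookup table registered above */
	hp  = devm_gpiod_get(&pdev->dev, "hp", GPIOD_IN);
	spk = devm_gpiod_get(&pdev->dev, "speaker-power", GPIOD_OUT_LOW);
	if (IS_ERR(hp) || IS_ERR(spk))
		return -ENODEV;	/* simplified error handling for this sketch */
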
static void __init h1940_map_io(void) @@ -690,6 +707,7 @@ static void __init h1940_init(void) s3c24xx_fb_set_platdata(&h1940_fb_info); gpiod_add_lookup_table(&h1940_mmc_gpio_table); + gpiod_add_lookup_table(&h1940_audio_gpio_table); s3c24xx_mci_set_platdata(&h1940_mmc_cfg); s3c24xx_udc_set_platdata(&h1940_udc_cfg); s3c24xx_ts_set_platdata(&h1940_ts_cfg); diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c index 885e8f12e4b9..8233dcff19e7 100644 --- a/arch/arm/mach-s3c24xx/mach-jive.c +++ b/arch/arm/mach-s3c24xx/mach-jive.c @@ -228,7 +228,7 @@ static struct s3c2410_platform_nand __initdata jive_nand_info = { .twrph1 = 40, .sets = jive_nand_sets, .nr_sets = ARRAY_SIZE(jive_nand_sets), - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; static int __init jive_mtdset(char *options) diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c index 235749448311..057dcbaf1b22 100644 --- a/arch/arm/mach-s3c24xx/mach-mini2440.c +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c @@ -296,7 +296,7 @@ static struct s3c2410_platform_nand mini2440_nand_info __initdata = { .nr_sets = ARRAY_SIZE(mini2440_nand_sets), .sets = mini2440_nand_sets, .ignore_unset_ecc = 1, - .ecc_mode = NAND_ECC_HW, + .engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST, }; /* DM9000AEP 10/100 ethernet controller */ diff --git a/arch/arm/mach-s3c24xx/mach-osiris.c b/arch/arm/mach-s3c24xx/mach-osiris.c index ee3630cb236a..157448827f61 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris.c +++ b/arch/arm/mach-s3c24xx/mach-osiris.c @@ -234,7 +234,7 @@ static struct s3c2410_platform_nand __initdata osiris_nand_info = { .nr_sets = ARRAY_SIZE(osiris_nand_sets), .sets = osiris_nand_sets, .select_chip = osiris_nand_select, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; /* PCMCIA control and configuration */ diff --git a/arch/arm/mach-s3c24xx/mach-qt2410.c b/arch/arm/mach-s3c24xx/mach-qt2410.c index ff9e3197309b..f3131d94e90b 100644 --- a/arch/arm/mach-s3c24xx/mach-qt2410.c +++ b/arch/arm/mach-s3c24xx/mach-qt2410.c @@ -287,7 +287,7 @@ static struct s3c2410_platform_nand __initdata qt2410_nand_info = { .twrph1 = 20, .nr_sets = ARRAY_SIZE(qt2410_nand_sets), .sets = qt2410_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; /* UDC */ diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c index fde98b175c75..3645b9c838d9 100644 --- a/arch/arm/mach-s3c24xx/mach-rx1950.c +++ b/arch/arm/mach-s3c24xx/mach-rx1950.c @@ -620,7 +620,7 @@ static struct s3c2410_platform_nand rx1950_nand_info = { .twrph1 = 15, .nr_sets = ARRAY_SIZE(rx1950_nand_sets), .sets = rx1950_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = { @@ -728,6 +728,20 @@ static struct i2c_board_info rx1950_i2c_devices[] = { }, }; +static struct gpiod_lookup_table rx1950_audio_gpio_table = { + .dev_id = "rx1950-audio", + .table = { + GPIO_LOOKUP("GPIOG", 12, "hp-gpio", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("GPIOA", 1, "speaker-power", GPIO_ACTIVE_HIGH), + { }, + }, +}; + +static struct platform_device rx1950_audio = { + .name = "rx1950-audio", + .id = -1, +}; + static struct platform_device *rx1950_devices[] __initdata = { &s3c2410_device_dclk, &s3c_device_lcd, @@ -746,6 +760,7 @@ static struct platform_device *rx1950_devices[] __initdata = { &power_supply, &rx1950_battery, &rx1950_leds, + &rx1950_audio, }; static void __init 
rx1950_map_io(void) @@ -813,6 +828,7 @@ static void __init rx1950_init_machine(void) gpio_direction_output(S3C2410_GPJ(6), 0); pwm_add_table(rx1950_pwm_lookup, ARRAY_SIZE(rx1950_pwm_lookup)); + gpiod_add_lookup_table(&rx1950_audio_gpio_table); platform_add_devices(rx1950_devices, ARRAY_SIZE(rx1950_devices)); i2c_register_board_info(0, rx1950_i2c_devices, diff --git a/arch/arm/mach-s3c24xx/mach-rx3715.c b/arch/arm/mach-s3c24xx/mach-rx3715.c index 995f1ff34a1b..017010d67e01 100644 --- a/arch/arm/mach-s3c24xx/mach-rx3715.c +++ b/arch/arm/mach-s3c24xx/mach-rx3715.c @@ -158,7 +158,7 @@ static struct s3c2410_platform_nand __initdata rx3715_nand_info = { .twrph1 = 15, .nr_sets = ARRAY_SIZE(rx3715_nand_sets), .sets = rx3715_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; static struct platform_device *rx3715_devices[] __initdata = { diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c index d76b28b65e65..c5fa215a527e 100644 --- a/arch/arm/mach-s3c24xx/mach-vstms.c +++ b/arch/arm/mach-s3c24xx/mach-vstms.c @@ -112,7 +112,7 @@ static struct s3c2410_platform_nand __initdata vstms_nand_info = { .twrph1 = 20, .nr_sets = ARRAY_SIZE(vstms_nand_sets), .sets = vstms_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; static struct platform_device *vstms_devices[] __initdata = { diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c index e7080215c624..0d9acaf91701 100644 --- a/arch/arm/mach-s3c64xx/mach-hmt.c +++ b/arch/arm/mach-s3c64xx/mach-hmt.c @@ -199,7 +199,7 @@ static struct s3c2410_platform_nand hmt_nand_info = { .twrph1 = 40, .nr_sets = ARRAY_SIZE(hmt_nand_sets), .sets = hmt_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; static struct gpio_led hmt_leds[] = { diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c index 0dd36ae49e6a..6fbb57878746 100644 --- a/arch/arm/mach-s3c64xx/mach-mini6410.c +++ b/arch/arm/mach-s3c64xx/mach-mini6410.c @@ -136,7 +136,7 @@ static struct s3c2410_platform_nand mini6410_nand_info = { .twrph1 = 40, .nr_sets = ARRAY_SIZE(mini6410_nand_sets), .sets = mini6410_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = { diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c index 0ff88b6859c4..1e98e530a6aa 100644 --- a/arch/arm/mach-s3c64xx/mach-real6410.c +++ b/arch/arm/mach-s3c64xx/mach-real6410.c @@ -188,7 +188,7 @@ static struct s3c2410_platform_nand real6410_nand_info = { .twrph1 = 40, .nr_sets = ARRAY_SIZE(real6410_nand_sets), .sets = real6410_nand_sets, - .ecc_mode = NAND_ECC_SOFT, + .engine_type = NAND_ECC_ENGINE_TYPE_SOFT, }; static struct platform_device *real6410_devices[] __initdata = { diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index 3cc2b71e16f0..bd3a52fd09ce 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c @@ -30,6 +30,7 @@ #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/gpio.h> +#include <linux/gpio/machine.h> #include <linux/power/gpio-charger.h> #include <video/sa1100fb.h> @@ -131,16 +132,23 @@ static struct irda_platform_data collie_ir_data = { /* * Collie AC IN */ +static struct gpiod_lookup_table collie_power_gpiod_table = { + .dev_id = "gpio-charger", + .table = { + GPIO_LOOKUP("gpio", COLLIE_GPIO_AC_IN, + NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; 
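
With the lookup table above, the AC-in line reaches the gpio-charger driver as a GPIO descriptor: the entry uses a NULL con_id and carries the polarity in the table (GPIO_ACTIVE_HIGH here, GPIO_ACTIVE_LOW on Tosa) instead of the old .gpio / .gpio_active_low platform data. A hedged sketch of how a driver picks up such a line (illustrative only; `dev` and `online` are placeholders, not code from this series):

	struct gpio_desc *ac_in;

	ac_in = devm_gpiod_get(dev, NULL, GPIOD_IN);	/* NULL matches the anonymous lookup entry */
	if (IS_ERR(ac_in))
		return PTR_ERR(ac_in);

	/* The descriptor already applies the table's active-low/high flag */
	online = gpiod_get_value_cansleep(ac_in);
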
+ static char *collie_ac_supplied_to[] = { "main-battery", "backup-battery", }; - static struct gpio_charger_platform_data collie_power_data = { .name = "charger", .type = POWER_SUPPLY_TYPE_MAINS, - .gpio = COLLIE_GPIO_AC_IN, .supplied_to = collie_ac_supplied_to, .num_supplicants = ARRAY_SIZE(collie_ac_supplied_to), }; @@ -386,6 +394,8 @@ static void __init collie_init(void) platform_scoop_config = &collie_pcmcia_config; + gpiod_add_lookup_table(&collie_power_gpiod_table); + ret = platform_add_devices(devices, ARRAY_SIZE(devices)); if (ret) { printk(KERN_WARNING "collie: Unable to register LoCoMo device\n"); diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c index c42ff8c314c8..e00f5b3b9293 100644 --- a/arch/arm/mach-shmobile/setup-rcar-gen2.c +++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c @@ -9,7 +9,7 @@ #include <linux/clocksource.h> #include <linux/device.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/memblock.h> diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 12c26eb88afb..43d91bfd2360 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np, ret = of_property_read_u32(np, "prefetch-data", &val); if (ret == 0) { - if (val) + if (val) { prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH; - else + *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH; + } else { prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; + *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; + } + *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; } else if (ret != -EINVAL) { pr_err("L2C-310 OF prefetch-data property value is missing\n"); } ret = of_property_read_u32(np, "prefetch-instr", &val); if (ret == 0) { - if (val) + if (val) { prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH; - else + *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH; + } else { prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; + *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; + } + *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; } else if (ret != -EINVAL) { pr_err("L2C-310 OF prefetch-instr property value is missing\n"); } diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index 287ef898a55e..6bfd2b884505 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -8,6 +8,7 @@ #include <linux/export.h> #include <linux/mm.h> #include <linux/dma-direct.h> +#include <linux/dma-map-ops.h> #include <linux/scatterlist.h> #include <asm/cachetype.h> @@ -176,6 +177,8 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist const struct dma_map_ops arm_nommu_dma_ops = { .alloc = arm_nommu_dma_alloc, .free = arm_nommu_dma_free, + .alloc_pages = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, .mmap = arm_nommu_dma_mmap, .map_page = arm_nommu_dma_map_page, .unmap_page = arm_nommu_dma_unmap_page, diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 8a8949174b1c..c4b8df2ad328 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -15,9 +15,7 @@ #include <linux/init.h> #include <linux/device.h> #include <linux/dma-direct.h> -#include <linux/dma-mapping.h> -#include <linux/dma-noncoherent.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <linux/slab.h> @@ -35,7 +33,6 @@ #include <asm/dma-iommu.h> #include <asm/mach/map.h> 
#include <asm/system_info.h> -#include <asm/dma-contiguous.h> #include <xen/swiotlb-xen.h> #include "dma.h" @@ -199,6 +196,8 @@ static int arm_dma_supported(struct device *dev, u64 mask) const struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, + .alloc_pages = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, .mmap = arm_dma_mmap, .get_sgtable = arm_dma_get_sgtable, .map_page = arm_dma_map_page, @@ -226,6 +225,8 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, const struct dma_map_ops arm_coherent_dma_ops = { .alloc = arm_coherent_dma_alloc, .free = arm_coherent_dma_free, + .alloc_pages = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, .mmap = arm_coherent_dma_mmap, .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 000c1b48e973..d57112a276f5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -18,7 +18,7 @@ #include <linux/highmem.h> #include <linux/gfp.h> #include <linux/memblock.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/sizes.h> #include <linux/stop_machine.h> #include <linux/swiotlb.h> @@ -299,16 +299,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; + unsigned long start, end, prev_end = 0; + int i; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist @@ -336,8 +334,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. 
*/ - prev_end = ALIGN(memblock_region_memory_end_pfn(reg), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM @@ -347,61 +344,29 @@ static void __init free_unused_memmap(void) #endif } -#ifdef CONFIG_HIGHMEM -static inline void free_area_high(unsigned long pfn, unsigned long end) -{ - for (; pfn < end; pfn++) - free_highmem_page(pfn_to_page(pfn)); -} -#endif - static void __init free_highpages(void) { #ifdef CONFIG_HIGHMEM unsigned long max_low = max_low_pfn; - struct memblock_region *mem, *res; + phys_addr_t range_start, range_end; + u64 i; /* set highmem page free */ - for_each_memblock(memory, mem) { - unsigned long start = memblock_region_memory_base_pfn(mem); - unsigned long end = memblock_region_memory_end_pfn(mem); + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, + &range_start, &range_end, NULL) { + unsigned long start = PHYS_PFN(range_start); + unsigned long end = PHYS_PFN(range_end); /* Ignore complete lowmem entries */ if (end <= max_low) continue; - if (memblock_is_nomap(mem)) - continue; - /* Truncate partial highmem entries */ if (start < max_low) start = max_low; - /* Find and exclude any reserved regions */ - for_each_memblock(reserved, res) { - unsigned long res_start, res_end; - - res_start = memblock_region_reserved_base_pfn(res); - res_end = memblock_region_reserved_end_pfn(res); - - if (res_end < start) - continue; - if (res_start < start) - res_start = start; - if (res_start > end) - res_start = end; - if (res_end > end) - res_end = end; - if (res_start != start) - free_area_high(start, res_start); - start = res_end; - if (start == end) - break; - } - - /* And now free anything which remains */ - if (start < end) - free_area_high(start, end); + for (; start < end; start++) + free_highmem_page(pfn_to_page(start)); } #endif } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index c36f977b2ccb..ab69250a86bc 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -17,7 +17,6 @@ #include <asm/cp15.h> #include <asm/cputype.h> -#include <asm/sections.h> #include <asm/cachetype.h> #include <asm/fixmap.h> #include <asm/sections.h> @@ -1154,9 +1153,8 @@ phys_addr_t arm_lowmem_limit __initdata = 0; void __init adjust_lowmem_bounds(void) { - phys_addr_t memblock_limit = 0; - u64 vmalloc_limit; - struct memblock_region *reg; + phys_addr_t block_start, block_end, memblock_limit = 0; + u64 vmalloc_limit, i; phys_addr_t lowmem_limit = 0; /* @@ -1172,26 +1170,18 @@ void __init adjust_lowmem_bounds(void) * The first usable region must be PMD aligned. 
Mark its start * as MEMBLOCK_NOMAP if it isn't */ - for_each_memblock(memory, reg) { - if (!memblock_is_nomap(reg)) { - if (!IS_ALIGNED(reg->base, PMD_SIZE)) { - phys_addr_t len; + for_each_mem_range(i, &block_start, &block_end) { + if (!IS_ALIGNED(block_start, PMD_SIZE)) { + phys_addr_t len; - len = round_up(reg->base, PMD_SIZE) - reg->base; - memblock_mark_nomap(reg->base, len); - } - break; + len = round_up(block_start, PMD_SIZE) - block_start; + memblock_mark_nomap(block_start, len); } + break; } - for_each_memblock(memory, reg) { - phys_addr_t block_start = reg->base; - phys_addr_t block_end = reg->base + reg->size; - - if (memblock_is_nomap(reg)) - continue; - - if (reg->base < vmalloc_limit) { + for_each_mem_range(i, &block_start, &block_end) { + if (block_start < vmalloc_limit) { if (block_end > lowmem_limit) /* * Compare as u64 to ensure vmalloc_limit does @@ -1440,19 +1430,15 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { - struct memblock_region *reg; phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE); phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); + phys_addr_t start, end; + u64 i; /* Map all the lowmem memory banks. */ - for_each_memblock(memory, reg) { - phys_addr_t start = reg->base; - phys_addr_t end = start + reg->size; + for_each_mem_range(i, &start, &end) { struct map_desc map; - if (memblock_is_nomap(reg)) - continue; - if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c index 699fa2e88725..88950e41a3a9 100644 --- a/arch/arm/mm/pmsa-v7.c +++ b/arch/arm/mm/pmsa-v7.c @@ -231,12 +231,12 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size, void __init pmsav7_adjust_lowmem_bounds(void) { phys_addr_t specified_mem_size = 0, total_mem_size = 0; - struct memblock_region *reg; - bool first = true; phys_addr_t mem_start; phys_addr_t mem_end; + phys_addr_t reg_start, reg_end; unsigned int mem_max_regions; - int num, i; + int num; + u64 i; /* Free-up PMSAv7_PROBE_REGION */ mpu_min_region_order = __mpu_min_region_order(); @@ -262,20 +262,19 @@ void __init pmsav7_adjust_lowmem_bounds(void) mem_max_regions -= num; #endif - for_each_memblock(memory, reg) { - if (first) { + for_each_mem_range(i, ®_start, ®_end) { + if (i == 0) { phys_addr_t phys_offset = PHYS_OFFSET; /* * Initially only use memory continuous from * PHYS_OFFSET */ - if (reg->base != phys_offset) + if (reg_start != phys_offset) panic("First memory bank must be contiguous from PHYS_OFFSET"); - mem_start = reg->base; - mem_end = reg->base + reg->size; - specified_mem_size = reg->size; - first = false; + mem_start = reg_start; + mem_end = reg_end; + specified_mem_size = mem_end - mem_start; } else { /* * memblock auto merges contiguous blocks, remove @@ -283,8 +282,8 @@ void __init pmsav7_adjust_lowmem_bounds(void) * blocks separately while iterating) */ pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", - &mem_end, ®->base); - memblock_remove(reg->base, 0 - reg->base); + &mem_end, ®_start); + memblock_remove(reg_start, 0 - reg_start); break; } } diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c index 0d7d5fb59247..2de019f7503e 100644 --- a/arch/arm/mm/pmsa-v8.c +++ b/arch/arm/mm/pmsa-v8.c @@ -94,20 +94,19 @@ static __init bool is_region_fixed(int number) void __init pmsav8_adjust_lowmem_bounds(void) { phys_addr_t mem_end; - struct memblock_region *reg; - bool first = true; + phys_addr_t reg_start, reg_end; + u64 i; - 
for_each_memblock(memory, reg) { - if (first) { + for_each_mem_range(i, ®_start, ®_end) { + if (i == 0) { phys_addr_t phys_offset = PHYS_OFFSET; /* * Initially only use memory continuous from * PHYS_OFFSET */ - if (reg->base != phys_offset) + if (reg_start != phys_offset) panic("First memory bank must be contiguous from PHYS_OFFSET"); - mem_end = reg->base + reg->size; - first = false; + mem_end = reg_end; } else { /* * memblock auto merges contiguous blocks, remove @@ -115,8 +114,8 @@ void __init pmsav8_adjust_lowmem_bounds(void) * blocks separately while iterating) */ pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", - &mem_end, ®->base); - memblock_remove(reg->base, 0 - reg->base); + &mem_end, ®_start); + memblock_remove(reg_start, 0 - reg_start); break; } } diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index feefa2055eba..a9653117ca0d 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -413,87 +413,15 @@ void __naked __kprobes kretprobe_trampoline(void) /* Called from kretprobe_trampoline */ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * a return probe installed on them, and/or more than one return - * probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, + (void *)regs->ARM_fp); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr; + ri->fp = (void *)regs->ARM_fp; /* Replace the return addr with trampoline addr. */ regs->ARM_lr = (unsigned long)&kretprobe_trampoline; diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 171077cbf419..d056a548358e 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -453,3 +453,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index a6ab3689b2f4..60e901cd0de6 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -158,7 +158,8 @@ static int xen_starting_cpu(unsigned int cpu) BUG_ON(err); per_cpu(xen_vcpu, cpu) = vcpup; - xen_setup_runstate_info(cpu); + if (!xen_kernel_unmapped_at_usr()) + xen_setup_runstate_info(cpu); after_register_vcpu_info: enable_percpu_irq(xen_events_irq, 0); @@ -387,7 +388,8 @@ static int __init xen_guest_init(void) return -EINVAL; } - xen_time_setup_guest(); + if (!xen_kernel_unmapped_at_usr()) + xen_time_setup_guest(); if (xen_initial_domain()) pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 396797ffe2b1..467fa225c3d0 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/cpu.h> #include <linux/dma-direct.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/export.h> @@ -25,11 +25,12 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order) { - struct memblock_region *reg; + phys_addr_t base; gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM; + u64 i; - for_each_memblock(memory, reg) { - if (reg->base < (phys_addr_t)0xffffffff) { + for_each_mem_range(i, &base, NULL) { + if (base < (phys_addr_t)0xffffffff) { if (IS_ENABLED(CONFIG_ZONE_DMA32)) flags |= __GFP_DMA32; else diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6d232837cbee..893130ce1626 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -29,6 +29,7 @@ config ARM64 select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_SET_MEMORY + select ARCH_STACKWALK select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -106,6 +107,7 @@ config ARM64 select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP select GENERIC_IDLE_POLL_SETUP + select GENERIC_IRQ_IPI select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW @@ -211,12 +213,18 @@ config ARM64_PAGE_SHIFT default 14 if ARM64_16K_PAGES default 12 -config ARM64_CONT_SHIFT +config ARM64_CONT_PTE_SHIFT int default 5 if ARM64_64K_PAGES default 7 if ARM64_16K_PAGES default 4 +config ARM64_CONT_PMD_SHIFT + int + default 5 if ARM64_64K_PAGES + default 5 if ARM64_16K_PAGES + default 4 + config ARCH_MMAP_RND_BITS_MIN default 14 if ARM64_64K_PAGES default 16 if 
ARM64_16K_PAGES @@ -1033,19 +1041,6 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - config PARAVIRT bool "Enable paravirtualization code" help @@ -1165,32 +1160,6 @@ config UNMAP_KERNEL_AT_EL0 If unsure, say Y. -config HARDEN_BRANCH_PREDICTOR - bool "Harden the branch predictor against aliasing attacks" if EXPERT - default y - help - Speculation attacks against some high-performance processors rely on - being able to manipulate the branch predictor for a victim context by - executing aliasing branches in the attacker context. Such attacks - can be partially mitigated against by clearing internal branch - predictor state and limiting the prediction logic in some situations. - - This config option will take CPU-specific actions to harden the - branch predictor against aliasing attacks and may rely on specific - instruction sequences or control bits being set by the system - firmware. - - If unsure, say Y. - -config ARM64_SSBD - bool "Speculative Store Bypass Disable" if EXPERT - default y - help - This enables mitigation of the bypassing of previous stores - by speculative loads. - - If unsure, say Y. - config RODATA_FULL_DEFAULT_ENABLED bool "Apply r/o permissions of VM areas also to their linear aliases" default y @@ -1630,8 +1599,6 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 - # https://reviews.llvm.org/rGb8ae3fdfa579dbf366b1bb1cbfdbf8c51db7fa55 - depends on !CC_IS_CLANG || CLANG_VERSION >= 100001 depends on !(CC_IS_CLANG && GCOV_KERNEL) depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help @@ -1664,6 +1631,39 @@ config ARCH_RANDOM provides a high bandwidth, cryptographically secure hardware random number generator. +config ARM64_AS_HAS_MTE + # Initial support for MTE went in binutils 2.32.0, checked with + # ".arch armv8.5-a+memtag" below. However, this was incomplete + # as a late addition to the final architecture spec (LDGM/STGM) + # is only supported in the newer 2.32.x and 2.33 binutils + # versions, hence the extra "stgm" instruction check below. + def_bool $(as-instr,.arch armv8.5-a+memtag\nstgm xzr$(comma)[x0]) + +config ARM64_MTE + bool "Memory Tagging Extension support" + default y + depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI + select ARCH_USES_HIGH_VMA_FLAGS + help + Memory Tagging (part of the ARMv8.5 Extensions) provides + architectural support for run-time, always-on detection of + various classes of memory error to aid with software debugging + to eliminate vulnerabilities arising from memory-unsafe + languages. + + This option enables the support for the Memory Tagging + Extension at EL0 (i.e. for userspace). + + Selecting this option allows the feature to be detected at + runtime. 
Any secondary CPU not implementing this feature will + not be allowed a late bring-up. + + Userspace binaries that want to use this feature must + explicitly opt in. The mechanism for the userspace is + described in: + + Documentation/arm64/memory-tagging-extension.rst. + endmenu config ARM64_SVE @@ -1876,6 +1876,10 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION def_bool y depends on HUGETLB_PAGE && MIGRATION +config ARCH_ENABLE_THP_MIGRATION + def_bool y + depends on TRANSPARENT_HUGEPAGE + menu "Power management options" source "kernel/power/Kconfig" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 130569f90c54..f4717facf31e 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -11,7 +11,6 @@ # Copyright (C) 1995-2001 by Russell King LDFLAGS_vmlinux :=--no-undefined -X -CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) ifeq ($(CONFIG_RELOCATABLE), y) # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour @@ -29,6 +28,10 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419 endif endif +# We never want expected sections to be placed heuristically by the +# linker. All sections should be explicitly named in the linker script. +LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) + ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y) ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y) $(warning LSE atomics not supported by binutils) @@ -47,13 +50,16 @@ endif KBUILD_CFLAGS += -mgeneral-regs-only \ $(compat_vdso) $(cc_has_k_constraint) -KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-disable-warning, psabi) KBUILD_AFLAGS += $(compat_vdso) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) +# Avoid generating .eh_frame* sections. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables +KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables + ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) prepare: stack_protector_prepare stack_protector_prepare: prepare0 @@ -132,9 +138,6 @@ endif # Default value head-y := arch/arm64/kernel/head.o -# The byte offset of the kernel image in RAM from the start of RAM. 
-TEXT_OFFSET := 0x0 - ifeq ($(CONFIG_KASAN_SW_TAGS), y) KASAN_SHADOW_SCALE_SHIFT := 4 else @@ -145,8 +148,6 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) -export TEXT_OFFSET - core-y += arch/arm64/ libs-y := arch/arm64/lib/ $(libs-y) libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi index 250fc01de78d..24aab3ea3f52 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi @@ -795,8 +795,8 @@ reg = <0x27>; interrupt-parent = <&gpa1>; interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; - s3fwrn5,en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>; - s3fwrn5,fw-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>; + en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>; + wake-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts index c2dc1232f93f..1efb61cff454 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts @@ -199,6 +199,7 @@ &enetc_port0 { phy-handle = <&sgmii_phy0>; phy-connection-type = "sgmii"; + managed = "in-band-status"; status = "okay"; mdio { diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 2bbc69b4dc99..d5b6c0a1c54a 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -316,7 +316,7 @@ }; pcie_reset_pins: pcie-reset-pins { - groups = "pcie1"; + groups = "pcie1"; /* this actually controls "pcie1_reset" */ function = "gpio"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts index d174ad214857..9a11e5c60c26 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts @@ -143,6 +143,56 @@ mdio: mdio-bus { #address-cells = <1>; #size-cells = <0>; + + switch@0 { + compatible = "mediatek,mt7531"; + reg = <0>; + reset-gpios = <&pio 54 0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "wan"; + }; + + port@1 { + reg = <1>; + label = "lan0"; + }; + + port@2 { + reg = <2>; + label = "lan1"; + }; + + port@3 { + reg = <3>; + label = "lan2"; + }; + + port@4 { + reg = <4>; + label = "lan3"; + }; + + port@6 { + reg = <6>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "2500base-x"; + + fixed-link { + speed = <2500>; + full-duplex; + pause; + }; + }; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts index 0b4de627f96e..08ad0ffb24df 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -105,20 +105,71 @@ pinctrl-0 = <ð_pins>; status = "okay"; - gmac1: mac@1 { + gmac0: mac@0 { compatible = "mediatek,eth-mac"; - reg = <1>; - phy-handle = <&phy5>; + reg = <0>; + phy-mode = "2500base-x"; + + fixed-link { + speed = <2500>; + full-duplex; + pause; + }; }; mdio-bus { #address-cells = <1>; #size-cells = <0>; - phy5: ethernet-phy@5 { - reg = <5>; - phy-mode = "sgmii"; + switch@0 { + compatible = "mediatek,mt7531"; + reg = <0>; + reset-gpios = <&pio 54 0>; + + ports 
{ + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan0"; + }; + + port@1 { + reg = <1>; + label = "lan1"; + }; + + port@2 { + reg = <2>; + label = "lan2"; + }; + + port@3 { + reg = <3>; + label = "lan3"; + }; + + port@4 { + reg = <4>; + label = "wan"; + }; + + port@6 { + reg = <6>; + label = "cpu"; + ethernet = <&gmac0>; + phy-mode = "2500base-x"; + + fixed-link { + speed = <2500>; + full-duplex; + pause; + }; + }; + }; }; + }; }; diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index da1034867aaa..8536008e3e35 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -347,7 +347,7 @@ static int gcm_encrypt(struct aead_request *req) u8 buf[AES_BLOCK_SIZE]; u8 iv[AES_BLOCK_SIZE]; u64 dg[2] = {}; - u128 lengths; + be128 lengths; u8 *tag; int err; @@ -461,7 +461,7 @@ static int gcm_decrypt(struct aead_request *req) u8 buf[AES_BLOCK_SIZE]; u8 iv[AES_BLOCK_SIZE]; u64 dg[2] = {}; - u128 lengths; + be128 lengths; u8 *tag; int err; diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 565ef604ca04..c63b99211db3 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -25,6 +25,9 @@ struct sha1_ce_state { u32 finalize; }; +extern const u32 sha1_ce_offsetof_count; +extern const u32 sha1_ce_offsetof_finalize; + asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, int blocks); diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 9450d19b9e6e..5e956d7582a5 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -25,6 +25,9 @@ struct sha256_ce_state { u32 finalize; }; +extern const u32 sha256_ce_offsetof_count; +extern const u32 sha256_ce_offsetof_finalize; + asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, int blocks); diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 6647ae4f0231..880b9054d75c 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -153,7 +153,7 @@ static inline bool gic_prio_masking_enabled(void) static inline void gic_pmr_mask_irqs(void) { - BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF | + BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF | GIC_PRIO_PSR_I_SET)); BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON); /* @@ -162,6 +162,12 @@ static inline void gic_pmr_mask_irqs(void) * are applied to IRQ priorities */ BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON); + /* + * Same situation as above, but now we make sure that we can mask + * regular interrupts. 
+ */ + BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS | + GIC_PRIO_PSR_I_SET)); gic_write_pmr(GIC_PRIO_IRQOFF); } diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h index 44209f6146aa..ffb1a40d5475 100644 --- a/arch/arm64/include/asm/archrandom.h +++ b/arch/arm64/include/asm/archrandom.h @@ -79,10 +79,5 @@ arch_get_random_seed_long_early(unsigned long *v) } #define arch_get_random_seed_long_early arch_get_random_seed_long_early -#else - -static inline bool __arm64_rndr(unsigned long *v) { return false; } -static inline bool __init __early_cpu_has_rndr(void) { return false; } - #endif /* CONFIG_ARCH_RANDOM */ #endif /* _ASM_ARCHRANDOM_H */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index fb4c27506ef4..c3009b0e5239 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -45,6 +45,7 @@ #define rmb() dsb(ld) #define wmb() dsb(st) +#define dma_mb() dmb(osh) #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h index c7f67da13cd9..3e7943fd17a4 100644 --- a/arch/arm64/include/asm/boot.h +++ b/arch/arm64/include/asm/boot.h @@ -13,8 +13,7 @@ #define MAX_FDT_SIZE SZ_2M /* - * arm64 requires the kernel image to placed - * TEXT_OFFSET bytes beyond a 2 MB aligned base + * arm64 requires the kernel image to placed at a 2 MB aligned base address */ #define MIN_KIMG_ALIGN SZ_2M diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 935d2aa231bf..23a9fb73c04f 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -35,8 +35,6 @@ typedef s32 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s64 compat_s64; -typedef u64 compat_u64; struct compat_stat { #ifdef __AARCH64EB__ diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index d28e8f37d3b4..e95c4df83911 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -21,7 +21,7 @@ * mechanism for doing so, tests whether it is possible to boot * the given CPU. * @cpu_boot: Boots a cpu into the kernel. - * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary + * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary * synchronisation. Called from the cpu being booted. * @cpu_can_disable: Determines whether a CPU can be disabled based on * mechanism-specific information. 
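
The barrier.h hunk above adds dma_mb(), mapped to dmb(osh), filling the gap between the read-only dma_rmb() (dmb(oshld)) and write-only dma_wmb() (dmb(oshst)) for drivers that must order both loads and stores against a non-coherent DMA master. A hedged sketch of the kind of use this enables (the descriptor layout, DESC_DONE and OWNER_HW are made up for illustration, not part of this series):

	if (le32_to_cpu(READ_ONCE(desc->status)) & DESC_DONE) {
		/* order the status read above against handing the buffer back to hardware */
		dma_mb();
		WRITE_ONCE(desc->owner, cpu_to_le32(OWNER_HW));
	}
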
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 07b643a70710..42868dbd29fd 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -31,13 +31,13 @@ #define ARM64_HAS_DCPOP 21 #define ARM64_SVE 22 #define ARM64_UNMAP_KERNEL_AT_EL0 23 -#define ARM64_HARDEN_BRANCH_PREDICTOR 24 +#define ARM64_SPECTRE_V2 24 #define ARM64_HAS_RAS_EXTN 25 #define ARM64_WORKAROUND_843419 26 #define ARM64_HAS_CACHE_IDC 27 #define ARM64_HAS_CACHE_DIC 28 #define ARM64_HW_DBM 29 -#define ARM64_SSBD 30 +#define ARM64_SPECTRE_V4 30 #define ARM64_MISMATCHED_CACHE_TYPE 31 #define ARM64_HAS_STAGE2_FWB 32 #define ARM64_HAS_CRC32 33 @@ -64,7 +64,8 @@ #define ARM64_BTI 54 #define ARM64_HAS_ARMv8_4_TTL 55 #define ARM64_HAS_TLB_RANGE 56 +#define ARM64_MTE 57 -#define ARM64_NCAPS 57 +#define ARM64_NCAPS 58 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 89b4f0142c28..f7e7144af174 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -358,7 +358,7 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap) } /* - * Generic helper for handling capabilties with multiple (match,enable) pairs + * Generic helper for handling capabilities with multiple (match,enable) pairs * of call backs, sharing the same capability bit. * Iterate over each entry to see if at least one matches. */ @@ -681,6 +681,12 @@ static __always_inline bool system_uses_irq_prio_masking(void) cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); } +static inline bool system_supports_mte(void) +{ + return IS_ENABLED(CONFIG_ARM64_MTE) && + cpus_have_const_cap(ARM64_MTE); +} + static inline bool system_has_prio_mask_debugging(void) { return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) && @@ -698,30 +704,6 @@ static inline bool system_supports_tlb_range(void) cpus_have_const_cap(ARM64_HAS_TLB_RANGE); } -#define ARM64_BP_HARDEN_UNKNOWN -1 -#define ARM64_BP_HARDEN_WA_NEEDED 0 -#define ARM64_BP_HARDEN_NOT_REQUIRED 1 - -int get_spectre_v2_workaround_state(void); - -#define ARM64_SSBD_UNKNOWN -1 -#define ARM64_SSBD_FORCE_DISABLE 0 -#define ARM64_SSBD_KERNEL 1 -#define ARM64_SSBD_FORCE_ENABLE 2 -#define ARM64_SSBD_MITIGATED 3 - -static inline int arm64_get_ssbd_state(void) -{ -#ifdef CONFIG_ARM64_SSBD - extern int ssbd_state; - return ssbd_state; -#else - return ARM64_SSBD_UNKNOWN; -#endif -} - -void arm64_set_ssbd_mitigation(bool state); - extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index d4ab3f73e7a3..973b14415271 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -65,7 +65,7 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); (SEGMENT_ALIGN > THREAD_ALIGN ? SEGMENT_ALIGN : THREAD_ALIGN) /* on arm64, the FDT may be located anywhere in system RAM */ -static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base) +static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr) { return ULONG_MAX; } @@ -80,8 +80,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base) * apply to other bootloaders, and are required for some kernel * configurations. 
*/ -static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, - unsigned long image_addr) +static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) { return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1)); } diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 035003acfa87..22c81f1edda2 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -35,7 +35,9 @@ #define ESR_ELx_EC_SYS64 (0x18) #define ESR_ELx_EC_SVE (0x19) #define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ -/* Unallocated EC: 0x1b - 0x1E */ +/* Unallocated EC: 0x1B */ +#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */ +/* Unallocated EC: 0x1D - 0x1E */ #define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ #define ESR_ELx_EC_IABT_LOW (0x20) #define ESR_ELx_EC_IABT_CUR (0x21) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 7577a754d443..99b9383cd036 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -47,4 +47,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); void do_cp15instr(unsigned int esr, struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); +void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 840a35ed92ec..b15eb4a3e6b2 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -22,6 +22,15 @@ struct exception_table_entry #define ARCH_HAS_RELATIVE_EXTABLE +static inline bool in_bpf_jit(struct pt_regs *regs) +{ + if (!IS_ENABLED(CONFIG_BPF_JIT)) + return false; + + return regs->pc >= BPF_JIT_REGION_START && + regs->pc < BPF_JIT_REGION_END; +} + #ifdef CONFIG_BPF_JIT int arm64_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs); diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 59f10dd13f12..bec5f14b622a 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -69,6 +69,9 @@ static inline void *sve_pffr(struct thread_struct *thread) extern void sve_save_state(void *state, u32 *pfpsr); extern void sve_load_state(void const *state, u32 const *pfpsr, unsigned long vq_minus_1); +extern void sve_flush_live(void); +extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state, + unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); struct arm64_cpu_capabilities; diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 636e9d9c7929..af43367534c7 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -164,25 +164,59 @@ | ((\np) << 5) .endm +/* PFALSE P\np.B */ +.macro _sve_pfalse np + _sve_check_preg \np + .inst 0x2518e400 \ + | (\np) +.endm + .macro __for from:req, to:req .if (\from) == (\to) - _for__body \from + _for__body %\from .else - __for \from, (\from) + ((\to) - (\from)) / 2 - __for (\from) + ((\to) - (\from)) / 2 + 1, \to + __for %\from, %((\from) + ((\to) - (\from)) / 2) + __for %((\from) + ((\to) - (\from)) / 2 + 1), %\to .endif .endm .macro _for var:req, from:req, to:req, insn:vararg .macro _for__body \var:req + .noaltmacro \insn + .altmacro .endm + .altmacro __for \from, \to + .noaltmacro .purgem _for__body .endm +/* Update ZCR_EL1.LEN with the new VQ */ +.macro sve_load_vq xvqminus1, xtmp, xtmp2 + mrs_s \xtmp, 
SYS_ZCR_EL1 + bic \xtmp2, \xtmp, ZCR_ELx_LEN_MASK + orr \xtmp2, \xtmp2, \xvqminus1 + cmp \xtmp2, \xtmp + b.eq 921f + msr_s SYS_ZCR_EL1, \xtmp2 //self-synchronising +921: +.endm + +/* Preserve the first 128-bits of Znz and zero the rest. */ +.macro _sve_flush_z nz + _sve_check_zreg \nz + mov v\nz\().16b, v\nz\().16b +.endm + +.macro sve_flush + _for n, 0, 31, _sve_flush_z \n + _for n, 0, 15, _sve_pfalse \n + _sve_wrffr 0 +.endm + .macro sve_save nxbase, xpfpsr, nxtmp _for n, 0, 31, _sve_str_v \n, \nxbase, \n - 34 _for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16 @@ -197,13 +231,7 @@ .endm .macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2 - mrs_s x\nxtmp, SYS_ZCR_EL1 - bic \xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK - orr \xtmp2, \xtmp2, \xvqminus1 - cmp \xtmp2, x\nxtmp - b.eq 921f - msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising -921: + sve_load_vq \xvqminus1, x\nxtmp, \xtmp2 _for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34 _sve_ldr_p 0, \nxbase _sve_wrffr 0 diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 985493af704b..5ffa4bacdad3 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -13,21 +13,12 @@ #include <asm/kvm_arm.h> #include <asm/sysreg.h> -#define NR_IPI 7 - typedef struct { unsigned int __softirq_pending; - unsigned int ipi_irqs[NR_IPI]; } ____cacheline_aligned irq_cpustat_t; #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ -#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++ -#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member) - -u64 smp_irq_stat_cpu(unsigned int cpu); -#define arch_irq_stat_cpu smp_irq_stat_cpu - #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 struct nmi_ctx { diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 22f73fe09030..9a5498c2c8ee 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -8,18 +8,27 @@ #include <uapi/asm/hwcap.h> #include <asm/cpufeature.h> +#define COMPAT_HWCAP_SWP (1 << 0) #define COMPAT_HWCAP_HALF (1 << 1) #define COMPAT_HWCAP_THUMB (1 << 2) +#define COMPAT_HWCAP_26BIT (1 << 3) #define COMPAT_HWCAP_FAST_MULT (1 << 4) +#define COMPAT_HWCAP_FPA (1 << 5) #define COMPAT_HWCAP_VFP (1 << 6) #define COMPAT_HWCAP_EDSP (1 << 7) +#define COMPAT_HWCAP_JAVA (1 << 8) +#define COMPAT_HWCAP_IWMMXT (1 << 9) +#define COMPAT_HWCAP_CRUNCH (1 << 10) +#define COMPAT_HWCAP_THUMBEE (1 << 11) #define COMPAT_HWCAP_NEON (1 << 12) #define COMPAT_HWCAP_VFPv3 (1 << 13) +#define COMPAT_HWCAP_VFPV3D16 (1 << 14) #define COMPAT_HWCAP_TLS (1 << 15) #define COMPAT_HWCAP_VFPv4 (1 << 16) #define COMPAT_HWCAP_IDIVA (1 << 17) #define COMPAT_HWCAP_IDIVT (1 << 18) #define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT) +#define COMPAT_HWCAP_VFPD32 (1 << 19) #define COMPAT_HWCAP_LPAE (1 << 20) #define COMPAT_HWCAP_EVTSTRM (1 << 21) @@ -95,7 +104,7 @@ #define KERNEL_HWCAP_DGH __khwcap2_feature(DGH) #define KERNEL_HWCAP_RNG __khwcap2_feature(RNG) #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI) -/* reserved for KERNEL_HWCAP_MTE __khwcap2_feature(MTE) */ +#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 0bc46149e491..4b39293d0f72 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) __AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000) __AARCH64_INSN_FUNCS(hint, 
0xFFFFF01F, 0xD503201F) __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000) +__AARCH64_INSN_FUNCS(br_auth, 0xFEFFF800, 0xD61F0800) __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000) +__AARCH64_INSN_FUNCS(blr_auth, 0xFEFFF800, 0xD63F0800) __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) +__AARCH64_INSN_FUNCS(ret_auth, 0xFFFFFBFF, 0xD65F0BFF) __AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0) +__AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF) __AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000) __AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F) __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index ff50dd731852..fd172c41df90 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -110,6 +110,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #define __io_par(v) __iormb(v) #define __iowmb() dma_wmb() +#define __iomb() dma_mb() /* * Relaxed I/O memory access primitives. These follow the Device memory diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h index 8a1ef1907760..a1020285ea75 100644 --- a/arch/arm64/include/asm/irq_work.h +++ b/arch/arm64/include/asm/irq_work.h @@ -2,11 +2,9 @@ #ifndef __ASM_IRQ_WORK_H #define __ASM_IRQ_WORK_H -#include <asm/smp.h> - static inline bool arch_irq_work_has_interrupt(void) { - return !!__smp_cross_call; + return true; } #endif /* __ASM_IRQ_WORK_H */ diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 329fb15f6bac..19ca76ea60d9 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -86,7 +86,7 @@ + EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \ + EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \ + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */ -#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end)) +#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end)) #define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE) #ifdef CONFIG_ARM64_SW_TTBR0_PAN diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 1da8e3dc4455..64ce29378467 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -12,6 +12,7 @@ #include <asm/types.h> /* Hyp Configuration Register (HCR) bits */ +#define HCR_ATA (UL(1) << 56) #define HCR_FWB (UL(1) << 46) #define HCR_API (UL(1) << 41) #define HCR_APK (UL(1) << 40) @@ -66,7 +67,7 @@ * TWI: Trap WFI * TIDCP: Trap L2CTLR/L2ECTLR * BSU_IS: Upgrade barriers to the inner shareable domain - * FB: Force broadcast of all maintainance operations + * FB: Force broadcast of all maintenance operations * AMO: Override CPSR.A and enable signaling with VA * IMO: Override CPSR.I and enable signaling with VI * FMO: Override CPSR.F and enable signaling with VF @@ -78,7 +79,7 @@ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ HCR_FMO | HCR_IMO | HCR_PTW ) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) -#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* TCR_EL2 Registers bits */ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 6f98fbd0ac81..7f7072f6cb45 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -9,9 +9,6 @@ 
#include <asm/virt.h> -#define VCPU_WORKAROUND_2_FLAG_SHIFT 0 -#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT) - #define ARM_EXIT_WITH_SERROR_BIT 31 #define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT)) #define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP) @@ -102,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector); #define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init) #define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector) -#ifdef CONFIG_KVM_INDIRECT_VECTORS extern atomic_t arm64_el2_vector_last_slot; DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs); #define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs) -#endif extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa, diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 1cc5f5f72d0b..5ef2669ccd6c 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -391,20 +391,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK; } -static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG; -} - -static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu, - bool flag) -{ - if (flag) - vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; - else - vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG; -} - static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) { diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 905c2b87e05a..bb5e5b88d439 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -631,46 +631,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} static inline void kvm_clr_pmu_events(u32 clr) {} #endif -#define KVM_BP_HARDEN_UNKNOWN -1 -#define KVM_BP_HARDEN_WA_NEEDED 0 -#define KVM_BP_HARDEN_NOT_REQUIRED 1 - -static inline int kvm_arm_harden_branch_predictor(void) -{ - switch (get_spectre_v2_workaround_state()) { - case ARM64_BP_HARDEN_WA_NEEDED: - return KVM_BP_HARDEN_WA_NEEDED; - case ARM64_BP_HARDEN_NOT_REQUIRED: - return KVM_BP_HARDEN_NOT_REQUIRED; - case ARM64_BP_HARDEN_UNKNOWN: - default: - return KVM_BP_HARDEN_UNKNOWN; - } -} - -#define KVM_SSBD_UNKNOWN -1 -#define KVM_SSBD_FORCE_DISABLE 0 -#define KVM_SSBD_KERNEL 1 -#define KVM_SSBD_FORCE_ENABLE 2 -#define KVM_SSBD_MITIGATED 3 - -static inline int kvm_arm_have_ssbd(void) -{ - switch (arm64_get_ssbd_state()) { - case ARM64_SSBD_FORCE_DISABLE: - return KVM_SSBD_FORCE_DISABLE; - case ARM64_SSBD_KERNEL: - return KVM_SSBD_KERNEL; - case ARM64_SSBD_FORCE_ENABLE: - return KVM_SSBD_FORCE_ENABLE; - case ARM64_SSBD_MITIGATED: - return KVM_SSBD_MITIGATED; - case ARM64_SSBD_UNKNOWN: - default: - return KVM_SSBD_UNKNOWN; - } -} - void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu); void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 189839c3706a..cff1cebc7590 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -9,6 +9,7 @@ #include <asm/page.h> #include <asm/memory.h> +#include <asm/mmu.h> #include <asm/cpufeature.h> /* @@ -430,19 +431,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, return ret; } 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS /* * EL2 vectors can be mapped and rerouted in a number of ways, * depending on the kernel configuration and CPU present: * - * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the - * hardening sequence is placed in one of the vector slots, which is - * executed before jumping to the real vectors. + * - If the CPU is affected by Spectre-v2, the hardening sequence is + * placed in one of the vector slots, which is executed before jumping + * to the real vectors. * - * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the - * ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the - * hardening sequence is mapped next to the idmap page, and executed - * before jumping to the real vectors. + * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot + * containing the hardening sequence is mapped next to the idmap page, + * and executed before jumping to the real vectors. * * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an * empty slot is selected, mapped next to the idmap page, and @@ -452,19 +451,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, * VHE, as we don't have hypervisor-specific mappings. If the system * is VHE and yet selects this capability, it will be ignored. */ -#include <asm/mmu.h> - extern void *__kvm_bp_vect_base; extern int __kvm_harden_el2_vector_slot; -/* This is called on both VHE and !VHE systems */ static inline void *kvm_get_hyp_vector(void) { struct bp_hardening_data *data = arm64_get_bp_hardening_data(); void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); int slot = -1; - if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) { + if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) { vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); slot = data->hyp_vectors_slot; } @@ -481,76 +477,6 @@ static inline void *kvm_get_hyp_vector(void) return vect; } -/* This is only called on a !VHE system */ -static inline int kvm_map_vectors(void) -{ - /* - * HBP = ARM64_HARDEN_BRANCH_PREDICTOR - * HEL2 = ARM64_HARDEN_EL2_VECTORS - * - * !HBP + !HEL2 -> use direct vectors - * HBP + !HEL2 -> use hardened vectors in place - * !HBP + HEL2 -> allocate one vector slot and use exec mapping - * HBP + HEL2 -> use hardened vertors and use exec mapping - */ - if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) { - __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs); - __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base); - } - - if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) { - phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs); - unsigned long size = __BP_HARDEN_HYP_VECS_SZ; - - /* - * Always allocate a spare vector slot, as we don't - * know yet which CPUs have a BP hardening slot that - * we can reuse. 
- */ - __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot); - BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS); - return create_hyp_exec_mappings(vect_pa, size, - &__kvm_bp_vect_base); - } - - return 0; -} -#else -static inline void *kvm_get_hyp_vector(void) -{ - return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); -} - -static inline int kvm_map_vectors(void) -{ - return 0; -} -#endif - -#ifdef CONFIG_ARM64_SSBD -DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); - -static inline int hyp_map_aux_data(void) -{ - int cpu, err; - - for_each_possible_cpu(cpu) { - u64 *ptr; - - ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu); - err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP); - if (err) - return err; - } - return 0; -} -#else -static inline int hyp_map_aux_data(void) -{ - return 0; -} -#endif - #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr) /* diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index afa722504bfd..43640d797455 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -126,13 +126,18 @@ /* * Memory types available. + * + * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in + * the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note + * that protection_map[] only contains MT_NORMAL attributes. */ -#define MT_DEVICE_nGnRnE 0 -#define MT_DEVICE_nGnRE 1 -#define MT_DEVICE_GRE 2 -#define MT_NORMAL_NC 3 -#define MT_NORMAL 4 -#define MT_NORMAL_WT 5 +#define MT_NORMAL 0 +#define MT_NORMAL_TAGGED 1 +#define MT_NORMAL_NC 2 +#define MT_NORMAL_WT 3 +#define MT_DEVICE_nGnRnE 4 +#define MT_DEVICE_nGnRE 5 +#define MT_DEVICE_GRE 6 /* * Memory types for Stage-2 translation @@ -169,7 +174,7 @@ extern s64 memstart_addr; /* PHYS_OFFSET - the physical address of the start of memory. */ #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; }) -/* the virtual base of the kernel image (minus TEXT_OFFSET) */ +/* the virtual base of the kernel image */ extern u64 kimage_vaddr; /* the offset between the kernel virtual and physical mappings */ diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h index 081ec8de9ea6..e3e28f7daf62 100644 --- a/arch/arm64/include/asm/mman.h +++ b/arch/arm64/include/asm/mman.h @@ -9,16 +9,53 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey __always_unused) { + unsigned long ret = 0; + if (system_supports_bti() && (prot & PROT_BTI)) - return VM_ARM64_BTI; + ret |= VM_ARM64_BTI; - return 0; + if (system_supports_mte() && (prot & PROT_MTE)) + ret |= VM_MTE; + + return ret; } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) +static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) +{ + /* + * Only allow MTE on anonymous mappings as these are guaranteed to be + * backed by tags-capable memory. The vm_flags may be overridden by a + * filesystem supporting MTE (RAM-based). + */ + if (system_supports_mte() && (flags & MAP_ANONYMOUS)) + return VM_MTE_ALLOWED; + + return 0; +} +#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) + static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { - return (vm_flags & VM_ARM64_BTI) ? 
__pgprot(PTE_GP) : __pgprot(0); + pteval_t prot = 0; + + if (vm_flags & VM_ARM64_BTI) + prot |= PTE_GP; + + /* + * There are two conditions required for returning a Normal Tagged + * memory type: (1) the user requested it via PROT_MTE passed to + * mmap() or mprotect() and (2) the corresponding vma supports MTE. We + * register (1) as VM_MTE in the vma->vm_flags and (2) as + * VM_MTE_ALLOWED. Note that the latter can only be set during the + * mmap() call since mprotect() does not accept MAP_* flags. + * Checking for VM_MTE only is sufficient since arch_validate_flags() + * does not permit (VM_MTE & !VM_MTE_ALLOWED). + */ + if (vm_flags & VM_MTE) + prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); + + return __pgprot(prot); } #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) @@ -30,8 +67,21 @@ static inline bool arch_validate_prot(unsigned long prot, if (system_supports_bti()) supported |= PROT_BTI; + if (system_supports_mte()) + supported |= PROT_MTE; + return (prot & ~supported) == 0; } #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr) +static inline bool arch_validate_flags(unsigned long vm_flags) +{ + if (!system_supports_mte()) + return true; + + /* only allow VM_MTE if VM_MTE_ALLOWED has been set previously */ + return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED); +} +#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags) + #endif /* ! __ASM_MMAN_H__ */ diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index a7a5ecaa2e83..b2e91c187e2a 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -17,11 +17,14 @@ #ifndef __ASSEMBLY__ +#include <linux/refcount.h> + typedef struct { atomic64_t id; #ifdef CONFIG_COMPAT void *sigpage; #endif + refcount_t pinned; void *vdso; unsigned long flags; } mm_context_t; @@ -45,7 +48,6 @@ struct bp_hardening_data { bp_hardening_cb_t fn; }; -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) @@ -57,21 +59,13 @@ static inline void arm64_apply_bp_hardening(void) { struct bp_hardening_data *d; - if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) + if (!cpus_have_const_cap(ARM64_SPECTRE_V2)) return; d = arm64_get_bp_hardening_data(); if (d->fn) d->fn(); } -#else -static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) -{ - return NULL; -} - -static inline void arm64_apply_bp_hardening(void) { } -#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ extern void arm64_memblock_init(void); extern void paging_init(void); diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index f2d7537d6f83..0672236e1aea 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -177,7 +177,13 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp) #define destroy_context(mm) do { } while(0) void check_and_switch_context(struct mm_struct *mm); -#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + atomic64_set(&mm->context.id, 0); + refcount_set(&mm->context.pinned, 0); + return 0; +} #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void update_saved_ttbr0(struct task_struct *tsk, @@ -248,6 +254,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, void verify_cpu_asid_bits(void); void post_ttbr_update_workaround(void); +unsigned long 
arm64_mm_context_get(struct mm_struct *mm); +void arm64_mm_context_put(struct mm_struct *mm); + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_MMU_CONTEXT_H */ diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h new file mode 100644 index 000000000000..1c99fcadb58c --- /dev/null +++ b/arch/arm64/include/asm/mte.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 ARM Ltd. + */ +#ifndef __ASM_MTE_H +#define __ASM_MTE_H + +#define MTE_GRANULE_SIZE UL(16) +#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1)) +#define MTE_TAG_SHIFT 56 +#define MTE_TAG_SIZE 4 + +#ifndef __ASSEMBLY__ + +#include <linux/page-flags.h> + +#include <asm/pgtable-types.h> + +void mte_clear_page_tags(void *addr); +unsigned long mte_copy_tags_from_user(void *to, const void __user *from, + unsigned long n); +unsigned long mte_copy_tags_to_user(void __user *to, void *from, + unsigned long n); +int mte_save_tags(struct page *page); +void mte_save_page_tags(const void *page_addr, void *tag_storage); +bool mte_restore_tags(swp_entry_t entry, struct page *page); +void mte_restore_page_tags(void *page_addr, const void *tag_storage); +void mte_invalidate_tags(int type, pgoff_t offset); +void mte_invalidate_tags_area(int type); +void *mte_allocate_tag_storage(void); +void mte_free_tag_storage(char *storage); + +#ifdef CONFIG_ARM64_MTE + +/* track which pages have valid allocation tags */ +#define PG_mte_tagged PG_arch_2 + +void mte_sync_tags(pte_t *ptep, pte_t pte); +void mte_copy_page_tags(void *kto, const void *kfrom); +void flush_mte_state(void); +void mte_thread_switch(struct task_struct *next); +void mte_suspend_exit(void); +long set_mte_ctrl(struct task_struct *task, unsigned long arg); +long get_mte_ctrl(struct task_struct *task); +int mte_ptrace_copy_tags(struct task_struct *child, long request, + unsigned long addr, unsigned long data); + +#else + +/* unused if !CONFIG_ARM64_MTE, silence the compiler */ +#define PG_mte_tagged 0 + +static inline void mte_sync_tags(pte_t *ptep, pte_t pte) +{ +} +static inline void mte_copy_page_tags(void *kto, const void *kfrom) +{ +} +static inline void flush_mte_state(void) +{ +} +static inline void mte_thread_switch(struct task_struct *next) +{ +} +static inline void mte_suspend_exit(void) +{ +} +static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg) +{ + return 0; +} +static inline long get_mte_ctrl(struct task_struct *task) +{ + return 0; +} +static inline int mte_ptrace_copy_tags(struct task_struct *child, + long request, unsigned long addr, + unsigned long data) +{ + return -EIO; +} + +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_MTE_H */ diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h index 626ad01e83bf..dd870390d639 100644 --- a/arch/arm64/include/asm/numa.h +++ b/arch/arm64/include/asm/numa.h @@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node); /* Returns a pointer to the cpumask of CPUs on Node 'node'. 
*/ static inline const struct cpumask *cpumask_of_node(int node) { + if (node == NUMA_NO_NODE) + return cpu_all_mask; + return node_to_cpumask_map[node]; } #endif diff --git a/arch/arm64/include/asm/page-def.h b/arch/arm64/include/asm/page-def.h index f99d48ecbeef..2403f7b4cdbf 100644 --- a/arch/arm64/include/asm/page-def.h +++ b/arch/arm64/include/asm/page-def.h @@ -11,13 +11,8 @@ #include <linux/const.h> /* PAGE_SHIFT determines the page size */ -/* CONT_SHIFT determines the number of pages which can be tracked together */ #define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT -#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -#define CONT_SIZE (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT)) -#define CONT_MASK (~(CONT_SIZE-1)) - #endif /* __ASM_PAGE_DEF_H */ diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index c01b52add377..012cffc574e8 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -15,18 +15,25 @@ #include <linux/personality.h> /* for READ_IMPLIES_EXEC */ #include <asm/pgtable-types.h> -extern void __cpu_clear_user_page(void *p, unsigned long user); -extern void __cpu_copy_user_page(void *to, const void *from, - unsigned long user); +struct page; +struct vm_area_struct; + extern void copy_page(void *to, const void *from); extern void clear_page(void *to); +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE + +void copy_highpage(struct page *to, struct page *from); +#define __HAVE_ARCH_COPY_HIGHPAGE + #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE -#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) -#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) typedef struct page *pgtable_t; @@ -36,7 +43,7 @@ extern int pfn_valid(unsigned long); #endif /* !__ASSEMBLY__ */ -#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC +#define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED) #include <asm-generic/getorder.h> diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index 70b323cf8300..b33ca260e3c9 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -17,6 +17,7 @@ #define pcibios_assign_all_busses() \ (pci_has_flag(PCI_REASSIGN_ALL_BUS)) +#define arch_can_pci_mmap_wc() 1 #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 extern int isa_dma_bridge_buggy; diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 2c2d7dbe8a02..60731f602d3e 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -236,6 +236,9 @@ #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ +/* PMMIR_EL1.SLOTS mask */ +#define ARMV8_PMU_SLOTS_MASK 0xff + #ifdef CONFIG_PERF_EVENTS struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index d400a4d9aee2..94b3f2ac2e9d 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ 
-81,25 +81,15 @@ /* * Contiguous page definitions. */ -#ifdef CONFIG_ARM64_64K_PAGES -#define CONT_PTE_SHIFT (5 + PAGE_SHIFT) -#define CONT_PMD_SHIFT (5 + PMD_SHIFT) -#elif defined(CONFIG_ARM64_16K_PAGES) -#define CONT_PTE_SHIFT (7 + PAGE_SHIFT) -#define CONT_PMD_SHIFT (5 + PMD_SHIFT) -#else -#define CONT_PTE_SHIFT (4 + PAGE_SHIFT) -#define CONT_PMD_SHIFT (4 + PMD_SHIFT) -#endif - +#define CONT_PTE_SHIFT (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT) #define CONT_PTES (1 << (CONT_PTE_SHIFT - PAGE_SHIFT)) #define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE) #define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1)) + +#define CONT_PMD_SHIFT (CONFIG_ARM64_CONT_PMD_SHIFT + PMD_SHIFT) #define CONT_PMDS (1 << (CONT_PMD_SHIFT - PMD_SHIFT)) #define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE) #define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1)) -/* the numerical offset of the PTE within a range of CONT_PTES */ -#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1)) /* * Hardware page table definitions. diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 4d867c6446c4..4cd0d6ca8aa1 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -19,6 +19,13 @@ #define PTE_DEVMAP (_AT(pteval_t, 1) << 57) #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ +/* + * This bit indicates that the entry is present i.e. pmd_page() + * still points to a valid huge page in memory even if the pmd + * has been invalidated. + */ +#define PMD_PRESENT_INVALID (_AT(pteval_t, 1) << 59) /* only when !PMD_SECT_VALID */ + #ifndef __ASSEMBLY__ #include <asm/cpufeature.h> @@ -50,6 +57,7 @@ extern bool arm64_use_ng_mappings; #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) #define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) +#define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED)) #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) @@ -59,6 +67,7 @@ extern bool arm64_use_ng_mappings; #define _HYP_PAGE_DEFAULT _PAGE_DEFAULT #define PAGE_KERNEL __pgprot(PROT_NORMAL) +#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED) #define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY) #define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY) #define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index d5d3fbe73953..a11bf52e0c38 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -9,6 +9,7 @@ #include <asm/proc-fns.h> #include <asm/memory.h> +#include <asm/mte.h> #include <asm/pgtable-hwdef.h> #include <asm/pgtable-prot.h> #include <asm/tlbflush.h> @@ -35,11 +36,6 @@ extern struct page *vmemmap; -extern void __pte_error(const char *file, int line, unsigned long val); -extern void __pmd_error(const char *file, int line, unsigned long val); -extern void __pud_error(const char *file, int line, unsigned long val); -extern void __pgd_error(const char *file, int line, unsigned long val); - #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE @@ -51,13 +47,22 @@ extern void 
__pgd_error(const char *file, int line, unsigned long val); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* + * Outside of a few very special situations (e.g. hibernation), we always + * use broadcast TLB invalidation instructions, therefore a spurious page + * fault on one CPU which has been handled concurrently by another CPU + * does not need to perform additional invalidation. + */ +#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0) + +/* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page)) -#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e)) /* * Macros to convert between a physical address and its placement in a @@ -90,6 +95,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) #define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP)) +#define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \ + PTE_ATTRINDX(MT_NORMAL_TAGGED)) #define pte_cont_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \ @@ -145,6 +152,18 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) return pte; } +static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) +{ + pmd_val(pmd) &= ~pgprot_val(prot); + return pmd; +} + +static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) +{ + pmd_val(pmd) |= pgprot_val(prot); + return pmd; +} + static inline pte_t pte_wrprotect(pte_t pte) { pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); @@ -284,6 +303,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) __sync_icache_dcache(pte); + if (system_supports_mte() && + pte_present(pte) && pte_tagged(pte) && !pte_special(pte)) + mte_sync_tags(ptep, pte); + __check_racy_pte_update(mm, ptep, pte); set_pte(ptep, pte); @@ -363,15 +386,24 @@ static inline int pmd_protnone(pmd_t pmd) } #endif +#define pmd_present_invalid(pmd) (!!(pmd_val(pmd) & PMD_PRESENT_INVALID)) + +static inline int pmd_present(pmd_t pmd) +{ + return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd); +} + /* * THP definitions. 
*/ #ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) +static inline int pmd_trans_huge(pmd_t pmd) +{ + return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#define pmd_present(pmd) pte_present(pmd_pte(pmd)) #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_valid(pmd) pte_valid(pmd_pte(pmd)) @@ -381,7 +413,14 @@ static inline int pmd_protnone(pmd_t pmd) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) -#define pmd_mkinvalid(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID)) + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID)); + pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID)); + + return pmd; +} #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) @@ -541,7 +580,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) #if CONFIG_PGTABLE_LEVELS > 2 -#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT)) @@ -608,7 +648,8 @@ static inline unsigned long pud_page_vaddr(pud_t pud) #if CONFIG_PGTABLE_LEVELS > 3 -#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud)) +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e)) #define p4d_none(p4d) (!p4d_val(p4d)) #define p4d_bad(p4d) (!(p4d_val(p4d) & 2)) @@ -667,15 +708,21 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d) #endif /* CONFIG_PGTABLE_LEVELS > 3 */ -#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e)) #define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr)) #define pgd_clear_fixmap() clear_fixmap(FIX_PGD) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { + /* + * Normal and Normal-Tagged are two different memory types and indices + * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK. + */ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP; + PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP | + PTE_ATTRINDX_MASK; /* preserve the hardware dirty information */ if (pte_hw_dirty(pte)) pte = pte_mkdirty(pte); @@ -847,6 +894,11 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) +#define __swp_entry_to_pmd(swp) __pmd((swp).val) +#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ + /* * Ensure that there are not more swap files than can be encoded in the kernel * PTEs. 
@@ -855,6 +907,38 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, extern int kern_addr_valid(unsigned long addr); +#ifdef CONFIG_ARM64_MTE + +#define __HAVE_ARCH_PREPARE_TO_SWAP +static inline int arch_prepare_to_swap(struct page *page) +{ + if (system_supports_mte()) + return mte_save_tags(page); + return 0; +} + +#define __HAVE_ARCH_SWAP_INVALIDATE +static inline void arch_swap_invalidate_page(int type, pgoff_t offset) +{ + if (system_supports_mte()) + mte_invalidate_tags(type, offset); +} + +static inline void arch_swap_invalidate_area(int type) +{ + if (system_supports_mte()) + mte_invalidate_tags_area(type); +} + +#define __HAVE_ARCH_SWAP_RESTORE +static inline void arch_swap_restore(swp_entry_t entry, struct page *page) +{ + if (system_supports_mte() && mte_restore_tags(entry, page)) + set_bit(PG_mte_tagged, &page->flags); +} + +#endif /* CONFIG_ARM64_MTE */ + /* * On AArch64, the cache coherency is handled via the set_pte_at() function. */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 240fe5e5b720..fce8cbecd6bc 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -38,6 +38,7 @@ #include <asm/pgtable-hwdef.h> #include <asm/pointer_auth.h> #include <asm/ptrace.h> +#include <asm/spectre.h> #include <asm/types.h> /* @@ -151,6 +152,10 @@ struct thread_struct { struct ptrauth_keys_user keys_user; struct ptrauth_keys_kernel keys_kernel; #endif +#ifdef CONFIG_ARM64_MTE + u64 sctlr_tcf0; + u64 gcr_user_incl; +#endif }; static inline void arch_thread_struct_whitelist(unsigned long *offset, @@ -197,40 +202,15 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) regs->pmr_save = GIC_PRIO_IRQON; } -static inline void set_ssbs_bit(struct pt_regs *regs) -{ - regs->pstate |= PSR_SSBS_BIT; -} - -static inline void set_compat_ssbs_bit(struct pt_regs *regs) -{ - regs->pstate |= PSR_AA32_SSBS_BIT; -} - static inline void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { start_thread_common(regs, pc); regs->pstate = PSR_MODE_EL0t; - - if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) - set_ssbs_bit(regs); - + spectre_v4_enable_task_mitigation(current); regs->sp = sp; } -static inline bool is_ttbr0_addr(unsigned long addr) -{ - /* entry assembly clears tags for TTBR0 addrs */ - return addr < TASK_SIZE; -} - -static inline bool is_ttbr1_addr(unsigned long addr) -{ - /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ - return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; -} - #ifdef CONFIG_COMPAT static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) @@ -244,13 +224,23 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, regs->pstate |= PSR_AA32_E_BIT; #endif - if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE) - set_compat_ssbs_bit(regs); - + spectre_v4_enable_task_mitigation(current); regs->compat_sp = sp; } #endif +static inline bool is_ttbr0_addr(unsigned long addr) +{ + /* entry assembly clears tags for TTBR0 addrs */ + return addr < TASK_SIZE; +} + +static inline bool is_ttbr1_addr(unsigned long addr) +{ + /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ + return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; +} + /* Forward declaration, a strange C thing */ struct task_struct; @@ -315,10 +305,10 @@ extern void __init minsigstksz_setup(void); #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI /* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ -long 
set_tagged_addr_ctrl(unsigned long arg); -long get_tagged_addr_ctrl(void); -#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg) -#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl() +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg); +long get_tagged_addr_ctrl(struct task_struct *task); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current) #endif /* diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 966ed30ed5f7..997cf8c8cd52 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -31,9 +31,21 @@ * interrupt disabling temporarily does not rely on IRQ priorities. */ #define GIC_PRIO_IRQON 0xe0 -#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) +#define __GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) +#define __GIC_PRIO_IRQOFF_NS 0xa0 #define GIC_PRIO_PSR_I_SET (1 << 4) +#define GIC_PRIO_IRQOFF \ + ({ \ + extern struct static_key_false gic_nonsecure_priorities;\ + u8 __prio = __GIC_PRIO_IRQOFF; \ + \ + if (static_branch_unlikely(&gic_nonsecure_priorities)) \ + __prio = __GIC_PRIO_IRQOFF_NS; \ + \ + __prio; \ + }) + /* Additional SPSR bits not exposed in the UABI */ #define PSR_MODE_THREAD_BIT (1 << 0) #define PSR_IL_BIT (1 << 20) diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 0eadbf933e35..2e7f529ec5a6 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -56,27 +56,15 @@ static inline void set_cpu_logical_map(int cpu, u64 hwid) struct seq_file; /* - * generate IPI list text - */ -extern void show_ipi_list(struct seq_file *p, int prec); - -/* - * Called from C code, this handles an IPI. - */ -extern void handle_IPI(int ipinr, struct pt_regs *regs); - -/* * Discover the set of possible CPUs and determine their * SMP operations. */ extern void smp_init_cpus(void); /* - * Provide a function to raise an IPI cross call on CPUs in callmap. + * Register IPI interrupts with the arch SMP code */ -extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); - -extern void (*__smp_cross_call)(const struct cpumask *, unsigned int); +extern void set_smp_ipi_range(int ipi_base, int nr_ipi); /* * Called from the secondary holding pen, this is the secondary CPU entry point. diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h new file mode 100644 index 000000000000..fcdfbce302bd --- /dev/null +++ b/arch/arm64/include/asm/spectre.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Interface for managing mitigations for Spectre vulnerabilities. + * + * Copyright (C) 2020 Google LLC + * Author: Will Deacon <will@kernel.org> + */ + +#ifndef __ASM_SPECTRE_H +#define __ASM_SPECTRE_H + +#include <asm/cpufeature.h> + +/* Watch out, ordering is important here. 
*/ +enum mitigation_state { + SPECTRE_UNAFFECTED, + SPECTRE_MITIGATED, + SPECTRE_VULNERABLE, +}; + +struct task_struct; + +enum mitigation_state arm64_get_spectre_v2_state(void); +bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope); +void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused); + +enum mitigation_state arm64_get_spectre_v4_state(void); +bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope); +void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused); +void spectre_v4_enable_task_mitigation(struct task_struct *tsk); + +#endif /* __ASM_SPECTRE_H */ diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index fc7613023c19..eb29b1fe8255 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -63,7 +63,7 @@ struct stackframe { extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, - int (*fn)(struct stackframe *, void *), void *data); + bool (*fn)(void *, unsigned long), void *data); extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 554a7e8ecb07..d52c1b3ce589 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -91,10 +91,12 @@ #define PSTATE_PAN pstate_field(0, 4) #define PSTATE_UAO pstate_field(0, 3) #define PSTATE_SSBS pstate_field(3, 1) +#define PSTATE_TCO pstate_field(3, 4) #define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift)) #define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift)) #define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift)) #define __SYS_BARRIER_INSN(CRm, op2, Rt) \ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) @@ -181,6 +183,8 @@ #define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) #define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) +#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) +#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) #define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) @@ -218,6 +222,8 @@ #define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3) #define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0) #define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1) +#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) +#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) @@ -321,6 +327,8 @@ #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) +#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6) + #define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0) #define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) @@ -368,6 +376,7 @@ #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) #define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) +#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) #define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) @@ -460,6 +469,7 @@ #define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0) #define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3) #define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) +#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0) #define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0) #define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) @@ -516,6 
+526,7 @@ #define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) +#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) #define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) @@ -531,6 +542,15 @@ /* Common SCTLR_ELx flags. */ #define SCTLR_ELx_DSSBS (BIT(44)) +#define SCTLR_ELx_ATA (BIT(43)) + +#define SCTLR_ELx_TCF_SHIFT 40 +#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT) +#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT) +#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT) +#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT) + +#define SCTLR_ELx_ITFSB (BIT(37)) #define SCTLR_ELx_ENIA (BIT(31)) #define SCTLR_ELx_ENIB (BIT(30)) #define SCTLR_ELx_ENDA (BIT(27)) @@ -559,6 +579,14 @@ #endif /* SCTLR_EL1 specific flags. */ +#define SCTLR_EL1_ATA0 (BIT(42)) + +#define SCTLR_EL1_TCF0_SHIFT 38 +#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT) +#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT) +#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT) +#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) + #define SCTLR_EL1_BT1 (BIT(36)) #define SCTLR_EL1_BT0 (BIT(35)) #define SCTLR_EL1_UCI (BIT(26)) @@ -587,6 +615,7 @@ SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\ SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ + SCTLR_ELx_ITFSB| SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) /* MAIR_ELx memory attributes (used by Linux) */ @@ -595,6 +624,7 @@ #define MAIR_ATTR_DEVICE_GRE UL(0x0c) #define MAIR_ATTR_NORMAL_NC UL(0x44) #define MAIR_ATTR_NORMAL_WT UL(0xbb) +#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0) #define MAIR_ATTR_NORMAL UL(0xff) #define MAIR_ATTR_MASK UL(0xff) @@ -636,14 +666,22 @@ #define ID_AA64ISAR1_APA_SHIFT 4 #define ID_AA64ISAR1_DPB_SHIFT 0 -#define ID_AA64ISAR1_APA_NI 0x0 -#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_API_NI 0x0 -#define ID_AA64ISAR1_API_IMP_DEF 0x1 -#define ID_AA64ISAR1_GPA_NI 0x0 -#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_GPI_NI 0x0 -#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 +#define ID_AA64ISAR1_APA_NI 0x0 +#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2 +#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3 +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4 +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5 +#define ID_AA64ISAR1_API_NI 0x0 +#define ID_AA64ISAR1_API_IMP_DEF 0x1 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4 +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5 +#define ID_AA64ISAR1_GPA_NI 0x0 +#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 +#define ID_AA64ISAR1_GPI_NI 0x0 +#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 @@ -686,6 +724,10 @@ #define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 #define ID_AA64PFR1_BT_BTI 0x1 +#define ID_AA64PFR1_MTE_NI 0x0 +#define ID_AA64PFR1_MTE_EL0 0x1 +#define ID_AA64PFR1_MTE 0x2 + /* id_aa64zfr0 */ #define ID_AA64ZFR0_F64MM_SHIFT 56 #define ID_AA64ZFR0_F32MM_SHIFT 52 @@ -920,6 +962,28 @@ #define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ #define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) +/* TCR EL1 Bit Definitions */ +#define SYS_TCR_EL1_TCMA1 (BIT(58)) +#define SYS_TCR_EL1_TCMA0 (BIT(57)) + +/* GCR_EL1 Definitions */ 
+#define SYS_GCR_EL1_RRND (BIT(16)) +#define SYS_GCR_EL1_EXCL_MASK 0xffffUL + +/* RGSR_EL1 Definitions */ +#define SYS_RGSR_EL1_TAG_MASK 0xfUL +#define SYS_RGSR_EL1_SEED_SHIFT 8 +#define SYS_RGSR_EL1_SEED_MASK 0xffffUL + +/* GMID_EL1 field definitions */ +#define SYS_GMID_EL1_BS_SHIFT 0 +#define SYS_GMID_EL1_BS_SIZE 4 + +/* TFSR{,E0}_EL1 bit definitions */ +#define SYS_TFSR_EL1_TF0_SHIFT 0 +#define SYS_TFSR_EL1_TF1_SHIFT 1 +#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT) +#define SYS_TFSR_EL1_TF1 (UK(2) << SYS_TFSR_EL1_TF1_SHIFT) /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) @@ -1024,6 +1088,13 @@ write_sysreg(__scs_new, sysreg); \ } while (0) +#define sysreg_clear_set_s(sysreg, clear, set) do { \ + u64 __scs_val = read_sysreg_s(sysreg); \ + u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ + if (__scs_new != __scs_val) \ + write_sysreg_s(__scs_new, sysreg); \ +} while (0) + #endif #endif /* __ASM_SYSREG_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 5e784e16ee89..1fbab854a51b 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -67,6 +67,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ +#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -96,10 +97,11 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) +#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ - _TIF_UPROBE | _TIF_FSCHECK) + _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index e042f6527981..11a465243f66 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -26,7 +26,9 @@ void topology_scale_freq_tick(void); #endif /* CONFIG_ARM64_AMU_EXTN */ /* Replace task scheduler's default frequency-invariant accounting */ +#define arch_set_freq_scale topology_set_freq_scale #define arch_scale_freq_capacity topology_get_freq_scale +#define arch_scale_freq_invariant topology_scale_freq_invariant /* Replace task scheduler's default cpu-invariant accounting */ #define arch_scale_cpu_capacity topology_get_cpu_scale diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index cee5928e1b7d..d96dc2c7c09d 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -24,7 +24,7 @@ struct undef_hook { void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); -void force_signal_inject(int signal, int code, unsigned long address); +void force_signal_inject(int signal, int code, unsigned long address, unsigned int err); void arm64_notify_segfault(unsigned long addr); void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str); 
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, const char *str); diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 3b859596840d..b3b2019f8d16 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -38,7 +38,7 @@ #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 440 +#define __NR_compat_syscalls 441 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 734860ac7cf9..107f08e03b9f 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -53,7 +53,7 @@ __SYSCALL(__NR_lseek, compat_sys_lseek) #define __NR_getpid 20 __SYSCALL(__NR_getpid, sys_getpid) #define __NR_mount 21 -__SYSCALL(__NR_mount, compat_sys_mount) +__SYSCALL(__NR_mount, sys_mount) /* 22 was sys_umount */ __SYSCALL(22, sys_ni_syscall) #define __NR_setuid 23 @@ -301,9 +301,9 @@ __SYSCALL(__NR_flock, sys_flock) #define __NR_msync 144 __SYSCALL(__NR_msync, sys_msync) #define __NR_readv 145 -__SYSCALL(__NR_readv, compat_sys_readv) +__SYSCALL(__NR_readv, sys_readv) #define __NR_writev 146 -__SYSCALL(__NR_writev, compat_sys_writev) +__SYSCALL(__NR_writev, sys_writev) #define __NR_getsid 147 __SYSCALL(__NR_getsid, sys_getsid) #define __NR_fdatasync 148 @@ -697,7 +697,7 @@ __SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2) #define __NR_tee 342 __SYSCALL(__NR_tee, sys_tee) #define __NR_vmsplice 343 -__SYSCALL(__NR_vmsplice, compat_sys_vmsplice) +__SYSCALL(__NR_vmsplice, sys_vmsplice) #define __NR_move_pages 344 __SYSCALL(__NR_move_pages, compat_sys_move_pages) #define __NR_getcpu 345 @@ -763,9 +763,9 @@ __SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg) #define __NR_setns 375 __SYSCALL(__NR_setns, sys_setns) #define __NR_process_vm_readv 376 -__SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv) +__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv) #define __NR_process_vm_writev 377 -__SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev) +__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev) #define __NR_kcmp 378 __SYSCALL(__NR_kcmp, sys_kcmp) #define __NR_finit_module 379 @@ -887,6 +887,8 @@ __SYSCALL(__NR_openat2, sys_openat2) __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) #define __NR_faccessat2 439 __SYSCALL(__NR_faccessat2, sys_faccessat2) +#define __NR_process_madvise 440 +__SYSCALL(__NR_process_madvise, sys_process_madvise) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h index 31bbc803cecb..dffdc773221b 100644 --- a/arch/arm64/include/asm/xen/page.h +++ b/arch/arm64/include/asm/xen/page.h @@ -1 +1,7 @@ #include <xen/arm/page.h> +#include <asm/mmu.h> + +static inline bool xen_kernel_unmapped_at_usr(void) +{ + return arm64_kernel_unmapped_at_el0(); +} diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 912162f73529..b8f41aa234ee 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -74,6 +74,6 @@ #define HWCAP2_DGH (1 << 15) #define HWCAP2_RNG (1 << 16) #define HWCAP2_BTI (1 << 17) -/* reserved for HWCAP2_MTE (1 << 18) */ +#define HWCAP2_MTE (1 << 18) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index ba85bb23f060..7d804fd0a682 100644 --- 
a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -242,6 +242,15 @@ struct kvm_vcpu_events { #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 + +/* + * Only two states can be presented by the host kernel: + * - NOT_REQUIRED: the guest doesn't need to do anything + * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available) + * + * All the other values are deprecated. The host still accepts all + * values (they are ABI), but will narrow them to the above two. + */ #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 diff --git a/arch/arm64/include/uapi/asm/mman.h b/arch/arm64/include/uapi/asm/mman.h index 6fdd71eb644f..1e6482a838e1 100644 --- a/arch/arm64/include/uapi/asm/mman.h +++ b/arch/arm64/include/uapi/asm/mman.h @@ -5,5 +5,6 @@ #include <asm-generic/mman.h> #define PROT_BTI 0x10 /* BTI guarded page */ +#define PROT_MTE 0x20 /* Normal Tagged mapping */ #endif /* ! _UAPI__ASM_MMAN_H */ diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 42cbe34d95ce..758ae984ff97 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -51,6 +51,7 @@ #define PSR_PAN_BIT 0x00400000 #define PSR_UAO_BIT 0x00800000 #define PSR_DIT_BIT 0x01000000 +#define PSR_TCO_BIT 0x02000000 #define PSR_V_BIT 0x10000000 #define PSR_C_BIT 0x20000000 #define PSR_Z_BIT 0x40000000 @@ -75,6 +76,9 @@ /* syscall emulation path in ptrace */ #define PTRACE_SYSEMU 31 #define PTRACE_SYSEMU_SINGLESTEP 32 +/* MTE allocation tag access */ +#define PTRACE_PEEKMTETAGS 33 +#define PTRACE_POKEMTETAGS 34 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index a561cbb91d4d..bbaf0bc4ad60 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -3,8 +3,6 @@ # Makefile for the linux kernel. 
# -CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) -AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) CFLAGS_armv8_deprecated.o := -I$(src) CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) @@ -19,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ - syscall.o + syscall.o proton-pack.o targets += efi-entry.o @@ -59,9 +57,9 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o -obj-$(CONFIG_ARM64_SSBD) += ssbd.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o +obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso/ probes/ obj-$(CONFIG_COMPAT_VDSO) += vdso32/ diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 4a18055b2ff9..37721eb6f9a1 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -35,6 +35,10 @@ SYM_CODE_START(__cpu_soft_restart) mov_q x13, SCTLR_ELx_FLAGS bic x12, x12, x13 pre_disable_mmu_workaround + /* + * either disable EL1&0 translation regime or disable EL2&0 translation + * regime if HCR_EL2.E2H == 1 + */ msr sctlr_el1, x12 isb diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 560ba69e13c1..24d75af344b1 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -106,365 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); } -atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); - -#include <asm/mmu_context.h> -#include <asm/cacheflush.h> - -DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); - -#ifdef CONFIG_KVM_INDIRECT_VECTORS -static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, - const char *hyp_vecs_end) -{ - void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K); - int i; - - for (i = 0; i < SZ_2K; i += 0x80) - memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); - - __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); -} - -static void install_bp_hardening_cb(bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) -{ - static DEFINE_RAW_SPINLOCK(bp_lock); - int cpu, slot = -1; - - /* - * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if - * we're a guest. Skip the hyp-vectors work. 
- */ - if (!hyp_vecs_start) { - __this_cpu_write(bp_hardening_data.fn, fn); - return; - } - - raw_spin_lock(&bp_lock); - for_each_possible_cpu(cpu) { - if (per_cpu(bp_hardening_data.fn, cpu) == fn) { - slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); - break; - } - } - - if (slot == -1) { - slot = atomic_inc_return(&arm64_el2_vector_last_slot); - BUG_ON(slot >= BP_HARDEN_EL2_SLOTS); - __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); - } - - __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); - __this_cpu_write(bp_hardening_data.fn, fn); - raw_spin_unlock(&bp_lock); -} -#else -static void install_bp_hardening_cb(bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) -{ - __this_cpu_write(bp_hardening_data.fn, fn); -} -#endif /* CONFIG_KVM_INDIRECT_VECTORS */ - -#include <linux/arm-smccc.h> - -static void __maybe_unused call_smc_arch_workaround_1(void) -{ - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); -} - -static void call_hvc_arch_workaround_1(void) -{ - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); -} - -static void qcom_link_stack_sanitization(void) -{ - u64 tmp; - - asm volatile("mov %0, x30 \n" - ".rept 16 \n" - "bl . + 4 \n" - ".endr \n" - "mov x30, %0 \n" - : "=&r" (tmp)); -} - -static bool __nospectre_v2; -static int __init parse_nospectre_v2(char *str) -{ - __nospectre_v2 = true; - return 0; -} -early_param("nospectre_v2", parse_nospectre_v2); - -/* - * -1: No workaround - * 0: No workaround required - * 1: Workaround installed - */ -static int detect_harden_bp_fw(void) -{ - bp_hardening_cb_t cb; - void *smccc_start, *smccc_end; - struct arm_smccc_res res; - u32 midr = read_cpuid_id(); - - arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_1, &res); - - switch ((int)res.a0) { - case 1: - /* Firmware says we're just fine */ - return 0; - case 0: - break; - default: - return -1; - } - - switch (arm_smccc_1_1_get_conduit()) { - case SMCCC_CONDUIT_HVC: - cb = call_hvc_arch_workaround_1; - /* This is a guest, no need to patch KVM vectors */ - smccc_start = NULL; - smccc_end = NULL; - break; - -#if IS_ENABLED(CONFIG_KVM) - case SMCCC_CONDUIT_SMC: - cb = call_smc_arch_workaround_1; - smccc_start = __smccc_workaround_1_smc; - smccc_end = __smccc_workaround_1_smc + - __SMCCC_WORKAROUND_1_SMC_SZ; - break; -#endif - - default: - return -1; - } - - if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || - ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) - cb = qcom_link_stack_sanitization; - - if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) - install_bp_hardening_cb(cb, smccc_start, smccc_end); - - return 1; -} - -DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); - -int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; -static bool __ssb_safe = true; - -static const struct ssbd_options { - const char *str; - int state; -} ssbd_options[] = { - { "force-on", ARM64_SSBD_FORCE_ENABLE, }, - { "force-off", ARM64_SSBD_FORCE_DISABLE, }, - { "kernel", ARM64_SSBD_KERNEL, }, -}; - -static int __init ssbd_cfg(char *buf) -{ - int i; - - if (!buf || !buf[0]) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { - int len = strlen(ssbd_options[i].str); - - if (strncmp(buf, ssbd_options[i].str, len)) - continue; - - ssbd_state = ssbd_options[i].state; - return 0; - } - - return -EINVAL; -} -early_param("ssbd", ssbd_cfg); - -void __init arm64_update_smccc_conduit(struct alt_instr *alt, - __le32 *origptr, __le32 *updptr, - int nr_inst) -{ - u32 insn; - - BUG_ON(nr_inst != 1); - - 
switch (arm_smccc_1_1_get_conduit()) { - case SMCCC_CONDUIT_HVC: - insn = aarch64_insn_get_hvc_value(); - break; - case SMCCC_CONDUIT_SMC: - insn = aarch64_insn_get_smc_value(); - break; - default: - return; - } - - *updptr = cpu_to_le32(insn); -} - -void __init arm64_enable_wa2_handling(struct alt_instr *alt, - __le32 *origptr, __le32 *updptr, - int nr_inst) -{ - BUG_ON(nr_inst != 1); - /* - * Only allow mitigation on EL1 entry/exit and guest - * ARCH_WORKAROUND_2 handling if the SSBD state allows it to - * be flipped. - */ - if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL) - *updptr = cpu_to_le32(aarch64_insn_gen_nop()); -} - -void arm64_set_ssbd_mitigation(bool state) -{ - int conduit; - - if (!IS_ENABLED(CONFIG_ARM64_SSBD)) { - pr_info_once("SSBD disabled by kernel configuration\n"); - return; - } - - if (this_cpu_has_cap(ARM64_SSBS)) { - if (state) - asm volatile(SET_PSTATE_SSBS(0)); - else - asm volatile(SET_PSTATE_SSBS(1)); - return; - } - - conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state, - NULL); - - WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE); -} - -static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, - int scope) -{ - struct arm_smccc_res res; - bool required = true; - s32 val; - bool this_cpu_safe = false; - int conduit; - - WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); - - if (cpu_mitigations_off()) - ssbd_state = ARM64_SSBD_FORCE_DISABLE; - - /* delay setting __ssb_safe until we get a firmware response */ - if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) - this_cpu_safe = true; - - if (this_cpu_has_cap(ARM64_SSBS)) { - if (!this_cpu_safe) - __ssb_safe = false; - required = false; - goto out_printmsg; - } - - conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_2, &res); - - if (conduit == SMCCC_CONDUIT_NONE) { - ssbd_state = ARM64_SSBD_UNKNOWN; - if (!this_cpu_safe) - __ssb_safe = false; - return false; - } - - val = (s32)res.a0; - - switch (val) { - case SMCCC_RET_NOT_SUPPORTED: - ssbd_state = ARM64_SSBD_UNKNOWN; - if (!this_cpu_safe) - __ssb_safe = false; - return false; - - /* machines with mixed mitigation requirements must not return this */ - case SMCCC_RET_NOT_REQUIRED: - pr_info_once("%s mitigation not required\n", entry->desc); - ssbd_state = ARM64_SSBD_MITIGATED; - return false; - - case SMCCC_RET_SUCCESS: - __ssb_safe = false; - required = true; - break; - - case 1: /* Mitigation not required on this CPU */ - required = false; - break; - - default: - WARN_ON(1); - if (!this_cpu_safe) - __ssb_safe = false; - return false; - } - - switch (ssbd_state) { - case ARM64_SSBD_FORCE_DISABLE: - arm64_set_ssbd_mitigation(false); - required = false; - break; - - case ARM64_SSBD_KERNEL: - if (required) { - __this_cpu_write(arm64_ssbd_callback_required, 1); - arm64_set_ssbd_mitigation(true); - } - break; - - case ARM64_SSBD_FORCE_ENABLE: - arm64_set_ssbd_mitigation(true); - required = true; - break; - - default: - WARN_ON(1); - break; - } - -out_printmsg: - switch (ssbd_state) { - case ARM64_SSBD_FORCE_DISABLE: - pr_info_once("%s disabled from command-line\n", entry->desc); - break; - - case ARM64_SSBD_FORCE_ENABLE: - pr_info_once("%s forced from command-line\n", entry->desc); - break; - } - - return required; -} - -/* known invulnerable cores */ -static const struct midr_range arm64_ssb_cpus[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), - MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), - 
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), - MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), - {}, -}; - #ifdef CONFIG_ARM64_ERRATUM_1463225 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); @@ -519,83 +160,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ CAP_MIDR_RANGE_LIST(midr_list) -/* Track overall mitigation state. We are only mitigated if all cores are ok */ -static bool __hardenbp_enab = true; -static bool __spectrev2_safe = true; - -int get_spectre_v2_workaround_state(void) -{ - if (__spectrev2_safe) - return ARM64_BP_HARDEN_NOT_REQUIRED; - - if (!__hardenbp_enab) - return ARM64_BP_HARDEN_UNKNOWN; - - return ARM64_BP_HARDEN_WA_NEEDED; -} - -/* - * List of CPUs that do not need any Spectre-v2 mitigation at all. - */ -static const struct midr_range spectre_v2_safe_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), - MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), - MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), - MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), - MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), - { /* sentinel */ } -}; - -/* - * Track overall bp hardening for all heterogeneous cores in the machine. - * We are only considered "safe" if all booted cores are known safe. - */ -static bool __maybe_unused -check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) -{ - int need_wa; - - WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); - - /* If the CPU has CSV2 set, we're safe */ - if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1), - ID_AA64PFR0_CSV2_SHIFT)) - return false; - - /* Alternatively, we have a list of unaffected CPUs */ - if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) - return false; - - /* Fallback to firmware detection */ - need_wa = detect_harden_bp_fw(); - if (!need_wa) - return false; - - __spectrev2_safe = false; - - if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) { - pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n"); - __hardenbp_enab = false; - return false; - } - - /* forced off */ - if (__nospectre_v2 || cpu_mitigations_off()) { - pr_info_once("spectrev2 mitigation disabled by command line option\n"); - __hardenbp_enab = false; - return false; - } - - if (need_wa < 0) { - pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n"); - __hardenbp_enab = false; - } - - return (need_wa > 0); -} - static const __maybe_unused struct midr_range tx2_family_cpus[] = { MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), @@ -887,9 +451,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { }, #endif { - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + .desc = "Spectre-v2", + .capability = ARM64_SPECTRE_V2, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, - .matches = check_branch_predictor, + .matches = has_spectre_v2, + .cpu_enable = spectre_v2_enable_mitigation, }, #ifdef CONFIG_RANDOMIZE_BASE { @@ -899,11 +465,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { }, #endif { - .desc = "Speculative Store Bypass Disable", - .capability = ARM64_SSBD, + .desc = "Spectre-v4", + .capability = ARM64_SPECTRE_V4, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, - .matches = has_ssbd_mitigation, - .midr_range_list = arm64_ssb_cpus, + .matches = has_spectre_v4, + .cpu_enable = spectre_v4_enable_mitigation, }, #ifdef CONFIG_ARM64_ERRATUM_1418040 { @@ -960,40 +526,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { } }; - 
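The replacement errata entries above hand both detection and enablement to callbacks (has_spectre_v2()/spectre_v2_enable_mitigation() and their v4 counterparts; the new proton-pack.o added to the kernel Makefile earlier in this patch is the likely home for them). As a rough illustration of how such a .matches/.cpu_enable table is consumed, here is a cut-down, userspace-only model; the struct, the callbacks and the table contents are invented for the example and are not the kernel's actual types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Cut-down model of a capability entry: a detect hook plus an enable hook. */
struct cpu_cap {
	const char *desc;
	bool (*matches)(void);
	void (*cpu_enable)(void);
};

/* Invented callbacks standing in for has_spectre_v2() and friends. */
static bool has_fake_erratum(void)
{
	return true;		/* pretend this CPU is affected */
}

static void enable_fake_mitigation(void)
{
	puts("mitigation enabled");
}

static const struct cpu_cap caps[] = {
	{ "Fake-erratum", has_fake_erratum, enable_fake_mitigation },
	{ NULL, NULL, NULL },	/* sentinel, like the empty entry ending arm64_errata[] */
};

int main(void)
{
	for (const struct cpu_cap *c = caps; c->matches; c++)
		if (c->matches() && c->cpu_enable)
			c->cpu_enable();
	return 0;
}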
-ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "Mitigation: __user pointer sanitization\n"); -} - -ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, - char *buf) -{ - switch (get_spectre_v2_workaround_state()) { - case ARM64_BP_HARDEN_NOT_REQUIRED: - return sprintf(buf, "Not affected\n"); - case ARM64_BP_HARDEN_WA_NEEDED: - return sprintf(buf, "Mitigation: Branch predictor hardening\n"); - case ARM64_BP_HARDEN_UNKNOWN: - default: - return sprintf(buf, "Vulnerable\n"); - } -} - -ssize_t cpu_show_spec_store_bypass(struct device *dev, - struct device_attribute *attr, char *buf) -{ - if (__ssb_safe) - return sprintf(buf, "Not affected\n"); - - switch (ssbd_state) { - case ARM64_SSBD_KERNEL: - case ARM64_SSBD_FORCE_ENABLE: - if (IS_ENABLED(CONFIG_ARM64_SSBD)) - return sprintf(buf, - "Mitigation: Speculative Store Bypass disabled via prctl\n"); - } - - return sprintf(buf, "Vulnerable\n"); -} diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 6424584be01e..dcc165b3fc04 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -75,6 +75,7 @@ #include <asm/cpu_ops.h> #include <asm/fpsimd.h> #include <asm/mmu_context.h> +#include <asm/mte.h> #include <asm/processor.h> #include <asm/sysreg.h> #include <asm/traps.h> @@ -197,9 +198,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0), + FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0), + FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -227,7 +228,9 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0), ARM64_FTR_END, @@ -487,7 +490,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = { }; static const struct arm64_ftr_bits ftr_id_pfr2[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -1111,6 +1114,7 @@ u64 read_sanitised_ftr_reg(u32 id) return 0; return regp->sys_val; } +EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg); #define read_sysreg_case(r) \ case r: return read_sysreg_s(r) @@ -1443,6 +1447,7 @@ static 
inline void __cpu_enable_hw_dbm(void) write_sysreg(tcr, tcr_el1); isb(); + local_flush_tlb_all(); } static bool cpu_has_broken_dbm(void) @@ -1583,48 +1588,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused) WARN_ON(val & (7 << 27 | 7 << 21)); } -#ifdef CONFIG_ARM64_SSBD -static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr) -{ - if (user_mode(regs)) - return 1; - - if (instr & BIT(PSTATE_Imm_shift)) - regs->pstate |= PSR_SSBS_BIT; - else - regs->pstate &= ~PSR_SSBS_BIT; - - arm64_skip_faulting_instruction(regs, 4); - return 0; -} - -static struct undef_hook ssbs_emulation_hook = { - .instr_mask = ~(1U << PSTATE_Imm_shift), - .instr_val = 0xd500401f | PSTATE_SSBS, - .fn = ssbs_emulation_handler, -}; - -static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused) -{ - static bool undef_hook_registered = false; - static DEFINE_RAW_SPINLOCK(hook_lock); - - raw_spin_lock(&hook_lock); - if (!undef_hook_registered) { - register_undef_hook(&ssbs_emulation_hook); - undef_hook_registered = true; - } - raw_spin_unlock(&hook_lock); - - if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { - sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); - arm64_set_ssbd_mitigation(false); - } else { - arm64_set_ssbd_mitigation(true); - } -} -#endif /* CONFIG_ARM64_SSBD */ - #ifdef CONFIG_ARM64_PAN static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) { @@ -1648,11 +1611,37 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) #endif /* CONFIG_ARM64_RAS_EXTN */ #ifdef CONFIG_ARM64_PTR_AUTH -static bool has_address_auth(const struct arm64_cpu_capabilities *entry, - int __unused) +static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope) { - return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) || - __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF); + int boot_val, sec_val; + + /* We don't expect to be called with SCOPE_SYSTEM */ + WARN_ON(scope == SCOPE_SYSTEM); + /* + * The ptr-auth feature levels are not intercompatible with lower + * levels. Hence we must match ptr-auth feature level of the secondary + * CPUs with that of the boot CPU. The level of boot cpu is fetched + * from the sanitised register whereas direct register read is done for + * the secondary CPUs. + * The sanitised feature state is guaranteed to match that of the + * boot CPU as a mismatched secondary CPU is parked before it gets + * a chance to update the state, with the capability. 
+ */ + boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg), + entry->field_pos, entry->sign); + if (scope & SCOPE_BOOT_CPU) + return boot_val >= entry->min_field_value; + /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */ + sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg), + entry->field_pos, entry->sign); + return sec_val == boot_val; +} + +static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry, + int scope) +{ + return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) || + has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope); } static bool has_generic_auth(const struct arm64_cpu_capabilities *entry, @@ -1702,6 +1691,22 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused) } #endif /* CONFIG_ARM64_BTI */ +#ifdef CONFIG_ARM64_MTE +static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) +{ + static bool cleared_zero_page = false; + + /* + * Clear the tags in the zero page. This needs to be done via the + * linear map which has the Tagged attribute. + */ + if (!cleared_zero_page) { + cleared_zero_page = true; + mte_clear_page_tags(lm_alias(empty_zero_page)); + } +} +#endif /* CONFIG_ARM64_MTE */ + /* Internal helper functions to match cpu capability type */ static bool cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) @@ -1976,19 +1981,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64ISAR0_CRC32_SHIFT, .min_field_value = 1, }, -#ifdef CONFIG_ARM64_SSBD { .desc = "Speculative Store Bypassing Safe (SSBS)", .capability = ARM64_SSBS, - .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, .sys_reg = SYS_ID_AA64PFR1_EL1, .field_pos = ID_AA64PFR1_SSBS_SHIFT, .sign = FTR_UNSIGNED, .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY, - .cpu_enable = cpu_enable_ssbs, }, -#endif #ifdef CONFIG_ARM64_CNP { .desc = "Common not Private translations", @@ -2021,7 +2023,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64ISAR1_APA_SHIFT, .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED, - .matches = has_cpuid_feature, + .matches = has_address_auth_cpucap, }, { .desc = "Address authentication (IMP DEF algorithm)", @@ -2031,12 +2033,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .field_pos = ID_AA64ISAR1_API_SHIFT, .min_field_value = ID_AA64ISAR1_API_IMP_DEF, - .matches = has_cpuid_feature, + .matches = has_address_auth_cpucap, }, { .capability = ARM64_HAS_ADDRESS_AUTH, .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, - .matches = has_address_auth, + .matches = has_address_auth_metacap, }, { .desc = "Generic authentication (architected algorithm)", @@ -2121,6 +2123,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, }, #endif +#ifdef CONFIG_ARM64_MTE + { + .desc = "Memory Tagging Extension", + .capability = ARM64_MTE, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64PFR1_EL1, + .field_pos = ID_AA64PFR1_MTE_SHIFT, + .min_field_value = ID_AA64PFR1_MTE, + .sign = FTR_UNSIGNED, + .cpu_enable = cpu_enable_mte, + }, +#endif /* CONFIG_ARM64_MTE */ {}, }; @@ -2237,6 +2252,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), 
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), #endif +#ifdef CONFIG_ARM64_MTE + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), +#endif /* CONFIG_ARM64_MTE */ {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index d0076c2159e6..6a7bb3729d60 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -43,94 +43,93 @@ static const char *icache_policy_str[] = { unsigned long __icache_flags; static const char *const hwcap_str[] = { - "fp", - "asimd", - "evtstrm", - "aes", - "pmull", - "sha1", - "sha2", - "crc32", - "atomics", - "fphp", - "asimdhp", - "cpuid", - "asimdrdm", - "jscvt", - "fcma", - "lrcpc", - "dcpop", - "sha3", - "sm3", - "sm4", - "asimddp", - "sha512", - "sve", - "asimdfhm", - "dit", - "uscat", - "ilrcpc", - "flagm", - "ssbs", - "sb", - "paca", - "pacg", - "dcpodp", - "sve2", - "sveaes", - "svepmull", - "svebitperm", - "svesha3", - "svesm4", - "flagm2", - "frint", - "svei8mm", - "svef32mm", - "svef64mm", - "svebf16", - "i8mm", - "bf16", - "dgh", - "rng", - "bti", - /* reserved for "mte" */ - NULL + [KERNEL_HWCAP_FP] = "fp", + [KERNEL_HWCAP_ASIMD] = "asimd", + [KERNEL_HWCAP_EVTSTRM] = "evtstrm", + [KERNEL_HWCAP_AES] = "aes", + [KERNEL_HWCAP_PMULL] = "pmull", + [KERNEL_HWCAP_SHA1] = "sha1", + [KERNEL_HWCAP_SHA2] = "sha2", + [KERNEL_HWCAP_CRC32] = "crc32", + [KERNEL_HWCAP_ATOMICS] = "atomics", + [KERNEL_HWCAP_FPHP] = "fphp", + [KERNEL_HWCAP_ASIMDHP] = "asimdhp", + [KERNEL_HWCAP_CPUID] = "cpuid", + [KERNEL_HWCAP_ASIMDRDM] = "asimdrdm", + [KERNEL_HWCAP_JSCVT] = "jscvt", + [KERNEL_HWCAP_FCMA] = "fcma", + [KERNEL_HWCAP_LRCPC] = "lrcpc", + [KERNEL_HWCAP_DCPOP] = "dcpop", + [KERNEL_HWCAP_SHA3] = "sha3", + [KERNEL_HWCAP_SM3] = "sm3", + [KERNEL_HWCAP_SM4] = "sm4", + [KERNEL_HWCAP_ASIMDDP] = "asimddp", + [KERNEL_HWCAP_SHA512] = "sha512", + [KERNEL_HWCAP_SVE] = "sve", + [KERNEL_HWCAP_ASIMDFHM] = "asimdfhm", + [KERNEL_HWCAP_DIT] = "dit", + [KERNEL_HWCAP_USCAT] = "uscat", + [KERNEL_HWCAP_ILRCPC] = "ilrcpc", + [KERNEL_HWCAP_FLAGM] = "flagm", + [KERNEL_HWCAP_SSBS] = "ssbs", + [KERNEL_HWCAP_SB] = "sb", + [KERNEL_HWCAP_PACA] = "paca", + [KERNEL_HWCAP_PACG] = "pacg", + [KERNEL_HWCAP_DCPODP] = "dcpodp", + [KERNEL_HWCAP_SVE2] = "sve2", + [KERNEL_HWCAP_SVEAES] = "sveaes", + [KERNEL_HWCAP_SVEPMULL] = "svepmull", + [KERNEL_HWCAP_SVEBITPERM] = "svebitperm", + [KERNEL_HWCAP_SVESHA3] = "svesha3", + [KERNEL_HWCAP_SVESM4] = "svesm4", + [KERNEL_HWCAP_FLAGM2] = "flagm2", + [KERNEL_HWCAP_FRINT] = "frint", + [KERNEL_HWCAP_SVEI8MM] = "svei8mm", + [KERNEL_HWCAP_SVEF32MM] = "svef32mm", + [KERNEL_HWCAP_SVEF64MM] = "svef64mm", + [KERNEL_HWCAP_SVEBF16] = "svebf16", + [KERNEL_HWCAP_I8MM] = "i8mm", + [KERNEL_HWCAP_BF16] = "bf16", + [KERNEL_HWCAP_DGH] = "dgh", + [KERNEL_HWCAP_RNG] = "rng", + [KERNEL_HWCAP_BTI] = "bti", + [KERNEL_HWCAP_MTE] = "mte", }; #ifdef CONFIG_COMPAT +#define COMPAT_KERNEL_HWCAP(x) const_ilog2(COMPAT_HWCAP_ ## x) static const char *const compat_hwcap_str[] = { - "swp", - "half", - "thumb", - "26bit", - "fastmult", - "fpa", - "vfp", - "edsp", - "java", - "iwmmxt", - "crunch", - "thumbee", - "neon", - "vfpv3", - "vfpv3d16", - "tls", - "vfpv4", - "idiva", - "idivt", - "vfpd32", - "lpae", - "evtstrm", - NULL + [COMPAT_KERNEL_HWCAP(SWP)] = "swp", + [COMPAT_KERNEL_HWCAP(HALF)] = "half", + [COMPAT_KERNEL_HWCAP(THUMB)] = "thumb", + [COMPAT_KERNEL_HWCAP(26BIT)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(FAST_MULT)] = "fastmult", + 
[COMPAT_KERNEL_HWCAP(FPA)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(VFP)] = "vfp", + [COMPAT_KERNEL_HWCAP(EDSP)] = "edsp", + [COMPAT_KERNEL_HWCAP(JAVA)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(IWMMXT)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(CRUNCH)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(THUMBEE)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(NEON)] = "neon", + [COMPAT_KERNEL_HWCAP(VFPv3)] = "vfpv3", + [COMPAT_KERNEL_HWCAP(VFPV3D16)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(TLS)] = "tls", + [COMPAT_KERNEL_HWCAP(VFPv4)] = "vfpv4", + [COMPAT_KERNEL_HWCAP(IDIVA)] = "idiva", + [COMPAT_KERNEL_HWCAP(IDIVT)] = "idivt", + [COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */ + [COMPAT_KERNEL_HWCAP(LPAE)] = "lpae", + [COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm", }; +#define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x) static const char *const compat_hwcap2_str[] = { - "aes", - "pmull", - "sha1", - "sha2", - "crc32", - NULL + [COMPAT_KERNEL_HWCAP2(AES)] = "aes", + [COMPAT_KERNEL_HWCAP2(PMULL)] = "pmull", + [COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1", + [COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2", + [COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32", }; #endif /* CONFIG_COMPAT */ @@ -166,16 +165,25 @@ static int c_show(struct seq_file *m, void *v) seq_puts(m, "Features\t:"); if (compat) { #ifdef CONFIG_COMPAT - for (j = 0; compat_hwcap_str[j]; j++) - if (compat_elf_hwcap & (1 << j)) + for (j = 0; j < ARRAY_SIZE(compat_hwcap_str); j++) { + if (compat_elf_hwcap & (1 << j)) { + /* + * Warn once if any feature should not + * have been present on arm64 platform. + */ + if (WARN_ON_ONCE(!compat_hwcap_str[j])) + continue; + seq_printf(m, " %s", compat_hwcap_str[j]); + } + } - for (j = 0; compat_hwcap2_str[j]; j++) + for (j = 0; j < ARRAY_SIZE(compat_hwcap2_str); j++) if (compat_elf_hwcap2 & (1 << j)) seq_printf(m, " %s", compat_hwcap2_str[j]); #endif /* CONFIG_COMPAT */ } else { - for (j = 0; hwcap_str[j]; j++) + for (j = 0; j < ARRAY_SIZE(hwcap_str); j++) if (cpu_have_feature(j)) seq_printf(m, " %s", hwcap_str[j]); } diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 7310a4f7f993..fa76151de6ff 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -384,7 +384,7 @@ void __init debug_traps_init(void) hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP, TRAP_TRACE, "single-step handler"); hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP, - TRAP_BRKPT, "ptrace BRK handler"); + TRAP_BRKPT, "BRK handler"); } /* Re-enable single step for syscall restarting. 
*/ diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index d3be9dbf5490..43d4c329775f 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -66,6 +66,13 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr) } NOKPROBE_SYMBOL(el1_dbg); +static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr) +{ + local_daif_inherit(regs); + do_ptrauth_fault(regs, esr); +} +NOKPROBE_SYMBOL(el1_fpac); + asmlinkage void notrace el1_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -92,6 +99,9 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs) case ESR_ELx_EC_BRK64: el1_dbg(regs, esr); break; + case ESR_ELx_EC_FPAC: + el1_fpac(regs, esr); + break; default: el1_inv(regs, esr); } @@ -227,6 +237,14 @@ static void notrace el0_svc(struct pt_regs *regs) } NOKPROBE_SYMBOL(el0_svc); +static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr) +{ + user_exit_irqoff(); + local_daif_restore(DAIF_PROCCTX); + do_ptrauth_fault(regs, esr); +} +NOKPROBE_SYMBOL(el0_fpac); + asmlinkage void notrace el0_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs) case ESR_ELx_EC_BRK64: el0_dbg(regs, esr); break; + case ESR_ELx_EC_FPAC: + el0_fpac(regs, esr); + break; default: el0_inv(regs, esr); } diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index f880dd63ddc3..2ca395c25448 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -32,6 +32,7 @@ SYM_FUNC_START(fpsimd_load_state) SYM_FUNC_END(fpsimd_load_state) #ifdef CONFIG_ARM64_SVE + SYM_FUNC_START(sve_save_state) sve_save 0, x1, 2 ret @@ -46,4 +47,28 @@ SYM_FUNC_START(sve_get_vl) _sve_rdvl 0, 1 ret SYM_FUNC_END(sve_get_vl) + +/* + * Load SVE state from FPSIMD state. + * + * x0 = pointer to struct fpsimd_state + * x1 = VQ - 1 + * + * Each SVE vector will be loaded with the first 128-bits taken from FPSIMD + * and the rest zeroed. All the other SVE registers will be zeroed. + */ +SYM_FUNC_START(sve_load_from_fpsimd_state) + sve_load_vq x1, x2, x3 + fpsimd_restore x0, 8 + _for n, 0, 15, _sve_pfalse \n + _sve_wrffr 0 + ret +SYM_FUNC_END(sve_load_from_fpsimd_state) + +/* Zero all SVE registers but the first 128-bits of each vector */ +SYM_FUNC_START(sve_flush_live) + sve_flush + ret +SYM_FUNC_END(sve_flush_live) + #endif /* CONFIG_ARM64_SVE */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 55af8b504b65..f30007dff35f 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -132,9 +132,8 @@ alternative_else_nop_endif * them if required. 
*/ .macro apply_ssbd, state, tmp1, tmp2 -#ifdef CONFIG_ARM64_SSBD -alternative_cb arm64_enable_wa2_handling - b .L__asm_ssbd_skip\@ +alternative_cb spectre_v4_patch_fw_mitigation_enable + b .L__asm_ssbd_skip\@ // Patched to NOP alternative_cb_end ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 cbz \tmp2, .L__asm_ssbd_skip\@ @@ -142,10 +141,35 @@ alternative_cb_end tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 mov w1, #\state -alternative_cb arm64_update_smccc_conduit +alternative_cb spectre_v4_patch_fw_mitigation_conduit nop // Patched to SMC/HVC #0 alternative_cb_end .L__asm_ssbd_skip\@: + .endm + + /* Check for MTE asynchronous tag check faults */ + .macro check_mte_async_tcf, flgs, tmp +#ifdef CONFIG_ARM64_MTE +alternative_if_not ARM64_MTE + b 1f +alternative_else_nop_endif + mrs_s \tmp, SYS_TFSRE0_EL1 + tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f + /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */ + orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT + str \flgs, [tsk, #TSK_TI_FLAGS] + msr_s SYS_TFSRE0_EL1, xzr +1: +#endif + .endm + + /* Clear the MTE asynchronous tag check faults */ + .macro clear_mte_async_tcf +#ifdef CONFIG_ARM64_MTE +alternative_if ARM64_MTE + dsb ish + msr_s SYS_TFSRE0_EL1, xzr +alternative_else_nop_endif #endif .endm @@ -182,6 +206,8 @@ alternative_cb_end ldr x19, [tsk, #TSK_TI_FLAGS] disable_step_tsk x19, x20 + /* Check for asynchronous tag check faults in user space */ + check_mte_async_tcf x19, x22 apply_ssbd 1, x22, x23 ptrauth_keys_install_kernel tsk, x20, x22, x23 @@ -233,6 +259,13 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING str x20, [sp, #S_PMR_SAVE] alternative_else_nop_endif + /* Re-enable tag checking (TCO set on exception entry) */ +#ifdef CONFIG_ARM64_MTE +alternative_if ARM64_MTE + SET_PSTATE_TCO(0) +alternative_else_nop_endif +#endif + /* * Registers that may be useful after this macro is invoked: * @@ -697,11 +730,9 @@ el0_irq_naked: bl trace_hardirqs_off #endif -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR tbz x22, #55, 1f bl do_el0_irq_bp_hardening 1: -#endif irq_handler #ifdef CONFIG_TRACE_IRQFLAGS @@ -744,6 +775,8 @@ SYM_CODE_START_LOCAL(ret_to_user) and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending finish_ret_to_user: + /* Ignore asynchronous tag check faults in the uaccess routines */ + clear_mte_async_tcf enable_step_tsk x1, x2 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK bl stackleak_erase diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 55c8f3ec6705..a6d688c10745 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -32,9 +32,11 @@ #include <linux/swab.h> #include <asm/esr.h> +#include <asm/exception.h> #include <asm/fpsimd.h> #include <asm/cpufeature.h> #include <asm/cputype.h> +#include <asm/neon.h> #include <asm/processor.h> #include <asm/simd.h> #include <asm/sigcontext.h> @@ -312,7 +314,7 @@ static void fpsimd_save(void) * re-enter user with corrupt state. * There's no way to recover, so kill it: */ - force_signal_inject(SIGKILL, SI_KERNEL, 0); + force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); return; } @@ -928,7 +930,7 @@ void fpsimd_release_task(struct task_struct *dead_task) * the SVE access trap will be disabled the next time this task * reaches ret_to_user. * - * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load() + * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state() * would have disabled the SVE access trap for userspace during * ret_to_user, making an SVE access trap impossible in that case. 
*/ @@ -936,7 +938,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) { /* Even if we chose not to use SVE, the hardware could still trap: */ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); return; } diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 037421c66b14..d8d9caf02834 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -36,14 +36,10 @@ #include "efi-header.S" -#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) +#define __PHYS_OFFSET KERNEL_START -#if (TEXT_OFFSET & 0xfff) != 0 -#error TEXT_OFFSET must be at least 4KB aligned -#elif (PAGE_OFFSET & 0x1fffff) != 0 +#if (PAGE_OFFSET & 0x1fffff) != 0 #error PAGE_OFFSET must be at least 2MB aligned -#elif TEXT_OFFSET > 0x1fffff -#error TEXT_OFFSET must be less than 2MB #endif /* @@ -55,7 +51,7 @@ * x0 = physical address to the FDT blob. * * This code is mostly position independent so you call this at - * __pa(PAGE_OFFSET + TEXT_OFFSET). + * __pa(PAGE_OFFSET). * * Note that the callee-saved registers are used for storing variables * that are useful before the MMU is enabled. The allocations are described @@ -77,7 +73,7 @@ _head: b primary_entry // branch to kernel start, magic .long 0 // reserved #endif - le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian + .quad 0 // Image load offset from start of RAM, little-endian le64sym _kernel_size_le // Effective size of kernel image, little-endian le64sym _kernel_flags_le // Informative flags, little-endian .quad 0 // reserved @@ -382,7 +378,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables) * Map the kernel image (starting with PHYS_OFFSET). */ adrp x0, init_pg_dir - mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text) + mov_q x5, KIMAGE_VADDR // compile time __va(_text) add x5, x5, x23 // add KASLR displacement mov x4, PTRS_PER_PGD adrp x6, _end // runtime __pa(_end) @@ -474,7 +470,7 @@ SYM_FUNC_END(__primary_switched) .pushsection ".rodata", "a" SYM_DATA_START(kimage_vaddr) - .quad _text - TEXT_OFFSET + .quad _text SYM_DATA_END(kimage_vaddr) EXPORT_SYMBOL(kimage_vaddr) .popsection diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 68e14152d6e9..42003774d261 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -21,7 +21,6 @@ #include <linux/sched.h> #include <linux/suspend.h> #include <linux/utsname.h> -#include <linux/version.h> #include <asm/barrier.h> #include <asm/cacheflush.h> @@ -31,6 +30,7 @@ #include <asm/kexec.h> #include <asm/memory.h> #include <asm/mmu_context.h> +#include <asm/mte.h> #include <asm/pgalloc.h> #include <asm/pgtable-hwdef.h> #include <asm/sections.h> @@ -285,6 +285,117 @@ static int create_safe_exec_page(void *src_start, size_t length, #define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start)) +#ifdef CONFIG_ARM64_MTE + +static DEFINE_XARRAY(mte_pages); + +static int save_tags(struct page *page, unsigned long pfn) +{ + void *tag_storage, *ret; + + tag_storage = mte_allocate_tag_storage(); + if (!tag_storage) + return -ENOMEM; + + mte_save_page_tags(page_address(page), tag_storage); + + ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL); + if (WARN(xa_is_err(ret), "Failed to store MTE tags")) { + mte_free_tag_storage(tag_storage); + return xa_err(ret); + } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) { + mte_free_tag_storage(ret); + } + + return 0; +} + 
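save_tags() above allocates a tag buffer per tagged page and files it in an XArray indexed by pfn, so the restore path can walk the same keys after resume. A toy userspace model of that keyed save/restore bookkeeping, using a flat table instead of an XArray and memcpy() instead of the MTE tag copy routines (all names invented for illustration), is sketched below:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_PAGES	8
#define TAGS_PER_PAGE	16	/* arbitrary size for the toy model */

/* One slot per pfn, standing in for the XArray keyed by pfn. */
static unsigned char *saved_tags[NR_PAGES];

static int save_page_tags(unsigned long pfn, const unsigned char *tags)
{
	unsigned char *copy = malloc(TAGS_PER_PAGE);

	if (!copy)
		return -1;
	memcpy(copy, tags, TAGS_PER_PAGE);	/* mte_save_page_tags() stand-in */
	saved_tags[pfn] = copy;
	return 0;
}

static void restore_page_tags(unsigned long pfn, unsigned char *tags)
{
	if (!saved_tags[pfn])
		return;
	memcpy(tags, saved_tags[pfn], TAGS_PER_PAGE);	/* mte_restore_page_tags() stand-in */
	free(saved_tags[pfn]);
	saved_tags[pfn] = NULL;
}

int main(void)
{
	unsigned char before[TAGS_PER_PAGE] = { 0xa, 0xb, 0xc };
	unsigned char after[TAGS_PER_PAGE] = { 0 };

	if (save_page_tags(3, before) == 0)
		restore_page_tags(3, after);
	printf("restored first tag: 0x%x\n", after[0]);
	return 0;
}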
+static void swsusp_mte_free_storage(void) +{ + XA_STATE(xa_state, &mte_pages, 0); + void *tags; + + xa_lock(&mte_pages); + xas_for_each(&xa_state, tags, ULONG_MAX) { + mte_free_tag_storage(tags); + } + xa_unlock(&mte_pages); + + xa_destroy(&mte_pages); +} + +static int swsusp_mte_save_tags(void) +{ + struct zone *zone; + unsigned long pfn, max_zone_pfn; + int ret = 0; + int n = 0; + + if (!system_supports_mte()) + return 0; + + for_each_populated_zone(zone) { + max_zone_pfn = zone_end_pfn(zone); + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { + struct page *page = pfn_to_online_page(pfn); + + if (!page) + continue; + + if (!test_bit(PG_mte_tagged, &page->flags)) + continue; + + ret = save_tags(page, pfn); + if (ret) { + swsusp_mte_free_storage(); + goto out; + } + + n++; + } + } + pr_info("Saved %d MTE pages\n", n); + +out: + return ret; +} + +static void swsusp_mte_restore_tags(void) +{ + XA_STATE(xa_state, &mte_pages, 0); + int n = 0; + void *tags; + + xa_lock(&mte_pages); + xas_for_each(&xa_state, tags, ULONG_MAX) { + unsigned long pfn = xa_state.xa_index; + struct page *page = pfn_to_online_page(pfn); + + mte_restore_page_tags(page_address(page), tags); + + mte_free_tag_storage(tags); + n++; + } + xa_unlock(&mte_pages); + + pr_info("Restored %d MTE pages\n", n); + + xa_destroy(&mte_pages); +} + +#else /* CONFIG_ARM64_MTE */ + +static int swsusp_mte_save_tags(void) +{ + return 0; +} + +static void swsusp_mte_restore_tags(void) +{ +} + +#endif /* CONFIG_ARM64_MTE */ + int swsusp_arch_suspend(void) { int ret = 0; @@ -302,6 +413,10 @@ int swsusp_arch_suspend(void) /* make the crash dump kernel image visible/saveable */ crash_prepare_suspend(); + ret = swsusp_mte_save_tags(); + if (ret) + return ret; + sleep_cpu = smp_processor_id(); ret = swsusp_save(); } else { @@ -315,6 +430,8 @@ int swsusp_arch_suspend(void) dcache_clean_range(__hyp_text_start, __hyp_text_end); } + swsusp_mte_restore_tags(); + /* make the crash dump kernel image protected again */ crash_post_resume(); @@ -332,11 +449,7 @@ int swsusp_arch_suspend(void) * mitigation off behind our back, let's set the state * to what we expect it to be. */ - switch (arm64_get_ssbd_state()) { - case ARM64_SSBD_FORCE_ENABLE: - case ARM64_SSBD_KERNEL: - arm64_set_ssbd_mitigation(true); - } + spectre_v4_enable_mitigation(NULL); } local_daif_restore(flags); diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 8982b68289b7..843ecfb16a69 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -64,12 +64,10 @@ __efistub__ctype = _ctype; #define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym; /* Alternative callbacks for init-time patching of nVHE hyp code. */ -KVM_NVHE_ALIAS(arm64_enable_wa2_handling); KVM_NVHE_ALIAS(kvm_patch_vector_branch); KVM_NVHE_ALIAS(kvm_update_va_mask); /* Global kernel state accessed by nVHE hyp code. */ -KVM_NVHE_ALIAS(arm64_ssbd_callback_required); KVM_NVHE_ALIAS(kvm_host_data); KVM_NVHE_ALIAS(kvm_vgic_global_state); @@ -101,6 +99,8 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); /* Static key checked in pmr_sync(). */ #ifdef CONFIG_ARM64_PSEUDO_NMI KVM_NVHE_ALIAS(gic_pmr_sync); +/* Static key checked in GIC_PRIO_IRQOFF. 
*/ +KVM_NVHE_ALIAS(gic_nonsecure_priorities); #endif /* EL2 exception handling */ diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index c7d38c660372..7bc3ba897901 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -62,7 +62,6 @@ */ #define HEAD_SYMBOLS \ DEFINE_IMAGE_LE64(_kernel_size_le, _end - _text); \ - DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \ DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); #endif /* __ARM64_KERNEL_IMAGE_H */ diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index a107375005bc..6c0de2f60ea9 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -60,16 +60,10 @@ bool __kprobes aarch64_insn_is_steppable_hint(u32 insn) case AARCH64_INSN_HINT_XPACLRI: case AARCH64_INSN_HINT_PACIA_1716: case AARCH64_INSN_HINT_PACIB_1716: - case AARCH64_INSN_HINT_AUTIA_1716: - case AARCH64_INSN_HINT_AUTIB_1716: case AARCH64_INSN_HINT_PACIAZ: case AARCH64_INSN_HINT_PACIASP: case AARCH64_INSN_HINT_PACIBZ: case AARCH64_INSN_HINT_PACIBSP: - case AARCH64_INSN_HINT_AUTIAZ: - case AARCH64_INSN_HINT_AUTIASP: - case AARCH64_INSN_HINT_AUTIBZ: - case AARCH64_INSN_HINT_AUTIBSP: case AARCH64_INSN_HINT_BTI: case AARCH64_INSN_HINT_BTIC: case AARCH64_INSN_HINT_BTIJ: @@ -176,7 +170,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn) bool __kprobes aarch64_insn_is_branch(u32 insn) { - /* b, bl, cb*, tb*, b.cond, br, blr */ + /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */ return aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) || @@ -185,8 +179,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn) aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) || aarch64_insn_is_ret(insn) || + aarch64_insn_is_ret_auth(insn) || aarch64_insn_is_br(insn) || + aarch64_insn_is_br_auth(insn) || aarch64_insn_is_blr(insn) || + aarch64_insn_is_blr_auth(insn) || aarch64_insn_is_bcond(insn); } diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 04a327ccf84d..9cf2fb87584a 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -10,10 +10,10 @@ * Copyright (C) 2012 ARM Ltd. 
*/ -#include <linux/kernel_stat.h> #include <linux/irq.h> #include <linux/memory.h> #include <linux/smp.h> +#include <linux/hardirq.h> #include <linux/init.h> #include <linux/irqchip.h> #include <linux/kprobes.h> @@ -22,20 +22,11 @@ #include <asm/daifflags.h> #include <asm/vmap_stack.h> -unsigned long irq_err_count; - /* Only access this in an NMI enter/exit */ DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts); DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); -int arch_show_interrupts(struct seq_file *p, int prec) -{ - show_ipi_list(p, prec); - seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); - return 0; -} - #ifdef CONFIG_VMAP_STACK static void init_irq_stacks(void) { diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 361a1143e09e..5b0e67b93cdc 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -215,8 +215,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) phys_addr_t start, end; nr_ranges = 1; /* for exclusion of crashkernel region */ - for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, - MEMBLOCK_NONE, &start, &end, NULL) + for_each_mem_range(i, &start, &end) nr_ranges++; cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); @@ -225,8 +224,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) cmem->max_nr_ranges = nr_ranges; cmem->nr_ranges = 0; - for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, - MEMBLOCK_NONE, &start, &end, NULL) { + for_each_mem_range(i, &start, &end) { cmem->ranges[cmem->nr_ranges].start = start; cmem->ranges[cmem->nr_ranges].end = end - 1; cmem->nr_ranges++; diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c new file mode 100644 index 000000000000..52a0638ed967 --- /dev/null +++ b/arch/arm64/kernel/mte.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 ARM Ltd. + */ + +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/prctl.h> +#include <linux/sched.h> +#include <linux/sched/mm.h> +#include <linux/string.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <linux/thread_info.h> +#include <linux/uio.h> + +#include <asm/cpufeature.h> +#include <asm/mte.h> +#include <asm/ptrace.h> +#include <asm/sysreg.h> + +static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap) +{ + pte_t old_pte = READ_ONCE(*ptep); + + if (check_swap && is_swap_pte(old_pte)) { + swp_entry_t entry = pte_to_swp_entry(old_pte); + + if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) + return; + } + + mte_clear_page_tags(page_address(page)); +} + +void mte_sync_tags(pte_t *ptep, pte_t pte) +{ + struct page *page = pte_page(pte); + long i, nr_pages = compound_nr(page); + bool check_swap = nr_pages == 1; + + /* if PG_mte_tagged is set, tags have already been initialised */ + for (i = 0; i < nr_pages; i++, page++) { + if (!test_and_set_bit(PG_mte_tagged, &page->flags)) + mte_sync_page_tags(page, ptep, check_swap); + } +} + +int memcmp_pages(struct page *page1, struct page *page2) +{ + char *addr1, *addr2; + int ret; + + addr1 = page_address(page1); + addr2 = page_address(page2); + ret = memcmp(addr1, addr2, PAGE_SIZE); + + if (!system_supports_mte() || ret) + return ret; + + /* + * If the page content is identical but at least one of the pages is + * tagged, return non-zero to avoid KSM merging. 
If only one of the + * pages is tagged, set_pte_at() may zero or change the tags of the + * other page via mte_sync_tags(). + */ + if (test_bit(PG_mte_tagged, &page1->flags) || + test_bit(PG_mte_tagged, &page2->flags)) + return addr1 != addr2; + + return ret; +} + +static void update_sctlr_el1_tcf0(u64 tcf0) +{ + /* ISB required for the kernel uaccess routines */ + sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0); + isb(); +} + +static void set_sctlr_el1_tcf0(u64 tcf0) +{ + /* + * mte_thread_switch() checks current->thread.sctlr_tcf0 as an + * optimisation. Disable preemption so that it does not see + * the variable update before the SCTLR_EL1.TCF0 one. + */ + preempt_disable(); + current->thread.sctlr_tcf0 = tcf0; + update_sctlr_el1_tcf0(tcf0); + preempt_enable(); +} + +static void update_gcr_el1_excl(u64 incl) +{ + u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK; + + /* + * Note that 'incl' is an include mask (controlled by the user via + * prctl()) while GCR_EL1 accepts an exclude mask. + * No need for ISB since this only affects EL0 currently, implicit + * with ERET. + */ + sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl); +} + +static void set_gcr_el1_excl(u64 incl) +{ + current->thread.gcr_user_incl = incl; + update_gcr_el1_excl(incl); +} + +void flush_mte_state(void) +{ + if (!system_supports_mte()) + return; + + /* clear any pending asynchronous tag fault */ + dsb(ish); + write_sysreg_s(0, SYS_TFSRE0_EL1); + clear_thread_flag(TIF_MTE_ASYNC_FAULT); + /* disable tag checking */ + set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE); + /* reset tag generation mask */ + set_gcr_el1_excl(0); +} + +void mte_thread_switch(struct task_struct *next) +{ + if (!system_supports_mte()) + return; + + /* avoid expensive SCTLR_EL1 accesses if no change */ + if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0) + update_sctlr_el1_tcf0(next->thread.sctlr_tcf0); + update_gcr_el1_excl(next->thread.gcr_user_incl); +} + +void mte_suspend_exit(void) +{ + if (!system_supports_mte()) + return; + + update_gcr_el1_excl(current->thread.gcr_user_incl); +} + +long set_mte_ctrl(struct task_struct *task, unsigned long arg) +{ + u64 tcf0; + u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT; + + if (!system_supports_mte()) + return 0; + + switch (arg & PR_MTE_TCF_MASK) { + case PR_MTE_TCF_NONE: + tcf0 = SCTLR_EL1_TCF0_NONE; + break; + case PR_MTE_TCF_SYNC: + tcf0 = SCTLR_EL1_TCF0_SYNC; + break; + case PR_MTE_TCF_ASYNC: + tcf0 = SCTLR_EL1_TCF0_ASYNC; + break; + default: + return -EINVAL; + } + + if (task != current) { + task->thread.sctlr_tcf0 = tcf0; + task->thread.gcr_user_incl = gcr_incl; + } else { + set_sctlr_el1_tcf0(tcf0); + set_gcr_el1_excl(gcr_incl); + } + + return 0; +} + +long get_mte_ctrl(struct task_struct *task) +{ + unsigned long ret; + + if (!system_supports_mte()) + return 0; + + ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT; + + switch (task->thread.sctlr_tcf0) { + case SCTLR_EL1_TCF0_NONE: + return PR_MTE_TCF_NONE; + case SCTLR_EL1_TCF0_SYNC: + ret |= PR_MTE_TCF_SYNC; + break; + case SCTLR_EL1_TCF0_ASYNC: + ret |= PR_MTE_TCF_ASYNC; + break; + } + + return ret; +} + +/* + * Access MTE tags in another process' address space as given in mm. Update + * the number of tags copied. Return 0 if any tags copied, error otherwise. + * Inspired by __access_remote_vm(). 
+ */ +static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, + struct iovec *kiov, unsigned int gup_flags) +{ + struct vm_area_struct *vma; + void __user *buf = kiov->iov_base; + size_t len = kiov->iov_len; + int ret; + int write = gup_flags & FOLL_WRITE; + + if (!access_ok(buf, len)) + return -EFAULT; + + if (mmap_read_lock_killable(mm)) + return -EIO; + + while (len) { + unsigned long tags, offset; + void *maddr; + struct page *page = NULL; + + ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page, + &vma, NULL); + if (ret <= 0) + break; + + /* + * Only copy tags if the page has been mapped as PROT_MTE + * (PG_mte_tagged set). Otherwise the tags are not valid and + * not accessible to user. Moreover, an mprotect(PROT_MTE) + * would cause the existing tags to be cleared if the page + * was never mapped with PROT_MTE. + */ + if (!test_bit(PG_mte_tagged, &page->flags)) { + ret = -EOPNOTSUPP; + put_page(page); + break; + } + + /* limit access to the end of the page */ + offset = offset_in_page(addr); + tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE); + + maddr = page_address(page); + if (write) { + tags = mte_copy_tags_from_user(maddr + offset, buf, tags); + set_page_dirty_lock(page); + } else { + tags = mte_copy_tags_to_user(buf, maddr + offset, tags); + } + put_page(page); + + /* error accessing the tracer's buffer */ + if (!tags) + break; + + len -= tags; + buf += tags; + addr += tags * MTE_GRANULE_SIZE; + } + mmap_read_unlock(mm); + + /* return an error if no tags copied */ + kiov->iov_len = buf - kiov->iov_base; + if (!kiov->iov_len) { + /* check for error accessing the tracee's address space */ + if (ret <= 0) + return -EIO; + else + return -EFAULT; + } + + return 0; +} + +/* + * Copy MTE tags in another process' address space at 'addr' to/from tracer's + * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm(). + */ +static int access_remote_tags(struct task_struct *tsk, unsigned long addr, + struct iovec *kiov, unsigned int gup_flags) +{ + struct mm_struct *mm; + int ret; + + mm = get_task_mm(tsk); + if (!mm) + return -EPERM; + + if (!tsk->ptrace || (current != tsk->parent) || + ((get_dumpable(mm) != SUID_DUMP_USER) && + !ptracer_capable(tsk, mm->user_ns))) { + mmput(mm); + return -EPERM; + } + + ret = __access_remote_tags(mm, addr, kiov, gup_flags); + mmput(mm); + + return ret; +} + +int mte_ptrace_copy_tags(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + int ret; + struct iovec kiov; + struct iovec __user *uiov = (void __user *)data; + unsigned int gup_flags = FOLL_FORCE; + + if (!system_supports_mte()) + return -EIO; + + if (get_user(kiov.iov_base, &uiov->iov_base) || + get_user(kiov.iov_len, &uiov->iov_len)) + return -EFAULT; + + if (request == PTRACE_POKEMTETAGS) + gup_flags |= FOLL_WRITE; + + /* align addr to the MTE tag granule */ + addr &= MTE_GRANULE_MASK; + + ret = access_remote_tags(child, addr, &kiov, gup_flags); + if (!ret) + ret = put_user(kiov.iov_len, &uiov->iov_len); + + return ret; +} diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index b0e03e052dd1..88ff471b0bce 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -137,11 +137,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, * whist unwinding the stackframe and is like a subroutine return so we use * the PC. 
*/ -static int callchain_trace(struct stackframe *frame, void *data) +static bool callchain_trace(void *data, unsigned long pc) { struct perf_callchain_entry_ctx *entry = data; - perf_callchain_store(entry, frame->pc); - return 0; + perf_callchain_store(entry, pc); + return true; } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 462f9a9cc44b..3605f77ad4df 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -69,6 +69,9 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL, [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB, + [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD, + [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD, + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, }; @@ -302,13 +305,33 @@ static struct attribute_group armv8_pmuv3_format_attr_group = { .attrs = armv8_pmuv3_format_attrs, }; +static ssize_t slots_show(struct device *dev, struct device_attribute *attr, + char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK; + + return snprintf(page, PAGE_SIZE, "0x%08x\n", slots); +} + +static DEVICE_ATTR_RO(slots); + +static struct attribute *armv8_pmuv3_caps_attrs[] = { + &dev_attr_slots.attr, + NULL, +}; + +static struct attribute_group armv8_pmuv3_caps_attr_group = { + .name = "caps", + .attrs = armv8_pmuv3_caps_attrs, +}; + /* * Perf Events' indices */ #define ARMV8_IDX_CYCLE_COUNTER 0 #define ARMV8_IDX_COUNTER0 1 -#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ - (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) /* @@ -348,6 +371,73 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event) #define ARMV8_IDX_TO_COUNTER(x) \ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) +/* + * This code is really good + */ + +#define PMEVN_CASE(n, case_macro) \ + case n: case_macro(n); break + +#define PMEVN_SWITCH(x, case_macro) \ + do { \ + switch (x) { \ + PMEVN_CASE(0, case_macro); \ + PMEVN_CASE(1, case_macro); \ + PMEVN_CASE(2, case_macro); \ + PMEVN_CASE(3, case_macro); \ + PMEVN_CASE(4, case_macro); \ + PMEVN_CASE(5, case_macro); \ + PMEVN_CASE(6, case_macro); \ + PMEVN_CASE(7, case_macro); \ + PMEVN_CASE(8, case_macro); \ + PMEVN_CASE(9, case_macro); \ + PMEVN_CASE(10, case_macro); \ + PMEVN_CASE(11, case_macro); \ + PMEVN_CASE(12, case_macro); \ + PMEVN_CASE(13, case_macro); \ + PMEVN_CASE(14, case_macro); \ + PMEVN_CASE(15, case_macro); \ + PMEVN_CASE(16, case_macro); \ + PMEVN_CASE(17, case_macro); \ + PMEVN_CASE(18, case_macro); \ + PMEVN_CASE(19, case_macro); \ + PMEVN_CASE(20, case_macro); \ + PMEVN_CASE(21, case_macro); \ + PMEVN_CASE(22, case_macro); \ + PMEVN_CASE(23, case_macro); \ + PMEVN_CASE(24, case_macro); \ + PMEVN_CASE(25, case_macro); \ + PMEVN_CASE(26, case_macro); \ + PMEVN_CASE(27, case_macro); \ + PMEVN_CASE(28, case_macro); \ + PMEVN_CASE(29, case_macro); \ + PMEVN_CASE(30, case_macro); \ + default: WARN(1, "Invalid PMEV* index\n"); \ + } \ + } while (0) + +#define RETURN_READ_PMEVCNTRN(n) \ + return read_sysreg(pmevcntr##n##_el0) +static unsigned long read_pmevcntrn(int n) +{ + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); + return 0; +} 
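The PMEVN_SWITCH() machinery above exists because pmevcntr<n>_el0 and pmevtyper<n>_el0 must be named with a compile-time constant, so a runtime counter index has to be expanded into a switch covering every legal value. The same trick can be demonstrated in plain userspace C, with a small mock_pmevcntr[] array standing in for the real event counter registers (everything below is illustrative, not kernel code):

#include <stdio.h>

/* Mock "registers" standing in for pmevcntr<n>_el0 (illustrative only). */
static unsigned long mock_pmevcntr[4];

/* Each case names its register with a literal index, as the sysreg form requires. */
#define RETURN_READ_MOCK_PMEVCNTRN(n)	return mock_pmevcntr[n]

#define PMEVN_CASE(n, case_macro)	case n: case_macro(n); break

#define PMEVN_SWITCH(x, case_macro)				\
	do {							\
		switch (x) {					\
		PMEVN_CASE(0, case_macro);			\
		PMEVN_CASE(1, case_macro);			\
		PMEVN_CASE(2, case_macro);			\
		PMEVN_CASE(3, case_macro);			\
		default:					\
			fprintf(stderr, "bad counter index\n");	\
		}						\
	} while (0)

static unsigned long read_mock_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_MOCK_PMEVCNTRN);
	return 0;
}

int main(void)
{
	mock_pmevcntr[2] = 42;
	printf("%lu\n", read_mock_pmevcntrn(2));
	return 0;
}

On the kernel side this direct per-counter access is what lets the following hunks drop the old armv8pmu_select_counter()/PMSELR_EL0 indirection.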
+ +#define WRITE_PMEVCNTRN(n) \ + write_sysreg(val, pmevcntr##n##_el0) +static void write_pmevcntrn(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVCNTRN); +} + +#define WRITE_PMEVTYPERN(n) \ + write_sysreg(val, pmevtyper##n##_el0) +static void write_pmevtypern(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVTYPERN); +} + static inline u32 armv8pmu_pmcr_read(void) { return read_sysreg(pmcr_el0); @@ -365,28 +455,16 @@ static inline int armv8pmu_has_overflowed(u32 pmovsr) return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; } -static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) -{ - return idx >= ARMV8_IDX_CYCLE_COUNTER && - idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu); -} - static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) { return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); } -static inline void armv8pmu_select_counter(int idx) +static inline u32 armv8pmu_read_evcntr(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); - write_sysreg(counter, pmselr_el0); - isb(); -} -static inline u64 armv8pmu_read_evcntr(int idx) -{ - armv8pmu_select_counter(idx); - return read_sysreg(pmxevcntr_el0); + return read_pmevcntrn(counter); } static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) @@ -440,15 +518,11 @@ static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value) static u64 armv8pmu_read_counter(struct perf_event *event) { - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; u64 value = 0; - if (!armv8pmu_counter_valid(cpu_pmu, idx)) - pr_err("CPU%u reading wrong counter %d\n", - smp_processor_id(), idx); - else if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_IDX_CYCLE_COUNTER) value = read_sysreg(pmccntr_el0); else value = armv8pmu_read_hw_counter(event); @@ -458,8 +532,9 @@ static u64 armv8pmu_read_counter(struct perf_event *event) static inline void armv8pmu_write_evcntr(int idx, u64 value) { - armv8pmu_select_counter(idx); - write_sysreg(value, pmxevcntr_el0); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); + + write_pmevcntrn(counter, value); } static inline void armv8pmu_write_hw_counter(struct perf_event *event, @@ -477,16 +552,12 @@ static inline void armv8pmu_write_hw_counter(struct perf_event *event, static void armv8pmu_write_counter(struct perf_event *event, u64 value) { - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; value = armv8pmu_bias_long_counter(event, value); - if (!armv8pmu_counter_valid(cpu_pmu, idx)) - pr_err("CPU%u writing wrong counter %d\n", - smp_processor_id(), idx); - else if (idx == ARMV8_IDX_CYCLE_COUNTER) + if (idx == ARMV8_IDX_CYCLE_COUNTER) write_sysreg(value, pmccntr_el0); else armv8pmu_write_hw_counter(event, value); @@ -494,9 +565,10 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value) static inline void armv8pmu_write_evtype(int idx, u32 val) { - armv8pmu_select_counter(idx); + u32 counter = ARMV8_IDX_TO_COUNTER(idx); + val &= ARMV8_PMU_EVTYPE_MASK; - write_sysreg(val, pmxevtyper_el0); + write_pmevtypern(counter, val); } static inline void armv8pmu_write_event_type(struct perf_event *event) @@ -516,7 +588,10 @@ static inline void armv8pmu_write_event_type(struct perf_event *event) armv8pmu_write_evtype(idx - 1, hwc->config_base); armv8pmu_write_evtype(idx, chain_evt); } else { - armv8pmu_write_evtype(idx, hwc->config_base); + if (idx == ARMV8_IDX_CYCLE_COUNTER) + write_sysreg(hwc->config_base, pmccfiltr_el0); + else + armv8pmu_write_evtype(idx, 
hwc->config_base); } } @@ -532,6 +607,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event) static inline void armv8pmu_enable_counter(u32 mask) { + /* + * Make sure event configuration register writes are visible before we + * enable the counter. + * */ + isb(); write_sysreg(mask, pmcntenset_el0); } @@ -550,6 +630,11 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event) static inline void armv8pmu_disable_counter(u32 mask) { write_sysreg(mask, pmcntenclr_el0); + /* + * Make sure the effects of disabling the counter are visible before we + * start configuring the event. + */ + isb(); } static inline void armv8pmu_disable_event_counter(struct perf_event *event) @@ -606,15 +691,10 @@ static inline u32 armv8pmu_getreset_flags(void) static void armv8pmu_enable_event(struct perf_event *event) { - unsigned long flags; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - /* * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter @@ -622,7 +702,7 @@ static void armv8pmu_enable_event(struct perf_event *event) armv8pmu_disable_event_counter(event); /* - * Set event (if destined for PMNx counters). + * Set event. */ armv8pmu_write_event_type(event); @@ -635,21 +715,10 @@ static void armv8pmu_enable_event(struct perf_event *event) * Enable counter */ armv8pmu_enable_event_counter(event); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv8pmu_disable_event(struct perf_event *event) { - unsigned long flags; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - - /* - * Disable counter and interrupt - */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); - /* * Disable counter */ @@ -659,30 +728,18 @@ static void armv8pmu_disable_event(struct perf_event *event) * Disable interrupt for this counter */ armv8pmu_disable_event_irq(event); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv8pmu_start(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Enable all counters */ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv8pmu_stop(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable all counters */ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) @@ -735,20 +792,16 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; + /* + * Perf event overflow will queue the processing of the event as + * an irq_work which will be taken care of in the handling of + * IPI_IRQ_WORK. + */ if (perf_event_overflow(event, &data, regs)) cpu_pmu->disable(event); } armv8pmu_start(cpu_pmu); - /* - * Handle the pending perf events. - * - * Note: this call *must* be run with interrupts disabled. For - * platforms that can have the PMU interrupts raised as an NMI, this - * will not work. 
- */ - irq_work_run(); - return IRQ_HANDLED; } @@ -997,6 +1050,12 @@ static void __armv8pmu_probe_pmu(void *info) bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap, pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); + + /* store PMMIR_EL1 register for sysfs */ + if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31))) + cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); + else + cpu_pmu->reg_pmmir = 0; } static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) @@ -1019,7 +1078,8 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, int (*map_event)(struct perf_event *event), const struct attribute_group *events, - const struct attribute_group *format) + const struct attribute_group *format, + const struct attribute_group *caps) { int ret = armv8pmu_probe_pmu(cpu_pmu); if (ret) @@ -1044,104 +1104,112 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, events : &armv8_pmuv3_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ? format : &armv8_pmuv3_format_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ? + caps : &armv8_pmuv3_caps_attr_group; return 0; } +static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name, + int (*map_event)(struct perf_event *event)) +{ + return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL); +} + static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_pmuv3", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3", + armv8_pmuv3_map_event); } static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a34", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34", + armv8_pmuv3_map_event); } static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", - armv8_a53_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35", + armv8_a53_map_event); } static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53", - armv8_a53_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53", + armv8_a53_map_event); } static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a55", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55", + armv8_pmuv3_map_event); } static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", - armv8_a57_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57", + armv8_a57_map_event); } static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a65", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65", + armv8_pmuv3_map_event); } static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", - armv8_a57_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72", + armv8_a57_map_event); } static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73", - armv8_a73_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73", + armv8_a73_map_event); } static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu) { - return 
armv8_pmu_init(cpu_pmu, "armv8_cortex_a75", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75", + armv8_pmuv3_map_event); } static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a76", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76", + armv8_pmuv3_map_event); } static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cortex_a77", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77", + armv8_pmuv3_map_event); } static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_neoverse_e1", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1", + armv8_pmuv3_map_event); } static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_neoverse_n1", - armv8_pmuv3_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1", + armv8_pmuv3_map_event); } static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", - armv8_thunder_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder", + armv8_thunder_map_event); } static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) { - return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan", - armv8_vulcan_map_event, NULL, NULL); + return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan", + armv8_vulcan_map_event); } static const struct of_device_id armv8_pmu_of_device_ids[] = { diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 666b225aeb3a..94e8718e7229 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -16,7 +16,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) /* * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but - * we're stuck with it for ABI compatability reasons. + * we're stuck with it for ABI compatibility reasons. * * For a 32-bit consumer inspecting a 32-bit task, then it will look at * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c index 263d5fba4c8a..104101f633b1 100644 --- a/arch/arm64/kernel/probes/decode-insn.c +++ b/arch/arm64/kernel/probes/decode-insn.c @@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn) aarch64_insn_is_msr_imm(insn) || aarch64_insn_is_msr_reg(insn) || aarch64_insn_is_exception(insn) || - aarch64_insn_is_eret(insn)) + aarch64_insn_is_eret(insn) || + aarch64_insn_is_eret_auth(insn)) return false; /* @@ -42,8 +43,10 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn) != AARCH64_INSN_SPCLREG_DAIF; /* - * The HINT instruction is is problematic when single-stepping, - * except for the NOP case. + * The HINT instruction is steppable only if it is in whitelist + * and the rest of other such instructions are blocked for + * single stepping as they may cause exception or other + * unintended behaviour. 
*/ if (aarch64_insn_is_hint(insn)) return aarch64_insn_is_steppable_hint(insn); diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 5290f17a4d80..deba738142ed 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -464,87 +464,15 @@ int __init arch_populate_kprobe_blacklist(void) void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = - (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * return probes installed on them, and/or more than one - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always pushed into the head of the list - * - when multiple return probes are registered for the same - * function, the (chronologically) first instance's ret_addr - * will be the real return address, and all the rest will - * point to kretprobe_trampoline. - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, + (void *)kernel_stack_pointer(regs)); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; + ri->fp = (void *)kernel_stack_pointer(regs); /* replace return addr (x30) with trampoline */ regs->regs[30] = (long)&kretprobe_trampoline; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index f1804496b935..4784011cecac 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -21,6 +21,7 @@ #include <linux/lockdep.h> #include <linux/mman.h> #include <linux/mm.h> +#include <linux/nospec.h> #include <linux/stddef.h> #include <linux/sysctl.h> #include <linux/unistd.h> @@ -52,6 +53,7 @@ #include <asm/exec.h> #include <asm/fpsimd.h> #include <asm/mmu_context.h> +#include <asm/mte.h> #include <asm/processor.h> #include <asm/pointer_auth.h> #include <asm/stacktrace.h> @@ -239,7 +241,7 @@ static void print_pstate(struct pt_regs *regs) const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >> PSR_BTYPE_SHIFT]; - printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO BTYPE=%s)\n", + printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n", pstate, pstate & PSR_N_BIT ? 'N' : 'n', pstate & PSR_Z_BIT ? 'Z' : 'z', @@ -251,6 +253,7 @@ static void print_pstate(struct pt_regs *regs) pstate & PSR_F_BIT ? 'F' : 'f', pstate & PSR_PAN_BIT ? '+' : '-', pstate & PSR_UAO_BIT ? '+' : '-', + pstate & PSR_TCO_BIT ? '+' : '-', btype_str); } } @@ -336,6 +339,7 @@ void flush_thread(void) tls_thread_flush(); flush_ptrace_hw_breakpoint(current); flush_tagged_addr_state(); + flush_mte_state(); } void release_thread(struct task_struct *dead_task) @@ -368,6 +372,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) dst->thread.sve_state = NULL; clear_tsk_thread_flag(dst, TIF_SVE); + /* clear any pending asynchronous tag fault raised by the parent */ + clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); + return 0; } @@ -421,8 +428,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, cpus_have_const_cap(ARM64_HAS_UAO)) childregs->pstate |= PSR_UAO_BIT; - if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) - set_ssbs_bit(childregs); + spectre_v4_enable_task_mitigation(p); if (system_uses_irq_prio_masking()) childregs->pmr_save = GIC_PRIO_IRQON; @@ -472,8 +478,6 @@ void uao_thread_switch(struct task_struct *next) */ static void ssbs_thread_switch(struct task_struct *next) { - struct pt_regs *regs = task_pt_regs(next); - /* * Nothing to do for kernel threads, but 'regs' may be junk * (e.g. idle task) so check the flags and bail early. @@ -485,18 +489,10 @@ static void ssbs_thread_switch(struct task_struct *next) * If all CPUs implement the SSBS extension, then we just need to * context-switch the PSTATE field. */ - if (cpu_have_feature(cpu_feature(SSBS))) + if (cpus_have_const_cap(ARM64_SSBS)) return; - /* If the mitigation is enabled, then we leave SSBS clear. 
*/ - if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) || - test_tsk_thread_flag(next, TIF_SSBD)) - return; - - if (compat_user_mode(regs)) - set_compat_ssbs_bit(regs); - else if (user_mode(regs)) - set_ssbs_bit(regs); + spectre_v4_enable_task_mitigation(next); } /* @@ -571,6 +567,13 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, */ dsb(ish); + /* + * MTE thread switching must happen after the DSB above to ensure that + * any asynchronous tag check faults have been logged in the TFSR*_EL1 + * registers. + */ + mte_thread_switch(next); + /* the actual thread switch */ last = cpu_switch_to(prev, next); @@ -620,6 +623,11 @@ void arch_setup_new_exec(void) current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; ptrauth_thread_init_user(current); + + if (task_spec_ssb_noexec(current)) { + arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, + PR_SPEC_ENABLE); + } } #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI @@ -628,11 +636,18 @@ void arch_setup_new_exec(void) */ static unsigned int tagged_addr_disabled; -long set_tagged_addr_ctrl(unsigned long arg) +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) { - if (is_compat_task()) + unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE; + struct thread_info *ti = task_thread_info(task); + + if (is_compat_thread(ti)) return -EINVAL; - if (arg & ~PR_TAGGED_ADDR_ENABLE) + + if (system_supports_mte()) + valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK; + + if (arg & ~valid_mask) return -EINVAL; /* @@ -642,20 +657,28 @@ long set_tagged_addr_ctrl(unsigned long arg) if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled) return -EINVAL; - update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); + if (set_mte_ctrl(task, arg) != 0) + return -EINVAL; + + update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); return 0; } -long get_tagged_addr_ctrl(void) +long get_tagged_addr_ctrl(struct task_struct *task) { - if (is_compat_task()) + long ret = 0; + struct thread_info *ti = task_thread_info(task); + + if (is_compat_thread(ti)) return -EINVAL; - if (test_thread_flag(TIF_TAGGED_ADDR)) - return PR_TAGGED_ADDR_ENABLE; + if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR)) + ret = PR_TAGGED_ADDR_ENABLE; - return 0; + ret |= get_mte_ctrl(task); + + return ret; } /* diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c new file mode 100644 index 000000000000..68b710f1b43f --- /dev/null +++ b/arch/arm64/kernel/proton-pack.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as + * detailed at: + * + * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability + * + * This code was originally written hastily under an awful lot of stress and so + * aspects of it are somewhat hacky. Unfortunately, changing anything in here + * instantly makes me feel ill. Thanks, Jann. Thann. + * + * Copyright (C) 2018 ARM Ltd, All Rights Reserved. + * Copyright (C) 2020 Google LLC + * + * "If there's something strange in your neighbourhood, who you gonna call?" 
+ * + * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org> + */ + +#include <linux/arm-smccc.h> +#include <linux/cpu.h> +#include <linux/device.h> +#include <linux/nospec.h> +#include <linux/prctl.h> +#include <linux/sched/task_stack.h> + +#include <asm/spectre.h> +#include <asm/traps.h> + +/* + * We try to ensure that the mitigation state can never change as the result of + * onlining a late CPU. + */ +static void update_mitigation_state(enum mitigation_state *oldp, + enum mitigation_state new) +{ + enum mitigation_state state; + + do { + state = READ_ONCE(*oldp); + if (new <= state) + break; + + /* Userspace almost certainly can't deal with this. */ + if (WARN_ON(system_capabilities_finalized())) + break; + } while (cmpxchg_relaxed(oldp, state, new) != state); +} + +/* + * Spectre v1. + * + * The kernel can't protect userspace for this one: it's each person for + * themselves. Advertise what we're doing and be done with it. + */ +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +/* + * Spectre v2. + * + * This one sucks. A CPU is either: + * + * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2. + * - Mitigated in hardware and listed in our "safe list". + * - Mitigated in software by firmware. + * - Mitigated in software by a CPU-specific dance in the kernel. + * - Vulnerable. + * + * It's not unlikely for different CPUs in a big.LITTLE system to fall into + * different camps. + */ +static enum mitigation_state spectre_v2_state; + +static bool __read_mostly __nospectre_v2; +static int __init parse_spectre_v2_param(char *str) +{ + __nospectre_v2 = true; + return 0; +} +early_param("nospectre_v2", parse_spectre_v2_param); + +static bool spectre_v2_mitigations_off(void) +{ + bool ret = __nospectre_v2 || cpu_mitigations_off(); + + if (ret) + pr_info_once("spectre-v2 mitigation disabled by command line option\n"); + + return ret; +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + switch (spectre_v2_state) { + case SPECTRE_UNAFFECTED: + return sprintf(buf, "Not affected\n"); + case SPECTRE_MITIGATED: + return sprintf(buf, "Mitigation: Branch predictor hardening\n"); + case SPECTRE_VULNERABLE: + fallthrough; + default: + return sprintf(buf, "Vulnerable\n"); + } +} + +static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void) +{ + u64 pfr0; + static const struct midr_range spectre_v2_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), + { /* sentinel */ } + }; + + /* If the CPU has CSV2 set, we're safe */ + pfr0 = read_cpuid(ID_AA64PFR0_EL1); + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) + return SPECTRE_UNAFFECTED; + + /* Alternatively, we have a list of unaffected CPUs */ + if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) + return SPECTRE_UNAFFECTED; + + return SPECTRE_VULNERABLE; +} + +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED (1) + +static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + + ret = res.a0; + switch (ret) { + case 
SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + default: + fallthrough; + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED) + return false; + + if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED) + return false; + + return true; +} + +DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +enum mitigation_state arm64_get_spectre_v2_state(void) +{ + return spectre_v2_state; +} + +#ifdef CONFIG_KVM +#include <asm/cacheflush.h> +#include <asm/kvm_asm.h> + +atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); + +static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K); + int i; + + for (i = 0; i < SZ_2K; i += 0x80) + memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); + + __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); +} + +static void install_bp_hardening_cb(bp_hardening_cb_t fn) +{ + static DEFINE_RAW_SPINLOCK(bp_lock); + int cpu, slot = -1; + const char *hyp_vecs_start = __smccc_workaround_1_smc; + const char *hyp_vecs_end = __smccc_workaround_1_smc + + __SMCCC_WORKAROUND_1_SMC_SZ; + + /* + * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if + * we're a guest. Skip the hyp-vectors work. + */ + if (!is_hyp_mode_available()) { + __this_cpu_write(bp_hardening_data.fn, fn); + return; + } + + raw_spin_lock(&bp_lock); + for_each_possible_cpu(cpu) { + if (per_cpu(bp_hardening_data.fn, cpu) == fn) { + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); + break; + } + } + + if (slot == -1) { + slot = atomic_inc_return(&arm64_el2_vector_last_slot); + BUG_ON(slot >= BP_HARDEN_EL2_SLOTS); + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); + } + + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.fn, fn); + raw_spin_unlock(&bp_lock); +} +#else +static void install_bp_hardening_cb(bp_hardening_cb_t fn) +{ + __this_cpu_write(bp_hardening_data.fn, fn); +} +#endif /* CONFIG_KVM */ + +static void call_smc_arch_workaround_1(void) +{ + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void call_hvc_arch_workaround_1(void) +{ + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void qcom_link_stack_sanitisation(void) +{ + u64 tmp; + + asm volatile("mov %0, x30 \n" + ".rept 16 \n" + "bl . 
+ 4 \n" + ".endr \n" + "mov x30, %0 \n" + : "=&r" (tmp)); +} + +static enum mitigation_state spectre_v2_enable_fw_mitigation(void) +{ + bp_hardening_cb_t cb; + enum mitigation_state state; + + state = spectre_v2_get_cpu_fw_mitigation_state(); + if (state != SPECTRE_MITIGATED) + return state; + + if (spectre_v2_mitigations_off()) + return SPECTRE_VULNERABLE; + + switch (arm_smccc_1_1_get_conduit()) { + case SMCCC_CONDUIT_HVC: + cb = call_hvc_arch_workaround_1; + break; + + case SMCCC_CONDUIT_SMC: + cb = call_smc_arch_workaround_1; + break; + + default: + return SPECTRE_VULNERABLE; + } + + install_bp_hardening_cb(cb); + return SPECTRE_MITIGATED; +} + +static enum mitigation_state spectre_v2_enable_sw_mitigation(void) +{ + u32 midr; + + if (spectre_v2_mitigations_off()) + return SPECTRE_VULNERABLE; + + midr = read_cpuid_id(); + if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) && + ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1)) + return SPECTRE_VULNERABLE; + + install_bp_hardening_cb(qcom_link_stack_sanitisation); + return SPECTRE_MITIGATED; +} + +void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused) +{ + enum mitigation_state state; + + WARN_ON(preemptible()); + + state = spectre_v2_get_cpu_hw_mitigation_state(); + if (state == SPECTRE_VULNERABLE) + state = spectre_v2_enable_fw_mitigation(); + if (state == SPECTRE_VULNERABLE) + state = spectre_v2_enable_sw_mitigation(); + + update_mitigation_state(&spectre_v2_state, state); +} + +/* + * Spectre v4. + * + * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is + * either: + * + * - Mitigated in hardware and listed in our "safe list". + * - Mitigated in hardware via PSTATE.SSBS. + * - Mitigated in software by firmware (sometimes referred to as SSBD). + * + * Wait, that doesn't sound so bad, does it? Keep reading... + * + * A major source of headaches is that the software mitigation is enabled both + * on a per-task basis, but can also be forced on for the kernel, necessitating + * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs + * allow EL0 to toggle SSBS directly, which can end up with the prctl() state + * being stale when re-entering the kernel. The usual big.LITTLE caveats apply, + * so you can have systems that have both firmware and SSBS mitigations. This + * means we actually have to reject late onlining of CPUs with mitigations if + * all of the currently onlined CPUs are safelisted, as the mitigation tends to + * be opt-in for userspace. Yes, really, the cure is worse than the disease. + * + * The only good part is that if the firmware mitigation is present, then it is + * present for all CPUs, meaning we don't have to worry about late onlining of a + * vulnerable CPU if one of the boot CPUs is using the firmware mitigation. + * + * Give me a VAX-11/780 any day of the week... 
+ */ +static enum mitigation_state spectre_v4_state; + +/* This is the per-cpu state tracking whether we need to talk to firmware */ +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +enum spectre_v4_policy { + SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, + SPECTRE_V4_POLICY_MITIGATION_ENABLED, + SPECTRE_V4_POLICY_MITIGATION_DISABLED, +}; + +static enum spectre_v4_policy __read_mostly __spectre_v4_policy; + +static const struct spectre_v4_param { + const char *str; + enum spectre_v4_policy policy; +} spectre_v4_params[] = { + { "force-on", SPECTRE_V4_POLICY_MITIGATION_ENABLED, }, + { "force-off", SPECTRE_V4_POLICY_MITIGATION_DISABLED, }, + { "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, }, +}; +static int __init parse_spectre_v4_param(char *str) +{ + int i; + + if (!str || !str[0]) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) { + const struct spectre_v4_param *param = &spectre_v4_params[i]; + + if (strncmp(str, param->str, strlen(param->str))) + continue; + + __spectre_v4_policy = param->policy; + return 0; + } + + return -EINVAL; +} +early_param("ssbd", parse_spectre_v4_param); + +/* + * Because this was all written in a rush by people working in different silos, + * we've ended up with multiple command line options to control the same thing. + * Wrap these up in some helpers, which prefer disabling the mitigation if faced + * with contradictory parameters. The mitigation is always either "off", + * "dynamic" or "on". + */ +static bool spectre_v4_mitigations_off(void) +{ + bool ret = cpu_mitigations_off() || + __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED; + + if (ret) + pr_info_once("spectre-v4 mitigation disabled by command-line option\n"); + + return ret; +} + +/* Do we need to toggle the mitigation state on entry to/exit from the kernel? 
*/ +static bool spectre_v4_mitigations_dynamic(void) +{ + return !spectre_v4_mitigations_off() && + __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC; +} + +static bool spectre_v4_mitigations_on(void) +{ + return !spectre_v4_mitigations_off() && + __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED; +} + +ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + switch (spectre_v4_state) { + case SPECTRE_UNAFFECTED: + return sprintf(buf, "Not affected\n"); + case SPECTRE_MITIGATED: + return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n"); + case SPECTRE_VULNERABLE: + fallthrough; + default: + return sprintf(buf, "Vulnerable\n"); + } +} + +enum mitigation_state arm64_get_spectre_v4_state(void) +{ + return spectre_v4_state; +} + +static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void) +{ + static const struct midr_range spectre_v4_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), + { /* sentinel */ }, + }; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list)) + return SPECTRE_UNAFFECTED; + + /* CPU features are detected first */ + if (this_cpu_has_cap(ARM64_SSBS)) + return SPECTRE_MITIGATED; + + return SPECTRE_VULNERABLE; +} + +static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + + ret = res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + fallthrough; + case SMCCC_RET_NOT_REQUIRED: + return SPECTRE_UNAFFECTED; + default: + fallthrough; + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope) +{ + enum mitigation_state state; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + state = spectre_v4_get_cpu_hw_mitigation_state(); + if (state == SPECTRE_VULNERABLE) + state = spectre_v4_get_cpu_fw_mitigation_state(); + + return state != SPECTRE_UNAFFECTED; +} + +static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr) +{ + if (user_mode(regs)) + return 1; + + if (instr & BIT(PSTATE_Imm_shift)) + regs->pstate |= PSR_SSBS_BIT; + else + regs->pstate &= ~PSR_SSBS_BIT; + + arm64_skip_faulting_instruction(regs, 4); + return 0; +} + +static struct undef_hook ssbs_emulation_hook = { + .instr_mask = ~(1U << PSTATE_Imm_shift), + .instr_val = 0xd500401f | PSTATE_SSBS, + .fn = ssbs_emulation_handler, +}; + +static enum mitigation_state spectre_v4_enable_hw_mitigation(void) +{ + static bool undef_hook_registered = false; + static DEFINE_RAW_SPINLOCK(hook_lock); + enum mitigation_state state; + + /* + * If the system is mitigated but this CPU doesn't have SSBS, then + * we must be on the safelist and there's nothing more to do. 
+ */ + state = spectre_v4_get_cpu_hw_mitigation_state(); + if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS)) + return state; + + raw_spin_lock(&hook_lock); + if (!undef_hook_registered) { + register_undef_hook(&ssbs_emulation_hook); + undef_hook_registered = true; + } + raw_spin_unlock(&hook_lock); + + if (spectre_v4_mitigations_off()) { + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); + asm volatile(SET_PSTATE_SSBS(1)); + return SPECTRE_VULNERABLE; + } + + /* SCTLR_EL1.DSSBS was initialised to 0 during boot */ + asm volatile(SET_PSTATE_SSBS(0)); + return SPECTRE_MITIGATED; +} + +/* + * Patch a branch over the Spectre-v4 mitigation code with a NOP so that + * we fallthrough and check whether firmware needs to be called on this CPU. + */ +void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + BUG_ON(nr_inst != 1); /* Branch -> NOP */ + + if (spectre_v4_mitigations_off()) + return; + + if (cpus_have_final_cap(ARM64_SSBS)) + return; + + if (spectre_v4_mitigations_dynamic()) + *updptr = cpu_to_le32(aarch64_insn_gen_nop()); +} + +/* + * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction + * to call into firmware to adjust the mitigation state. + */ +void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) +{ + u32 insn; + + BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */ + + switch (arm_smccc_1_1_get_conduit()) { + case SMCCC_CONDUIT_HVC: + insn = aarch64_insn_get_hvc_value(); + break; + case SMCCC_CONDUIT_SMC: + insn = aarch64_insn_get_smc_value(); + break; + default: + return; + } + + *updptr = cpu_to_le32(insn); +} + +static enum mitigation_state spectre_v4_enable_fw_mitigation(void) +{ + enum mitigation_state state; + + state = spectre_v4_get_cpu_fw_mitigation_state(); + if (state != SPECTRE_MITIGATED) + return state; + + if (spectre_v4_mitigations_off()) { + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL); + return SPECTRE_VULNERABLE; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL); + + if (spectre_v4_mitigations_dynamic()) + __this_cpu_write(arm64_ssbd_callback_required, 1); + + return SPECTRE_MITIGATED; +} + +void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused) +{ + enum mitigation_state state; + + WARN_ON(preemptible()); + + state = spectre_v4_enable_hw_mitigation(); + if (state == SPECTRE_VULNERABLE) + state = spectre_v4_enable_fw_mitigation(); + + update_mitigation_state(&spectre_v4_state, state); +} + +static void __update_pstate_ssbs(struct pt_regs *regs, bool state) +{ + u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; + + if (state) + regs->pstate |= bit; + else + regs->pstate &= ~bit; +} + +void spectre_v4_enable_task_mitigation(struct task_struct *tsk) +{ + struct pt_regs *regs = task_pt_regs(tsk); + bool ssbs = false, kthread = tsk->flags & PF_KTHREAD; + + if (spectre_v4_mitigations_off()) + ssbs = true; + else if (spectre_v4_mitigations_dynamic() && !kthread) + ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD); + + __update_pstate_ssbs(regs, ssbs); +} + +/* + * The Spectre-v4 mitigation can be controlled via a prctl() from userspace. + * This is interesting because the "speculation disabled" behaviour can be + * configured so that it is preserved across exec(), which means that the + * prctl() may be necessary even when PSTATE.SSBS can be toggled directly + * from userspace. 
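As a rough illustration of the userspace side (the prctl() plumbing lives in core kernel code, not in this file), a task would typically request the mitigation with:

	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);

which reaches ssbd_prctl_set() below through arch_prctl_spec_ctrl_set().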
+ */ +static void ssbd_prctl_enable_mitigation(struct task_struct *task) +{ + task_clear_spec_ssb_noexec(task); + task_set_spec_ssb_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); +} + +static void ssbd_prctl_disable_mitigation(struct task_struct *task) +{ + task_clear_spec_ssb_noexec(task); + task_clear_spec_ssb_disable(task); + clear_tsk_thread_flag(task, TIF_SSBD); +} + +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + switch (ctrl) { + case PR_SPEC_ENABLE: + /* Enable speculation: disable mitigation */ + /* + * Force disabled speculation prevents it from being + * re-enabled. + */ + if (task_spec_ssb_force_disable(task)) + return -EPERM; + + /* + * If the mitigation is forced on, then speculation is forced + * off and we again prevent it from being re-enabled. + */ + if (spectre_v4_mitigations_on()) + return -EPERM; + + ssbd_prctl_disable_mitigation(task); + break; + case PR_SPEC_FORCE_DISABLE: + /* Force disable speculation: force enable mitigation */ + /* + * If the mitigation is forced off, then speculation is forced + * on and we prevent it from being disabled. + */ + if (spectre_v4_mitigations_off()) + return -EPERM; + + task_set_spec_ssb_force_disable(task); + fallthrough; + case PR_SPEC_DISABLE: + /* Disable speculation: enable mitigation */ + /* Same as PR_SPEC_FORCE_DISABLE */ + if (spectre_v4_mitigations_off()) + return -EPERM; + + ssbd_prctl_enable_mitigation(task); + break; + case PR_SPEC_DISABLE_NOEXEC: + /* Disable speculation until execve(): enable mitigation */ + /* + * If the mitigation state is forced one way or the other, then + * we must fail now before we try to toggle it on execve(). + */ + if (task_spec_ssb_force_disable(task) || + spectre_v4_mitigations_off() || + spectre_v4_mitigations_on()) { + return -EPERM; + } + + ssbd_prctl_enable_mitigation(task); + task_set_spec_ssb_noexec(task); + break; + default: + return -ERANGE; + } + + spectre_v4_enable_task_mitigation(task); + return 0; +} + +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_set(task, ctrl); + default: + return -ENODEV; + } +} + +static int ssbd_prctl_get(struct task_struct *task) +{ + switch (spectre_v4_state) { + case SPECTRE_UNAFFECTED: + return PR_SPEC_NOT_AFFECTED; + case SPECTRE_MITIGATED: + if (spectre_v4_mitigations_on()) + return PR_SPEC_NOT_AFFECTED; + + if (spectre_v4_mitigations_dynamic()) + break; + + /* Mitigations are disabled, so we're vulnerable. 
*/ + fallthrough; + case SPECTRE_VULNERABLE: + fallthrough; + default: + return PR_SPEC_ENABLE; + } + + /* Check the mitigation state for this task */ + if (task_spec_ssb_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + + if (task_spec_ssb_noexec(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; + + if (task_spec_ssb_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_get(task); + default: + return -ENODEV; + } +} diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index d8ebfd813e28..f49b349e16a3 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -34,6 +34,7 @@ #include <asm/cpufeature.h> #include <asm/debug-monitors.h> #include <asm/fpsimd.h> +#include <asm/mte.h> #include <asm/pointer_auth.h> #include <asm/stacktrace.h> #include <asm/syscall.h> @@ -1032,6 +1033,35 @@ static int pac_generic_keys_set(struct task_struct *target, #endif /* CONFIG_CHECKPOINT_RESTORE */ #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +static int tagged_addr_ctrl_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + long ctrl = get_tagged_addr_ctrl(target); + + if (IS_ERR_VALUE(ctrl)) + return ctrl; + + return membuf_write(&to, &ctrl, sizeof(ctrl)); +} + +static int tagged_addr_ctrl_set(struct task_struct *target, const struct + user_regset *regset, unsigned int pos, + unsigned int count, const void *kbuf, const + void __user *ubuf) +{ + int ret; + long ctrl; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); + if (ret) + return ret; + + return set_tagged_addr_ctrl(target, ctrl); +} +#endif + enum aarch64_regset { REGSET_GPR, REGSET_FPR, @@ -1051,6 +1081,9 @@ enum aarch64_regset { REGSET_PACG_KEYS, #endif #endif +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI + REGSET_TAGGED_ADDR_CTRL, +#endif }; static const struct user_regset aarch64_regsets[] = { @@ -1148,6 +1181,16 @@ static const struct user_regset aarch64_regsets[] = { }, #endif #endif +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI + [REGSET_TAGGED_ADDR_CTRL] = { + .core_note_type = NT_ARM_TAGGED_ADDR_CTRL, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = tagged_addr_ctrl_get, + .set = tagged_addr_ctrl_set, + }, +#endif }; static const struct user_regset_view user_aarch64_view = { @@ -1691,6 +1734,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { + switch (request) { + case PTRACE_PEEKMTETAGS: + case PTRACE_POKEMTETAGS: + return mte_ptrace_copy_tags(child, request, addr, data); + } + return ptrace_request(child, request, addr, data); } @@ -1793,7 +1842,7 @@ void syscall_trace_exit(struct pt_regs *regs) * We also reserve IL for the kernel; SS is handled dynamically. 
*/ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \ GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 542d6edc6806..84eec95ec06c 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -36,18 +36,6 @@ SYM_CODE_START(arm64_relocate_new_kernel) mov x14, xzr /* x14 = entry ptr */ mov x13, xzr /* x13 = copy dest */ - /* Clear the sctlr_el2 flags. */ - mrs x0, CurrentEL - cmp x0, #CurrentEL_EL2 - b.ne 1f - mrs x0, sctlr_el2 - mov_q x1, SCTLR_ELx_FLAGS - bic x0, x0, x1 - pre_disable_mmu_workaround - msr sctlr_el2, x0 - isb -1: - /* Check if the new image needs relocation. */ tbnz x16, IND_DONE_BIT, .Ldone diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index a5e8b3b9d798..a6d18755652f 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -18,16 +18,16 @@ struct return_address_data { void *addr; }; -static int save_return_addr(struct stackframe *frame, void *d) +static bool save_return_addr(void *d, unsigned long pc) { struct return_address_data *data = d; if (!data->level) { - data->addr = (void *)frame->pc; - return 1; + data->addr = (void *)pc; + return false; } else { --data->level; - return 0; + return true; } } NOKPROBE_SYMBOL(save_return_addr); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 53acbeca4f57..133257ffd859 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -217,7 +217,7 @@ static void __init request_standard_resources(void) if (!standard_resources) panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); - for_each_memblock(memory, region) { + for_each_mem_region(region) { res = &standard_resources[i++]; if (memblock_is_nomap(region)) { res->name = "reserved"; @@ -257,7 +257,7 @@ static int __init reserve_memblock_reserved_regions(void) if (!memblock_is_region_reserved(mem->start, mem_size)) continue; - for_each_reserved_mem_region(j, &r_start, &r_end) { + for_each_reserved_mem_range(j, &r_start, &r_end) { resource_size_t start, end; start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 3b4f31f35e45..bdcaaf091e1e 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -244,7 +244,8 @@ static int preserve_sve_context(struct sve_context __user *ctx) if (vq) { /* * This assumes that the SVE state has already been saved to - * the task struct by calling preserve_fpsimd_context(). + * the task struct by calling the function + * fpsimd_signal_preserve_current_state(). 
*/ err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET, current->thread.sve_state, @@ -748,6 +749,9 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, regs->pstate |= PSR_BTYPE_C; } + /* TCO (Tag Check Override) always cleared for signal handlers */ + regs->pstate &= ~PSR_TCO_BIT; + if (ka->sa.sa_flags & SA_RESTORER) sigtramp = ka->sa.sa_restorer; else @@ -932,6 +936,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, if (thread_flags & _TIF_UPROBE) uprobe_notify_resume(regs); + if (thread_flags & _TIF_MTE_ASYNC_FAULT) { + clear_thread_flag(TIF_MTE_ASYNC_FAULT); + send_sig_fault(SIGSEGV, SEGV_MTEAERR, + (void __user *)NULL, current); + } + if (thread_flags & _TIF_SIGPENDING) do_signal(regs); diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S index 1f93809528a4..d62447964ed9 100644 --- a/arch/arm64/kernel/smccc-call.S +++ b/arch/arm64/kernel/smccc-call.S @@ -9,7 +9,6 @@ #include <asm/assembler.h> .macro SMCCC instr - .cfi_startproc \instr #0 ldr x4, [sp] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] @@ -21,7 +20,6 @@ b.ne 1f str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] 1: ret - .cfi_endproc .endm /* diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 355ee9eed4dd..82e75fc2c903 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -30,6 +30,7 @@ #include <linux/completion.h> #include <linux/of.h> #include <linux/irq_work.h> +#include <linux/kernel_stat.h> #include <linux/kexec.h> #include <linux/kvm_host.h> @@ -72,10 +73,18 @@ enum ipi_msg_type { IPI_CPU_CRASH_STOP, IPI_TIMER, IPI_IRQ_WORK, - IPI_WAKEUP + IPI_WAKEUP, + NR_IPI }; +static int ipi_irq_base __read_mostly; +static int nr_ipi __read_mostly = NR_IPI; +static struct irq_desc *ipi_desc[NR_IPI] __read_mostly; + +static void ipi_setup(int cpu); + #ifdef CONFIG_HOTPLUG_CPU +static void ipi_teardown(int cpu); static int op_cpu_kill(unsigned int cpu); #else static inline int op_cpu_kill(unsigned int cpu) @@ -237,6 +246,8 @@ asmlinkage notrace void secondary_start_kernel(void) */ notify_cpu_starting(cpu); + ipi_setup(cpu); + store_cpu_topology(cpu); numa_add_cpu(cpu); @@ -302,6 +313,7 @@ int __cpu_disable(void) * and we must not schedule until we're ready to give up the cpu. */ set_cpu_online(cpu, false); + ipi_teardown(cpu); /* * OK - migrate IRQs away from this CPU @@ -772,13 +784,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } } -void (*__smp_cross_call)(const struct cpumask *, unsigned int); - -void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) -{ - __smp_cross_call = fn; -} - static const char *ipi_types[NR_IPI] __tracepoint_string = { #define S(x,s) [x] = s S(IPI_RESCHEDULE, "Rescheduling interrupts"), @@ -790,35 +795,25 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = { S(IPI_WAKEUP, "CPU wake-up interrupts"), }; -static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) -{ - trace_ipi_raise(target, ipi_types[ipinr]); - __smp_cross_call(target, ipinr); -} +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); -void show_ipi_list(struct seq_file *p, int prec) +unsigned long irq_err_count; + +int arch_show_interrupts(struct seq_file *p, int prec) { unsigned int cpu, i; for (i = 0; i < NR_IPI; i++) { + unsigned int irq = irq_desc_get_irq(ipi_desc[i]); seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? 
" " : ""); for_each_online_cpu(cpu) - seq_printf(p, "%10u ", - __get_irq_stat(cpu, ipi_irqs[i])); + seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); seq_printf(p, " %s\n", ipi_types[i]); } -} - -u64 smp_irq_stat_cpu(unsigned int cpu) -{ - u64 sum = 0; - int i; - for (i = 0; i < NR_IPI; i++) - sum += __get_irq_stat(cpu, ipi_irqs[i]); - - return sum; + seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); + return 0; } void arch_send_call_function_ipi_mask(const struct cpumask *mask) @@ -841,8 +836,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask) #ifdef CONFIG_IRQ_WORK void arch_irq_work_raise(void) { - if (__smp_cross_call) - smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); + smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); } #endif @@ -890,15 +884,12 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) /* * Main handler for inter-processor interrupts */ -void handle_IPI(int ipinr, struct pt_regs *regs) +static void do_handle_IPI(int ipinr) { unsigned int cpu = smp_processor_id(); - struct pt_regs *old_regs = set_irq_regs(regs); - if ((unsigned)ipinr < NR_IPI) { + if ((unsigned)ipinr < NR_IPI) trace_ipi_entry_rcuidle(ipi_types[ipinr]); - __inc_irq_stat(cpu, ipi_irqs[ipinr]); - } switch (ipinr) { case IPI_RESCHEDULE: @@ -906,21 +897,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; case IPI_CALL_FUNC: - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); break; case IPI_CPU_STOP: - irq_enter(); local_cpu_stop(); - irq_exit(); break; case IPI_CPU_CRASH_STOP: if (IS_ENABLED(CONFIG_KEXEC_CORE)) { - irq_enter(); - ipi_cpu_crash_stop(cpu, regs); + ipi_cpu_crash_stop(cpu, get_irq_regs()); unreachable(); } @@ -928,17 +914,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs) #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: - irq_enter(); tick_receive_broadcast(); - irq_exit(); break; #endif #ifdef CONFIG_IRQ_WORK case IPI_IRQ_WORK: - irq_enter(); irq_work_run(); - irq_exit(); break; #endif @@ -957,7 +939,66 @@ void handle_IPI(int ipinr, struct pt_regs *regs) if ((unsigned)ipinr < NR_IPI) trace_ipi_exit_rcuidle(ipi_types[ipinr]); - set_irq_regs(old_regs); +} + +static irqreturn_t ipi_handler(int irq, void *data) +{ + do_handle_IPI(irq - ipi_irq_base); + return IRQ_HANDLED; +} + +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + trace_ipi_raise(target, ipi_types[ipinr]); + __ipi_send_mask(ipi_desc[ipinr], target); +} + +static void ipi_setup(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + enable_percpu_irq(ipi_irq_base + i, 0); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void ipi_teardown(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + disable_percpu_irq(ipi_irq_base + i); +} +#endif + +void __init set_smp_ipi_range(int ipi_base, int n) +{ + int i; + + WARN_ON(n < NR_IPI); + nr_ipi = min(n, NR_IPI); + + for (i = 0; i < nr_ipi; i++) { + int err; + + err = request_percpu_irq(ipi_base + i, ipi_handler, + "IPI", &cpu_number); + WARN_ON(err); + + ipi_desc[i] = irq_to_desc(ipi_base + i); + irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); + } + + ipi_irq_base = ipi_base; + + /* Setup the boot CPU immediately */ + ipi_setup(smp_processor_id()); } void smp_send_reschedule(int cpu) diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index c8a3fee00c11..5892e79fa429 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ 
b/arch/arm64/kernel/smp_spin_table.c @@ -83,9 +83,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) /* * We write the release address as LE regardless of the native - * endianess of the kernel. Therefore, any boot-loaders that + * endianness of the kernel. Therefore, any boot-loaders that * read this address need to convert this address to the - * boot-loader's endianess before jumping. This is mandated by + * boot-loader's endianness before jumping. This is mandated by * the boot protocol. */ writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr); diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c deleted file mode 100644 index b26955f56750..000000000000 --- a/arch/arm64/kernel/ssbd.c +++ /dev/null @@ -1,129 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2018 ARM Ltd, All Rights Reserved. - */ - -#include <linux/compat.h> -#include <linux/errno.h> -#include <linux/prctl.h> -#include <linux/sched.h> -#include <linux/sched/task_stack.h> -#include <linux/thread_info.h> - -#include <asm/cpufeature.h> - -static void ssbd_ssbs_enable(struct task_struct *task) -{ - u64 val = is_compat_thread(task_thread_info(task)) ? - PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; - - task_pt_regs(task)->pstate |= val; -} - -static void ssbd_ssbs_disable(struct task_struct *task) -{ - u64 val = is_compat_thread(task_thread_info(task)) ? - PSR_AA32_SSBS_BIT : PSR_SSBS_BIT; - - task_pt_regs(task)->pstate &= ~val; -} - -/* - * prctl interface for SSBD - */ -static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) -{ - int state = arm64_get_ssbd_state(); - - /* Unsupported */ - if (state == ARM64_SSBD_UNKNOWN) - return -ENODEV; - - /* Treat the unaffected/mitigated state separately */ - if (state == ARM64_SSBD_MITIGATED) { - switch (ctrl) { - case PR_SPEC_ENABLE: - return -EPERM; - case PR_SPEC_DISABLE: - case PR_SPEC_FORCE_DISABLE: - return 0; - } - } - - /* - * Things are a bit backward here: the arm64 internal API - * *enables the mitigation* when the userspace API *disables - * speculation*. So much fun. 
- */ - switch (ctrl) { - case PR_SPEC_ENABLE: - /* If speculation is force disabled, enable is not allowed */ - if (state == ARM64_SSBD_FORCE_ENABLE || - task_spec_ssb_force_disable(task)) - return -EPERM; - task_clear_spec_ssb_disable(task); - clear_tsk_thread_flag(task, TIF_SSBD); - ssbd_ssbs_enable(task); - break; - case PR_SPEC_DISABLE: - if (state == ARM64_SSBD_FORCE_DISABLE) - return -EPERM; - task_set_spec_ssb_disable(task); - set_tsk_thread_flag(task, TIF_SSBD); - ssbd_ssbs_disable(task); - break; - case PR_SPEC_FORCE_DISABLE: - if (state == ARM64_SSBD_FORCE_DISABLE) - return -EPERM; - task_set_spec_ssb_disable(task); - task_set_spec_ssb_force_disable(task); - set_tsk_thread_flag(task, TIF_SSBD); - ssbd_ssbs_disable(task); - break; - default: - return -ERANGE; - } - - return 0; -} - -int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, - unsigned long ctrl) -{ - switch (which) { - case PR_SPEC_STORE_BYPASS: - return ssbd_prctl_set(task, ctrl); - default: - return -ENODEV; - } -} - -static int ssbd_prctl_get(struct task_struct *task) -{ - switch (arm64_get_ssbd_state()) { - case ARM64_SSBD_UNKNOWN: - return -ENODEV; - case ARM64_SSBD_FORCE_ENABLE: - return PR_SPEC_DISABLE; - case ARM64_SSBD_KERNEL: - if (task_spec_ssb_force_disable(task)) - return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; - if (task_spec_ssb_disable(task)) - return PR_SPEC_PRCTL | PR_SPEC_DISABLE; - return PR_SPEC_PRCTL | PR_SPEC_ENABLE; - case ARM64_SSBD_FORCE_DISABLE: - return PR_SPEC_ENABLE; - default: - return PR_SPEC_NOT_AFFECTED; - } -} - -int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) -{ - switch (which) { - case PR_SPEC_STORE_BYPASS: - return ssbd_prctl_get(task); - default: - return -ENODEV; - } -} diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 2dd8e3b8b94b..fa56af1a59c3 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -118,12 +118,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) NOKPROBE_SYMBOL(unwind_frame); void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, - int (*fn)(struct stackframe *, void *), void *data) + bool (*fn)(void *, unsigned long), void *data) { while (1) { int ret; - if (fn(frame, data)) + if (!fn(data, frame->pc)) break; ret = unwind_frame(tsk, frame); if (ret < 0) @@ -132,84 +132,89 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, } NOKPROBE_SYMBOL(walk_stackframe); -#ifdef CONFIG_STACKTRACE -struct stack_trace_data { - struct stack_trace *trace; - unsigned int no_sched_functions; - unsigned int skip; -}; - -static int save_trace(struct stackframe *frame, void *d) +static void dump_backtrace_entry(unsigned long where, const char *loglvl) { - struct stack_trace_data *data = d; - struct stack_trace *trace = data->trace; - unsigned long addr = frame->pc; - - if (data->no_sched_functions && in_sched_functions(addr)) - return 0; - if (data->skip) { - data->skip--; - return 0; - } - - trace->entries[trace->nr_entries++] = addr; - - return trace->nr_entries >= trace->max_entries; + printk("%s %pS\n", loglvl, (void *)where); } -void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { - struct stack_trace_data data; struct stackframe frame; + int skip = 0; - data.trace = trace; - data.skip = trace->skip; - data.no_sched_functions = 0; + pr_debug("%s(regs = %p tsk = %p)\n", 
__func__, regs, tsk); - start_backtrace(&frame, regs->regs[29], regs->pc); - walk_stackframe(current, &frame, save_trace, &data); -} -EXPORT_SYMBOL_GPL(save_stack_trace_regs); + if (regs) { + if (user_mode(regs)) + return; + skip = 1; + } -static noinline void __save_stack_trace(struct task_struct *tsk, - struct stack_trace *trace, unsigned int nosched) -{ - struct stack_trace_data data; - struct stackframe frame; + if (!tsk) + tsk = current; if (!try_get_task_stack(tsk)) return; - data.trace = trace; - data.skip = trace->skip; - data.no_sched_functions = nosched; - - if (tsk != current) { - start_backtrace(&frame, thread_saved_fp(tsk), - thread_saved_pc(tsk)); - } else { - /* We don't want this function nor the caller */ - data.skip += 2; + if (tsk == current) { start_backtrace(&frame, (unsigned long)__builtin_frame_address(0), - (unsigned long)__save_stack_trace); + (unsigned long)dump_backtrace); + } else { + /* + * task blocked in __switch_to + */ + start_backtrace(&frame, + thread_saved_fp(tsk), + thread_saved_pc(tsk)); } - walk_stackframe(tsk, &frame, save_trace, &data); + printk("%sCall trace:\n", loglvl); + do { + /* skip until specified stack frame */ + if (!skip) { + dump_backtrace_entry(frame.pc, loglvl); + } else if (frame.fp == regs->regs[29]) { + skip = 0; + /* + * Mostly, this is the case where this function is + * called in panic/abort. As exception handler's + * stack frame does not contain the corresponding pc + * at which an exception has taken place, use regs->pc + * instead. + */ + dump_backtrace_entry(regs->pc, loglvl); + } + } while (!unwind_frame(tsk, &frame)); put_task_stack(tsk); } -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { - __save_stack_trace(tsk, trace, 1); + dump_backtrace(NULL, tsk, loglvl); + barrier(); } -EXPORT_SYMBOL_GPL(save_stack_trace_tsk); -void save_stack_trace(struct stack_trace *trace) +#ifdef CONFIG_STACKTRACE + +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs) { - __save_stack_trace(current, trace, 0); + struct stackframe frame; + + if (regs) + start_backtrace(&frame, regs->regs[29], regs->pc); + else if (task == current) + start_backtrace(&frame, + (unsigned long)__builtin_frame_address(0), + (unsigned long)arch_stack_walk); + else + start_backtrace(&frame, thread_saved_fp(task), + thread_saved_pc(task)); + + walk_stackframe(task, &frame, consume_entry, cookie); } -EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index c1dee9066ff9..96cd347c7a46 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -10,6 +10,7 @@ #include <asm/daifflags.h> #include <asm/debug-monitors.h> #include <asm/exec.h> +#include <asm/mte.h> #include <asm/memory.h> #include <asm/mmu_context.h> #include <asm/smp_plat.h> @@ -72,8 +73,10 @@ void notrace __cpu_suspend_exit(void) * have turned the mitigation on. If the user has forcefully * disabled it, make sure their wishes are obeyed. 
*/ - if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) - arm64_set_ssbd_mitigation(false); + spectre_v4_enable_mitigation(NULL); + + /* Restore additional MTE-specific configuration */ + mte_suspend_exit(); } /* diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 5f0c04863d2c..e4c0dadf0d92 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -123,6 +123,16 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, local_daif_restore(DAIF_PROCCTX); user_exit(); + if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) { + /* + * Process the asynchronous tag check fault before the actual + * syscall. do_notify_resume() will send a signal to userspace + * before the syscall is restarted. + */ + regs->regs[0] = -ERESTARTNOINTR; + return; + } + if (has_syscall_work(flags)) { /* * The de-facto standard way to skip a system call using ptrace diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 0801a0f3c156..543c67cae02f 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -36,21 +36,23 @@ void store_cpu_topology(unsigned int cpuid) if (mpidr & MPIDR_UP_BITMASK) return; - /* Create cpu topology mapping based on MPIDR. */ - if (mpidr & MPIDR_MT_BITMASK) { - /* Multiprocessor system : Multi-threads per core */ - cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8; - } else { - /* Multiprocessor system : Single-thread per core */ - cpuid_topo->thread_id = -1; - cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | - MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | - MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; - } + /* + * This would be the place to create cpu topology based on MPIDR. + * + * However, it cannot be trusted to depict the actual topology; some + * pieces of the architecture enforce an artificial cap on Aff0 values + * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an + * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up + * having absolutely no relationship to the actual underlying system + * topology, and cannot be reasonably used as core / package ID. + * + * If the MT bit is set, Aff0 *could* be used to define a thread ID, but + * we still wouldn't be able to obtain a sane core ID. This means we + * need to entirely ignore MPIDR for any topology deduction. + */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = cpuid; + cpuid_topo->package_id = cpu_to_node(cpuid); pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", cpuid, cpuid_topo->package_id, cpuid_topo->core_id, @@ -246,6 +248,13 @@ static int __init init_amu_fie(void) static_branch_enable(&amu_fie_key); } + /* + * If the system is not fully invariant after AMU init, disable + * partial use of counters for frequency invariance. 
+ */ + if (!topology_scale_freq_invariant()) + static_branch_disable(&amu_fie_key); + free_valid_mask: free_cpumask_var(valid_cpus); @@ -253,7 +262,7 @@ free_valid_mask: } late_initcall_sync(init_amu_fie); -bool arch_freq_counters_available(struct cpumask *cpus) +bool arch_freq_counters_available(const struct cpumask *cpus) { return amu_freq_invariant() && cpumask_subset(cpus, amu_fie_cpus); diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 13ebd5ca2070..8af4e0e85736 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -34,6 +34,7 @@ #include <asm/daifflags.h> #include <asm/debug-monitors.h> #include <asm/esr.h> +#include <asm/extable.h> #include <asm/insn.h> #include <asm/kprobes.h> #include <asm/traps.h> @@ -53,11 +54,6 @@ static const char *handler[]= { int show_unhandled_signals = 0; -static void dump_backtrace_entry(unsigned long where, const char *loglvl) -{ - printk("%s %pS\n", loglvl, (void *)where); -} - static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); @@ -83,66 +79,6 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) printk("%sCode: %s\n", lvl, str); } -void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, - const char *loglvl) -{ - struct stackframe frame; - int skip = 0; - - pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); - - if (regs) { - if (user_mode(regs)) - return; - skip = 1; - } - - if (!tsk) - tsk = current; - - if (!try_get_task_stack(tsk)) - return; - - if (tsk == current) { - start_backtrace(&frame, - (unsigned long)__builtin_frame_address(0), - (unsigned long)dump_backtrace); - } else { - /* - * task blocked in __switch_to - */ - start_backtrace(&frame, - thread_saved_fp(tsk), - thread_saved_pc(tsk)); - } - - printk("%sCall trace:\n", loglvl); - do { - /* skip until specified stack frame */ - if (!skip) { - dump_backtrace_entry(frame.pc, loglvl); - } else if (frame.fp == regs->regs[29]) { - skip = 0; - /* - * Mostly, this is the case where this function is - * called in panic/abort. As exception handler's - * stack frame does not contain the corresponding pc - * at which an exception has taken place, use regs->pc - * instead. - */ - dump_backtrace_entry(regs->pc, loglvl); - } - } while (!unwind_frame(tsk, &frame)); - - put_task_stack(tsk); -} - -void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) -{ - dump_backtrace(NULL, tsk, loglvl); - barrier(); -} - #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" #elif defined(CONFIG_PREEMPT_RT) @@ -200,9 +136,9 @@ void die(const char *str, struct pt_regs *regs, int err) oops_exit(); if (in_interrupt()) - panic("Fatal exception in interrupt"); + panic("%s: Fatal exception in interrupt", str); if (panic_on_oops) - panic("Fatal exception"); + panic("%s: Fatal exception", str); raw_spin_unlock_irqrestore(&die_lock, flags); @@ -412,7 +348,7 @@ exit: return fn ? 
fn(regs, instr) : 1; } -void force_signal_inject(int signal, int code, unsigned long address) +void force_signal_inject(int signal, int code, unsigned long address, unsigned int err) { const char *desc; struct pt_regs *regs = current_pt_regs(); @@ -438,7 +374,7 @@ void force_signal_inject(int signal, int code, unsigned long address) signal = SIGKILL; } - arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0); + arm64_notify_die(desc, regs, signal, code, (void __user *)address, err); } /* @@ -455,7 +391,7 @@ void arm64_notify_segfault(unsigned long addr) code = SEGV_ACCERR; mmap_read_unlock(current->mm); - force_signal_inject(SIGSEGV, code, addr); + force_signal_inject(SIGSEGV, code, addr, 0); } void do_undefinstr(struct pt_regs *regs) @@ -468,17 +404,28 @@ void do_undefinstr(struct pt_regs *regs) return; BUG_ON(!user_mode(regs)); - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } NOKPROBE_SYMBOL(do_undefinstr); void do_bti(struct pt_regs *regs) { BUG_ON(!user_mode(regs)); - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } NOKPROBE_SYMBOL(do_bti); +void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr) +{ + /* + * Unexpected FPAC exception or pointer authentication failure in + * the kernel: kill the task before it does any more harm. + */ + BUG_ON(!user_mode(regs)); + force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr); +} +NOKPROBE_SYMBOL(do_ptrauth_fault); + #define __user_cache_maint(insn, address, res) \ if (address >= user_addr_max()) { \ res = -EFAULT; \ @@ -528,7 +475,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) __user_cache_maint("ic ivau", address, ret); break; default: - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); return; } @@ -581,7 +528,7 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs) sysreg = esr_sys64_to_sysreg(esr); if (do_emulate_mrs(regs, sysreg, rt) != 0) - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } static void wfi_handler(unsigned int esr, struct pt_regs *regs) @@ -775,6 +722,7 @@ static const char *esr_class_str[] = { [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", [ESR_ELx_EC_SVE] = "SVE", [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB", + [ESR_ELx_EC_FPAC] = "FPAC", [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", @@ -935,26 +883,6 @@ asmlinkage void enter_from_user_mode(void) } NOKPROBE_SYMBOL(enter_from_user_mode); -void __pte_error(const char *file, int line, unsigned long val) -{ - pr_err("%s:%d: bad pte %016lx.\n", file, line, val); -} - -void __pmd_error(const char *file, int line, unsigned long val) -{ - pr_err("%s:%d: bad pmd %016lx.\n", file, line, val); -} - -void __pud_error(const char *file, int line, unsigned long val) -{ - pr_err("%s:%d: bad pud %016lx.\n", file, line, val); -} - -void __pgd_error(const char *file, int line, unsigned long val) -{ - pr_err("%s:%d: bad pgd %016lx.\n", file, line, val); -} - /* GENERIC_BUG traps */ int is_valid_bugaddr(unsigned long addr) @@ -994,6 +922,21 @@ static struct break_hook bug_break_hook = { .imm = BUG_BRK_IMM, }; +static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr) +{ + pr_err("%s generated an invalid instruction at %pS!\n", + in_bpf_jit(regs) ? 
"BPF JIT" : "Kernel text patching", + (void *)instruction_pointer(regs)); + + /* We cannot handle this */ + return DBG_HOOK_ERROR; +} + +static struct break_hook fault_break_hook = { + .fn = reserved_fault_handler, + .imm = FAULT_BRK_IMM, +}; + #ifdef CONFIG_KASAN_SW_TAGS #define KASAN_ESR_RECOVER 0x20 @@ -1059,6 +1002,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr, void __init trap_init(void) { register_kernel_break_hook(&bug_break_hook); + register_kernel_break_hook(&fault_break_hook); #ifdef CONFIG_KASAN_SW_TAGS register_kernel_break_hook(&kasan_break_hook); #endif diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index d4202a32abc9..debb8995d57f 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -30,15 +30,11 @@ #include <asm/vdso.h> extern char vdso_start[], vdso_end[]; -#ifdef CONFIG_COMPAT_VDSO extern char vdso32_start[], vdso32_end[]; -#endif /* CONFIG_COMPAT_VDSO */ enum vdso_abi { VDSO_ABI_AA64, -#ifdef CONFIG_COMPAT_VDSO VDSO_ABI_AA32, -#endif /* CONFIG_COMPAT_VDSO */ }; enum vvar_pages { @@ -284,21 +280,17 @@ up_fail: /* * Create and map the vectors page for AArch32 tasks. */ -#ifdef CONFIG_COMPAT_VDSO static int aarch32_vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { return __vdso_remap(VDSO_ABI_AA32, sm, new_vma); } -#endif /* CONFIG_COMPAT_VDSO */ enum aarch32_map { AA32_MAP_VECTORS, /* kuser helpers */ -#ifdef CONFIG_COMPAT_VDSO + AA32_MAP_SIGPAGE, AA32_MAP_VVAR, AA32_MAP_VDSO, -#endif - AA32_MAP_SIGPAGE }; static struct page *aarch32_vectors_page __ro_after_init; @@ -309,7 +301,10 @@ static struct vm_special_mapping aarch32_vdso_maps[] = { .name = "[vectors]", /* ABI */ .pages = &aarch32_vectors_page, }, -#ifdef CONFIG_COMPAT_VDSO + [AA32_MAP_SIGPAGE] = { + .name = "[sigpage]", /* ABI */ + .pages = &aarch32_sig_page, + }, [AA32_MAP_VVAR] = { .name = "[vvar]", .fault = vvar_fault, @@ -319,11 +314,6 @@ static struct vm_special_mapping aarch32_vdso_maps[] = { .name = "[vdso]", .mremap = aarch32_vdso_mremap, }, -#endif /* CONFIG_COMPAT_VDSO */ - [AA32_MAP_SIGPAGE] = { - .name = "[sigpage]", /* ABI */ - .pages = &aarch32_sig_page, - }, }; static int aarch32_alloc_kuser_vdso_page(void) @@ -362,25 +352,25 @@ static int aarch32_alloc_sigpage(void) return 0; } -#ifdef CONFIG_COMPAT_VDSO static int __aarch32_alloc_vdso_pages(void) { + + if (!IS_ENABLED(CONFIG_COMPAT_VDSO)) + return 0; + vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR]; vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO]; return __vdso_init(VDSO_ABI_AA32); } -#endif /* CONFIG_COMPAT_VDSO */ static int __init aarch32_alloc_vdso_pages(void) { int ret; -#ifdef CONFIG_COMPAT_VDSO ret = __aarch32_alloc_vdso_pages(); if (ret) return ret; -#endif ret = aarch32_alloc_sigpage(); if (ret) @@ -449,14 +439,12 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (ret) goto out; -#ifdef CONFIG_COMPAT_VDSO - ret = __setup_additional_pages(VDSO_ABI_AA32, - mm, - bprm, - uses_interp); - if (ret) - goto out; -#endif /* CONFIG_COMPAT_VDSO */ + if (IS_ENABLED(CONFIG_COMPAT_VDSO)) { + ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm, + uses_interp); + if (ret) + goto out; + } ret = aarch32_sigreturn_setup(mm); out: @@ -497,8 +485,7 @@ static int __init vdso_init(void) } arch_initcall(vdso_init); -int arch_setup_additional_pages(struct linux_binprm *bprm, - int uses_interp) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; 
int ret; @@ -506,11 +493,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, if (mmap_write_lock_killable(mm)) return -EINTR; - ret = __setup_additional_pages(VDSO_ABI_AA64, - mm, - bprm, - uses_interp); - + ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp); mmap_write_unlock(mm); return ret; diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 45d5cfe46429..04021a93171c 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -43,13 +43,6 @@ ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) endif -# Clang versions less than 8 do not support -mcmodel=tiny -ifeq ($(CONFIG_CC_IS_CLANG), y) - ifeq ($(shell test $(CONFIG_CLANG_VERSION) -lt 80000; echo $$?),0) - CFLAGS_REMOVE_vgettimeofday.o += -mcmodel=tiny - endif -endif - # Disable gcov profiling for VDSO code GCOV_PROFILE := n diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7cba7623fcec..5ca957e656ab 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -6,6 +6,7 @@ */ #define RO_EXCEPTION_TABLE_ALIGN 8 +#define RUNTIME_DISCARD_EXIT #include <asm-generic/vmlinux.lds.h> #include <asm/cache.h> @@ -96,16 +97,13 @@ SECTIONS * matching the same input section name. There is no documented * order of matching. */ + DISCARDS /DISCARD/ : { - EXIT_CALL - *(.discard) - *(.discard.*) *(.interp .dynamic) *(.dynsym .dynstr .hash .gnu.hash) - *(.eh_frame) } - . = KIMAGE_VADDR + TEXT_OFFSET; + . = KIMAGE_VADDR; .head.text : { _text = .; @@ -131,6 +129,14 @@ SECTIONS *(.got) /* Global offset table */ } + /* + * Make sure that the .got.plt is either completely empty or it + * contains only the lazy dispatch entries. + */ + .got.plt : { *(.got.plt) } + ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, + "Unexpected GOT/PLT entries detected!") + . = ALIGN(SEGMENT_ALIGN); _etext = .; /* End of text section */ @@ -249,8 +255,22 @@ SECTIONS _end = .; STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS HEAD_SYMBOLS + + /* + * Sections that should stay zero sized, which is safer to + * explicitly check instead of blindly discarding. + */ + .plt : { + *(.plt) *(.plt.*) *(.iplt) *(.igot) + } + ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") + + .data.rel.ro : { *(.data.rel.ro) } + ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!") } #include "image-vars.h" @@ -274,4 +294,4 @@ ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, /* * If padding is applied before .head.text, virt<->phys conversions will fail. */ -ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned") +ASSERT(_text == KIMAGE_VADDR, "HEAD is misaligned") diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 318c8f2df245..043756db8f6e 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -57,9 +57,6 @@ config KVM_ARM_PMU Adds support for a virtual Performance Monitoring Unit (PMU) in virtual machines. 
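The vdso hunks above replace #ifdef CONFIG_COMPAT_VDSO guards with IS_ENABLED() checks, letting the compiler discard the compat-only paths while still type-checking them. A minimal sketch of that idiom follows; it assumes kernel context (<linux/kconfig.h>) and uses hypothetical helper names, not the kernel's actual functions:

        /* Illustrative only: same dead-code-elimination idiom as the vdso rework. */
        static int setup_compat_vdso(struct mm_struct *mm)
        {
                if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
                        return 0;                       /* body compiled away when the option is off */

                return map_compat_vdso_pages(mm);       /* hypothetical helper */
        }

Because IS_ENABLED() expands to a plain 0/1 constant, both branches must still compile even when CONFIG_COMPAT_VDSO is disabled, which is why the diff can drop the explicit preprocessor guards without losing build coverage.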
-config KVM_INDIRECT_VECTORS - def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE - endif # KVM endif # VIRTUALIZATION diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b588c3b5c2f0..acf9a993dfb6 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1259,6 +1259,40 @@ long kvm_arch_vm_ioctl(struct file *filp, } } +static int kvm_map_vectors(void) +{ + /* + * SV2 = ARM64_SPECTRE_V2 + * HEL2 = ARM64_HARDEN_EL2_VECTORS + * + * !SV2 + !HEL2 -> use direct vectors + * SV2 + !HEL2 -> use hardened vectors in place + * !SV2 + HEL2 -> allocate one vector slot and use exec mapping + * SV2 + HEL2 -> use hardened vectors and use exec mapping + */ + if (cpus_have_const_cap(ARM64_SPECTRE_V2)) { + __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs); + __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base); + } + + if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) { + phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs); + unsigned long size = __BP_HARDEN_HYP_VECS_SZ; + + /* + * Always allocate a spare vector slot, as we don't + * know yet which CPUs have a BP hardening slot that + * we can reuse. + */ + __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot); + BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS); + return create_hyp_exec_mappings(vect_pa, size, + &__kvm_bp_vect_base); + } + + return 0; +} + static void cpu_init_hyp_mode(void) { phys_addr_t pgd_ptr; @@ -1295,7 +1329,7 @@ static void cpu_init_hyp_mode(void) * at EL2. */ if (this_cpu_has_cap(ARM64_SSBS) && - arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { + arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) { kvm_call_hyp_nvhe(__kvm_enable_ssbs); } } @@ -1552,10 +1586,6 @@ static int init_hyp_mode(void) } } - err = hyp_map_aux_data(); - if (err) - kvm_err("Cannot map host auxiliary data: %d\n", err); - return 0; out_err: diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index f54f0e89a71c..d898f0da5802 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \ -DDISABLE_BRANCH_PROFILING \ $(DISABLE_STACKLEAK_PLUGIN) -obj-$(CONFIG_KVM) += vhe/ nvhe/ -obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o +obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 46b4dab933d0..7ea277b82967 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -116,35 +116,6 @@ el1_hvc_guest: ARM_SMCCC_ARCH_WORKAROUND_2) cbnz w1, el1_trap -#ifdef CONFIG_ARM64_SSBD -alternative_cb arm64_enable_wa2_handling - b wa2_end -alternative_cb_end - get_vcpu_ptr x2, x0 - ldr x0, [x2, #VCPU_WORKAROUND_FLAGS] - - // Sanitize the argument and update the guest flags - ldr x1, [sp, #8] // Guest's x1 - clz w1, w1 // Murphy's device: - lsr w1, w1, #5 // w1 = !!w1 without using - eor w1, w1, #1 // the flags... 
- bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1 - str x0, [x2, #VCPU_WORKAROUND_FLAGS] - - /* Check that we actually need to perform the call */ - hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2 - cbz x0, wa2_end - - mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 - smc #0 - - /* Don't leak data from the SMC call */ - mov x3, xzr -wa2_end: - mov x2, xzr - mov x1, xzr -#endif - wa_epilogue: mov x0, xzr add sp, sp, #16 @@ -288,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector) valid_vect el1_error // Error 32-bit EL1 SYM_CODE_END(__kvm_hyp_vector) -#ifdef CONFIG_KVM_INDIRECT_VECTORS .macro hyp_ventry .align 7 1: esb @@ -338,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs) 1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ .org 1b SYM_CODE_END(__bp_harden_hyp_vecs) -#endif diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 0261308bf944..d0f07e8cc3ff 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -479,39 +479,6 @@ exit: return false; } -static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu) -{ - if (!cpus_have_final_cap(ARM64_SSBD)) - return false; - - return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG); -} - -static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu) -{ -#ifdef CONFIG_ARM64_SSBD - /* - * The host runs with the workaround always present. If the - * guest wants it disabled, so be it... - */ - if (__needs_ssbd_off(vcpu) && - __hyp_this_cpu_read(arm64_ssbd_callback_required)) - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL); -#endif -} - -static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) -{ -#ifdef CONFIG_ARM64_SSBD - /* - * If the guest has disabled the workaround, bring it back on. - */ - if (__needs_ssbd_off(vcpu) && - __hyp_this_cpu_read(arm64_ssbd_callback_required)) - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL); -#endif -} - static inline void __kvm_unexpected_el2_exception(void) { unsigned long addr, fixup; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 0970442d2dbc..8d3dd4f47924 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -202,8 +202,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __debug_switch_to_guest(vcpu); - __set_guest_arch_workaround_state(vcpu); - do { /* Jump in the fire! */ exit_code = __guest_enter(vcpu, host_ctxt); @@ -211,8 +209,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* And we're baaack! */ } while (fixup_guest_exit(vcpu, &exit_code)); - __set_host_arch_workaround_state(vcpu); - __sysreg_save_state_nvhe(guest_ctxt); __sysreg32_save_state(vcpu); __timer_disable_traps(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index c1da4f86ccac..ecf67e678203 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -131,8 +131,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) sysreg_restore_guest_state_vhe(guest_ctxt); __debug_switch_to_guest(vcpu); - __set_guest_arch_workaround_state(vcpu); - do { /* Jump in the fire! */ exit_code = __guest_enter(vcpu, host_ctxt); @@ -140,8 +138,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) /* And we're baaack! 
*/ } while (fixup_guest_exit(vcpu, &exit_code)); - __set_host_arch_workaround_state(vcpu); - sysreg_save_guest_state_vhe(guest_ctxt); __deactivate_traps(vcpu); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 550dfa3e53cd..9824025ccc5c 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) feature = smccc_get_arg1(vcpu); switch (feature) { case ARM_SMCCC_ARCH_WORKAROUND_1: - switch (kvm_arm_harden_branch_predictor()) { - case KVM_BP_HARDEN_UNKNOWN: + switch (arm64_get_spectre_v2_state()) { + case SPECTRE_VULNERABLE: break; - case KVM_BP_HARDEN_WA_NEEDED: + case SPECTRE_MITIGATED: val = SMCCC_RET_SUCCESS; break; - case KVM_BP_HARDEN_NOT_REQUIRED: + case SPECTRE_UNAFFECTED: val = SMCCC_RET_NOT_REQUIRED; break; } break; case ARM_SMCCC_ARCH_WORKAROUND_2: - switch (kvm_arm_have_ssbd()) { - case KVM_SSBD_FORCE_DISABLE: - case KVM_SSBD_UNKNOWN: + switch (arm64_get_spectre_v4_state()) { + case SPECTRE_VULNERABLE: break; - case KVM_SSBD_KERNEL: - val = SMCCC_RET_SUCCESS; - break; - case KVM_SSBD_FORCE_ENABLE: - case KVM_SSBD_MITIGATED: + case SPECTRE_MITIGATED: + /* + * SSBS everywhere: Indicate no firmware + * support, as the SSBS support will be + * indicated to the guest and the default is + * safe. + * + * Otherwise, expose a permanent mitigation + * to the guest, and hide SSBS so that the + * guest stays protected. + */ + if (cpus_have_final_cap(ARM64_SSBS)) + break; + fallthrough; + case SPECTRE_UNAFFECTED: val = SMCCC_RET_NOT_REQUIRED; break; } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index f0d0312c0a55..81916e360b1e 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -269,6 +269,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) kvm_pmu_release_perf_event(&pmu->pmc[i]); + irq_work_sync(&vcpu->arch.pmu.overflow_work); } u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) @@ -434,6 +435,22 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) } /** + * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding + * to the event. + * This is why we need a callback to do it once outside of the NMI context. + */ +static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work) +{ + struct kvm_vcpu *vcpu; + struct kvm_pmu *pmu; + + pmu = container_of(work, struct kvm_pmu, overflow_work); + vcpu = kvm_pmc_to_vcpu(pmu->pmc); + + kvm_vcpu_kick(vcpu); +} + +/** * When the perf event overflows, set the overflow status and inform the vcpu. 
*/ static void kvm_pmu_perf_overflow(struct perf_event *perf_event, @@ -465,7 +482,11 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event, if (kvm_pmu_overflow_status(vcpu)) { kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); - kvm_vcpu_kick(vcpu); + + if (!in_nmi()) + kvm_vcpu_kick(vcpu); + else + irq_work_queue(&vcpu->arch.pmu.overflow_work); } cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD); @@ -764,6 +785,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) return ret; } + init_irq_work(&vcpu->arch.pmu.overflow_work, + kvm_pmu_perf_overflow_notify_vcpu); + vcpu->arch.pmu.created = true; return 0; } diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 83415e96b589..db4056ecccfd 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid) { switch (regid) { case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: - switch (kvm_arm_harden_branch_predictor()) { - case KVM_BP_HARDEN_UNKNOWN: + switch (arm64_get_spectre_v2_state()) { + case SPECTRE_VULNERABLE: return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL; - case KVM_BP_HARDEN_WA_NEEDED: + case SPECTRE_MITIGATED: return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL; - case KVM_BP_HARDEN_NOT_REQUIRED: + case SPECTRE_UNAFFECTED: return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED; } return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL; case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: - switch (kvm_arm_have_ssbd()) { - case KVM_SSBD_FORCE_DISABLE: - return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; - case KVM_SSBD_KERNEL: - return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL; - case KVM_SSBD_FORCE_ENABLE: - case KVM_SSBD_MITIGATED: + switch (arm64_get_spectre_v4_state()) { + case SPECTRE_MITIGATED: + /* + * As for the hypercall discovery, we pretend we + * don't have any FW mitigation if SSBS is there at + * all times. + */ + if (cpus_have_final_cap(ARM64_SSBS)) + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; + fallthrough; + case SPECTRE_UNAFFECTED: return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED; - case KVM_SSBD_UNKNOWN: - default: - return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN; + case SPECTRE_VULNERABLE: + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; } } @@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) val = kvm_psci_version(vcpu, vcpu->kvm); break; case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: - val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; - break; case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; - - if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL && - kvm_arm_get_vcpu_workaround_2_flag(vcpu)) - val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED; break; default: return -ENOENT; @@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED)) return -EINVAL; - wa_level = val & KVM_REG_FEATURE_LEVEL_MASK; - - if (get_kernel_wa_level(reg->id) < wa_level) - return -EINVAL; - /* The enabled bit must not be set unless the level is AVAIL. */ - if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL && - wa_level != val) + if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) && + (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL) return -EINVAL; - /* Are we finished or do we need to check the enable bit ? 
*/ - if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL) - return 0; - /* - * If this kernel supports the workaround to be switched on - * or off, make sure it matches the requested setting. + * Map all the possible incoming states to the only two we + * really want to deal with. */ - switch (wa_level) { - case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL: - kvm_arm_set_vcpu_workaround_2_flag(vcpu, - val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED); + switch (val & KVM_REG_FEATURE_LEVEL_MASK) { + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL: + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN: + wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; break; + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL: case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED: - kvm_arm_set_vcpu_workaround_2_flag(vcpu, true); + wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED; break; + default: + return -EINVAL; } + /* + * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the + * other way around. + */ + if (get_kernel_wa_level(reg->id) < wa_level) + return -EINVAL; + return 0; default: return -ENOENT; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index ee33875c5c2a..f6e8b4a75cbb 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.reset_state.reset = false; } - /* Default workaround setup is enabled (if supported) */ - if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) - vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; - /* Reset timer */ ret = kvm_timer_vcpu_reset(vcpu); out: diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 077293b5115f..9ca270603980 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1131,6 +1131,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, if (!vcpu_has_sve(vcpu)) val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT); + if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) && + arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) + val |= (1UL << ID_AA64PFR0_CSV2_SHIFT); + } else if (id == SYS_ID_AA64PFR1_EL1) { + val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT); } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | (0xfUL << ID_AA64ISAR1_API_SHIFT) | @@ -1382,6 +1387,13 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + kvm_inject_undefined(vcpu); + return false; +} + /* sys_reg_desc initialiser for known cpufeature ID registers */ #define ID_SANITISED(name) { \ SYS_DESC(SYS_##name), \ @@ -1547,6 +1559,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 }, { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, + + { SYS_DESC(SYS_RGSR_EL1), access_mte_regs }, + { SYS_DESC(SYS_GCR_EL1), access_mte_regs }, + { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, @@ -1571,6 +1587,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi }, { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, + { SYS_DESC(SYS_TFSR_EL1), access_mte_regs }, + { 
SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs }, + { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 5c786b915cd3..52d6f24f65dc 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -1001,8 +1001,8 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) raw_spin_lock_irqsave(&irq->irq_lock, flags); /* - * An access targetting Group0 SGIs can only generate - * those, while an access targetting Group1 SGIs can + * An access targeting Group0 SGIs can only generate + * those, while an access targeting Group1 SGIs can * generate interrupts of either group. */ if (!irq->group || allow_group1) { diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 2fc253466dbf..d31e1169d9b8 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -16,3 +16,5 @@ lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o + +obj-$(CONFIG_ARM64_MTE) += mte.o diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S new file mode 100644 index 000000000000..03ca6d8b8670 --- /dev/null +++ b/arch/arm64/lib/mte.S @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. + */ +#include <linux/linkage.h> + +#include <asm/alternative.h> +#include <asm/assembler.h> +#include <asm/mte.h> +#include <asm/page.h> +#include <asm/sysreg.h> + + .arch armv8.5-a+memtag + +/* + * multitag_transfer_size - set \reg to the block size that is accessed by the + * LDGM/STGM instructions. + */ + .macro multitag_transfer_size, reg, tmp + mrs_s \reg, SYS_GMID_EL1 + ubfx \reg, \reg, #SYS_GMID_EL1_BS_SHIFT, #SYS_GMID_EL1_BS_SIZE + mov \tmp, #4 + lsl \reg, \tmp, \reg + .endm + +/* + * Clear the tags in a page + * x0 - address of the page to be cleared + */ +SYM_FUNC_START(mte_clear_page_tags) + multitag_transfer_size x1, x2 +1: stgm xzr, [x0] + add x0, x0, x1 + tst x0, #(PAGE_SIZE - 1) + b.ne 1b + ret +SYM_FUNC_END(mte_clear_page_tags) + +/* + * Copy the tags from the source page to the destination one + * x0 - address of the destination page + * x1 - address of the source page + */ +SYM_FUNC_START(mte_copy_page_tags) + mov x2, x0 + mov x3, x1 + multitag_transfer_size x5, x6 +1: ldgm x4, [x3] + stgm x4, [x2] + add x2, x2, x5 + add x3, x3, x5 + tst x2, #(PAGE_SIZE - 1) + b.ne 1b + ret +SYM_FUNC_END(mte_copy_page_tags) + +/* + * Read tags from a user buffer (one tag per byte) and set the corresponding + * tags at the given kernel address. Used by PTRACE_POKEMTETAGS. + * x0 - kernel address (to) + * x1 - user buffer (from) + * x2 - number of tags/bytes (n) + * Returns: + * x0 - number of tags read/set + */ +SYM_FUNC_START(mte_copy_tags_from_user) + mov x3, x1 + cbz x2, 2f +1: + uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0 + lsl x4, x4, #MTE_TAG_SHIFT + stg x4, [x0], #MTE_GRANULE_SIZE + add x1, x1, #1 + subs x2, x2, #1 + b.ne 1b + + // exception handling and function return +2: sub x0, x1, x3 // update the number of tags set + ret +SYM_FUNC_END(mte_copy_tags_from_user) + +/* + * Get the tags from a kernel address range and write the tag values to the + * given user buffer (one tag per byte). Used by PTRACE_PEEKMTETAGS. 
+ * x0 - user buffer (to) + * x1 - kernel address (from) + * x2 - number of tags/bytes (n) + * Returns: + * x0 - number of tags read/set + */ +SYM_FUNC_START(mte_copy_tags_to_user) + mov x3, x0 + cbz x2, 2f +1: + ldg x4, [x1] + ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE + uao_user_alternative 2f, strb, sttrb, w4, x0, 0 + add x0, x0, #1 + add x1, x1, #MTE_GRANULE_SIZE + subs x2, x2, #1 + b.ne 1b + + // exception handling and function return +2: sub x0, x0, x3 // update the number of tags copied + ret +SYM_FUNC_END(mte_copy_tags_to_user) + +/* + * Save the tags in a page + * x0 - page address + * x1 - tag storage + */ +SYM_FUNC_START(mte_save_page_tags) + multitag_transfer_size x7, x5 +1: + mov x2, #0 +2: + ldgm x5, [x0] + orr x2, x2, x5 + add x0, x0, x7 + tst x0, #0xFF // 16 tag values fit in a register, + b.ne 2b // which is 16*16=256 bytes + + str x2, [x1], #8 + + tst x0, #(PAGE_SIZE - 1) + b.ne 1b + + ret +SYM_FUNC_END(mte_save_page_tags) + +/* + * Restore the tags in a page + * x0 - page address + * x1 - tag storage + */ +SYM_FUNC_START(mte_restore_page_tags) + multitag_transfer_size x7, x5 +1: + ldr x2, [x1], #8 +2: + stgm x2, [x0] + add x0, x0, x7 + tst x0, #0xFF + b.ne 2b + + tst x0, #(PAGE_SIZE - 1) + b.ne 1b + + ret +SYM_FUNC_END(mte_restore_page_tags) diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index d91030f0ffee..5ead3c3de3b6 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -4,10 +4,11 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ ioremap.o mmap.o pgd.o mmu.o \ context.o proc.o pageattr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += dump.o +obj-$(CONFIG_PTDUMP_CORE) += ptdump.o obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o +obj-$(CONFIG_ARM64_MTE) += mteswap.o KASAN_SANITIZE_physaddr.o += n obj-$(CONFIG_KASAN) += kasan_init.o diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 9b11c096a042..001737a8f309 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -27,6 +27,10 @@ static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; +static unsigned long max_pinned_asids; +static unsigned long nr_pinned_asids; +static unsigned long *pinned_asid_map; + #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) @@ -72,7 +76,7 @@ void verify_cpu_asid_bits(void) } } -static void set_kpti_asid_bits(void) +static void set_kpti_asid_bits(unsigned long *map) { unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long); /* @@ -81,13 +85,15 @@ static void set_kpti_asid_bits(void) * is set, then the ASID will map only userspace. Thus * mark even as reserved for kernel. */ - memset(asid_map, 0xaa, len); + memset(map, 0xaa, len); } static void set_reserved_asid_bits(void) { - if (arm64_kernel_unmapped_at_el0()) - set_kpti_asid_bits(); + if (pinned_asid_map) + bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS); + else if (arm64_kernel_unmapped_at_el0()) + set_kpti_asid_bits(asid_map); else bitmap_clear(asid_map, 0, NUM_USER_ASIDS); } @@ -166,6 +172,14 @@ static u64 new_context(struct mm_struct *mm) return newasid; /* + * If it is pinned, we can keep using it. Note that reserved + * takes priority, because even if it is also pinned, we need to + * update the generation into the reserved_asids. 
+ */ + if (refcount_read(&mm->context.pinned)) + return newasid; + + /* * We had a valid ASID in a previous life, so try to re-use * it if possible. */ @@ -256,6 +270,71 @@ switch_mm_fastpath: cpu_switch_mm(mm->pgd, mm); } +unsigned long arm64_mm_context_get(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid; + + if (!pinned_asid_map) + return 0; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + asid = atomic64_read(&mm->context.id); + + if (refcount_inc_not_zero(&mm->context.pinned)) + goto out_unlock; + + if (nr_pinned_asids >= max_pinned_asids) { + asid = 0; + goto out_unlock; + } + + if (!asid_gen_match(asid)) { + /* + * We went through one or more rollover since that ASID was + * used. Ensure that it is still valid, or generate a new one. + */ + asid = new_context(mm); + atomic64_set(&mm->context.id, asid); + } + + nr_pinned_asids++; + __set_bit(asid2idx(asid), pinned_asid_map); + refcount_set(&mm->context.pinned, 1); + +out_unlock: + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + + asid &= ~ASID_MASK; + + /* Set the equivalent of USER_ASID_BIT */ + if (asid && arm64_kernel_unmapped_at_el0()) + asid |= 1; + + return asid; +} +EXPORT_SYMBOL_GPL(arm64_mm_context_get); + +void arm64_mm_context_put(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid = atomic64_read(&mm->context.id); + + if (!pinned_asid_map) + return; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + if (refcount_dec_and_test(&mm->context.pinned)) { + __clear_bit(asid2idx(asid), pinned_asid_map); + nr_pinned_asids--; + } + + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); +} +EXPORT_SYMBOL_GPL(arm64_mm_context_put); + /* Errata workaround post TTBRx_EL1 update. */ asmlinkage void post_ttbr_update_workaround(void) { @@ -296,8 +375,11 @@ static int asids_update_limit(void) { unsigned long num_available_asids = NUM_USER_ASIDS; - if (arm64_kernel_unmapped_at_el0()) + if (arm64_kernel_unmapped_at_el0()) { num_available_asids /= 2; + if (pinned_asid_map) + set_kpti_asid_bits(pinned_asid_map); + } /* * Expect allocation after rollover to fail if we don't have at least * one more ASID than CPUs. ASID #0 is reserved for init_mm. @@ -305,6 +387,13 @@ static int asids_update_limit(void) WARN_ON(num_available_asids - 1 <= num_possible_cpus()); pr_info("ASID allocator initialised with %lu entries\n", num_available_asids); + + /* + * There must always be an ASID available after rollover. Ensure that, + * even if all CPUs have a reserved ASID and the maximum number of ASIDs + * are pinned, there still is at least one empty slot in the ASID map. + */ + max_pinned_asids = num_available_asids - num_possible_cpus() - 2; return 0; } arch_initcall(asids_update_limit); @@ -319,13 +408,17 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); + pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), + sizeof(*pinned_asid_map), GFP_KERNEL); + nr_pinned_asids = 0; + /* * We cannot call set_reserved_asid_bits() here because CPU * caps are not finalized yet, so it is safer to assume KPTI * and reserve kernel ASID's from beginning. */ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) - set_kpti_asid_bits(); + set_kpti_asid_bits(asid_map); return 0; } early_initcall(asids_init); diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 2ee7b73433a5..70a71f38b6a9 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -6,21 +6,32 @@ * Copyright (C) 2012 ARM Ltd. 
*/ +#include <linux/bitops.h> #include <linux/mm.h> #include <asm/page.h> #include <asm/cacheflush.h> +#include <asm/cpufeature.h> +#include <asm/mte.h> -void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +void copy_highpage(struct page *to, struct page *from) { - struct page *page = virt_to_page(kto); + struct page *kto = page_address(to); + struct page *kfrom = page_address(from); + copy_page(kto, kfrom); - flush_dcache_page(page); + + if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { + set_bit(PG_mte_tagged, &to->flags); + mte_copy_page_tags(kto, kfrom); + } } -EXPORT_SYMBOL_GPL(__cpu_copy_user_page); +EXPORT_SYMBOL(copy_highpage); -void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) { - clear_page(kaddr); + copy_highpage(to, from); + flush_dcache_page(to); } -EXPORT_SYMBOL_GPL(__cpu_clear_user_page); +EXPORT_SYMBOL_GPL(copy_user_highpage); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 6c45350e33aa..93e87b287556 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -6,7 +6,7 @@ #include <linux/gfp.h> #include <linux/cache.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/dma-iommu.h> #include <xen/xen.h> #include <xen/swiotlb-xen.h> diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index eee1732ab6cd..aa0060178343 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -14,9 +14,7 @@ int fixup_exception(struct pt_regs *regs) if (!fixup) return 0; - if (IS_ENABLED(CONFIG_BPF_JIT) && - regs->pc >= BPF_JIT_REGION_START && - regs->pc < BPF_JIT_REGION_END) + if (in_bpf_jit(regs)) return arm64_bpf_fixup_exception(fixup, regs); regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index f07333e86c2f..94c99c1c19e3 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -218,7 +218,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma, pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval); } while (pteval != old_pteval); - flush_tlb_fix_spurious_fault(vma, address); + /* Invalidate a stale read-only entry */ + if (dirty) + flush_tlb_page(vma, address); return 1; } @@ -641,6 +643,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 0; } +static int do_tag_check_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + do_bad_area(addr, esr, regs); + return 0; +} + static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" }, { do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" }, @@ -659,7 +668,7 @@ static const struct fault_info fault_info[] = { { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 17" }, + { do_tag_check_fault, SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 18" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 19" }, { do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" }, diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 481d22c32a2e..a53c1e0fb017 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -21,8 +21,7 @@ #include <linux/of.h> #include 
<linux/of_fdt.h> #include <linux/dma-direct.h> -#include <linux/dma-mapping.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/efi.h> #include <linux/swiotlb.h> #include <linux/vmalloc.h> @@ -429,6 +428,8 @@ void __init bootmem_init(void) arm64_hugetlb_cma_reserve(); #endif + dma_pernuma_cma_reserve(); + /* * sparse_init() tries to allocate memory from memblock, so must be * done after the fixed reservations @@ -471,12 +472,10 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) */ static void __init free_unused_memmap(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - start = __phys_to_pfn(reg->base); + unsigned long start, end, prev_end = 0; + int i; + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist due @@ -496,8 +495,7 @@ static void __init free_unused_memmap(void) * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ - prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), - MAX_ORDER_NR_PAGES); + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 7291b26ce788..b24e43d20667 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -212,8 +212,8 @@ void __init kasan_init(void) { u64 kimg_shadow_start, kimg_shadow_end; u64 mod_shadow_start, mod_shadow_end; - struct memblock_region *reg; - int i; + phys_addr_t pa_start, pa_end; + u64 i; kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK; kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end)); @@ -246,9 +246,9 @@ void __init kasan_init(void) kasan_populate_early_shadow((void *)mod_shadow_end, (void *)kimg_shadow_start); - for_each_memblock(memory, reg) { - void *start = (void *)__phys_to_virt(reg->base); - void *end = (void *)__phys_to_virt(reg->base + reg->size); + for_each_mem_range(i, &pa_start, &pa_end) { + void *start = (void *)__phys_to_virt(pa_start); + void *end = (void *)__phys_to_virt(pa_end); if (start >= end) break; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 75df62fea1b6..beff3ad8c7f8 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -43,7 +43,7 @@ u64 idmap_t0sz = TCR_T0SZ(VA_BITS); u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; -u64 __section(".mmuoff.data.write") vabits_actual; +u64 __section(.mmuoff.data.write) vabits_actual; EXPORT_SYMBOL(vabits_actual); u64 kimage_voffset __ro_after_init; @@ -122,7 +122,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new) * The following mapping attributes may be updated in live * kernel mappings without the need for break-before-make. */ - static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; + pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; /* creating or taking down mappings is always safe */ if (old == 0 || new == 0) @@ -136,6 +136,17 @@ static bool pgattr_change_is_safe(u64 old, u64 new) if (old & ~new & PTE_NG) return false; + /* + * Changing the memory type between Normal and Normal-Tagged is safe + * since Tagged is considered a permission attribute from the + * mismatched attribute aliases perspective. 
+ */ + if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || + (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) && + ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || + (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED))) + mask |= PTE_ATTRINDX_MASK; + return ((old ^ new) & ~mask) == 0; } @@ -462,8 +473,9 @@ static void __init map_mem(pgd_t *pgdp) { phys_addr_t kernel_start = __pa_symbol(_text); phys_addr_t kernel_end = __pa_symbol(__init_begin); - struct memblock_region *reg; + phys_addr_t start, end; int flags = 0; + u64 i; if (rodata_full || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; @@ -482,16 +494,15 @@ static void __init map_mem(pgd_t *pgdp) #endif /* map all the memory banks */ - for_each_memblock(memory, reg) { - phys_addr_t start = reg->base; - phys_addr_t end = start + reg->size; - + for_each_mem_range(i, &start, &end) { if (start >= end) break; - if (memblock_is_nomap(reg)) - continue; - - __map_memblock(pgdp, start, end, PAGE_KERNEL, flags); + /* + * The linear map must allow allocation tags reading/writing + * if MTE is present. Otherwise, it has the same attributes as + * PAGE_KERNEL. + */ + __map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags); } /* diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c new file mode 100644 index 000000000000..c52c1847079c --- /dev/null +++ b/arch/arm64/mm/mteswap.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/pagemap.h> +#include <linux/xarray.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <asm/mte.h> + +static DEFINE_XARRAY(mte_pages); + +void *mte_allocate_tag_storage(void) +{ + /* tags granule is 16 bytes, 2 tags stored per byte */ + return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL); +} + +void mte_free_tag_storage(char *storage) +{ + kfree(storage); +} + +int mte_save_tags(struct page *page) +{ + void *tag_storage, *ret; + + if (!test_bit(PG_mte_tagged, &page->flags)) + return 0; + + tag_storage = mte_allocate_tag_storage(); + if (!tag_storage) + return -ENOMEM; + + mte_save_page_tags(page_address(page), tag_storage); + + /* page_private contains the swap entry.val set in do_swap_page */ + ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL); + if (WARN(xa_is_err(ret), "Failed to store MTE tags")) { + mte_free_tag_storage(tag_storage); + return xa_err(ret); + } else if (ret) { + /* Entry is being replaced, free the old entry */ + mte_free_tag_storage(ret); + } + + return 0; +} + +bool mte_restore_tags(swp_entry_t entry, struct page *page) +{ + void *tags = xa_load(&mte_pages, entry.val); + + if (!tags) + return false; + + mte_restore_page_tags(page_address(page), tags); + + return true; +} + +void mte_invalidate_tags(int type, pgoff_t offset) +{ + swp_entry_t entry = swp_entry(type, offset); + void *tags = xa_erase(&mte_pages, entry.val); + + mte_free_tag_storage(tags); +} + +void mte_invalidate_tags_area(int type) +{ + swp_entry_t entry = swp_entry(type, 0); + swp_entry_t last_entry = swp_entry(type + 1, 0); + void *tags; + + XA_STATE(xa_state, &mte_pages, entry.val); + + xa_lock(&mte_pages); + xas_for_each(&xa_state, tags, last_entry.val - 1) { + __xa_erase(&mte_pages, xa_state.xa_index); + mte_free_tag_storage(tags); + } + xa_unlock(&mte_pages); +} diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 73f8b49d485c..a8303bc6b62a 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map); */ const 
struct cpumask *cpumask_of_node(int node) { - if (WARN_ON(node >= nr_node_ids)) + + if (node == NUMA_NO_NODE) + return cpu_all_mask; + + if (WARN_ON(node < 0 || node >= nr_node_ids)) return cpu_none_mask; if (WARN_ON(node_to_cpumask_map[node] == NULL)) @@ -350,7 +354,7 @@ static int __init numa_register_nodes(void) struct memblock_region *mblk; /* Check that valid nid is set to memblks */ - for_each_memblock(memory, mblk) { + for_each_mem_region(mblk) { int mblk_nid = memblock_get_region_node(mblk); if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) { @@ -423,19 +427,16 @@ out_free_distance: */ static int __init dummy_numa_init(void) { + phys_addr_t start = memblock_start_of_DRAM(); + phys_addr_t end = memblock_end_of_DRAM(); int ret; - struct memblock_region *mblk; if (numa_off) pr_info("NUMA disabled\n"); /* Forced off on command line. */ - pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", - memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); - - for_each_memblock(memory, mblk) { - ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); - if (!ret) - continue; + pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", start, end - 1); + ret = numa_add_memblk(0, start, end); + if (ret) { pr_err("NUMA init failed\n"); return ret; } @@ -448,7 +449,7 @@ static int __init dummy_numa_init(void) * arm64_numa_init() - Initialize NUMA * * Try each configured NUMA initialization method until one succeeds. The - * last fallback is dummy single node config encomapssing whole memory. + * last fallback is dummy single node config encompassing whole memory. */ void __init arm64_numa_init(void) { diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 23f648c2a199..1b94f5b82654 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -8,6 +8,7 @@ #include <linux/sched.h> #include <linux/vmalloc.h> +#include <asm/cacheflush.h> #include <asm/set_memory.h> #include <asm/tlbflush.h> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 796e47a571e6..23c326a06b2d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -18,6 +18,7 @@ #include <asm/cpufeature.h> #include <asm/alternative.h> #include <asm/smp.h> +#include <asm/sysreg.h> #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K @@ -44,14 +45,18 @@ #define TCR_KASAN_FLAGS 0 #endif -/* Default MAIR_EL1 */ +/* + * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and + * changed during __cpu_setup to Normal Tagged if the system supports MTE. + */ #define MAIR_EL1_SET \ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ - MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT)) + MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED)) #ifdef CONFIG_CPU_PM /** @@ -421,6 +426,29 @@ SYM_FUNC_START(__cpu_setup) * Memory region attributes */ mov_q x5, MAIR_EL1_SET +#ifdef CONFIG_ARM64_MTE + /* + * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported + * (ID_AA64PFR1_EL1[11:8] > 1). 
+ */ + mrs x10, ID_AA64PFR1_EL1 + ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4 + cmp x10, #ID_AA64PFR1_MTE + b.lt 1f + + /* Normal Tagged memory type at the corresponding MAIR index */ + mov x10, #MAIR_ATTR_NORMAL_TAGGED + bfi x5, x10, #(8 * MT_NORMAL_TAGGED), #8 + + /* initialize GCR_EL1: all non-zero tags excluded by default */ + mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK) + msr_s SYS_GCR_EL1, x10 + + /* clear any pending tag check faults in TFSR*_EL1 */ + msr_s SYS_TFSR_EL1, xzr + msr_s SYS_TFSRE0_EL1, xzr +1: +#endif msr mair_el1, x5 /* * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/ptdump.c index 0b8da1cc1c07..807dc634bbd2 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/ptdump.c @@ -41,6 +41,8 @@ static struct addr_marker address_markers[] = { { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, { KASAN_SHADOW_END, "Kasan shadow end" }, #endif + { BPF_JIT_REGION_START, "BPF start" }, + { BPF_JIT_REGION_END, "BPF end" }, { MODULES_VADDR, "Modules start" }, { MODULES_END, "Modules end" }, { VMALLOC_START, "vmalloc() area" }, @@ -169,6 +171,10 @@ static const struct prot_bits pte_bits[] = { .mask = PTE_ATTRINDX_MASK, .val = PTE_ATTRINDX(MT_NORMAL), .set = "MEM/NORMAL", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL_TAGGED), + .set = "MEM/NORMAL-TAGGED", } }; diff --git a/arch/c6x/include/asm/checksum.h b/arch/c6x/include/asm/checksum.h index 36770b8308d9..934918def632 100644 --- a/arch/c6x/include/asm/checksum.h +++ b/arch/c6x/include/asm/checksum.h @@ -26,6 +26,9 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, } #define csum_tcpudp_nofold csum_tcpudp_nofold +#define _HAVE_ARCH_CSUM_AND_COPY +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); + #include <asm-generic/checksum.h> #endif /* _ASM_C6X_CHECKSUM_H */ diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c index 8ef35131f999..9254c3b794a5 100644 --- a/arch/c6x/kernel/setup.c +++ b/arch/c6x/kernel/setup.c @@ -287,7 +287,8 @@ notrace void __init machine_init(unsigned long dt_ptr) void __init setup_arch(char **cmdline_p) { - struct memblock_region *reg; + phys_addr_t start, end; + u64 i; printk(KERN_INFO "Initializing kernel\n"); @@ -351,9 +352,9 @@ void __init setup_arch(char **cmdline_p) disable_caching(ram_start, ram_end - 1); /* Set caching of external RAM used by Linux */ - for_each_memblock(memory, reg) - enable_caching(CACHE_REGION_START(reg->base), - CACHE_REGION_START(reg->base + reg->size - 1)); + for_each_mem_range(i, &start, &end) + enable_caching(CACHE_REGION_START(start), + CACHE_REGION_START(end - 1)); #ifdef CONFIG_BLK_DEV_INITRD /* diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S index 9c07127485d1..57148866d8d3 100644 --- a/arch/c6x/lib/csum_64plus.S +++ b/arch/c6x/lib/csum_64plus.S @@ -24,7 +24,6 @@ ENTRY(csum_partial_copy_nocheck) MVC .S2 ILC,B30 - MV .D1X B6,A31 ; given csum ZERO .D1 A9 ; csum (a side) || ZERO .D2 B9 ; csum (b side) || SHRU .S2X A6,2,B5 ; len / 4 @@ -144,8 +143,7 @@ L91: SHRU .S2X A9,16,B4 SHRU .S1 A9,16,A0 [A0] BNOP .S1 L91,5 -L10: ADD .D1 A31,A9,A9 - MV .D1 A9,A4 +L10: MV .D1 A9,A4 BNOP .S2 B3,4 MVC .S2 B30,ILC diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index a5909091cb14..03df07a831fc 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c @@ -15,7 +15,7 @@ #include <linux/bitops.h> #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/dma-noncoherent.h> 
+#include <linux/dma-map-ops.h> #include <linux/memblock.h> #include <asm/cacheflush.h> diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 3d5afb5f5685..7f424c85772c 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -309,16 +309,3 @@ endmenu source "arch/csky/Kconfig.platforms" source "kernel/Kconfig.hz" - -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c index f0f733b7ac5a..589f090f48b9 100644 --- a/arch/csky/kernel/probes/kprobes.c +++ b/arch/csky/kernel/probes/kprobes.c @@ -404,87 +404,14 @@ int __init arch_populate_kprobe_blacklist(void) void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = - (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * return probes installed on them, and/or more than one - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always pushed into the head of the list - * - when multiple return probes are registered for the same - * function, the (chronologically) first instance's ret_addr - * will be the real return address, and all the rest will - * point to kretprobe_trampoline. - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->lr; + ri->fp = NULL; regs->lr = (unsigned long) &kretprobe_trampoline; } diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c index 0481f4e34538..e4cab16056d6 100644 --- a/arch/csky/kernel/setup.c +++ b/arch/csky/kernel/setup.c @@ -7,7 +7,7 @@ #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/start_kernel.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/screen_info.h> #include <asm/sections.h> #include <asm/mmu_context.h> diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index f05b413df328..f03033e17c29 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -109,6 +109,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c index 8f6571ae27c8..c3a775a7e8f9 100644 --- a/arch/csky/mm/dma-mapping.c +++ b/arch/csky/mm/dma-mapping.c @@ -2,9 +2,7 @@ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/cache.h> -#include <linux/dma-mapping.h> -#include <linux/dma-contiguous.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/genalloc.h> #include <linux/highmem.h> #include <linux/io.h> diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 83ce3caf7313..aea0a40b77a9 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -172,5 +172,5 @@ asmlinkage int sys_clone(unsigned long __user *args) kargs.exit_signal = (lower_32_bits(clone_flags) & CSIGNAL); kargs.stack = newsp; - return _do_fork(&kargs); + return kernel_clone(&kargs); } diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c index 28ac88358a89..0281f92eea3d 100644 --- a/arch/h8300/kernel/setup.c +++ b/arch/h8300/kernel/setup.c @@ -74,17 +74,15 @@ static void __init bootmem_init(void) memory_end = memory_start = 0; /* Find main memory where is the kernel */ - for_each_memblock(memory, region) { - memory_start = region->base; - memory_end = region->base + region->size; - } + memory_start = memblock_start_of_DRAM(); + memory_end = memblock_end_of_DRAM(); if (!memory_end) panic("No memory!"); /* setup bootmem globals (we use no_bootmem, but mm still depends on this) */ min_low_pfn = PFN_UP(memory_start); - max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); + max_low_pfn = PFN_DOWN(memory_end); max_pfn = max_low_pfn; memblock_reserve(__pa(_stext), _end - _stext); diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h index a5c42f4614c1..4bc6ad96c4c5 100644 --- a/arch/hexagon/include/asm/checksum.h +++ b/arch/hexagon/include/asm/checksum.h @@ -10,17 +10,6 @@ unsigned int do_csum(const void *voidptr, int len); /* - * the same as csum_partial, but copies from src while it - * checksums - * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ -#define csum_partial_copy_nocheck csum_partial_copy_nocheck -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - 
int len, __wsum sum); - -/* * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented */ diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 25f388d9cfcc..00b9a81075dd 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -5,7 +5,7 @@ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. */ -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/memblock.h> #include <linux/genalloc.h> #include <linux/module.h> diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index 0ca2471ddb9f..35b18e55eae8 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -67,5 +67,6 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS } diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c index c4a6b72d97de..ba50822a0800 100644 --- a/arch/hexagon/lib/checksum.c +++ b/arch/hexagon/lib/checksum.c @@ -176,14 +176,3 @@ unsigned int do_csum(const void *voidptr, int len) return 0xFFFF & sum0; } - -/* - * copy from ds while checksumming, otherwise like csum_partial - */ -__wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) -{ - memcpy(dst, src, len); - return csum_partial(dst, len, sum); -} -EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 5b4ec80bf586..f11a8ebfe5c2 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -8,6 +8,7 @@ menu "Processor type and features" config IA64 bool + select ARCH_HAS_DMA_MARK_CLEAN select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ACPI @@ -32,8 +33,6 @@ config IA64 select TTY select HAVE_ARCH_TRACEHOOK select HAVE_VIRT_CPU_ACCOUNTING - select DMA_NONCOHERENT_MMAP - select ARCH_HAS_SYNC_DMA_FOR_CPU select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -56,6 +55,7 @@ config IA64 select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH select NUMA if !FLATMEM + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI default y help The Itanium Processor Family is Intel's 64-bit successor to @@ -362,15 +362,6 @@ config ARCH_PROC_KCORE_TEXT config IA64_MCA_RECOVERY tristate "MCA recovery from errors other than TLB." -config PERFMON - bool "Performance monitor support" - depends on BROKEN - help - Selects whether support for the IA-64 performance monitor hardware - is included in the kernel. This makes some kernel data-structures a - little bigger and slows down execution a bit, but it is generally - a good idea to turn this on. If you're unsure, say Y. 
- config IA64_PALINFO tristate "/proc/pal support" help diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index f3ba813a5b80..cfed5ed89301 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -11,7 +11,6 @@ CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_PREEMPT=y # CONFIG_VIRTUAL_MEM_MAP is not set -CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=m @@ -27,10 +26,9 @@ CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m -CONFIG_IDE=m -CONFIG_BLK_DEV_IDECD=m -CONFIG_BLK_DEV_GENERIC=m -CONFIG_BLK_DEV_PIIX=m +CONFIG_ATA=m +CONFIG_ATA_GENERIC=m +CONFIG_ATA_PIIX=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_CONSTANTS=y diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index cb267a07c57f..ca0d596c800d 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -18,7 +18,6 @@ CONFIG_IA64_CYCLONE=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y CONFIG_IA64_MCA_RECOVERY=y -CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y @@ -45,11 +44,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_SGI_XP=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_PIIX=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_CMD64X=y +CONFIG_ATA_PIIX=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 7e25f2f031b6..281eb9c544f9 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig @@ -17,7 +17,6 @@ CONFIG_NR_CPUS=512 CONFIG_HOTPLUG_CPU=y CONFIG_SPARSEMEM_MANUAL=y CONFIG_IA64_MCA_RECOVERY=y -CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=m @@ -36,12 +35,11 @@ CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y +CONFIG_ATA=y CONFIG_BLK_DEV_IDECD=y -CONFIG_IDE_GENERIC=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_PIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_CMD64X=y +CONFIG_ATA_PIIX=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index 3f486d5bdc2d..b4f9819a1a45 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig @@ -21,7 +21,6 @@ CONFIG_HOTPLUG_CPU=y CONFIG_PERMIT_BSP_REMOVE=y CONFIG_FORCE_CPEI_RETARGET=y CONFIG_IA64_MCA_RECOVERY=y -CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y CONFIG_KEXEC=y CONFIG_EFI_VARS=y @@ -41,11 +40,10 @@ CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_PIIX=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_CMD64X=y +CONFIG_ATA_PIIX=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 261e98e1f5fe..629cb9cdf723 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig @@ -10,7 +10,6 @@ CONFIG_NR_CPUS=16 CONFIG_HOTPLUG_CPU=y CONFIG_FLATMEM_MANUAL=y CONFIG_IA64_MCA_RECOVERY=y -CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y CONFIG_CRASH_DUMP=y CONFIG_EFI_VARS=y @@ -26,10 +25,9 @@ CONFIG_IP_MULTICAST=y CONFIG_NETFILTER=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y 
-CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_CMD64X=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_CMD64X=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 656a4888c300..9148ddbf02e5 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -33,7 +33,7 @@ #include <linux/bitops.h> /* hweight64() */ #include <linux/crash_dump.h> #include <linux/iommu-helper.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/prefetch.h> #include <linux/swiotlb.h> @@ -485,8 +485,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev, ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); ASSERT(res_ptr < res_end); - boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1; - boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift; + boundary_size = dma_get_seg_boundary_nr_pages(dev, iovp_shift); BUG_ON(ioc->ibase & ~iovp_mask); shift = ioc->ibase >> iovp_shift; @@ -2071,6 +2070,8 @@ static const struct dma_map_ops sba_dma_ops = { .dma_supported = sba_dma_supported, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, }; static int __init diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h index 2a1c64629cdc..f3026213aa32 100644 --- a/arch/ia64/include/asm/checksum.h +++ b/arch/ia64/include/asm/checksum.h @@ -37,9 +37,6 @@ extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); -extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum); - /* * This routine is used for miscellaneous IP-like checksums, mainly in * icmp.c diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 95a2ec37400f..2d8bcdc27d7f 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -280,15 +280,6 @@ struct thread_struct { __u64 map_base; /* base address for get_unmapped_area() */ __u64 rbs_bot; /* the base address for the RBS */ int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */ - -#ifdef CONFIG_PERFMON - void *pfm_context; /* pointer to detailed PMU context */ - unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */ -# define INIT_THREAD_PM .pfm_context = NULL, \ - .pfm_needs_checking = 0UL, -#else -# define INIT_THREAD_PM -#endif unsigned long dbr[IA64_NUM_DBG_REGS]; unsigned long ibr[IA64_NUM_DBG_REGS]; struct ia64_fpreg fph[96]; /* saved/loaded on demand */ @@ -301,7 +292,6 @@ struct thread_struct { .map_base = DEFAULT_MAP_BASE, \ .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ .last_fph_cpu = -1, \ - INIT_THREAD_PM \ .dbr = {0, }, \ .ibr = {0, }, \ .fph = {{{{0}}}, } \ diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h index 9011e90a6b97..a5a4e09468fa 100644 --- a/arch/ia64/include/asm/switch_to.h +++ b/arch/ia64/include/asm/switch_to.h @@ -31,16 +31,8 @@ extern struct task_struct *ia64_switch_to (void *next_task); extern void ia64_save_extra (struct task_struct *task); extern void ia64_load_extra (struct task_struct *task); -#ifdef CONFIG_PERFMON - DECLARE_PER_CPU(unsigned long, pfm_syst_info); -# define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1) -#else -# define PERFMON_IS_SYSWIDE() (0) -#endif - #define 
IA64_HAS_EXTRA_STATE(t) \ - ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ - || PERFMON_IS_SYSWIDE()) + ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) #define __switch_to(prev,next,last) do { \ if (IA64_HAS_EXTRA_STATE(prev)) \ diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 1a8df6669eee..c89bd5f8cbf8 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -10,7 +10,7 @@ endif extra-y := head.o vmlinux.lds obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ - irq_lsapic.o ivt.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ + irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \ salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \ acpi-ext.o @@ -21,7 +21,6 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_NUMA) += numa.o -obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o obj-$(CONFIG_IA64_CYCLONE) += cyclone.o obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o obj-$(CONFIG_KPROBES) += kprobes.o @@ -41,7 +40,7 @@ obj-y += esi_stub.o # must be in kernel proper endif obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o -obj-$(CONFIG_BINFMT_ELF) += elfcore.o +obj-$(CONFIG_ELF_CORE) += elfcore.o # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index 09ef9ce9988d..cd0c166bfbc2 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/dma-direct.h> +#include <linux/dma-map-ops.h> #include <linux/export.h> /* Set this to 1 if there is a HW IOMMU in the system */ @@ -7,15 +7,3 @@ int iommu_detected __read_mostly; const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); - -void *arch_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) -{ - return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); -} - -void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_addr, unsigned long attrs) -{ - dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); -} diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 6fff934150eb..46e33c5cb53d 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -40,10 +40,6 @@ #include <asm/hw_irq.h> #include <asm/tlbflush.h> -#ifdef CONFIG_PERFMON -# include <asm/perfmon.h> -#endif - #define IRQ_DEBUG 0 #define IRQ_VECTOR_UNASSIGNED (0) @@ -627,9 +623,6 @@ init_IRQ (void) "irq_move"); } #endif -#ifdef CONFIG_PERFMON - pfm_init_percpu(); -#endif } void diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 7a7df944d798..fc1ff8a4d7de 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -396,83 +396,9 @@ static void kretprobe_trampoline(void) { } -/* - * At this point the target function has been tricked into - * returning into our trampoline. 
Lookup the associated instance - * and then: - * - call the handler function - * - cleanup by marking the instance as unused - * - long jump back to the original return address - */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = - ((struct fnptr *)kretprobe_trampoline)->ip; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - regs->cr_iip = orig_ret_address; - - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler @@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->b0; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c deleted file mode 100644 index 0dc3611e7971..000000000000 --- a/arch/ia64/kernel/perfmon.c +++ /dev/null @@ -1,6703 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * This file implements the perfmon-2 subsystem which is used - * to program the IA-64 Performance Monitoring Unit (PMU). - * - * The initial version of perfmon.c was written by - * Ganesh Venkitachalam, IBM Corp. - * - * Then it was modified for perfmon-1.x by Stephane Eranian and - * David Mosberger, Hewlett Packard Co. - * - * Version Perfmon-2.x is a rewrite of perfmon-1.x - * by Stephane Eranian, Hewlett Packard Co. 
- * - * Copyright (C) 1999-2005 Hewlett Packard Co - * Stephane Eranian <eranian@hpl.hp.com> - * David Mosberger-Tang <davidm@hpl.hp.com> - * - * More information about perfmon available at: - * http://www.hpl.hp.com/research/linux/perfmon - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/sched/task.h> -#include <linux/sched/task_stack.h> -#include <linux/interrupt.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> -#include <linux/sysctl.h> -#include <linux/list.h> -#include <linux/file.h> -#include <linux/poll.h> -#include <linux/vfs.h> -#include <linux/smp.h> -#include <linux/pagemap.h> -#include <linux/mount.h> -#include <linux/pseudo_fs.h> -#include <linux/bitops.h> -#include <linux/capability.h> -#include <linux/rcupdate.h> -#include <linux/completion.h> -#include <linux/tracehook.h> -#include <linux/slab.h> -#include <linux/cpu.h> - -#include <asm/errno.h> -#include <asm/intrinsics.h> -#include <asm/page.h> -#include <asm/perfmon.h> -#include <asm/processor.h> -#include <asm/signal.h> -#include <linux/uaccess.h> -#include <asm/delay.h> - -#include "irq.h" - -#ifdef CONFIG_PERFMON -/* - * perfmon context state - */ -#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */ -#define PFM_CTX_LOADED 2 /* context is loaded onto a task */ -#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */ -#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */ - -#define PFM_INVALID_ACTIVATION (~0UL) - -#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */ -#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */ - -/* - * depth of message queue - */ -#define PFM_MAX_MSGS 32 -#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail) - -/* - * type of a PMU register (bitmask). 
- * bitmask structure: - * bit0 : register implemented - * bit1 : end marker - * bit2-3 : reserved - * bit4 : pmc has pmc.pm - * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter - * bit6-7 : register type - * bit8-31: reserved - */ -#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */ -#define PFM_REG_IMPL 0x1 /* register implemented */ -#define PFM_REG_END 0x2 /* end marker */ -#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */ -#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */ -#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */ -#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */ -#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */ - -#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END) -#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END) - -#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY) - -/* i assumed unsigned */ -#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL)) -#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL)) - -/* XXX: these assume that register i is implemented */ -#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) -#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) -#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR) -#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL) - -#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value -#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask -#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0] -#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0] - -#define PFM_NUM_IBRS IA64_NUM_DBG_REGS -#define PFM_NUM_DBRS IA64_NUM_DBG_REGS - -#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0) -#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling) -#define PFM_CTX_TASK(h) (h)->ctx_task - -#define PMU_PMC_OI 5 /* position of pmc.oi bit */ - -/* XXX: does not support more than 64 PMDs */ -#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask) -#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL) - -#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask) - -#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64) -#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64) -#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1) -#define PFM_CODE_RR 0 /* requesting code range restriction */ -#define PFM_DATA_RR 1 /* requestion data range restriction */ - -#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v) -#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v) -#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info) - -#define RDEP(x) (1UL<<(x)) - -/* - * context protection macros - * in SMP: - * - we need to protect against CPU concurrency (spin_lock) - * - we need to protect against PMU overflow interrupts (local_irq_disable) - * in UP: - * - we need to protect against PMU overflow interrupts (local_irq_disable) - * - * spin_lock_irqsave()/spin_unlock_irqrestore(): - * in SMP: local_irq_disable + spin_lock - * in UP : local_irq_disable - * - * spin_lock()/spin_lock(): - * in UP : removed automatically - * in SMP: protect 
against context accesses from other CPU. interrupts - * are not masked. This is useful for the PMU interrupt handler - * because we know we will not get PMU concurrency in that code. - */ -#define PROTECT_CTX(c, f) \ - do { \ - DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \ - spin_lock_irqsave(&(c)->ctx_lock, f); \ - DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \ - } while(0) - -#define UNPROTECT_CTX(c, f) \ - do { \ - DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \ - spin_unlock_irqrestore(&(c)->ctx_lock, f); \ - } while(0) - -#define PROTECT_CTX_NOPRINT(c, f) \ - do { \ - spin_lock_irqsave(&(c)->ctx_lock, f); \ - } while(0) - - -#define UNPROTECT_CTX_NOPRINT(c, f) \ - do { \ - spin_unlock_irqrestore(&(c)->ctx_lock, f); \ - } while(0) - - -#define PROTECT_CTX_NOIRQ(c) \ - do { \ - spin_lock(&(c)->ctx_lock); \ - } while(0) - -#define UNPROTECT_CTX_NOIRQ(c) \ - do { \ - spin_unlock(&(c)->ctx_lock); \ - } while(0) - - -#ifdef CONFIG_SMP - -#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number) -#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++ -#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION() - -#else /* !CONFIG_SMP */ -#define SET_ACTIVATION(t) do {} while(0) -#define GET_ACTIVATION(t) do {} while(0) -#define INC_ACTIVATION(t) do {} while(0) -#endif /* CONFIG_SMP */ - -#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0) -#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner) -#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx) - -#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g) -#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g) - -#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0) - -/* - * cmp0 must be the value of pmc0 - */ -#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL) - -#define PFMFS_MAGIC 0xa0b4d889 - -/* - * debugging - */ -#define PFM_DEBUGGING 1 -#ifdef PFM_DEBUGGING -#define DPRINT(a) \ - do { \ - if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ - } while (0) - -#define DPRINT_ovfl(a) \ - do { \ - if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ - } while (0) -#endif - -/* - * 64-bit software counter structure - * - * the next_reset_type is applied to the next call to pfm_reset_regs() - */ -typedef struct { - unsigned long val; /* virtual 64bit counter value */ - unsigned long lval; /* last reset value */ - unsigned long long_reset; /* reset value on sampling overflow */ - unsigned long short_reset; /* reset value on overflow */ - unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */ - unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */ - unsigned long seed; /* seed for random-number generator */ - unsigned long mask; /* mask for random-number generator */ - unsigned int flags; /* notify/do not notify */ - unsigned long eventid; /* overflow event identifier */ -} pfm_counter_t; - -/* - * context flags - */ -typedef struct { - unsigned int block:1; /* when 1, task will blocked on user notifications */ - unsigned int system:1; /* do system wide monitoring */ - unsigned int using_dbreg:1; /* using range restrictions (debug registers) */ - unsigned int 
is_sampling:1; /* true if using a custom format */ - unsigned int excl_idle:1; /* exclude idle task in system wide session */ - unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */ - unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */ - unsigned int no_msg:1; /* no message sent on overflow */ - unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */ - unsigned int reserved:22; -} pfm_context_flags_t; - -#define PFM_TRAP_REASON_NONE 0x0 /* default value */ -#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */ -#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */ - - -/* - * perfmon context: encapsulates all the state of a monitoring session - */ - -typedef struct pfm_context { - spinlock_t ctx_lock; /* context protection */ - - pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */ - unsigned int ctx_state; /* state: active/inactive (no bitfield) */ - - struct task_struct *ctx_task; /* task to which context is attached */ - - unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */ - - struct completion ctx_restart_done; /* use for blocking notification mode */ - - unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */ - unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */ - unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */ - - unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */ - unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */ - unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */ - - unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */ - - unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */ - unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */ - unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */ - unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */ - - pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */ - - unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */ - unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */ - - unsigned long ctx_saved_psr_up; /* only contains psr.up value */ - - unsigned long ctx_last_activation; /* context last activation number for last_cpu */ - unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */ - unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */ - - int ctx_fd; /* file descriptor used my this context */ - pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */ - - pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */ - void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */ - unsigned long ctx_smpl_size; /* size of sampling buffer */ - void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */ - - wait_queue_head_t ctx_msgq_wait; - pfm_msg_t ctx_msgq[PFM_MAX_MSGS]; - int ctx_msgq_head; - int ctx_msgq_tail; - struct fasync_struct *ctx_async_queue; - - wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */ -} pfm_context_t; - -/* - * magic number used to verify that structure is really - * a perfmon context - */ -#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops) - -#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context) - -#ifdef CONFIG_SMP -#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v) -#define GET_LAST_CPU(ctx) 
(ctx)->ctx_last_cpu -#else -#define SET_LAST_CPU(ctx, v) do {} while(0) -#define GET_LAST_CPU(ctx) do {} while(0) -#endif - - -#define ctx_fl_block ctx_flags.block -#define ctx_fl_system ctx_flags.system -#define ctx_fl_using_dbreg ctx_flags.using_dbreg -#define ctx_fl_is_sampling ctx_flags.is_sampling -#define ctx_fl_excl_idle ctx_flags.excl_idle -#define ctx_fl_going_zombie ctx_flags.going_zombie -#define ctx_fl_trap_reason ctx_flags.trap_reason -#define ctx_fl_no_msg ctx_flags.no_msg -#define ctx_fl_can_restart ctx_flags.can_restart - -#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0); -#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking - -/* - * global information about all sessions - * mostly used to synchronize between system wide and per-process - */ -typedef struct { - spinlock_t pfs_lock; /* lock the structure */ - - unsigned int pfs_task_sessions; /* number of per task sessions */ - unsigned int pfs_sys_sessions; /* number of per system wide sessions */ - unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */ - unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */ - struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */ -} pfm_session_t; - -/* - * information about a PMC or PMD. - * dep_pmd[]: a bitmask of dependent PMD registers - * dep_pmc[]: a bitmask of dependent PMC registers - */ -typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); -typedef struct { - unsigned int type; - int pm_pos; - unsigned long default_value; /* power-on default value */ - unsigned long reserved_mask; /* bitmask of reserved bits */ - pfm_reg_check_t read_check; - pfm_reg_check_t write_check; - unsigned long dep_pmd[4]; - unsigned long dep_pmc[4]; -} pfm_reg_desc_t; - -/* assume cnum is a valid monitor */ -#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1) - -/* - * This structure is initialized at boot time and contains - * a description of the PMU main characteristics. 
- * - * If the probe function is defined, detection is based - * on its return value: - * - 0 means recognized PMU - * - anything else means not supported - * When the probe function is not defined, then the pmu_family field - * is used and it must match the host CPU family such that: - * - cpu->family & config->pmu_family != 0 - */ -typedef struct { - unsigned long ovfl_val; /* overflow value for counters */ - - pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */ - pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */ - - unsigned int num_pmcs; /* number of PMCS: computed at init time */ - unsigned int num_pmds; /* number of PMDS: computed at init time */ - unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */ - unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */ - - char *pmu_name; /* PMU family name */ - unsigned int pmu_family; /* cpuid family pattern used to identify pmu */ - unsigned int flags; /* pmu specific flags */ - unsigned int num_ibrs; /* number of IBRS: computed at init time */ - unsigned int num_dbrs; /* number of DBRS: computed at init time */ - unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */ - int (*probe)(void); /* customized probe routine */ - unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */ -} pmu_config_t; -/* - * PMU specific flags - */ -#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */ - -/* - * debug register related type definitions - */ -typedef struct { - unsigned long ibr_mask:56; - unsigned long ibr_plm:4; - unsigned long ibr_ig:3; - unsigned long ibr_x:1; -} ibr_mask_reg_t; - -typedef struct { - unsigned long dbr_mask:56; - unsigned long dbr_plm:4; - unsigned long dbr_ig:2; - unsigned long dbr_w:1; - unsigned long dbr_r:1; -} dbr_mask_reg_t; - -typedef union { - unsigned long val; - ibr_mask_reg_t ibr; - dbr_mask_reg_t dbr; -} dbreg_t; - - -/* - * perfmon command descriptions - */ -typedef struct { - int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); - char *cmd_name; - int cmd_flags; - unsigned int cmd_narg; - size_t cmd_argsize; - int (*cmd_getsize)(void *arg, size_t *sz); -} pfm_cmd_desc_t; - -#define PFM_CMD_FD 0x01 /* command requires a file descriptor */ -#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */ -#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */ -#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */ - - -#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name -#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ) -#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW) -#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD) -#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP) - -#define PFM_CMD_ARG_MANY -1 /* cannot be zero */ - -typedef struct { - unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ - unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ - unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ - unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */ - unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */ - unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */ - unsigned long pfm_smpl_handler_calls; - unsigned long 
pfm_smpl_handler_cycles; - char pad[SMP_CACHE_BYTES] ____cacheline_aligned; -} pfm_stats_t; - -/* - * perfmon internal variables - */ -static pfm_stats_t pfm_stats[NR_CPUS]; -static pfm_session_t pfm_sessions; /* global sessions information */ - -static DEFINE_SPINLOCK(pfm_alt_install_check); -static pfm_intr_handler_desc_t *pfm_alt_intr_handler; - -static struct proc_dir_entry *perfmon_dir; -static pfm_uuid_t pfm_null_uuid = {0,}; - -static spinlock_t pfm_buffer_fmt_lock; -static LIST_HEAD(pfm_buffer_fmt_list); - -static pmu_config_t *pmu_conf; - -/* sysctl() controls */ -pfm_sysctl_t pfm_sysctl; -EXPORT_SYMBOL(pfm_sysctl); - -static struct ctl_table pfm_ctl_table[] = { - { - .procname = "debug", - .data = &pfm_sysctl.debug, - .maxlen = sizeof(int), - .mode = 0666, - .proc_handler = proc_dointvec, - }, - { - .procname = "debug_ovfl", - .data = &pfm_sysctl.debug_ovfl, - .maxlen = sizeof(int), - .mode = 0666, - .proc_handler = proc_dointvec, - }, - { - .procname = "fastctxsw", - .data = &pfm_sysctl.fastctxsw, - .maxlen = sizeof(int), - .mode = 0600, - .proc_handler = proc_dointvec, - }, - { - .procname = "expert_mode", - .data = &pfm_sysctl.expert_mode, - .maxlen = sizeof(int), - .mode = 0600, - .proc_handler = proc_dointvec, - }, - {} -}; -static struct ctl_table pfm_sysctl_dir[] = { - { - .procname = "perfmon", - .mode = 0555, - .child = pfm_ctl_table, - }, - {} -}; -static struct ctl_table pfm_sysctl_root[] = { - { - .procname = "kernel", - .mode = 0555, - .child = pfm_sysctl_dir, - }, - {} -}; -static struct ctl_table_header *pfm_sysctl_header; - -static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); - -#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v) -#define pfm_get_cpu_data(a,b) per_cpu(a, b) - -static inline void -pfm_put_task(struct task_struct *task) -{ - if (task != current) put_task_struct(task); -} - -static inline unsigned long -pfm_protect_ctx_ctxsw(pfm_context_t *x) -{ - spin_lock(&(x)->ctx_lock); - return 0UL; -} - -static inline void -pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f) -{ - spin_unlock(&(x)->ctx_lock); -} - -/* forward declaration */ -static const struct dentry_operations pfmfs_dentry_operations; - -static int pfmfs_init_fs_context(struct fs_context *fc) -{ - struct pseudo_fs_context *ctx = init_pseudo(fc, PFMFS_MAGIC); - if (!ctx) - return -ENOMEM; - ctx->dops = &pfmfs_dentry_operations; - return 0; -} - -static struct file_system_type pfm_fs_type = { - .name = "pfmfs", - .init_fs_context = pfmfs_init_fs_context, - .kill_sb = kill_anon_super, -}; -MODULE_ALIAS_FS("pfmfs"); - -DEFINE_PER_CPU(unsigned long, pfm_syst_info); -DEFINE_PER_CPU(struct task_struct *, pmu_owner); -DEFINE_PER_CPU(pfm_context_t *, pmu_ctx); -DEFINE_PER_CPU(unsigned long, pmu_activation_number); -EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info); - - -/* forward declaration */ -static const struct file_operations pfm_file_ops; - -/* - * forward declarations - */ -#ifndef CONFIG_SMP -static void pfm_lazy_save_regs (struct task_struct *ta); -#endif - -void dump_pmu_state(const char *); -static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); - -#include "perfmon_itanium.h" -#include "perfmon_mckinley.h" -#include "perfmon_montecito.h" -#include "perfmon_generic.h" - -static pmu_config_t *pmu_confs[]={ - &pmu_conf_mont, - &pmu_conf_mck, - &pmu_conf_ita, - &pmu_conf_gen, /* must be last */ - NULL -}; - - -static int pfm_end_notify_user(pfm_context_t *ctx); - -static inline void -pfm_clear_psr_pp(void) 
-{ - ia64_rsm(IA64_PSR_PP); - ia64_srlz_i(); -} - -static inline void -pfm_set_psr_pp(void) -{ - ia64_ssm(IA64_PSR_PP); - ia64_srlz_i(); -} - -static inline void -pfm_clear_psr_up(void) -{ - ia64_rsm(IA64_PSR_UP); - ia64_srlz_i(); -} - -static inline void -pfm_set_psr_up(void) -{ - ia64_ssm(IA64_PSR_UP); - ia64_srlz_i(); -} - -static inline unsigned long -pfm_get_psr(void) -{ - unsigned long tmp; - tmp = ia64_getreg(_IA64_REG_PSR); - ia64_srlz_i(); - return tmp; -} - -static inline void -pfm_set_psr_l(unsigned long val) -{ - ia64_setreg(_IA64_REG_PSR_L, val); - ia64_srlz_i(); -} - -static inline void -pfm_freeze_pmu(void) -{ - ia64_set_pmc(0,1UL); - ia64_srlz_d(); -} - -static inline void -pfm_unfreeze_pmu(void) -{ - ia64_set_pmc(0,0UL); - ia64_srlz_d(); -} - -static inline void -pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs) -{ - int i; - - for (i=0; i < nibrs; i++) { - ia64_set_ibr(i, ibrs[i]); - ia64_dv_serialize_instruction(); - } - ia64_srlz_i(); -} - -static inline void -pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs) -{ - int i; - - for (i=0; i < ndbrs; i++) { - ia64_set_dbr(i, dbrs[i]); - ia64_dv_serialize_data(); - } - ia64_srlz_d(); -} - -/* - * PMD[i] must be a counter. no check is made - */ -static inline unsigned long -pfm_read_soft_counter(pfm_context_t *ctx, int i) -{ - return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val); -} - -/* - * PMD[i] must be a counter. no check is made - */ -static inline void -pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val) -{ - unsigned long ovfl_val = pmu_conf->ovfl_val; - - ctx->ctx_pmds[i].val = val & ~ovfl_val; - /* - * writing to unimplemented part is ignore, so we do not need to - * mask off top part - */ - ia64_set_pmd(i, val & ovfl_val); -} - -static pfm_msg_t * -pfm_get_new_msg(pfm_context_t *ctx) -{ - int idx, next; - - next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS; - - DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); - if (next == ctx->ctx_msgq_head) return NULL; - - idx = ctx->ctx_msgq_tail; - ctx->ctx_msgq_tail = next; - - DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx)); - - return ctx->ctx_msgq+idx; -} - -static pfm_msg_t * -pfm_get_next_msg(pfm_context_t *ctx) -{ - pfm_msg_t *msg; - - DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); - - if (PFM_CTXQ_EMPTY(ctx)) return NULL; - - /* - * get oldest message - */ - msg = ctx->ctx_msgq+ctx->ctx_msgq_head; - - /* - * and move forward - */ - ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS; - - DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type)); - - return msg; -} - -static void -pfm_reset_msgq(pfm_context_t *ctx) -{ - ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; - DPRINT(("ctx=%p msgq reset\n", ctx)); -} - -static pfm_context_t * -pfm_context_alloc(int ctx_flags) -{ - pfm_context_t *ctx; - - /* - * allocate context descriptor - * must be able to free with interrupts disabled - */ - ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL); - if (ctx) { - DPRINT(("alloc ctx @%p\n", ctx)); - - /* - * init context protection lock - */ - spin_lock_init(&ctx->ctx_lock); - - /* - * context is unloaded - */ - ctx->ctx_state = PFM_CTX_UNLOADED; - - /* - * initialization of context's flags - */ - ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0; - ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 
1: 0; - ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0; - /* - * will move to set properties - * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; - */ - - /* - * init restart semaphore to locked - */ - init_completion(&ctx->ctx_restart_done); - - /* - * activation is used in SMP only - */ - ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; - SET_LAST_CPU(ctx, -1); - - /* - * initialize notification message queue - */ - ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; - init_waitqueue_head(&ctx->ctx_msgq_wait); - init_waitqueue_head(&ctx->ctx_zombieq); - - } - return ctx; -} - -static void -pfm_context_free(pfm_context_t *ctx) -{ - if (ctx) { - DPRINT(("free ctx @%p\n", ctx)); - kfree(ctx); - } -} - -static void -pfm_mask_monitoring(struct task_struct *task) -{ - pfm_context_t *ctx = PFM_GET_CTX(task); - unsigned long mask, val, ovfl_mask; - int i; - - DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task))); - - ovfl_mask = pmu_conf->ovfl_val; - /* - * monitoring can only be masked as a result of a valid - * counter overflow. In UP, it means that the PMU still - * has an owner. Note that the owner can be different - * from the current task. However the PMU state belongs - * to the owner. - * In SMP, a valid overflow only happens when task is - * current. Therefore if we come here, we know that - * the PMU state belongs to the current task, therefore - * we can access the live registers. - * - * So in both cases, the live register contains the owner's - * state. We can ONLY touch the PMU registers and NOT the PSR. - * - * As a consequence to this call, the ctx->th_pmds[] array - * contains stale information which must be ignored - * when context is reloaded AND monitoring is active (see - * pfm_restart). - */ - mask = ctx->ctx_used_pmds[0]; - for (i = 0; mask; i++, mask>>=1) { - /* skip non used pmds */ - if ((mask & 0x1) == 0) continue; - val = ia64_get_pmd(i); - - if (PMD_IS_COUNTING(i)) { - /* - * we rebuild the full 64 bit value of the counter - */ - ctx->ctx_pmds[i].val += (val & ovfl_mask); - } else { - ctx->ctx_pmds[i].val = val; - } - DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n", - i, - ctx->ctx_pmds[i].val, - val & ovfl_mask)); - } - /* - * mask monitoring by setting the privilege level to 0 - * we cannot use psr.pp/psr.up for this, it is controlled by - * the user - * - * if task is current, modify actual registers, otherwise modify - * thread save state, i.e., what will be restored in pfm_load_regs() - */ - mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; - for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { - if ((mask & 0x1) == 0UL) continue; - ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL); - ctx->th_pmcs[i] &= ~0xfUL; - DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i])); - } - /* - * make all of this visible - */ - ia64_srlz_d(); -} - -/* - * must always be done with task == current - * - * context must be in MASKED state when calling - */ -static void -pfm_restore_monitoring(struct task_struct *task) -{ - pfm_context_t *ctx = PFM_GET_CTX(task); - unsigned long mask, ovfl_mask; - unsigned long psr, val; - int i, is_system; - - is_system = ctx->ctx_fl_system; - ovfl_mask = pmu_conf->ovfl_val; - - if (task != current) { - printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current)); - return; - } - if (ctx->ctx_state != PFM_CTX_MASKED) { - printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__, - task_pid_nr(task), task_pid_nr(current), ctx->ctx_state); - return; - } - 
psr = pfm_get_psr(); - /* - * monitoring is masked via the PMC. - * As we restore their value, we do not want each counter to - * restart right away. We stop monitoring using the PSR, - * restore the PMC (and PMD) and then re-establish the psr - * as it was. Note that there can be no pending overflow at - * this point, because monitoring was MASKED. - * - * system-wide session are pinned and self-monitoring - */ - if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { - /* disable dcr pp */ - ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); - pfm_clear_psr_pp(); - } else { - pfm_clear_psr_up(); - } - /* - * first, we restore the PMD - */ - mask = ctx->ctx_used_pmds[0]; - for (i = 0; mask; i++, mask>>=1) { - /* skip non used pmds */ - if ((mask & 0x1) == 0) continue; - - if (PMD_IS_COUNTING(i)) { - /* - * we split the 64bit value according to - * counter width - */ - val = ctx->ctx_pmds[i].val & ovfl_mask; - ctx->ctx_pmds[i].val &= ~ovfl_mask; - } else { - val = ctx->ctx_pmds[i].val; - } - ia64_set_pmd(i, val); - - DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n", - i, - ctx->ctx_pmds[i].val, - val)); - } - /* - * restore the PMCs - */ - mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; - for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { - if ((mask & 0x1) == 0UL) continue; - ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; - ia64_set_pmc(i, ctx->th_pmcs[i]); - DPRINT(("[%d] pmc[%d]=0x%lx\n", - task_pid_nr(task), i, ctx->th_pmcs[i])); - } - ia64_srlz_d(); - - /* - * must restore DBR/IBR because could be modified while masked - * XXX: need to optimize - */ - if (ctx->ctx_fl_using_dbreg) { - pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); - pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); - } - - /* - * now restore PSR - */ - if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { - /* enable dcr pp */ - ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); - ia64_srlz_i(); - } - pfm_set_psr_l(psr); -} - -static inline void -pfm_save_pmds(unsigned long *pmds, unsigned long mask) -{ - int i; - - ia64_srlz_d(); - - for (i=0; mask; i++, mask>>=1) { - if (mask & 0x1) pmds[i] = ia64_get_pmd(i); - } -} - -/* - * reload from thread state (used for ctxw only) - */ -static inline void -pfm_restore_pmds(unsigned long *pmds, unsigned long mask) -{ - int i; - unsigned long val, ovfl_val = pmu_conf->ovfl_val; - - for (i=0; mask; i++, mask>>=1) { - if ((mask & 0x1) == 0) continue; - val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i]; - ia64_set_pmd(i, val); - } - ia64_srlz_d(); -} - -/* - * propagate PMD from context to thread-state - */ -static inline void -pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) -{ - unsigned long ovfl_val = pmu_conf->ovfl_val; - unsigned long mask = ctx->ctx_all_pmds[0]; - unsigned long val; - int i; - - DPRINT(("mask=0x%lx\n", mask)); - - for (i=0; mask; i++, mask>>=1) { - - val = ctx->ctx_pmds[i].val; - - /* - * We break up the 64 bit value into 2 pieces - * the lower bits go to the machine state in the - * thread (will be reloaded on ctxsw in). - * The upper part stays in the soft-counter. 
- */ - if (PMD_IS_COUNTING(i)) { - ctx->ctx_pmds[i].val = val & ~ovfl_val; - val &= ovfl_val; - } - ctx->th_pmds[i] = val; - - DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n", - i, - ctx->th_pmds[i], - ctx->ctx_pmds[i].val)); - } -} - -/* - * propagate PMC from context to thread-state - */ -static inline void -pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx) -{ - unsigned long mask = ctx->ctx_all_pmcs[0]; - int i; - - DPRINT(("mask=0x%lx\n", mask)); - - for (i=0; mask; i++, mask>>=1) { - /* masking 0 with ovfl_val yields 0 */ - ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; - DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i])); - } -} - - - -static inline void -pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask) -{ - int i; - - for (i=0; mask; i++, mask>>=1) { - if ((mask & 0x1) == 0) continue; - ia64_set_pmc(i, pmcs[i]); - } - ia64_srlz_d(); -} - -static inline int -pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b) -{ - return memcmp(a, b, sizeof(pfm_uuid_t)); -} - -static inline int -pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs) -{ - int ret = 0; - if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs); - return ret; -} - -static inline int -pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size) -{ - int ret = 0; - if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size); - return ret; -} - - -static inline int -pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, - int cpu, void *arg) -{ - int ret = 0; - if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg); - return ret; -} - -static inline int -pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags, - int cpu, void *arg) -{ - int ret = 0; - if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg); - return ret; -} - -static inline int -pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) -{ - int ret = 0; - if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs); - return ret; -} - -static inline int -pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) -{ - int ret = 0; - if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs); - return ret; -} - -static pfm_buffer_fmt_t * -__pfm_find_buffer_fmt(pfm_uuid_t uuid) -{ - struct list_head * pos; - pfm_buffer_fmt_t * entry; - - list_for_each(pos, &pfm_buffer_fmt_list) { - entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); - if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0) - return entry; - } - return NULL; -} - -/* - * find a buffer format based on its uuid - */ -static pfm_buffer_fmt_t * -pfm_find_buffer_fmt(pfm_uuid_t uuid) -{ - pfm_buffer_fmt_t * fmt; - spin_lock(&pfm_buffer_fmt_lock); - fmt = __pfm_find_buffer_fmt(uuid); - spin_unlock(&pfm_buffer_fmt_lock); - return fmt; -} - -int -pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt) -{ - int ret = 0; - - /* some sanity checks */ - if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL; - - /* we need at least a handler */ - if (fmt->fmt_handler == NULL) return -EINVAL; - - /* - * XXX: need check validity of fmt_arg_size - */ - - spin_lock(&pfm_buffer_fmt_lock); - - if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) { - printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name); - ret = -EBUSY; - 
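pfm_copy_pmds() above is the other half of the counter virtualization: before a context is loaded, the 64-bit software value is split so that only the bits the hardware can hold go into the thread's PMD image, while the upper part stays in the soft counter; pfm_read_pmds() later adds the two halves back together. A self-contained sketch of both directions, assuming an illustrative 47-bit ovfl_val (the real width comes from pmu_conf->ovfl_val):

#include <stdio.h>
#include <stdint.h>

/* assumed hardware counter width; the real value comes from pmu_conf->ovfl_val */
static const uint64_t ovfl_val = (1ULL << 47) - 1;

/* split a 64-bit soft counter: low bits go to the hardware PMD image */
static void split_counter(uint64_t *soft, uint64_t *hw)
{
	*hw   = *soft & ovfl_val;	/* what th_pmds[]/the live PMD will hold */
	*soft = *soft & ~ovfl_val;	/* upper part kept in the soft counter */
}

/* rebuild the full value the way the read path does */
static uint64_t read_counter(uint64_t soft, uint64_t hw)
{
	return soft + (hw & ovfl_val);
}

int main(void)
{
	uint64_t soft = 0x0000900000001234ULL;	/* arbitrary 64-bit count */
	uint64_t hw;

	split_counter(&soft, &hw);
	printf("soft=0x%llx hw=0x%llx rebuilt=0x%llx\n",
	       (unsigned long long)soft, (unsigned long long)hw,
	       (unsigned long long)read_counter(soft, hw));
	return 0;
}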
goto out; - } - list_add(&fmt->fmt_list, &pfm_buffer_fmt_list); - printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name); - -out: - spin_unlock(&pfm_buffer_fmt_lock); - return ret; -} -EXPORT_SYMBOL(pfm_register_buffer_fmt); - -int -pfm_unregister_buffer_fmt(pfm_uuid_t uuid) -{ - pfm_buffer_fmt_t *fmt; - int ret = 0; - - spin_lock(&pfm_buffer_fmt_lock); - - fmt = __pfm_find_buffer_fmt(uuid); - if (!fmt) { - printk(KERN_ERR "perfmon: cannot unregister format, not found\n"); - ret = -EINVAL; - goto out; - } - list_del_init(&fmt->fmt_list); - printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name); - -out: - spin_unlock(&pfm_buffer_fmt_lock); - return ret; - -} -EXPORT_SYMBOL(pfm_unregister_buffer_fmt); - -static int -pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) -{ - unsigned long flags; - /* - * validity checks on cpu_mask have been done upstream - */ - LOCK_PFS(flags); - - DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", - pfm_sessions.pfs_sys_sessions, - pfm_sessions.pfs_task_sessions, - pfm_sessions.pfs_sys_use_dbregs, - is_syswide, - cpu)); - - if (is_syswide) { - /* - * cannot mix system wide and per-task sessions - */ - if (pfm_sessions.pfs_task_sessions > 0UL) { - DPRINT(("system wide not possible, %u conflicting task_sessions\n", - pfm_sessions.pfs_task_sessions)); - goto abort; - } - - if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict; - - DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id())); - - pfm_sessions.pfs_sys_session[cpu] = task; - - pfm_sessions.pfs_sys_sessions++ ; - - } else { - if (pfm_sessions.pfs_sys_sessions) goto abort; - pfm_sessions.pfs_task_sessions++; - } - - DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", - pfm_sessions.pfs_sys_sessions, - pfm_sessions.pfs_task_sessions, - pfm_sessions.pfs_sys_use_dbregs, - is_syswide, - cpu)); - - /* - * Force idle() into poll mode - */ - cpu_idle_poll_ctrl(true); - - UNLOCK_PFS(flags); - - return 0; - -error_conflict: - DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n", - task_pid_nr(pfm_sessions.pfs_sys_session[cpu]), - cpu)); -abort: - UNLOCK_PFS(flags); - - return -EBUSY; - -} - -static int -pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) -{ - unsigned long flags; - /* - * validity checks on cpu_mask have been done upstream - */ - LOCK_PFS(flags); - - DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", - pfm_sessions.pfs_sys_sessions, - pfm_sessions.pfs_task_sessions, - pfm_sessions.pfs_sys_use_dbregs, - is_syswide, - cpu)); - - - if (is_syswide) { - pfm_sessions.pfs_sys_session[cpu] = NULL; - /* - * would not work with perfmon+more than one bit in cpu_mask - */ - if (ctx && ctx->ctx_fl_using_dbreg) { - if (pfm_sessions.pfs_sys_use_dbregs == 0) { - printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx); - } else { - pfm_sessions.pfs_sys_use_dbregs--; - } - } - pfm_sessions.pfs_sys_sessions--; - } else { - pfm_sessions.pfs_task_sessions--; - } - DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", - pfm_sessions.pfs_sys_sessions, - pfm_sessions.pfs_task_sessions, - pfm_sessions.pfs_sys_use_dbregs, - is_syswide, - cpu)); - - /* Undo forced polling. Last session reenables pal_halt */ - cpu_idle_poll_ctrl(false); - - UNLOCK_PFS(flags); - - return 0; -} - -/* - * removes virtual mapping of the sampling buffer. 
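pfm_reserve_session() above enforces two global rules under the pfm_sessions lock: per-task and system-wide sessions never coexist, and each CPU hosts at most one system-wide session. A minimal sketch of that bookkeeping follows (locking and the cpu_idle_poll_ctrl() side effect are omitted, and NR_CPUS is an illustrative stand-in):

#include <stdio.h>

#define NR_CPUS 4	/* illustrative */

static unsigned int sys_sessions, task_sessions;
static int sys_session_cpu[NR_CPUS];	/* 1 if CPU already has a system-wide session */

/* returns 0 on success, -1 if the reservation would violate the rules */
static int reserve_session(int is_syswide, unsigned int cpu)
{
	if (is_syswide) {
		if (task_sessions > 0)
			return -1;	/* cannot mix with per-task sessions */
		if (sys_session_cpu[cpu])
			return -1;	/* CPU already monitored system-wide */
		sys_session_cpu[cpu] = 1;
		sys_sessions++;
	} else {
		if (sys_sessions > 0)
			return -1;	/* cannot mix with system-wide sessions */
		task_sessions++;
	}
	return 0;
}

int main(void)
{
	printf("syswide on cpu0: %d\n", reserve_session(1, 0));
	printf("per-task:        %d\n", reserve_session(0, 0));	/* rejected */
	return 0;
}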
- * IMPORTANT: cannot be called with interrupts disable, e.g. inside - * a PROTECT_CTX() section. - */ -static int -pfm_remove_smpl_mapping(void *vaddr, unsigned long size) -{ - struct task_struct *task = current; - int r; - - /* sanity checks */ - if (task->mm == NULL || size == 0UL || vaddr == NULL) { - printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm); - return -EINVAL; - } - - DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size)); - - /* - * does the actual unmapping - */ - r = vm_munmap((unsigned long)vaddr, size); - - if (r !=0) { - printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); - } - - DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r)); - - return 0; -} - -/* - * free actual physical storage used by sampling buffer - */ -#if 0 -static int -pfm_free_smpl_buffer(pfm_context_t *ctx) -{ - pfm_buffer_fmt_t *fmt; - - if (ctx->ctx_smpl_hdr == NULL) goto invalid_free; - - /* - * we won't use the buffer format anymore - */ - fmt = ctx->ctx_buf_fmt; - - DPRINT(("sampling buffer @%p size %lu vaddr=%p\n", - ctx->ctx_smpl_hdr, - ctx->ctx_smpl_size, - ctx->ctx_smpl_vaddr)); - - pfm_buf_fmt_exit(fmt, current, NULL, NULL); - - /* - * free the buffer - */ - vfree(ctx->ctx_smpl_hdr); - - ctx->ctx_smpl_hdr = NULL; - ctx->ctx_smpl_size = 0UL; - - return 0; - -invalid_free: - printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current)); - return -EINVAL; -} -#endif - -static inline void -pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt) -{ - if (fmt == NULL) return; - - pfm_buf_fmt_exit(fmt, current, NULL, NULL); - -} - -/* - * pfmfs should _never_ be mounted by userland - too much of security hassle, - * no real gain from having the whole whorehouse mounted. So we don't need - * any operations on the root directory. However, we need a non-trivial - * d_name - pfm: will go nicely and kill the special-casing in procfs. 
- */ -static struct vfsmount *pfmfs_mnt __read_mostly; - -static int __init -init_pfm_fs(void) -{ - int err = register_filesystem(&pfm_fs_type); - if (!err) { - pfmfs_mnt = kern_mount(&pfm_fs_type); - err = PTR_ERR(pfmfs_mnt); - if (IS_ERR(pfmfs_mnt)) - unregister_filesystem(&pfm_fs_type); - else - err = 0; - } - return err; -} - -static ssize_t -pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) -{ - pfm_context_t *ctx; - pfm_msg_t *msg; - ssize_t ret; - unsigned long flags; - DECLARE_WAITQUEUE(wait, current); - if (PFM_IS_FILE(filp) == 0) { - printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current)); - return -EINVAL; - } - - ctx = filp->private_data; - if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); - return -EINVAL; - } - - /* - * check even when there is no message - */ - if (size < sizeof(pfm_msg_t)) { - DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t))); - return -EINVAL; - } - - PROTECT_CTX(ctx, flags); - - /* - * put ourselves on the wait queue - */ - add_wait_queue(&ctx->ctx_msgq_wait, &wait); - - - for(;;) { - /* - * check wait queue - */ - - set_current_state(TASK_INTERRUPTIBLE); - - DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); - - ret = 0; - if(PFM_CTXQ_EMPTY(ctx) == 0) break; - - UNPROTECT_CTX(ctx, flags); - - /* - * check non-blocking read - */ - ret = -EAGAIN; - if(filp->f_flags & O_NONBLOCK) break; - - /* - * check pending signals - */ - if(signal_pending(current)) { - ret = -EINTR; - break; - } - /* - * no message, so wait - */ - schedule(); - - PROTECT_CTX(ctx, flags); - } - DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret)); - set_current_state(TASK_RUNNING); - remove_wait_queue(&ctx->ctx_msgq_wait, &wait); - - if (ret < 0) goto abort; - - ret = -EINVAL; - msg = pfm_get_next_msg(ctx); - if (msg == NULL) { - printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current)); - goto abort_locked; - } - - DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); - - ret = -EFAULT; - if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); - -abort_locked: - UNPROTECT_CTX(ctx, flags); -abort: - return ret; -} - -static ssize_t -pfm_write(struct file *file, const char __user *ubuf, - size_t size, loff_t *ppos) -{ - DPRINT(("pfm_write called\n")); - return -EINVAL; -} - -static __poll_t -pfm_poll(struct file *filp, poll_table * wait) -{ - pfm_context_t *ctx; - unsigned long flags; - __poll_t mask = 0; - - if (PFM_IS_FILE(filp) == 0) { - printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current)); - return 0; - } - - ctx = filp->private_data; - if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); - return 0; - } - - - DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd)); - - poll_wait(filp, &ctx->ctx_msgq_wait, wait); - - PROTECT_CTX(ctx, flags); - - if (PFM_CTXQ_EMPTY(ctx) == 0) - mask = EPOLLIN | EPOLLRDNORM; - - UNPROTECT_CTX(ctx, flags); - - DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask)); - - return mask; -} - -static long -pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - DPRINT(("pfm_ioctl called\n")); - return -EINVAL; -} - -/* - * interrupt cannot be masked when coming here - */ -static inline int -pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on) -{ - int ret; - - ret = fasync_helper (fd, filp, on, 
&ctx->ctx_async_queue); - - DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n", - task_pid_nr(current), - fd, - on, - ctx->ctx_async_queue, ret)); - - return ret; -} - -static int -pfm_fasync(int fd, struct file *filp, int on) -{ - pfm_context_t *ctx; - int ret; - - if (PFM_IS_FILE(filp) == 0) { - printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current)); - return -EBADF; - } - - ctx = filp->private_data; - if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); - return -EBADF; - } - /* - * we cannot mask interrupts during this call because this may - * may go to sleep if memory is not readily avalaible. - * - * We are protected from the conetxt disappearing by the get_fd()/put_fd() - * done in caller. Serialization of this function is ensured by caller. - */ - ret = pfm_do_fasync(fd, filp, ctx, on); - - - DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n", - fd, - on, - ctx->ctx_async_queue, ret)); - - return ret; -} - -#ifdef CONFIG_SMP -/* - * this function is exclusively called from pfm_close(). - * The context is not protected at that time, nor are interrupts - * on the remote CPU. That's necessary to avoid deadlocks. - */ -static void -pfm_syswide_force_stop(void *info) -{ - pfm_context_t *ctx = (pfm_context_t *)info; - struct pt_regs *regs = task_pt_regs(current); - struct task_struct *owner; - unsigned long flags; - int ret; - - if (ctx->ctx_cpu != smp_processor_id()) { - printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n", - ctx->ctx_cpu, - smp_processor_id()); - return; - } - owner = GET_PMU_OWNER(); - if (owner != ctx->ctx_task) { - printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n", - smp_processor_id(), - task_pid_nr(owner), task_pid_nr(ctx->ctx_task)); - return; - } - if (GET_PMU_CTX() != ctx) { - printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n", - smp_processor_id(), - GET_PMU_CTX(), ctx); - return; - } - - DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task))); - /* - * the context is already protected in pfm_close(), we simply - * need to mask interrupts to avoid a PMU interrupt race on - * this CPU - */ - local_irq_save(flags); - - ret = pfm_context_unload(ctx, NULL, 0, regs); - if (ret) { - DPRINT(("context_unload returned %d\n", ret)); - } - - /* - * unmask interrupts, PMU interrupts are now spurious here - */ - local_irq_restore(flags); -} - -static void -pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) -{ - int ret; - - DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); - ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1); - DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); -} -#endif /* CONFIG_SMP */ - -/* - * called for each close(). Partially free resources. - * When caller is self-monitoring, the context is unloaded. 
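pfm_read() above follows the classic sleeping-reader pattern: put yourself on the wait queue, mark the task interruptible, re-check the message queue, and only then sleep, bailing out early for O_NONBLOCK or a pending signal. A rough user-space analogue using a condition variable is sketched below; pthreads stand in for the kernel's wait queues and TASK_INTERRUPTIBLE, and signal handling is left out:

#include <pthread.h>
#include <stdbool.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  msg_arrived = PTHREAD_COND_INITIALIZER;
static int msg_count;

/* returns 0 when a message is available, -EAGAIN for a non-blocking caller */
static int wait_for_message(bool nonblock)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	while (msg_count == 0) {		/* re-check after every wakeup */
		if (nonblock) {
			ret = -EAGAIN;
			break;
		}
		pthread_cond_wait(&msg_arrived, &lock);
	}
	if (ret == 0)
		msg_count--;			/* consume one message */
	pthread_mutex_unlock(&lock);
	return ret;
}

static void post_message(void)
{
	pthread_mutex_lock(&lock);
	msg_count++;
	pthread_cond_signal(&msg_arrived);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	post_message();
	return wait_for_message(true);	/* 0: a message was already queued */
}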
- */ -static int -pfm_flush(struct file *filp, fl_owner_t id) -{ - pfm_context_t *ctx; - struct task_struct *task; - struct pt_regs *regs; - unsigned long flags; - unsigned long smpl_buf_size = 0UL; - void *smpl_buf_vaddr = NULL; - int state, is_system; - - if (PFM_IS_FILE(filp) == 0) { - DPRINT(("bad magic for\n")); - return -EBADF; - } - - ctx = filp->private_data; - if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); - return -EBADF; - } - - /* - * remove our file from the async queue, if we use this mode. - * This can be done without the context being protected. We come - * here when the context has become unreachable by other tasks. - * - * We may still have active monitoring at this point and we may - * end up in pfm_overflow_handler(). However, fasync_helper() - * operates with interrupts disabled and it cleans up the - * queue. If the PMU handler is called prior to entering - * fasync_helper() then it will send a signal. If it is - * invoked after, it will find an empty queue and no - * signal will be sent. In both case, we are safe - */ - PROTECT_CTX(ctx, flags); - - state = ctx->ctx_state; - is_system = ctx->ctx_fl_system; - - task = PFM_CTX_TASK(ctx); - regs = task_pt_regs(task); - - DPRINT(("ctx_state=%d is_current=%d\n", - state, - task == current ? 1 : 0)); - - /* - * if state == UNLOADED, then task is NULL - */ - - /* - * we must stop and unload because we are losing access to the context. - */ - if (task == current) { -#ifdef CONFIG_SMP - /* - * the task IS the owner but it migrated to another CPU: that's bad - * but we must handle this cleanly. Unfortunately, the kernel does - * not provide a mechanism to block migration (while the context is loaded). - * - * We need to release the resource on the ORIGINAL cpu. - */ - if (is_system && ctx->ctx_cpu != smp_processor_id()) { - - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - /* - * keep context protected but unmask interrupt for IPI - */ - local_irq_restore(flags); - - pfm_syswide_cleanup_other_cpu(ctx); - - /* - * restore interrupt masking - */ - local_irq_save(flags); - - /* - * context is unloaded at this point - */ - } else -#endif /* CONFIG_SMP */ - { - - DPRINT(("forcing unload\n")); - /* - * stop and unload, returning with state UNLOADED - * and session unreserved. - */ - pfm_context_unload(ctx, NULL, 0, regs); - - DPRINT(("ctx_state=%d\n", ctx->ctx_state)); - } - } - - /* - * remove virtual mapping, if any, for the calling task. - * cannot reset ctx field until last user is calling close(). - * - * ctx_smpl_vaddr must never be cleared because it is needed - * by every task with access to the context - * - * When called from do_exit(), the mm context is gone already, therefore - * mm is NULL, i.e., the VMA is already gone and we do not have to - * do anything here - */ - if (ctx->ctx_smpl_vaddr && current->mm) { - smpl_buf_vaddr = ctx->ctx_smpl_vaddr; - smpl_buf_size = ctx->ctx_smpl_size; - } - - UNPROTECT_CTX(ctx, flags); - - /* - * if there was a mapping, then we systematically remove it - * at this point. Cannot be done inside critical section - * because some VM function reenables interrupts. - * - */ - if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size); - - return 0; -} -/* - * called either on explicit close() or from exit_files(). - * Only the LAST user of the file gets to this point, i.e., it is - * called only ONCE. 
- * - * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero - * (fput()),i.e, last task to access the file. Nobody else can access the - * file at this point. - * - * When called from exit_files(), the VMA has been freed because exit_mm() - * is executed before exit_files(). - * - * When called from exit_files(), the current task is not yet ZOMBIE but we - * flush the PMU state to the context. - */ -static int -pfm_close(struct inode *inode, struct file *filp) -{ - pfm_context_t *ctx; - struct task_struct *task; - struct pt_regs *regs; - DECLARE_WAITQUEUE(wait, current); - unsigned long flags; - unsigned long smpl_buf_size = 0UL; - void *smpl_buf_addr = NULL; - int free_possible = 1; - int state, is_system; - - DPRINT(("pfm_close called private=%p\n", filp->private_data)); - - if (PFM_IS_FILE(filp) == 0) { - DPRINT(("bad magic\n")); - return -EBADF; - } - - ctx = filp->private_data; - if (ctx == NULL) { - printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); - return -EBADF; - } - - PROTECT_CTX(ctx, flags); - - state = ctx->ctx_state; - is_system = ctx->ctx_fl_system; - - task = PFM_CTX_TASK(ctx); - regs = task_pt_regs(task); - - DPRINT(("ctx_state=%d is_current=%d\n", - state, - task == current ? 1 : 0)); - - /* - * if task == current, then pfm_flush() unloaded the context - */ - if (state == PFM_CTX_UNLOADED) goto doit; - - /* - * context is loaded/masked and task != current, we need to - * either force an unload or go zombie - */ - - /* - * The task is currently blocked or will block after an overflow. - * we must force it to wakeup to get out of the - * MASKED state and transition to the unloaded state by itself. - * - * This situation is only possible for per-task mode - */ - if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) { - - /* - * set a "partial" zombie state to be checked - * upon return from down() in pfm_handle_work(). - * - * We cannot use the ZOMBIE state, because it is checked - * by pfm_load_regs() which is called upon wakeup from down(). - * In such case, it would free the context and then we would - * return to pfm_handle_work() which would access the - * stale context. Instead, we set a flag invisible to pfm_load_regs() - * but visible to pfm_handle_work(). - * - * For some window of time, we have a zombie context with - * ctx_state = MASKED and not ZOMBIE - */ - ctx->ctx_fl_going_zombie = 1; - - /* - * force task to wake up from MASKED state - */ - complete(&ctx->ctx_restart_done); - - DPRINT(("waking up ctx_state=%d\n", state)); - - /* - * put ourself to sleep waiting for the other - * task to report completion - * - * the context is protected by mutex, therefore there - * is no risk of being notified of completion before - * begin actually on the waitq. - */ - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&ctx->ctx_zombieq, &wait); - - UNPROTECT_CTX(ctx, flags); - - /* - * XXX: check for signals : - * - ok for explicit close - * - not ok when coming from exit_files() - */ - schedule(); - - - PROTECT_CTX(ctx, flags); - - - remove_wait_queue(&ctx->ctx_zombieq, &wait); - set_current_state(TASK_RUNNING); - - /* - * context is unloaded at this point - */ - DPRINT(("after zombie wakeup ctx_state=%d for\n", state)); - } - else if (task != current) { -#ifdef CONFIG_SMP - /* - * switch context to zombie state - */ - ctx->ctx_state = PFM_CTX_ZOMBIE; - - DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task))); - /* - * cannot free the context on the spot. 
deferred until - * the task notices the ZOMBIE state - */ - free_possible = 0; -#else - pfm_context_unload(ctx, NULL, 0, regs); -#endif - } - -doit: - /* reload state, may have changed during opening of critical section */ - state = ctx->ctx_state; - - /* - * the context is still attached to a task (possibly current) - * we cannot destroy it right now - */ - - /* - * we must free the sampling buffer right here because - * we cannot rely on it being cleaned up later by the - * monitored task. It is not possible to free vmalloc'ed - * memory in pfm_load_regs(). Instead, we remove the buffer - * now. should there be subsequent PMU overflow originally - * meant for sampling, the will be converted to spurious - * and that's fine because the monitoring tools is gone anyway. - */ - if (ctx->ctx_smpl_hdr) { - smpl_buf_addr = ctx->ctx_smpl_hdr; - smpl_buf_size = ctx->ctx_smpl_size; - /* no more sampling */ - ctx->ctx_smpl_hdr = NULL; - ctx->ctx_fl_is_sampling = 0; - } - - DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n", - state, - free_possible, - smpl_buf_addr, - smpl_buf_size)); - - if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt); - - /* - * UNLOADED that the session has already been unreserved. - */ - if (state == PFM_CTX_ZOMBIE) { - pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu); - } - - /* - * disconnect file descriptor from context must be done - * before we unlock. - */ - filp->private_data = NULL; - - /* - * if we free on the spot, the context is now completely unreachable - * from the callers side. The monitored task side is also cut, so we - * can freely cut. - * - * If we have a deferred free, only the caller side is disconnected. - */ - UNPROTECT_CTX(ctx, flags); - - /* - * All memory free operations (especially for vmalloc'ed memory) - * MUST be done with interrupts ENABLED. 
- */ - vfree(smpl_buf_addr); - - /* - * return the memory used by the context - */ - if (free_possible) pfm_context_free(ctx); - - return 0; -} - -static const struct file_operations pfm_file_ops = { - .llseek = no_llseek, - .read = pfm_read, - .write = pfm_write, - .poll = pfm_poll, - .unlocked_ioctl = pfm_ioctl, - .fasync = pfm_fasync, - .release = pfm_close, - .flush = pfm_flush -}; - -static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen) -{ - return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]", - d_inode(dentry)->i_ino); -} - -static const struct dentry_operations pfmfs_dentry_operations = { - .d_delete = always_delete_dentry, - .d_dname = pfmfs_dname, -}; - - -static struct file * -pfm_alloc_file(pfm_context_t *ctx) -{ - struct file *file; - struct inode *inode; - struct path path; - struct qstr this = { .name = "" }; - - /* - * allocate a new inode - */ - inode = new_inode(pfmfs_mnt->mnt_sb); - if (!inode) - return ERR_PTR(-ENOMEM); - - DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode)); - - inode->i_mode = S_IFCHR|S_IRUGO; - inode->i_uid = current_fsuid(); - inode->i_gid = current_fsgid(); - - /* - * allocate a new dcache entry - */ - path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this); - if (!path.dentry) { - iput(inode); - return ERR_PTR(-ENOMEM); - } - path.mnt = mntget(pfmfs_mnt); - - d_add(path.dentry, inode); - - file = alloc_file(&path, FMODE_READ, &pfm_file_ops); - if (IS_ERR(file)) { - path_put(&path); - return file; - } - - file->f_flags = O_RDONLY; - file->private_data = ctx; - - return file; -} - -static int -pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size) -{ - DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size)); - - while (size > 0) { - unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT; - - - if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY)) - return -ENOMEM; - - addr += PAGE_SIZE; - buf += PAGE_SIZE; - size -= PAGE_SIZE; - } - return 0; -} - -/* - * allocate a sampling buffer and remaps it into the user address space of the task - */ -static int -pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) -{ - struct mm_struct *mm = task->mm; - struct vm_area_struct *vma = NULL; - unsigned long size; - void *smpl_buf; - - - /* - * the fixed header + requested size and align to page boundary - */ - size = PAGE_ALIGN(rsize); - - DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size)); - - /* - * check requested size to avoid Denial-of-service attacks - * XXX: may have to refine this test - * Check against address space limit. - * - * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) - * return -ENOMEM; - */ - if (size > task_rlimit(task, RLIMIT_MEMLOCK)) - return -ENOMEM; - - /* - * We do the easy to undo allocations first. 
- */ - smpl_buf = vzalloc(size); - if (smpl_buf == NULL) { - DPRINT(("Can't allocate sampling buffer\n")); - return -ENOMEM; - } - - DPRINT(("smpl_buf @%p\n", smpl_buf)); - - /* allocate vma */ - vma = vm_area_alloc(mm); - if (!vma) { - DPRINT(("Cannot allocate vma\n")); - goto error_kmem; - } - - /* - * partially initialize the vma for the sampling buffer - */ - vma->vm_file = get_file(filp); - vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP; - vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ - - /* - * Now we have everything we need and we can initialize - * and connect all the data structures - */ - - ctx->ctx_smpl_hdr = smpl_buf; - ctx->ctx_smpl_size = size; /* aligned size */ - - /* - * Let's do the difficult operations next. - * - * now we atomically find some area in the address space and - * remap the buffer in it. - */ - mmap_write_lock(task->mm); - - /* find some free area in address space, must have mmap sem held */ - vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS); - if (IS_ERR_VALUE(vma->vm_start)) { - DPRINT(("Cannot find unmapped area for size %ld\n", size)); - mmap_write_unlock(task->mm); - goto error; - } - vma->vm_end = vma->vm_start + size; - vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; - - DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start)); - - /* can only be applied to current task, need to have the mm semaphore held when called */ - if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) { - DPRINT(("Can't remap buffer\n")); - mmap_write_unlock(task->mm); - goto error; - } - - /* - * now insert the vma in the vm list for the process, must be - * done with mmap lock held - */ - insert_vm_struct(mm, vma); - - vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma)); - mmap_write_unlock(task->mm); - - /* - * keep track of user level virtual address - */ - ctx->ctx_smpl_vaddr = (void *)vma->vm_start; - *(unsigned long *)user_vaddr = vma->vm_start; - - return 0; - -error: - vm_area_free(vma); -error_kmem: - vfree(smpl_buf); - - return -ENOMEM; -} - -/* - * XXX: do something better here - */ -static int -pfm_bad_permissions(struct task_struct *task) -{ - const struct cred *tcred; - kuid_t uid = current_uid(); - kgid_t gid = current_gid(); - int ret; - - rcu_read_lock(); - tcred = __task_cred(task); - - /* inspired by ptrace_attach() */ - DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", - from_kuid(&init_user_ns, uid), - from_kgid(&init_user_ns, gid), - from_kuid(&init_user_ns, tcred->euid), - from_kuid(&init_user_ns, tcred->suid), - from_kuid(&init_user_ns, tcred->uid), - from_kgid(&init_user_ns, tcred->egid), - from_kgid(&init_user_ns, tcred->sgid))); - - ret = ((!uid_eq(uid, tcred->euid)) - || (!uid_eq(uid, tcred->suid)) - || (!uid_eq(uid, tcred->uid)) - || (!gid_eq(gid, tcred->egid)) - || (!gid_eq(gid, tcred->sgid)) - || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE); - - rcu_read_unlock(); - return ret; -} - -static int -pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx) -{ - int ctx_flags; - - /* valid signal */ - - ctx_flags = pfx->ctx_flags; - - if (ctx_flags & PFM_FL_SYSTEM_WIDE) { - - /* - * cannot block in this mode - */ - if (ctx_flags & PFM_FL_NOTIFY_BLOCK) { - DPRINT(("cannot use blocking mode when in system wide monitoring\n")); - return -EINVAL; - } - } else { - } - /* probably more to add here */ - - return 0; -} - -static int -pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, 
pfm_context_t *ctx, unsigned int ctx_flags, - unsigned int cpu, pfarg_context_t *arg) -{ - pfm_buffer_fmt_t *fmt = NULL; - unsigned long size = 0UL; - void *uaddr = NULL; - void *fmt_arg = NULL; - int ret = 0; -#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1) - - /* invoke and lock buffer format, if found */ - fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id); - if (fmt == NULL) { - DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task))); - return -EINVAL; - } - - /* - * buffer argument MUST be contiguous to pfarg_context_t - */ - if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg); - - ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); - - DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret)); - - if (ret) goto error; - - /* link buffer format and context */ - ctx->ctx_buf_fmt = fmt; - ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */ - - /* - * check if buffer format wants to use perfmon buffer allocation/mapping service - */ - ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size); - if (ret) goto error; - - if (size) { - /* - * buffer is always remapped into the caller's address space - */ - ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr); - if (ret) goto error; - - /* keep track of user address of buffer */ - arg->ctx_smpl_vaddr = uaddr; - } - ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); - -error: - return ret; -} - -static void -pfm_reset_pmu_state(pfm_context_t *ctx) -{ - int i; - - /* - * install reset values for PMC. - */ - for (i=1; PMC_IS_LAST(i) == 0; i++) { - if (PMC_IS_IMPL(i) == 0) continue; - ctx->ctx_pmcs[i] = PMC_DFL_VAL(i); - DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i])); - } - /* - * PMD registers are set to 0UL when the context in memset() - */ - - /* - * On context switched restore, we must restore ALL pmc and ALL pmd even - * when they are not actively used by the task. In UP, the incoming process - * may otherwise pick up left over PMC, PMD state from the previous process. - * As opposed to PMD, stale PMC can cause harm to the incoming - * process because they may change what is being measured. - * Therefore, we must systematically reinstall the entire - * PMC state. In SMP, the same thing is possible on the - * same CPU but also on between 2 CPUs. - * - * The problem with PMD is information leaking especially - * to user level when psr.sp=0 - * - * There is unfortunately no easy way to avoid this problem - * on either UP or SMP. This definitively slows down the - * pfm_load_regs() function. - */ - - /* - * bitmask of all PMCs accessible to this context - * - * PMC0 is treated differently. 
- */ - ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1; - - /* - * bitmask of all PMDs that are accessible to this context - */ - ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0]; - - DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0])); - - /* - * useful in case of re-enable after disable - */ - ctx->ctx_used_ibrs[0] = 0UL; - ctx->ctx_used_dbrs[0] = 0UL; -} - -static int -pfm_ctx_getsize(void *arg, size_t *sz) -{ - pfarg_context_t *req = (pfarg_context_t *)arg; - pfm_buffer_fmt_t *fmt; - - *sz = 0; - - if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0; - - fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id); - if (fmt == NULL) { - DPRINT(("cannot find buffer format\n")); - return -EINVAL; - } - /* get just enough to copy in user parameters */ - *sz = fmt->fmt_arg_size; - DPRINT(("arg_size=%lu\n", *sz)); - - return 0; -} - - - -/* - * cannot attach if : - * - kernel task - * - task not owned by caller - * - task incompatible with context mode - */ -static int -pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) -{ - /* - * no kernel task or task not owner by caller - */ - if (task->mm == NULL) { - DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task))); - return -EPERM; - } - if (pfm_bad_permissions(task)) { - DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task))); - return -EPERM; - } - /* - * cannot block in self-monitoring mode - */ - if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { - DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task))); - return -EINVAL; - } - - if (task->exit_state == EXIT_ZOMBIE) { - DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task))); - return -EBUSY; - } - - /* - * always ok for self - */ - if (task == current) return 0; - - if (!task_is_stopped_or_traced(task)) { - DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state)); - return -EBUSY; - } - /* - * make sure the task is off any CPU - */ - wait_task_inactive(task, 0); - - /* more to come... */ - - return 0; -} - -static int -pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) -{ - struct task_struct *p = current; - int ret; - - /* XXX: need to add more checks here */ - if (pid < 2) return -EPERM; - - if (pid != task_pid_vnr(current)) { - /* make sure task cannot go away while we operate on it */ - p = find_get_task_by_vpid(pid); - if (!p) - return -ESRCH; - } - - ret = pfm_task_incompatible(ctx, p); - if (ret == 0) { - *task = p; - } else if (p != current) { - pfm_put_task(p); - } - return ret; -} - - - -static int -pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - pfarg_context_t *req = (pfarg_context_t *)arg; - struct file *filp; - struct path path; - int ctx_flags; - int fd; - int ret; - - /* let's check the arguments first */ - ret = pfarg_is_sane(current, req); - if (ret < 0) - return ret; - - ctx_flags = req->ctx_flags; - - ret = -ENOMEM; - - fd = get_unused_fd_flags(0); - if (fd < 0) - return fd; - - ctx = pfm_context_alloc(ctx_flags); - if (!ctx) - goto error; - - filp = pfm_alloc_file(ctx); - if (IS_ERR(filp)) { - ret = PTR_ERR(filp); - goto error_file; - } - - req->ctx_fd = ctx->ctx_fd = fd; - - /* - * does the user want to sample? 
- */ - if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) { - ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req); - if (ret) - goto buffer_error; - } - - DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n", - ctx, - ctx_flags, - ctx->ctx_fl_system, - ctx->ctx_fl_block, - ctx->ctx_fl_excl_idle, - ctx->ctx_fl_no_msg, - ctx->ctx_fd)); - - /* - * initialize soft PMU state - */ - pfm_reset_pmu_state(ctx); - - fd_install(fd, filp); - - return 0; - -buffer_error: - path = filp->f_path; - put_filp(filp); - path_put(&path); - - if (ctx->ctx_buf_fmt) { - pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs); - } -error_file: - pfm_context_free(ctx); - -error: - put_unused_fd(fd); - return ret; -} - -static inline unsigned long -pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset) -{ - unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset; - unsigned long new_seed, old_seed = reg->seed, mask = reg->mask; - extern unsigned long carta_random32 (unsigned long seed); - - if (reg->flags & PFM_REGFL_RANDOM) { - new_seed = carta_random32(old_seed); - val -= (old_seed & mask); /* counter values are negative numbers! */ - if ((mask >> 32) != 0) - /* construct a full 64-bit random value: */ - new_seed |= carta_random32(old_seed >> 32) << 32; - reg->seed = new_seed; - } - reg->lval = val; - return val; -} - -static void -pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) -{ - unsigned long mask = ovfl_regs[0]; - unsigned long reset_others = 0UL; - unsigned long val; - int i; - - /* - * now restore reset value on sampling overflowed counters - */ - mask >>= PMU_FIRST_COUNTER; - for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { - - if ((mask & 0x1UL) == 0UL) continue; - - ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); - reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; - - DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); - } - - /* - * Now take care of resetting the other registers - */ - for(i = 0; reset_others; i++, reset_others >>= 1) { - - if ((reset_others & 0x1) == 0) continue; - - ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); - - DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", - is_long_reset ? "long" : "short", i, val)); - } -} - -static void -pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) -{ - unsigned long mask = ovfl_regs[0]; - unsigned long reset_others = 0UL; - unsigned long val; - int i; - - DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset)); - - if (ctx->ctx_state == PFM_CTX_MASKED) { - pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset); - return; - } - - /* - * now restore reset value on sampling overflowed counters - */ - mask >>= PMU_FIRST_COUNTER; - for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { - - if ((mask & 0x1UL) == 0UL) continue; - - val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); - reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; - - DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? 
"long" : "short", i, val)); - - pfm_write_soft_counter(ctx, i, val); - } - - /* - * Now take care of resetting the other registers - */ - for(i = 0; reset_others; i++, reset_others >>= 1) { - - if ((reset_others & 0x1) == 0) continue; - - val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); - - if (PMD_IS_COUNTING(i)) { - pfm_write_soft_counter(ctx, i, val); - } else { - ia64_set_pmd(i, val); - } - DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", - is_long_reset ? "long" : "short", i, val)); - } - ia64_srlz_d(); -} - -static int -pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct task_struct *task; - pfarg_reg_t *req = (pfarg_reg_t *)arg; - unsigned long value, pmc_pm; - unsigned long smpl_pmds, reset_pmds, impl_pmds; - unsigned int cnum, reg_flags, flags, pmc_type; - int i, can_access_pmu = 0, is_loaded, is_system, expert_mode; - int is_monitor, is_counting, state; - int ret = -EINVAL; - pfm_reg_check_t wr_func; -#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z)) - - state = ctx->ctx_state; - is_loaded = state == PFM_CTX_LOADED ? 1 : 0; - is_system = ctx->ctx_fl_system; - task = ctx->ctx_task; - impl_pmds = pmu_conf->impl_pmds[0]; - - if (state == PFM_CTX_ZOMBIE) return -EINVAL; - - if (is_loaded) { - /* - * In system wide and when the context is loaded, access can only happen - * when the caller is running on the CPU being monitored by the session. - * It does not have to be the owner (ctx_task) of the context per se. - */ - if (is_system && ctx->ctx_cpu != smp_processor_id()) { - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - return -EBUSY; - } - can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; - } - expert_mode = pfm_sysctl.expert_mode; - - for (i = 0; i < count; i++, req++) { - - cnum = req->reg_num; - reg_flags = req->reg_flags; - value = req->reg_value; - smpl_pmds = req->reg_smpl_pmds[0]; - reset_pmds = req->reg_reset_pmds[0]; - flags = 0; - - - if (cnum >= PMU_MAX_PMCS) { - DPRINT(("pmc%u is invalid\n", cnum)); - goto error; - } - - pmc_type = pmu_conf->pmc_desc[cnum].type; - pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1; - is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0; - is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0; - - /* - * we reject all non implemented PMC as well - * as attempts to modify PMC[0-3] which are used - * as status registers by the PMU - */ - if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) { - DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type)); - goto error; - } - wr_func = pmu_conf->pmc_desc[cnum].write_check; - /* - * If the PMC is a monitor, then if the value is not the default: - * - system-wide session: PMCx.pm=1 (privileged monitor) - * - per-task : PMCx.pm=0 (user monitor) - */ - if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) { - DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n", - cnum, - pmc_pm, - is_system)); - goto error; - } - - if (is_counting) { - /* - * enforce generation of overflow interrupt. Necessary on all - * CPUs. 
- */ - value |= 1 << PMU_PMC_OI; - - if (reg_flags & PFM_REGFL_OVFL_NOTIFY) { - flags |= PFM_REGFL_OVFL_NOTIFY; - } - - if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM; - - /* verify validity of smpl_pmds */ - if ((smpl_pmds & impl_pmds) != smpl_pmds) { - DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum)); - goto error; - } - - /* verify validity of reset_pmds */ - if ((reset_pmds & impl_pmds) != reset_pmds) { - DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum)); - goto error; - } - } else { - if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) { - DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum)); - goto error; - } - /* eventid on non-counting monitors are ignored */ - } - - /* - * execute write checker, if any - */ - if (likely(expert_mode == 0 && wr_func)) { - ret = (*wr_func)(task, ctx, cnum, &value, regs); - if (ret) goto error; - ret = -EINVAL; - } - - /* - * no error on this register - */ - PFM_REG_RETFLAG_SET(req->reg_flags, 0); - - /* - * Now we commit the changes to the software state - */ - - /* - * update overflow information - */ - if (is_counting) { - /* - * full flag update each time a register is programmed - */ - ctx->ctx_pmds[cnum].flags = flags; - - ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds; - ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds; - ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid; - - /* - * Mark all PMDS to be accessed as used. - * - * We do not keep track of PMC because we have to - * systematically restore ALL of them. - * - * We do not update the used_monitors mask, because - * if we have not programmed them, then will be in - * a quiescent state, therefore we will not need to - * mask/restore then when context is MASKED. - */ - CTX_USED_PMD(ctx, reset_pmds); - CTX_USED_PMD(ctx, smpl_pmds); - /* - * make sure we do not try to reset on - * restart because we have established new values - */ - if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; - } - /* - * Needed in case the user does not initialize the equivalent - * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no - * possible leak here. - */ - CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]); - - /* - * keep track of the monitor PMC that we are using. - * we save the value of the pmc in ctx_pmcs[] and if - * the monitoring is not stopped for the context we also - * place it in the saved state area so that it will be - * picked up later by the context switch code. - * - * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs(). - * - * The value in th_pmcs[] may be modified on overflow, i.e., when - * monitoring needs to be stopped. - */ - if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum); - - /* - * update context state - */ - ctx->ctx_pmcs[cnum] = value; - - if (is_loaded) { - /* - * write thread state - */ - if (is_system == 0) ctx->th_pmcs[cnum] = value; - - /* - * write hardware register if we can - */ - if (can_access_pmu) { - ia64_set_pmc(cnum, value); - } -#ifdef CONFIG_SMP - else { - /* - * per-task SMP only here - * - * we are guaranteed that the task is not running on the other CPU, - * we indicate that this PMD will need to be reloaded if the task - * is rescheduled on the CPU it ran last on. 
- */ - ctx->ctx_reload_pmcs[0] |= 1UL << cnum; - } -#endif - } - - DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n", - cnum, - value, - is_loaded, - can_access_pmu, - flags, - ctx->ctx_all_pmcs[0], - ctx->ctx_used_pmds[0], - ctx->ctx_pmds[cnum].eventid, - smpl_pmds, - reset_pmds, - ctx->ctx_reload_pmcs[0], - ctx->ctx_used_monitors[0], - ctx->ctx_ovfl_regs[0])); - } - - /* - * make sure the changes are visible - */ - if (can_access_pmu) ia64_srlz_d(); - - return 0; -error: - PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); - return ret; -} - -static int -pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct task_struct *task; - pfarg_reg_t *req = (pfarg_reg_t *)arg; - unsigned long value, hw_value, ovfl_mask; - unsigned int cnum; - int i, can_access_pmu = 0, state; - int is_counting, is_loaded, is_system, expert_mode; - int ret = -EINVAL; - pfm_reg_check_t wr_func; - - - state = ctx->ctx_state; - is_loaded = state == PFM_CTX_LOADED ? 1 : 0; - is_system = ctx->ctx_fl_system; - ovfl_mask = pmu_conf->ovfl_val; - task = ctx->ctx_task; - - if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL; - - /* - * on both UP and SMP, we can only write to the PMC when the task is - * the owner of the local PMU. - */ - if (likely(is_loaded)) { - /* - * In system wide and when the context is loaded, access can only happen - * when the caller is running on the CPU being monitored by the session. - * It does not have to be the owner (ctx_task) of the context per se. - */ - if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - return -EBUSY; - } - can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; - } - expert_mode = pfm_sysctl.expert_mode; - - for (i = 0; i < count; i++, req++) { - - cnum = req->reg_num; - value = req->reg_value; - - if (!PMD_IS_IMPL(cnum)) { - DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum)); - goto abort_mission; - } - is_counting = PMD_IS_COUNTING(cnum); - wr_func = pmu_conf->pmd_desc[cnum].write_check; - - /* - * execute write checker, if any - */ - if (unlikely(expert_mode == 0 && wr_func)) { - unsigned long v = value; - - ret = (*wr_func)(task, ctx, cnum, &v, regs); - if (ret) goto abort_mission; - - value = v; - ret = -EINVAL; - } - - /* - * no error on this register - */ - PFM_REG_RETFLAG_SET(req->reg_flags, 0); - - /* - * now commit changes to software state - */ - hw_value = value; - - /* - * update virtualized (64bits) counter - */ - if (is_counting) { - /* - * write context state - */ - ctx->ctx_pmds[cnum].lval = value; - - /* - * when context is load we use the split value - */ - if (is_loaded) { - hw_value = value & ovfl_mask; - value = value & ~ovfl_mask; - } - } - /* - * update reset values (not just for counters) - */ - ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset; - ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset; - - /* - * update randomization parameters (not just for counters) - */ - ctx->ctx_pmds[cnum].seed = req->reg_random_seed; - ctx->ctx_pmds[cnum].mask = req->reg_random_mask; - - /* - * update context value - */ - ctx->ctx_pmds[cnum].val = value; - - /* - * Keep track of what we use - * - * We do not keep track of PMC because we have to - * systematically restore ALL of them. 
- */ - CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum)); - - /* - * mark this PMD register used as well - */ - CTX_USED_PMD(ctx, RDEP(cnum)); - - /* - * make sure we do not try to reset on - * restart because we have established new values - */ - if (is_counting && state == PFM_CTX_MASKED) { - ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; - } - - if (is_loaded) { - /* - * write thread state - */ - if (is_system == 0) ctx->th_pmds[cnum] = hw_value; - - /* - * write hardware register if we can - */ - if (can_access_pmu) { - ia64_set_pmd(cnum, hw_value); - } else { -#ifdef CONFIG_SMP - /* - * we are guaranteed that the task is not running on the other CPU, - * we indicate that this PMD will need to be reloaded if the task - * is rescheduled on the CPU it ran last on. - */ - ctx->ctx_reload_pmds[0] |= 1UL << cnum; -#endif - } - } - - DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx " - "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n", - cnum, - value, - is_loaded, - can_access_pmu, - hw_value, - ctx->ctx_pmds[cnum].val, - ctx->ctx_pmds[cnum].short_reset, - ctx->ctx_pmds[cnum].long_reset, - PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N', - ctx->ctx_pmds[cnum].seed, - ctx->ctx_pmds[cnum].mask, - ctx->ctx_used_pmds[0], - ctx->ctx_pmds[cnum].reset_pmds[0], - ctx->ctx_reload_pmds[0], - ctx->ctx_all_pmds[0], - ctx->ctx_ovfl_regs[0])); - } - - /* - * make changes visible - */ - if (can_access_pmu) ia64_srlz_d(); - - return 0; - -abort_mission: - /* - * for now, we have only one possibility for error - */ - PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); - return ret; -} - -/* - * By the way of PROTECT_CONTEXT(), interrupts are masked while we are in this function. - * Therefore we know, we do not have to worry about the PMU overflow interrupt. If an - * interrupt is delivered during the call, it will be kept pending until we leave, making - * it appears as if it had been generated at the UNPROTECT_CONTEXT(). At least we are - * guaranteed to return consistent data to the user, it may simply be old. It is not - * trivial to treat the overflow while inside the call because you may end up in - * some module sampling buffer code causing deadlocks. - */ -static int -pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct task_struct *task; - unsigned long val = 0UL, lval, ovfl_mask, sval; - pfarg_reg_t *req = (pfarg_reg_t *)arg; - unsigned int cnum, reg_flags = 0; - int i, can_access_pmu = 0, state; - int is_loaded, is_system, is_counting, expert_mode; - int ret = -EINVAL; - pfm_reg_check_t rd_func; - - /* - * access is possible when loaded only for - * self-monitoring tasks or in UP mode - */ - - state = ctx->ctx_state; - is_loaded = state == PFM_CTX_LOADED ? 1 : 0; - is_system = ctx->ctx_fl_system; - ovfl_mask = pmu_conf->ovfl_val; - task = ctx->ctx_task; - - if (state == PFM_CTX_ZOMBIE) return -EINVAL; - - if (likely(is_loaded)) { - /* - * In system wide and when the context is loaded, access can only happen - * when the caller is running on the CPU being monitored by the session. - * It does not have to be the owner (ctx_task) of the context per se. - */ - if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - return -EBUSY; - } - /* - * this can be true when not self-monitoring only in UP - */ - can_access_pmu = GET_PMU_OWNER() == task || is_system ? 
1 : 0; - - if (can_access_pmu) ia64_srlz_d(); - } - expert_mode = pfm_sysctl.expert_mode; - - DPRINT(("ld=%d apmu=%d ctx_state=%d\n", - is_loaded, - can_access_pmu, - state)); - - /* - * on both UP and SMP, we can only read the PMD from the hardware register when - * the task is the owner of the local PMU. - */ - - for (i = 0; i < count; i++, req++) { - - cnum = req->reg_num; - reg_flags = req->reg_flags; - - if (unlikely(!PMD_IS_IMPL(cnum))) goto error; - /* - * we can only read the register that we use. That includes - * the one we explicitly initialize AND the one we want included - * in the sampling buffer (smpl_regs). - * - * Having this restriction allows optimization in the ctxsw routine - * without compromising security (leaks) - */ - if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error; - - sval = ctx->ctx_pmds[cnum].val; - lval = ctx->ctx_pmds[cnum].lval; - is_counting = PMD_IS_COUNTING(cnum); - - /* - * If the task is not the current one, then we check if the - * PMU state is still in the local live register due to lazy ctxsw. - * If true, then we read directly from the registers. - */ - if (can_access_pmu){ - val = ia64_get_pmd(cnum); - } else { - /* - * context has been saved - * if context is zombie, then task does not exist anymore. - * In this case, we use the full value saved in the context (pfm_flush_regs()). - */ - val = is_loaded ? ctx->th_pmds[cnum] : 0UL; - } - rd_func = pmu_conf->pmd_desc[cnum].read_check; - - if (is_counting) { - /* - * XXX: need to check for overflow when loaded - */ - val &= ovfl_mask; - val += sval; - } - - /* - * execute read checker, if any - */ - if (unlikely(expert_mode == 0 && rd_func)) { - unsigned long v = val; - ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs); - if (ret) goto error; - val = v; - ret = -EINVAL; - } - - PFM_REG_RETFLAG_SET(reg_flags, 0); - - DPRINT(("pmd[%u]=0x%lx\n", cnum, val)); - - /* - * update register return value, abort all if problem during copy. - * we only modify the reg_flags field. no check mode is fine because - * access has been verified upfront in sys_perfmonctl(). 
- */ - req->reg_value = val; - req->reg_flags = reg_flags; - req->reg_last_reset_val = lval; - } - - return 0; - -error: - PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); - return ret; -} - -int -pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) -{ - pfm_context_t *ctx; - - if (req == NULL) return -EINVAL; - - ctx = GET_PMU_CTX(); - - if (ctx == NULL) return -EINVAL; - - /* - * for now limit to current task, which is enough when calling - * from overflow handler - */ - if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; - - return pfm_write_pmcs(ctx, req, nreq, regs); -} -EXPORT_SYMBOL(pfm_mod_write_pmcs); - -int -pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) -{ - pfm_context_t *ctx; - - if (req == NULL) return -EINVAL; - - ctx = GET_PMU_CTX(); - - if (ctx == NULL) return -EINVAL; - - /* - * for now limit to current task, which is enough when calling - * from overflow handler - */ - if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; - - return pfm_read_pmds(ctx, req, nreq, regs); -} -EXPORT_SYMBOL(pfm_mod_read_pmds); - -/* - * Only call this function when a process it trying to - * write the debug registers (reading is always allowed) - */ -int -pfm_use_debug_registers(struct task_struct *task) -{ - pfm_context_t *ctx = task->thread.pfm_context; - unsigned long flags; - int ret = 0; - - if (pmu_conf->use_rr_dbregs == 0) return 0; - - DPRINT(("called for [%d]\n", task_pid_nr(task))); - - /* - * do it only once - */ - if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0; - - /* - * Even on SMP, we do not need to use an atomic here because - * the only way in is via ptrace() and this is possible only when the - * process is stopped. Even in the case where the ctxsw out is not totally - * completed by the time we come here, there is no way the 'stopped' process - * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine. - * So this is always safe. - */ - if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1; - - LOCK_PFS(flags); - - /* - * We cannot allow setting breakpoints when system wide monitoring - * sessions are using the debug registers. - */ - if (pfm_sessions.pfs_sys_use_dbregs> 0) - ret = -1; - else - pfm_sessions.pfs_ptrace_use_dbregs++; - - DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n", - pfm_sessions.pfs_ptrace_use_dbregs, - pfm_sessions.pfs_sys_use_dbregs, - task_pid_nr(task), ret)); - - UNLOCK_PFS(flags); - - return ret; -} - -/* - * This function is called for every task that exits with the - * IA64_THREAD_DBG_VALID set. This indicates a task which was - * able to use the debug registers for debugging purposes via - * ptrace(). 
Therefore we know it was not using them for - * performance monitoring, so we only decrement the number - * of "ptraced" debug register users to keep the count up to date - */ -int -pfm_release_debug_registers(struct task_struct *task) -{ - unsigned long flags; - int ret; - - if (pmu_conf->use_rr_dbregs == 0) return 0; - - LOCK_PFS(flags); - if (pfm_sessions.pfs_ptrace_use_dbregs == 0) { - printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task)); - ret = -1; - } else { - pfm_sessions.pfs_ptrace_use_dbregs--; - ret = 0; - } - UNLOCK_PFS(flags); - - return ret; -} - -static int -pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct task_struct *task; - pfm_buffer_fmt_t *fmt; - pfm_ovfl_ctrl_t rst_ctrl; - int state, is_system; - int ret = 0; - - state = ctx->ctx_state; - fmt = ctx->ctx_buf_fmt; - is_system = ctx->ctx_fl_system; - task = PFM_CTX_TASK(ctx); - - switch(state) { - case PFM_CTX_MASKED: - break; - case PFM_CTX_LOADED: - if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; - fallthrough; - case PFM_CTX_UNLOADED: - case PFM_CTX_ZOMBIE: - DPRINT(("invalid state=%d\n", state)); - return -EBUSY; - default: - DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state)); - return -EINVAL; - } - - /* - * In system wide and when the context is loaded, access can only happen - * when the caller is running on the CPU being monitored by the session. - * It does not have to be the owner (ctx_task) of the context per se. - */ - if (is_system && ctx->ctx_cpu != smp_processor_id()) { - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - return -EBUSY; - } - - /* sanity check */ - if (unlikely(task == NULL)) { - printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current)); - return -EINVAL; - } - - if (task == current || is_system) { - - fmt = ctx->ctx_buf_fmt; - - DPRINT(("restarting self %d ovfl=0x%lx\n", - task_pid_nr(task), - ctx->ctx_ovfl_regs[0])); - - if (CTX_HAS_SMPL(ctx)) { - - prefetch(ctx->ctx_smpl_hdr); - - rst_ctrl.bits.mask_monitoring = 0; - rst_ctrl.bits.reset_ovfl_pmds = 0; - - if (state == PFM_CTX_LOADED) - ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); - else - ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); - } else { - rst_ctrl.bits.mask_monitoring = 0; - rst_ctrl.bits.reset_ovfl_pmds = 1; - } - - if (ret == 0) { - if (rst_ctrl.bits.reset_ovfl_pmds) - pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); - - if (rst_ctrl.bits.mask_monitoring == 0) { - DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task))); - - if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); - } else { - DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task))); - - // cannot use pfm_stop_monitoring(task, regs); - } - } - /* - * clear overflowed PMD mask to remove any stale information - */ - ctx->ctx_ovfl_regs[0] = 0UL; - - /* - * back to LOADED state - */ - ctx->ctx_state = PFM_CTX_LOADED; - - /* - * XXX: not really useful for self monitoring - */ - ctx->ctx_fl_can_restart = 0; - - return 0; - } - - /* - * restart another task - */ - - /* - * When PFM_CTX_MASKED, we cannot issue a restart before the previous - * one is seen by the task. 
- */ - if (state == PFM_CTX_MASKED) { - if (ctx->ctx_fl_can_restart == 0) return -EINVAL; - /* - * will prevent subsequent restart before this one is - * seen by other task - */ - ctx->ctx_fl_can_restart = 0; - } - - /* - * if blocking, then post the semaphore is PFM_CTX_MASKED, i.e. - * the task is blocked or on its way to block. That's the normal - * restart path. If the monitoring is not masked, then the task - * can be actively monitoring and we cannot directly intervene. - * Therefore we use the trap mechanism to catch the task and - * force it to reset the buffer/reset PMDs. - * - * if non-blocking, then we ensure that the task will go into - * pfm_handle_work() before returning to user mode. - * - * We cannot explicitly reset another task, it MUST always - * be done by the task itself. This works for system wide because - * the tool that is controlling the session is logically doing - * "self-monitoring". - */ - if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { - DPRINT(("unblocking [%d]\n", task_pid_nr(task))); - complete(&ctx->ctx_restart_done); - } else { - DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); - - ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; - - PFM_SET_WORK_PENDING(task, 1); - - set_notify_resume(task); - - /* - * XXX: send reschedule if task runs on another CPU - */ - } - return 0; -} - -static int -pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - unsigned int m = *(unsigned int *)arg; - - pfm_sysctl.debug = m == 0 ? 0 : 1; - - printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); - - if (m == 0) { - memset(pfm_stats, 0, sizeof(pfm_stats)); - for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL; - } - return 0; -} - -/* - * arg can be NULL and count can be zero for this function - */ -static int -pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct thread_struct *thread = NULL; - struct task_struct *task; - pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg; - unsigned long flags; - dbreg_t dbreg; - unsigned int rnum; - int first_time; - int ret = 0, state; - int i, can_access_pmu = 0; - int is_system, is_loaded; - - if (pmu_conf->use_rr_dbregs == 0) return -EINVAL; - - state = ctx->ctx_state; - is_loaded = state == PFM_CTX_LOADED ? 1 : 0; - is_system = ctx->ctx_fl_system; - task = ctx->ctx_task; - - if (state == PFM_CTX_ZOMBIE) return -EINVAL; - - /* - * on both UP and SMP, we can only write to the PMC when the task is - * the owner of the local PMU. - */ - if (is_loaded) { - thread = &task->thread; - /* - * In system wide and when the context is loaded, access can only happen - * when the caller is running on the CPU being monitored by the session. - * It does not have to be the owner (ctx_task) of the context per se. - */ - if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - return -EBUSY; - } - can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; - } - - /* - * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w - * ensuring that no real breakpoint can be installed via this call. 
- * - * IMPORTANT: regs can be NULL in this function - */ - - first_time = ctx->ctx_fl_using_dbreg == 0; - - /* - * don't bother if we are loaded and task is being debugged - */ - if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) { - DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task))); - return -EBUSY; - } - - /* - * check for debug registers in system wide mode - * - * If though a check is done in pfm_context_load(), - * we must repeat it here, in case the registers are - * written after the context is loaded - */ - if (is_loaded) { - LOCK_PFS(flags); - - if (first_time && is_system) { - if (pfm_sessions.pfs_ptrace_use_dbregs) - ret = -EBUSY; - else - pfm_sessions.pfs_sys_use_dbregs++; - } - UNLOCK_PFS(flags); - } - - if (ret != 0) return ret; - - /* - * mark ourself as user of the debug registers for - * perfmon purposes. - */ - ctx->ctx_fl_using_dbreg = 1; - - /* - * clear hardware registers to make sure we don't - * pick up stale state. - * - * for a system wide session, we do not use - * thread.dbr, thread.ibr because this process - * never leaves the current CPU and the state - * is shared by all processes running on it - */ - if (first_time && can_access_pmu) { - DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task))); - for (i=0; i < pmu_conf->num_ibrs; i++) { - ia64_set_ibr(i, 0UL); - ia64_dv_serialize_instruction(); - } - ia64_srlz_i(); - for (i=0; i < pmu_conf->num_dbrs; i++) { - ia64_set_dbr(i, 0UL); - ia64_dv_serialize_data(); - } - ia64_srlz_d(); - } - - /* - * Now install the values into the registers - */ - for (i = 0; i < count; i++, req++) { - - rnum = req->dbreg_num; - dbreg.val = req->dbreg_value; - - ret = -EINVAL; - - if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) { - DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n", - rnum, dbreg.val, mode, i, count)); - - goto abort_mission; - } - - /* - * make sure we do not install enabled breakpoint - */ - if (rnum & 0x1) { - if (mode == PFM_CODE_RR) - dbreg.ibr.ibr_x = 0; - else - dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0; - } - - PFM_REG_RETFLAG_SET(req->dbreg_flags, 0); - - /* - * Debug registers, just like PMC, can only be modified - * by a kernel call. Moreover, perfmon() access to those - * registers are centralized in this routine. The hardware - * does not modify the value of these registers, therefore, - * if we save them as they are written, we can avoid having - * to save them on context switch out. This is made possible - * by the fact that when perfmon uses debug registers, ptrace() - * won't be able to modify them concurrently. 
- */ - if (mode == PFM_CODE_RR) { - CTX_USED_IBR(ctx, rnum); - - if (can_access_pmu) { - ia64_set_ibr(rnum, dbreg.val); - ia64_dv_serialize_instruction(); - } - - ctx->ctx_ibrs[rnum] = dbreg.val; - - DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n", - rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu)); - } else { - CTX_USED_DBR(ctx, rnum); - - if (can_access_pmu) { - ia64_set_dbr(rnum, dbreg.val); - ia64_dv_serialize_data(); - } - ctx->ctx_dbrs[rnum] = dbreg.val; - - DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n", - rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu)); - } - } - - return 0; - -abort_mission: - /* - * in case it was our first attempt, we undo the global modifications - */ - if (first_time) { - LOCK_PFS(flags); - if (ctx->ctx_fl_system) { - pfm_sessions.pfs_sys_use_dbregs--; - } - UNLOCK_PFS(flags); - ctx->ctx_fl_using_dbreg = 0; - } - /* - * install error return flag - */ - PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL); - - return ret; -} - -static int -pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs); -} - -static int -pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs); -} - -int -pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) -{ - pfm_context_t *ctx; - - if (req == NULL) return -EINVAL; - - ctx = GET_PMU_CTX(); - - if (ctx == NULL) return -EINVAL; - - /* - * for now limit to current task, which is enough when calling - * from overflow handler - */ - if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; - - return pfm_write_ibrs(ctx, req, nreq, regs); -} -EXPORT_SYMBOL(pfm_mod_write_ibrs); - -int -pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) -{ - pfm_context_t *ctx; - - if (req == NULL) return -EINVAL; - - ctx = GET_PMU_CTX(); - - if (ctx == NULL) return -EINVAL; - - /* - * for now limit to current task, which is enough when calling - * from overflow handler - */ - if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; - - return pfm_write_dbrs(ctx, req, nreq, regs); -} -EXPORT_SYMBOL(pfm_mod_write_dbrs); - - -static int -pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - pfarg_features_t *req = (pfarg_features_t *)arg; - - req->ft_version = PFM_VERSION; - return 0; -} - -static int -pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct pt_regs *tregs; - struct task_struct *task = PFM_CTX_TASK(ctx); - int state, is_system; - - state = ctx->ctx_state; - is_system = ctx->ctx_fl_system; - - /* - * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE) - */ - if (state == PFM_CTX_UNLOADED) return -EINVAL; - - /* - * In system wide and when the context is loaded, access can only happen - * when the caller is running on the CPU being monitored by the session. - * It does not have to be the owner (ctx_task) of the context per se. 
- */ - if (is_system && ctx->ctx_cpu != smp_processor_id()) { - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - return -EBUSY; - } - DPRINT(("task [%d] ctx_state=%d is_system=%d\n", - task_pid_nr(PFM_CTX_TASK(ctx)), - state, - is_system)); - /* - * in system mode, we need to update the PMU directly - * and the user level state of the caller, which may not - * necessarily be the creator of the context. - */ - if (is_system) { - /* - * Update local PMU first - * - * disable dcr pp - */ - ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); - ia64_srlz_i(); - - /* - * update local cpuinfo - */ - PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); - - /* - * stop monitoring, does srlz.i - */ - pfm_clear_psr_pp(); - - /* - * stop monitoring in the caller - */ - ia64_psr(regs)->pp = 0; - - return 0; - } - /* - * per-task mode - */ - - if (task == current) { - /* stop monitoring at kernel level */ - pfm_clear_psr_up(); - - /* - * stop monitoring at the user level - */ - ia64_psr(regs)->up = 0; - } else { - tregs = task_pt_regs(task); - - /* - * stop monitoring at the user level - */ - ia64_psr(tregs)->up = 0; - - /* - * monitoring disabled in kernel at next reschedule - */ - ctx->ctx_saved_psr_up = 0; - DPRINT(("task=[%d]\n", task_pid_nr(task))); - } - return 0; -} - - -static int -pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct pt_regs *tregs; - int state, is_system; - - state = ctx->ctx_state; - is_system = ctx->ctx_fl_system; - - if (state != PFM_CTX_LOADED) return -EINVAL; - - /* - * In system wide and when the context is loaded, access can only happen - * when the caller is running on the CPU being monitored by the session. - * It does not have to be the owner (ctx_task) of the context per se. - */ - if (is_system && ctx->ctx_cpu != smp_processor_id()) { - DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); - return -EBUSY; - } - - /* - * in system mode, we need to update the PMU directly - * and the user level state of the caller, which may not - * necessarily be the creator of the context. 
- */ - if (is_system) { - - /* - * set user level psr.pp for the caller - */ - ia64_psr(regs)->pp = 1; - - /* - * now update the local PMU and cpuinfo - */ - PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP); - - /* - * start monitoring at kernel level - */ - pfm_set_psr_pp(); - - /* enable dcr pp */ - ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); - ia64_srlz_i(); - - return 0; - } - - /* - * per-process mode - */ - - if (ctx->ctx_task == current) { - - /* start monitoring at kernel level */ - pfm_set_psr_up(); - - /* - * activate monitoring at user level - */ - ia64_psr(regs)->up = 1; - - } else { - tregs = task_pt_regs(ctx->ctx_task); - - /* - * start monitoring at the kernel level the next - * time the task is scheduled - */ - ctx->ctx_saved_psr_up = IA64_PSR_UP; - - /* - * activate monitoring at user level - */ - ia64_psr(tregs)->up = 1; - } - return 0; -} - -static int -pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - pfarg_reg_t *req = (pfarg_reg_t *)arg; - unsigned int cnum; - int i; - int ret = -EINVAL; - - for (i = 0; i < count; i++, req++) { - - cnum = req->reg_num; - - if (!PMC_IS_IMPL(cnum)) goto abort_mission; - - req->reg_value = PMC_DFL_VAL(cnum); - - PFM_REG_RETFLAG_SET(req->reg_flags, 0); - - DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value)); - } - return 0; - -abort_mission: - PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); - return ret; -} - -static int -pfm_check_task_exist(pfm_context_t *ctx) -{ - struct task_struct *g, *t; - int ret = -ESRCH; - - read_lock(&tasklist_lock); - - do_each_thread (g, t) { - if (t->thread.pfm_context == ctx) { - ret = 0; - goto out; - } - } while_each_thread (g, t); -out: - read_unlock(&tasklist_lock); - - DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); - - return ret; -} - -static int -pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct task_struct *task; - struct thread_struct *thread; - struct pfm_context_t *old; - unsigned long flags; -#ifndef CONFIG_SMP - struct task_struct *owner_task = NULL; -#endif - pfarg_load_t *req = (pfarg_load_t *)arg; - unsigned long *pmcs_source, *pmds_source; - int the_cpu; - int ret = 0; - int state, is_system, set_dbregs = 0; - - state = ctx->ctx_state; - is_system = ctx->ctx_fl_system; - /* - * can only load from unloaded or terminated state - */ - if (state != PFM_CTX_UNLOADED) { - DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", - req->load_pid, - ctx->ctx_state)); - return -EBUSY; - } - - DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); - - if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) { - DPRINT(("cannot use blocking mode on self\n")); - return -EINVAL; - } - - ret = pfm_get_task(ctx, req->load_pid, &task); - if (ret) { - DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret)); - return ret; - } - - ret = -EINVAL; - - /* - * system wide is self monitoring only - */ - if (is_system && task != current) { - DPRINT(("system wide is self monitoring only load_pid=%d\n", - req->load_pid)); - goto error; - } - - thread = &task->thread; - - ret = 0; - /* - * cannot load a context which is using range restrictions, - * into a task that is being debugged. 
- */ - if (ctx->ctx_fl_using_dbreg) { - if (thread->flags & IA64_THREAD_DBG_VALID) { - ret = -EBUSY; - DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid)); - goto error; - } - LOCK_PFS(flags); - - if (is_system) { - if (pfm_sessions.pfs_ptrace_use_dbregs) { - DPRINT(("cannot load [%d] dbregs in use\n", - task_pid_nr(task))); - ret = -EBUSY; - } else { - pfm_sessions.pfs_sys_use_dbregs++; - DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs)); - set_dbregs = 1; - } - } - - UNLOCK_PFS(flags); - - if (ret) goto error; - } - - /* - * SMP system-wide monitoring implies self-monitoring. - * - * The programming model expects the task to - * be pinned on a CPU throughout the session. - * Here we take note of the current CPU at the - * time the context is loaded. No call from - * another CPU will be allowed. - * - * The pinning via shed_setaffinity() - * must be done by the calling task prior - * to this call. - * - * systemwide: keep track of CPU this session is supposed to run on - */ - the_cpu = ctx->ctx_cpu = smp_processor_id(); - - ret = -EBUSY; - /* - * now reserve the session - */ - ret = pfm_reserve_session(current, is_system, the_cpu); - if (ret) goto error; - - /* - * task is necessarily stopped at this point. - * - * If the previous context was zombie, then it got removed in - * pfm_save_regs(). Therefore we should not see it here. - * If we see a context, then this is an active context - * - * XXX: needs to be atomic - */ - DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n", - thread->pfm_context, ctx)); - - ret = -EBUSY; - old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); - if (old != NULL) { - DPRINT(("load_pid [%d] already has a context\n", req->load_pid)); - goto error_unres; - } - - pfm_reset_msgq(ctx); - - ctx->ctx_state = PFM_CTX_LOADED; - - /* - * link context to task - */ - ctx->ctx_task = task; - - if (is_system) { - /* - * we load as stopped - */ - PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE); - PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); - - if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE); - } else { - thread->flags |= IA64_THREAD_PM_VALID; - } - - /* - * propagate into thread-state - */ - pfm_copy_pmds(task, ctx); - pfm_copy_pmcs(task, ctx); - - pmcs_source = ctx->th_pmcs; - pmds_source = ctx->th_pmds; - - /* - * always the case for system-wide - */ - if (task == current) { - - if (is_system == 0) { - - /* allow user level control */ - ia64_psr(regs)->sp = 0; - DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task))); - - SET_LAST_CPU(ctx, smp_processor_id()); - INC_ACTIVATION(); - SET_ACTIVATION(ctx); -#ifndef CONFIG_SMP - /* - * push the other task out, if any - */ - owner_task = GET_PMU_OWNER(); - if (owner_task) pfm_lazy_save_regs(owner_task); -#endif - } - /* - * load all PMD from ctx to PMU (as opposed to thread state) - * restore all PMC from ctx to PMU - */ - pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]); - pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]); - - ctx->ctx_reload_pmcs[0] = 0UL; - ctx->ctx_reload_pmds[0] = 0UL; - - /* - * guaranteed safe by earlier check against DBG_VALID - */ - if (ctx->ctx_fl_using_dbreg) { - pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); - pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); - } - /* - * set new ownership - */ - SET_PMU_OWNER(task, ctx); - - DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task))); - } else { - /* - * when not current, task MUST be stopped, so this is 
safe - */ - regs = task_pt_regs(task); - - /* force a full reload */ - ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; - SET_LAST_CPU(ctx, -1); - - /* initial saved psr (stopped) */ - ctx->ctx_saved_psr_up = 0UL; - ia64_psr(regs)->up = ia64_psr(regs)->pp = 0; - } - - ret = 0; - -error_unres: - if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu); -error: - /* - * we must undo the dbregs setting (for system-wide) - */ - if (ret && set_dbregs) { - LOCK_PFS(flags); - pfm_sessions.pfs_sys_use_dbregs--; - UNLOCK_PFS(flags); - } - /* - * release task, there is now a link with the context - */ - if (is_system == 0 && task != current) { - pfm_put_task(task); - - if (ret == 0) { - ret = pfm_check_task_exist(ctx); - if (ret) { - ctx->ctx_state = PFM_CTX_UNLOADED; - ctx->ctx_task = NULL; - } - } - } - return ret; -} - -/* - * in this function, we do not need to increase the use count - * for the task via get_task_struct(), because we hold the - * context lock. If the task were to disappear while having - * a context attached, it would go through pfm_exit_thread() - * which also grabs the context lock and would therefore be blocked - * until we are here. - */ -static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx); - -static int -pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) -{ - struct task_struct *task = PFM_CTX_TASK(ctx); - struct pt_regs *tregs; - int prev_state, is_system; - int ret; - - DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1)); - - prev_state = ctx->ctx_state; - is_system = ctx->ctx_fl_system; - - /* - * unload only when necessary - */ - if (prev_state == PFM_CTX_UNLOADED) { - DPRINT(("ctx_state=%d, nothing to do\n", prev_state)); - return 0; - } - - /* - * clear psr and dcr bits - */ - ret = pfm_stop(ctx, NULL, 0, regs); - if (ret) return ret; - - ctx->ctx_state = PFM_CTX_UNLOADED; - - /* - * in system mode, we need to update the PMU directly - * and the user level state of the caller, which may not - * necessarily be the creator of the context. - */ - if (is_system) { - - /* - * Update cpuinfo - * - * local PMU is taken care of in pfm_stop() - */ - PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE); - PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE); - - /* - * save PMDs in context - * release ownership - */ - pfm_flush_pmds(current, ctx); - - /* - * at this point we are done with the PMU - * so we can unreserve the resource. - */ - if (prev_state != PFM_CTX_ZOMBIE) - pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu); - - /* - * disconnect context from task - */ - task->thread.pfm_context = NULL; - /* - * disconnect task from context - */ - ctx->ctx_task = NULL; - - /* - * There is nothing more to cleanup here. - */ - return 0; - } - - /* - * per-task mode - */ - tregs = task == current ? regs : task_pt_regs(task); - - if (task == current) { - /* - * cancel user level control - */ - ia64_psr(regs)->sp = 1; - - DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task))); - } - /* - * save PMDs to context - * release ownership - */ - pfm_flush_pmds(task, ctx); - - /* - * at this point we are done with the PMU - * so we can unreserve the resource. - * - * when state was ZOMBIE, we have already unreserved. 
- */ - if (prev_state != PFM_CTX_ZOMBIE) - pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu); - - /* - * reset activation counter and psr - */ - ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; - SET_LAST_CPU(ctx, -1); - - /* - * PMU state will not be restored - */ - task->thread.flags &= ~IA64_THREAD_PM_VALID; - - /* - * break links between context and task - */ - task->thread.pfm_context = NULL; - ctx->ctx_task = NULL; - - PFM_SET_WORK_PENDING(task, 0); - - ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; - ctx->ctx_fl_can_restart = 0; - ctx->ctx_fl_going_zombie = 0; - - DPRINT(("disconnected [%d] from context\n", task_pid_nr(task))); - - return 0; -} - - -/* - * called only from exit_thread() - * we come here only if the task has a context attached (loaded or masked) - */ -void -pfm_exit_thread(struct task_struct *task) -{ - pfm_context_t *ctx; - unsigned long flags; - struct pt_regs *regs = task_pt_regs(task); - int ret, state; - int free_ok = 0; - - ctx = PFM_GET_CTX(task); - - PROTECT_CTX(ctx, flags); - - DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task))); - - state = ctx->ctx_state; - switch(state) { - case PFM_CTX_UNLOADED: - /* - * only comes to this function if pfm_context is not NULL, i.e., cannot - * be in unloaded state - */ - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task)); - break; - case PFM_CTX_LOADED: - case PFM_CTX_MASKED: - ret = pfm_context_unload(ctx, NULL, 0, regs); - if (ret) { - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); - } - DPRINT(("ctx unloaded for current state was %d\n", state)); - - pfm_end_notify_user(ctx); - break; - case PFM_CTX_ZOMBIE: - ret = pfm_context_unload(ctx, NULL, 0, regs); - if (ret) { - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret); - } - free_ok = 1; - break; - default: - printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state); - break; - } - UNPROTECT_CTX(ctx, flags); - - { u64 psr = pfm_get_psr(); - BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); - BUG_ON(GET_PMU_OWNER()); - BUG_ON(ia64_psr(regs)->up); - BUG_ON(ia64_psr(regs)->pp); - } - - /* - * All memory free operations (especially for vmalloc'ed memory) - * MUST be done with interrupts ENABLED. 
- */ - if (free_ok) pfm_context_free(ctx); -} - -/* - * functions MUST be listed in the increasing order of their index (see permfon.h) - */ -#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz } -#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL } -#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP) -#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW) -#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL} - -static pfm_cmd_desc_t pfm_cmd_tab[]={ -/* 0 */PFM_CMD_NONE, -/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), -/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), -/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), -/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS), -/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS), -/* 6 */PFM_CMD_NONE, -/* 7 */PFM_CMD_NONE, -/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize), -/* 9 */PFM_CMD_NONE, -/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW), -/* 11 */PFM_CMD_NONE, -/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL), -/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL), -/* 14 */PFM_CMD_NONE, -/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), -/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL), -/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS), -/* 18 */PFM_CMD_NONE, -/* 19 */PFM_CMD_NONE, -/* 20 */PFM_CMD_NONE, -/* 21 */PFM_CMD_NONE, -/* 22 */PFM_CMD_NONE, -/* 23 */PFM_CMD_NONE, -/* 24 */PFM_CMD_NONE, -/* 25 */PFM_CMD_NONE, -/* 26 */PFM_CMD_NONE, -/* 27 */PFM_CMD_NONE, -/* 28 */PFM_CMD_NONE, -/* 29 */PFM_CMD_NONE, -/* 30 */PFM_CMD_NONE, -/* 31 */PFM_CMD_NONE, -/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL), -/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL) -}; -#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t)) - -static int -pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags) -{ - struct task_struct *task; - int state, old_state; - -recheck: - state = ctx->ctx_state; - task = ctx->ctx_task; - - if (task == NULL) { - DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state)); - return 0; - } - - DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n", - ctx->ctx_fd, - state, - task_pid_nr(task), - task->state, PFM_CMD_STOPPED(cmd))); - - /* - * self-monitoring always ok. - * - * for system-wide the caller can either be the creator of the - * context (to one to which the context is attached to) OR - * a task running on the same CPU as the session. - */ - if (task == current || ctx->ctx_fl_system) return 0; - - /* - * we are monitoring another thread - */ - switch(state) { - case PFM_CTX_UNLOADED: - /* - * if context is UNLOADED we are safe to go - */ - return 0; - case PFM_CTX_ZOMBIE: - /* - * no command can operate on a zombie context - */ - DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); - return -EINVAL; - case PFM_CTX_MASKED: - /* - * PMU state has been saved to software even though - * the thread may still be running. - */ - if (cmd != PFM_UNLOAD_CONTEXT) return 0; - } - - /* - * context is LOADED or MASKED. Some commands may need to have - * the task stopped. 
- * - * We could lift this restriction for UP but it would mean that - * the user has no guarantee the task would not run between - * two successive calls to perfmonctl(). That's probably OK. - * If this user wants to ensure the task does not run, then - * the task must be stopped. - */ - if (PFM_CMD_STOPPED(cmd)) { - if (!task_is_stopped_or_traced(task)) { - DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task))); - return -EBUSY; - } - /* - * task is now stopped, wait for ctxsw out - * - * This is an interesting point in the code. - * We need to unprotect the context because - * the pfm_save_regs() routines needs to grab - * the same lock. There are danger in doing - * this because it leaves a window open for - * another task to get access to the context - * and possibly change its state. The one thing - * that is not possible is for the context to disappear - * because we are protected by the VFS layer, i.e., - * get_fd()/put_fd(). - */ - old_state = state; - - UNPROTECT_CTX(ctx, flags); - - wait_task_inactive(task, 0); - - PROTECT_CTX(ctx, flags); - - /* - * we must recheck to verify if state has changed - */ - if (ctx->ctx_state != old_state) { - DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state)); - goto recheck; - } - } - return 0; -} - -/* - * system-call entry point (must return long) - */ -asmlinkage long -sys_perfmonctl (int fd, int cmd, void __user *arg, int count) -{ - struct fd f = {NULL, 0}; - pfm_context_t *ctx = NULL; - unsigned long flags = 0UL; - void *args_k = NULL; - long ret; /* will expand int return types */ - size_t base_sz, sz, xtra_sz = 0; - int narg, completed_args = 0, call_made = 0, cmd_flags; - int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); - int (*getsize)(void *arg, size_t *sz); -#define PFM_MAX_ARGSIZE 4096 - - /* - * reject any call if perfmon was disabled at initialization - */ - if (unlikely(pmu_conf == NULL)) return -ENOSYS; - - if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) { - DPRINT(("invalid cmd=%d\n", cmd)); - return -EINVAL; - } - - func = pfm_cmd_tab[cmd].cmd_func; - narg = pfm_cmd_tab[cmd].cmd_narg; - base_sz = pfm_cmd_tab[cmd].cmd_argsize; - getsize = pfm_cmd_tab[cmd].cmd_getsize; - cmd_flags = pfm_cmd_tab[cmd].cmd_flags; - - if (unlikely(func == NULL)) { - DPRINT(("invalid cmd=%d\n", cmd)); - return -EINVAL; - } - - DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n", - PFM_CMD_NAME(cmd), - cmd, - narg, - base_sz, - count)); - - /* - * check if number of arguments matches what the command expects - */ - if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count))) - return -EINVAL; - -restart_args: - sz = xtra_sz + base_sz*count; - /* - * limit abuse to min page size - */ - if (unlikely(sz > PFM_MAX_ARGSIZE)) { - printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz); - return -E2BIG; - } - - /* - * allocate default-sized argument buffer - */ - if (likely(count && args_k == NULL)) { - args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL); - if (args_k == NULL) return -ENOMEM; - } - - ret = -EFAULT; - - /* - * copy arguments - * - * assume sz = 0 for command without parameters - */ - if (sz && copy_from_user(args_k, arg, sz)) { - DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg)); - goto error_args; - } - - /* - * check if command supports extra parameters - */ - if (completed_args == 0 && getsize) { - /* - * get extra parameters size (based on main argument) - */ - ret = (*getsize)(args_k, &xtra_sz); - if (ret) goto error_args; - - 
completed_args = 1; - - DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz)); - - /* retry if necessary */ - if (likely(xtra_sz)) goto restart_args; - } - - if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd; - - ret = -EBADF; - - f = fdget(fd); - if (unlikely(f.file == NULL)) { - DPRINT(("invalid fd %d\n", fd)); - goto error_args; - } - if (unlikely(PFM_IS_FILE(f.file) == 0)) { - DPRINT(("fd %d not related to perfmon\n", fd)); - goto error_args; - } - - ctx = f.file->private_data; - if (unlikely(ctx == NULL)) { - DPRINT(("no context for fd %d\n", fd)); - goto error_args; - } - prefetch(&ctx->ctx_state); - - PROTECT_CTX(ctx, flags); - - /* - * check task is stopped - */ - ret = pfm_check_task_state(ctx, cmd, flags); - if (unlikely(ret)) goto abort_locked; - -skip_fd: - ret = (*func)(ctx, args_k, count, task_pt_regs(current)); - - call_made = 1; - -abort_locked: - if (likely(ctx)) { - DPRINT(("context unlocked\n")); - UNPROTECT_CTX(ctx, flags); - } - - /* copy argument back to user, if needed */ - if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT; - -error_args: - if (f.file) - fdput(f); - - kfree(args_k); - - DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret)); - - return ret; -} - -static void -pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs) -{ - pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt; - pfm_ovfl_ctrl_t rst_ctrl; - int state; - int ret = 0; - - state = ctx->ctx_state; - /* - * Unlock sampling buffer and reset index atomically - * XXX: not really needed when blocking - */ - if (CTX_HAS_SMPL(ctx)) { - - rst_ctrl.bits.mask_monitoring = 0; - rst_ctrl.bits.reset_ovfl_pmds = 0; - - if (state == PFM_CTX_LOADED) - ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); - else - ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); - } else { - rst_ctrl.bits.mask_monitoring = 0; - rst_ctrl.bits.reset_ovfl_pmds = 1; - } - - if (ret == 0) { - if (rst_ctrl.bits.reset_ovfl_pmds) { - pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET); - } - if (rst_ctrl.bits.mask_monitoring == 0) { - DPRINT(("resuming monitoring\n")); - if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current); - } else { - DPRINT(("stopping monitoring\n")); - //pfm_stop_monitoring(current, regs); - } - ctx->ctx_state = PFM_CTX_LOADED; - } -} - -/* - * context MUST BE LOCKED when calling - * can only be called for current - */ -static void -pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) -{ - int ret; - - DPRINT(("entering for [%d]\n", task_pid_nr(current))); - - ret = pfm_context_unload(ctx, NULL, 0, regs); - if (ret) { - printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret); - } - - /* - * and wakeup controlling task, indicating we are now disconnected - */ - wake_up_interruptible(&ctx->ctx_zombieq); - - /* - * given that context is still locked, the controlling - * task will only get access when we return from - * pfm_handle_work(). - */ -} - -static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); - - /* - * pfm_handle_work() can be called with interrupts enabled - * (TIF_NEED_RESCHED) or disabled. The down_interruptible - * call may sleep, therefore we must re-enable interrupts - * to avoid deadlocks. 
It is safe to do so because this function - * is called ONLY when returning to user level (pUStk=1), in which case - * there is no risk of kernel stack overflow due to deep - * interrupt nesting. - */ -void -pfm_handle_work(void) -{ - pfm_context_t *ctx; - struct pt_regs *regs; - unsigned long flags, dummy_flags; - unsigned long ovfl_regs; - unsigned int reason; - int ret; - - ctx = PFM_GET_CTX(current); - if (ctx == NULL) { - printk(KERN_ERR "perfmon: [%d] has no PFM context\n", - task_pid_nr(current)); - return; - } - - PROTECT_CTX(ctx, flags); - - PFM_SET_WORK_PENDING(current, 0); - - regs = task_pt_regs(current); - - /* - * extract reason for being here and clear - */ - reason = ctx->ctx_fl_trap_reason; - ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; - ovfl_regs = ctx->ctx_ovfl_regs[0]; - - DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state)); - - /* - * must be done before we check for simple-reset mode - */ - if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) - goto do_zombie; - - //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; - if (reason == PFM_TRAP_REASON_RESET) - goto skip_blocking; - - /* - * restore interrupt mask to what it was on entry. - * Could be enabled/diasbled. - */ - UNPROTECT_CTX(ctx, flags); - - /* - * force interrupt enable because of down_interruptible() - */ - local_irq_enable(); - - DPRINT(("before block sleeping\n")); - - /* - * may go through without blocking on SMP systems - * if restart has been received already by the time we call down() - */ - ret = wait_for_completion_interruptible(&ctx->ctx_restart_done); - - DPRINT(("after block sleeping ret=%d\n", ret)); - - /* - * lock context and mask interrupts again - * We save flags into a dummy because we may have - * altered interrupts mask compared to entry in this - * function. - */ - PROTECT_CTX(ctx, dummy_flags); - - /* - * we need to read the ovfl_regs only after wake-up - * because we may have had pfm_write_pmds() in between - * and that can changed PMD values and therefore - * ovfl_regs is reset for these new PMD values. 
- */ - ovfl_regs = ctx->ctx_ovfl_regs[0]; - - if (ctx->ctx_fl_going_zombie) { -do_zombie: - DPRINT(("context is zombie, bailing out\n")); - pfm_context_force_terminate(ctx, regs); - goto nothing_to_do; - } - /* - * in case of interruption of down() we don't restart anything - */ - if (ret < 0) - goto nothing_to_do; - -skip_blocking: - pfm_resume_after_ovfl(ctx, ovfl_regs, regs); - ctx->ctx_ovfl_regs[0] = 0UL; - -nothing_to_do: - /* - * restore flags as they were upon entry - */ - UNPROTECT_CTX(ctx, flags); -} - -static int -pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg) -{ - if (ctx->ctx_state == PFM_CTX_ZOMBIE) { - DPRINT(("ignoring overflow notification, owner is zombie\n")); - return 0; - } - - DPRINT(("waking up somebody\n")); - - if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait); - - /* - * safe, we are not in intr handler, nor in ctxsw when - * we come here - */ - kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN); - - return 0; -} - -static int -pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) -{ - pfm_msg_t *msg = NULL; - - if (ctx->ctx_fl_no_msg == 0) { - msg = pfm_get_new_msg(ctx); - if (msg == NULL) { - printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n"); - return -1; - } - - msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL; - msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd; - msg->pfm_ovfl_msg.msg_active_set = 0; - msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds; - msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL; - msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL; - msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL; - msg->pfm_ovfl_msg.msg_tstamp = 0UL; - } - - DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n", - msg, - ctx->ctx_fl_no_msg, - ctx->ctx_fd, - ovfl_pmds)); - - return pfm_notify_user(ctx, msg); -} - -static int -pfm_end_notify_user(pfm_context_t *ctx) -{ - pfm_msg_t *msg; - - msg = pfm_get_new_msg(ctx); - if (msg == NULL) { - printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n"); - return -1; - } - /* no leak */ - memset(msg, 0, sizeof(*msg)); - - msg->pfm_end_msg.msg_type = PFM_MSG_END; - msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd; - msg->pfm_ovfl_msg.msg_tstamp = 0UL; - - DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n", - msg, - ctx->ctx_fl_no_msg, - ctx->ctx_fd)); - - return pfm_notify_user(ctx, msg); -} - -/* - * main overflow processing routine. - * it can be called from the interrupt path or explicitly during the context switch code - */ -static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, - unsigned long pmc0, struct pt_regs *regs) -{ - pfm_ovfl_arg_t *ovfl_arg; - unsigned long mask; - unsigned long old_val, ovfl_val, new_val; - unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds; - unsigned long tstamp; - pfm_ovfl_ctrl_t ovfl_ctrl; - unsigned int i, has_smpl; - int must_notify = 0; - - if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring; - - /* - * sanity test. Should never happen - */ - if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check; - - tstamp = ia64_get_itc(); - mask = pmc0 >> PMU_FIRST_COUNTER; - ovfl_val = pmu_conf->ovfl_val; - has_smpl = CTX_HAS_SMPL(ctx); - - DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " - "used_pmds=0x%lx\n", - pmc0, - task ? task_pid_nr(task): -1, - (regs ? regs->cr_iip : 0), - CTX_OVFL_NOBLOCK(ctx) ? 
"nonblocking" : "blocking", - ctx->ctx_used_pmds[0])); - - - /* - * first we update the virtual counters - * assume there was a prior ia64_srlz_d() issued - */ - for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) { - - /* skip pmd which did not overflow */ - if ((mask & 0x1) == 0) continue; - - /* - * Note that the pmd is not necessarily 0 at this point as qualified events - * may have happened before the PMU was frozen. The residual count is not - * taken into consideration here but will be with any read of the pmd via - * pfm_read_pmds(). - */ - old_val = new_val = ctx->ctx_pmds[i].val; - new_val += 1 + ovfl_val; - ctx->ctx_pmds[i].val = new_val; - - /* - * check for overflow condition - */ - if (likely(old_val > new_val)) { - ovfl_pmds |= 1UL << i; - if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i; - } - - DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n", - i, - new_val, - old_val, - ia64_get_pmd(i) & ovfl_val, - ovfl_pmds, - ovfl_notify)); - } - - /* - * there was no 64-bit overflow, nothing else to do - */ - if (ovfl_pmds == 0UL) return; - - /* - * reset all control bits - */ - ovfl_ctrl.val = 0; - reset_pmds = 0UL; - - /* - * if a sampling format module exists, then we "cache" the overflow by - * calling the module's handler() routine. - */ - if (has_smpl) { - unsigned long start_cycles, end_cycles; - unsigned long pmd_mask; - int j, k, ret = 0; - int this_cpu = smp_processor_id(); - - pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER; - ovfl_arg = &ctx->ctx_ovfl_arg; - - prefetch(ctx->ctx_smpl_hdr); - - for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) { - - mask = 1UL << i; - - if ((pmd_mask & 0x1) == 0) continue; - - ovfl_arg->ovfl_pmd = (unsigned char )i; - ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0; - ovfl_arg->active_set = 0; - ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */ - ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0]; - - ovfl_arg->pmd_value = ctx->ctx_pmds[i].val; - ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval; - ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid; - - /* - * copy values of pmds of interest. Sampling format may copy them - * into sampling buffer. - */ - if (smpl_pmds) { - for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) { - if ((smpl_pmds & 0x1) == 0) continue; - ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j); - DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1])); - } - } - - pfm_stats[this_cpu].pfm_smpl_handler_calls++; - - start_cycles = ia64_get_itc(); - - /* - * call custom buffer format record (handler) routine - */ - ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp); - - end_cycles = ia64_get_itc(); - - /* - * For those controls, we take the union because they have - * an all or nothing behavior. 
- */ - ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user; - ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task; - ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring; - /* - * build the bitmask of pmds to reset now - */ - if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask; - - pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles; - } - /* - * when the module cannot handle the rest of the overflows, we abort right here - */ - if (ret && pmd_mask) { - DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n", - pmd_mask<<PMU_FIRST_COUNTER)); - } - /* - * remove the pmds we reset now from the set of pmds to reset in pfm_restart() - */ - ovfl_pmds &= ~reset_pmds; - } else { - /* - * when no sampling module is used, then the default - * is to notify on overflow if requested by user - */ - ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0; - ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0; - ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */ - ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1; - /* - * if needed, we reset all overflowed pmds - */ - if (ovfl_notify == 0) reset_pmds = ovfl_pmds; - } - - DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds)); - - /* - * reset the requested PMD registers using the short reset values - */ - if (reset_pmds) { - unsigned long bm = reset_pmds; - pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET); - } - - if (ovfl_notify && ovfl_ctrl.bits.notify_user) { - /* - * keep track of what to reset when unblocking - */ - ctx->ctx_ovfl_regs[0] = ovfl_pmds; - - /* - * check for blocking context - */ - if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) { - - ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK; - - /* - * set the perfmon specific checking pending work for the task - */ - PFM_SET_WORK_PENDING(task, 1); - - /* - * when coming from ctxsw, current still points to the - * previous task, therefore we must work with task and not current. - */ - set_notify_resume(task); - } - /* - * defer until state is changed (shorten spin window). the context is locked - * anyway, so the signal receiver would come spin for nothing. - */ - must_notify = 1; - } - - DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", - GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1, - PFM_GET_WORK_PENDING(task), - ctx->ctx_fl_trap_reason, - ovfl_pmds, - ovfl_notify, - ovfl_ctrl.bits.mask_monitoring ? 1 : 0)); - /* - * in case monitoring must be stopped, we toggle the psr bits - */ - if (ovfl_ctrl.bits.mask_monitoring) { - pfm_mask_monitoring(task); - ctx->ctx_state = PFM_CTX_MASKED; - ctx->ctx_fl_can_restart = 1; - } - - /* - * send notification now - */ - if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify); - - return; - -sanity_check: - printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", - smp_processor_id(), - task ? task_pid_nr(task) : -1, - pmc0); - return; - -stop_monitoring: - /* - * in SMP, zombie context is never restored but reclaimed in pfm_load_regs(). - * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can - * come here as zombie only if the task is the current task. In which case, we - * can access the PMU hardware directly. - * - * Note that zombies do have PM_VALID set. So here we do the minimal. - * - * In case the context was zombified it could not be reclaimed at the time - * the monitoring program exited. 
At this point, the PMU reservation has been - * returned, the sampiing buffer has been freed. We must convert this call - * into a spurious interrupt. However, we must also avoid infinite overflows - * by stopping monitoring for this task. We can only come here for a per-task - * context. All we need to do is to stop monitoring using the psr bits which - * are always task private. By re-enabling secure montioring, we ensure that - * the monitored task will not be able to re-activate monitoring. - * The task will eventually be context switched out, at which point the context - * will be reclaimed (that includes releasing ownership of the PMU). - * - * So there might be a window of time where the number of per-task session is zero - * yet one PMU might have a owner and get at most one overflow interrupt for a zombie - * context. This is safe because if a per-task session comes in, it will push this one - * out and by the virtue on pfm_save_regs(), this one will disappear. If a system wide - * session is force on that CPU, given that we use task pinning, pfm_save_regs() will - * also push our zombie context out. - * - * Overall pretty hairy stuff.... - */ - DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1)); - pfm_clear_psr_up(); - ia64_psr(regs)->up = 0; - ia64_psr(regs)->sp = 1; - return; -} - -static int -pfm_do_interrupt_handler(void *arg, struct pt_regs *regs) -{ - struct task_struct *task; - pfm_context_t *ctx; - unsigned long flags; - u64 pmc0; - int this_cpu = smp_processor_id(); - int retval = 0; - - pfm_stats[this_cpu].pfm_ovfl_intr_count++; - - /* - * srlz.d done before arriving here - */ - pmc0 = ia64_get_pmc(0); - - task = GET_PMU_OWNER(); - ctx = GET_PMU_CTX(); - - /* - * if we have some pending bits set - * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1 - */ - if (PMC0_HAS_OVFL(pmc0) && task) { - /* - * we assume that pmc0.fr is always set here - */ - - /* sanity check */ - if (!ctx) goto report_spurious1; - - if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) - goto report_spurious2; - - PROTECT_CTX_NOPRINT(ctx, flags); - - pfm_overflow_handler(task, ctx, pmc0, regs); - - UNPROTECT_CTX_NOPRINT(ctx, flags); - - } else { - pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++; - retval = -1; - } - /* - * keep it unfrozen at all times - */ - pfm_unfreeze_pmu(); - - return retval; - -report_spurious1: - printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n", - this_cpu, task_pid_nr(task)); - pfm_unfreeze_pmu(); - return -1; -report_spurious2: - printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", - this_cpu, - task_pid_nr(task)); - pfm_unfreeze_pmu(); - return -1; -} - -static irqreturn_t -pfm_interrupt_handler(int irq, void *arg) -{ - unsigned long start_cycles, total_cycles; - unsigned long min, max; - int this_cpu; - int ret; - struct pt_regs *regs = get_irq_regs(); - - this_cpu = get_cpu(); - if (likely(!pfm_alt_intr_handler)) { - min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min; - max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max; - - start_cycles = ia64_get_itc(); - - ret = pfm_do_interrupt_handler(arg, regs); - - total_cycles = ia64_get_itc(); - - /* - * don't measure spurious interrupts - */ - if (likely(ret == 0)) { - total_cycles -= start_cycles; - - if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles; - if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = 
total_cycles; - - pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles; - } - } - else { - (*pfm_alt_intr_handler->handler)(irq, arg, regs); - } - - put_cpu(); - return IRQ_HANDLED; -} - -/* - * /proc/perfmon interface, for debug only - */ - -#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1) - -static void * -pfm_proc_start(struct seq_file *m, loff_t *pos) -{ - if (*pos == 0) { - return PFM_PROC_SHOW_HEADER; - } - - while (*pos <= nr_cpu_ids) { - if (cpu_online(*pos - 1)) { - return (void *)*pos; - } - ++*pos; - } - return NULL; -} - -static void * -pfm_proc_next(struct seq_file *m, void *v, loff_t *pos) -{ - ++*pos; - return pfm_proc_start(m, pos); -} - -static void -pfm_proc_stop(struct seq_file *m, void *v) -{ -} - -static void -pfm_proc_show_header(struct seq_file *m) -{ - struct list_head * pos; - pfm_buffer_fmt_t * entry; - unsigned long flags; - - seq_printf(m, - "perfmon version : %u.%u\n" - "model : %s\n" - "fastctxsw : %s\n" - "expert mode : %s\n" - "ovfl_mask : 0x%lx\n" - "PMU flags : 0x%x\n", - PFM_VERSION_MAJ, PFM_VERSION_MIN, - pmu_conf->pmu_name, - pfm_sysctl.fastctxsw > 0 ? "Yes": "No", - pfm_sysctl.expert_mode > 0 ? "Yes": "No", - pmu_conf->ovfl_val, - pmu_conf->flags); - - LOCK_PFS(flags); - - seq_printf(m, - "proc_sessions : %u\n" - "sys_sessions : %u\n" - "sys_use_dbregs : %u\n" - "ptrace_use_dbregs : %u\n", - pfm_sessions.pfs_task_sessions, - pfm_sessions.pfs_sys_sessions, - pfm_sessions.pfs_sys_use_dbregs, - pfm_sessions.pfs_ptrace_use_dbregs); - - UNLOCK_PFS(flags); - - spin_lock(&pfm_buffer_fmt_lock); - - list_for_each(pos, &pfm_buffer_fmt_list) { - entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); - seq_printf(m, "format : %16phD %s\n", - entry->fmt_uuid, entry->fmt_name); - } - spin_unlock(&pfm_buffer_fmt_lock); - -} - -static int -pfm_proc_show(struct seq_file *m, void *v) -{ - unsigned long psr; - unsigned int i; - int cpu; - - if (v == PFM_PROC_SHOW_HEADER) { - pfm_proc_show_header(m); - return 0; - } - - /* show info for CPU (v - 1) */ - - cpu = (long)v - 1; - seq_printf(m, - "CPU%-2d overflow intrs : %lu\n" - "CPU%-2d overflow cycles : %lu\n" - "CPU%-2d overflow min : %lu\n" - "CPU%-2d overflow max : %lu\n" - "CPU%-2d smpl handler calls : %lu\n" - "CPU%-2d smpl handler cycles : %lu\n" - "CPU%-2d spurious intrs : %lu\n" - "CPU%-2d replay intrs : %lu\n" - "CPU%-2d syst_wide : %d\n" - "CPU%-2d dcr_pp : %d\n" - "CPU%-2d exclude idle : %d\n" - "CPU%-2d owner : %d\n" - "CPU%-2d context : %p\n" - "CPU%-2d activations : %lu\n", - cpu, pfm_stats[cpu].pfm_ovfl_intr_count, - cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles, - cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min, - cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max, - cpu, pfm_stats[cpu].pfm_smpl_handler_calls, - cpu, pfm_stats[cpu].pfm_smpl_handler_cycles, - cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count, - cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count, - cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0, - cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0, - cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0, - cpu, pfm_get_cpu_data(pmu_owner, cpu) ? 
pfm_get_cpu_data(pmu_owner, cpu)->pid: -1, - cpu, pfm_get_cpu_data(pmu_ctx, cpu), - cpu, pfm_get_cpu_data(pmu_activation_number, cpu)); - - if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) { - - psr = pfm_get_psr(); - - ia64_srlz_d(); - - seq_printf(m, - "CPU%-2d psr : 0x%lx\n" - "CPU%-2d pmc0 : 0x%lx\n", - cpu, psr, - cpu, ia64_get_pmc(0)); - - for (i=0; PMC_IS_LAST(i) == 0; i++) { - if (PMC_IS_COUNTING(i) == 0) continue; - seq_printf(m, - "CPU%-2d pmc%u : 0x%lx\n" - "CPU%-2d pmd%u : 0x%lx\n", - cpu, i, ia64_get_pmc(i), - cpu, i, ia64_get_pmd(i)); - } - } - return 0; -} - -const struct seq_operations pfm_seq_ops = { - .start = pfm_proc_start, - .next = pfm_proc_next, - .stop = pfm_proc_stop, - .show = pfm_proc_show -}; - -/* - * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens - * during pfm_enable() hence before pfm_start(). We cannot assume monitoring - * is active or inactive based on mode. We must rely on the value in - * local_cpu_data->pfm_syst_info - */ -void -pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin) -{ - struct pt_regs *regs; - unsigned long dcr; - unsigned long dcr_pp; - - dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0; - - /* - * pid 0 is guaranteed to be the idle task. There is one such task with pid 0 - * on every CPU, so we can rely on the pid to identify the idle task. - */ - if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) { - regs = task_pt_regs(task); - ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0; - return; - } - /* - * if monitoring has started - */ - if (dcr_pp) { - dcr = ia64_getreg(_IA64_REG_CR_DCR); - /* - * context switching in? - */ - if (is_ctxswin) { - /* mask monitoring for the idle task */ - ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP); - pfm_clear_psr_pp(); - ia64_srlz_i(); - return; - } - /* - * context switching out - * restore monitoring for next task - * - * Due to inlining this odd if-then-else construction generates - * better code. - */ - ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP); - pfm_set_psr_pp(); - ia64_srlz_i(); - } -} - -#ifdef CONFIG_SMP - -static void -pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs) -{ - struct task_struct *task = ctx->ctx_task; - - ia64_psr(regs)->up = 0; - ia64_psr(regs)->sp = 1; - - if (GET_PMU_OWNER() == task) { - DPRINT(("cleared ownership for [%d]\n", - task_pid_nr(ctx->ctx_task))); - SET_PMU_OWNER(NULL, NULL); - } - - /* - * disconnect the task from the context and vice-versa - */ - PFM_SET_WORK_PENDING(task, 0); - - task->thread.pfm_context = NULL; - task->thread.flags &= ~IA64_THREAD_PM_VALID; - - DPRINT(("force cleanup for [%d]\n", task_pid_nr(task))); -} - - -/* - * in 2.6, interrupts are masked when we come here and the runqueue lock is held - */ -void -pfm_save_regs(struct task_struct *task) -{ - pfm_context_t *ctx; - unsigned long flags; - u64 psr; - - - ctx = PFM_GET_CTX(task); - if (ctx == NULL) return; - - /* - * we always come here with interrupts ALREADY disabled by - * the scheduler. So we simply need to protect against concurrent - * access, not CPU concurrency. 
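The /proc/perfmon code above is a standard seq_file provider: start()/next() iterate the records (the header sentinel plus one entry per online CPU), show() formats one record, and proc_create_seq() registers the seq_operations. A minimal self-contained module using the same pattern could look like the sketch below; the /proc/pmu_demo name and everything in it are illustrative and not part of this patch.

/* Hypothetical seq_file example, mirroring the registration pattern above. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	/* single record: the SEQ_START_TOKEN sentinel at position 0 */
	return *pos == 0 ? SEQ_START_TOKEN : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;		/* nothing after the first record */
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from /proc/pmu_demo\n");
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int __init demo_init(void)
{
	/* same registration call the perfmon code above uses */
	return proc_create_seq("pmu_demo", 0444, NULL, &demo_seq_ops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("pmu_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");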
- */ - flags = pfm_protect_ctx_ctxsw(ctx); - - if (ctx->ctx_state == PFM_CTX_ZOMBIE) { - struct pt_regs *regs = task_pt_regs(task); - - pfm_clear_psr_up(); - - pfm_force_cleanup(ctx, regs); - - BUG_ON(ctx->ctx_smpl_hdr); - - pfm_unprotect_ctx_ctxsw(ctx, flags); - - pfm_context_free(ctx); - return; - } - - /* - * save current PSR: needed because we modify it - */ - ia64_srlz_d(); - psr = pfm_get_psr(); - - BUG_ON(psr & (IA64_PSR_I)); - - /* - * stop monitoring: - * This is the last instruction which may generate an overflow - * - * We do not need to set psr.sp because, it is irrelevant in kernel. - * It will be restored from ipsr when going back to user level - */ - pfm_clear_psr_up(); - - /* - * keep a copy of psr.up (for reload) - */ - ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; - - /* - * release ownership of this PMU. - * PM interrupts are masked, so nothing - * can happen. - */ - SET_PMU_OWNER(NULL, NULL); - - /* - * we systematically save the PMD as we have no - * guarantee we will be schedule at that same - * CPU again. - */ - pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); - - /* - * save pmc0 ia64_srlz_d() done in pfm_save_pmds() - * we will need it on the restore path to check - * for pending overflow. - */ - ctx->th_pmcs[0] = ia64_get_pmc(0); - - /* - * unfreeze PMU if had pending overflows - */ - if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); - - /* - * finally, allow context access. - * interrupts will still be masked after this call. - */ - pfm_unprotect_ctx_ctxsw(ctx, flags); -} - -#else /* !CONFIG_SMP */ -void -pfm_save_regs(struct task_struct *task) -{ - pfm_context_t *ctx; - u64 psr; - - ctx = PFM_GET_CTX(task); - if (ctx == NULL) return; - - /* - * save current PSR: needed because we modify it - */ - psr = pfm_get_psr(); - - BUG_ON(psr & (IA64_PSR_I)); - - /* - * stop monitoring: - * This is the last instruction which may generate an overflow - * - * We do not need to set psr.sp because, it is irrelevant in kernel. - * It will be restored from ipsr when going back to user level - */ - pfm_clear_psr_up(); - - /* - * keep a copy of psr.up (for reload) - */ - ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; -} - -static void -pfm_lazy_save_regs (struct task_struct *task) -{ - pfm_context_t *ctx; - unsigned long flags; - - { u64 psr = pfm_get_psr(); - BUG_ON(psr & IA64_PSR_UP); - } - - ctx = PFM_GET_CTX(task); - - /* - * we need to mask PMU overflow here to - * make sure that we maintain pmc0 until - * we save it. overflow interrupts are - * treated as spurious if there is no - * owner. - * - * XXX: I don't think this is necessary - */ - PROTECT_CTX(ctx,flags); - - /* - * release ownership of this PMU. - * must be done before we save the registers. - * - * after this call any PMU interrupt is treated - * as spurious. 
- */ - SET_PMU_OWNER(NULL, NULL); - - /* - * save all the pmds we use - */ - pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); - - /* - * save pmc0 ia64_srlz_d() done in pfm_save_pmds() - * it is needed to check for pended overflow - * on the restore path - */ - ctx->th_pmcs[0] = ia64_get_pmc(0); - - /* - * unfreeze PMU if had pending overflows - */ - if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); - - /* - * now get can unmask PMU interrupts, they will - * be treated as purely spurious and we will not - * lose any information - */ - UNPROTECT_CTX(ctx,flags); -} -#endif /* CONFIG_SMP */ - -#ifdef CONFIG_SMP -/* - * in 2.6, interrupts are masked when we come here and the runqueue lock is held - */ -void -pfm_load_regs (struct task_struct *task) -{ - pfm_context_t *ctx; - unsigned long pmc_mask = 0UL, pmd_mask = 0UL; - unsigned long flags; - u64 psr, psr_up; - int need_irq_resend; - - ctx = PFM_GET_CTX(task); - if (unlikely(ctx == NULL)) return; - - BUG_ON(GET_PMU_OWNER()); - - /* - * possible on unload - */ - if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return; - - /* - * we always come here with interrupts ALREADY disabled by - * the scheduler. So we simply need to protect against concurrent - * access, not CPU concurrency. - */ - flags = pfm_protect_ctx_ctxsw(ctx); - psr = pfm_get_psr(); - - need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; - - BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); - BUG_ON(psr & IA64_PSR_I); - - if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) { - struct pt_regs *regs = task_pt_regs(task); - - BUG_ON(ctx->ctx_smpl_hdr); - - pfm_force_cleanup(ctx, regs); - - pfm_unprotect_ctx_ctxsw(ctx, flags); - - /* - * this one (kmalloc'ed) is fine with interrupts disabled - */ - pfm_context_free(ctx); - - return; - } - - /* - * we restore ALL the debug registers to avoid picking up - * stale state. - */ - if (ctx->ctx_fl_using_dbreg) { - pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); - pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); - } - /* - * retrieve saved psr.up - */ - psr_up = ctx->ctx_saved_psr_up; - - /* - * if we were the last user of the PMU on that CPU, - * then nothing to do except restore psr - */ - if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) { - - /* - * retrieve partial reload masks (due to user modifications) - */ - pmc_mask = ctx->ctx_reload_pmcs[0]; - pmd_mask = ctx->ctx_reload_pmds[0]; - - } else { - /* - * To avoid leaking information to the user level when psr.sp=0, - * we must reload ALL implemented pmds (even the ones we don't use). - * In the kernel we only allow PFM_READ_PMDS on registers which - * we initialized or requested (sampling) so there is no risk there. - */ - pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; - - /* - * ALL accessible PMCs are systematically reloaded, unused registers - * get their default (from pfm_reset_pmu_state()) values to avoid picking - * up stale configuration. - * - * PMC0 is never in the mask. It is always restored separately. - */ - pmc_mask = ctx->ctx_all_pmcs[0]; - } - /* - * when context is MASKED, we will restore PMC with plm=0 - * and PMD with stale information, but that's ok, nothing - * will be captured. - * - * XXX: optimize here - */ - if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask); - if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); - - /* - * check for pending overflow at the time the state - * was saved. 
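The reload path above picks either the partial-reload masks (only registers changed since the last activation on this CPU) or, when the context has migrated, the mask of all implemented registers so that stale values cannot leak once psr.sp is cleared; the chosen bitmask is then walked bit by bit. A stand-alone sketch of that mask walk, with an in-memory array standing in for the ia64_set_pmd()/ia64_set_pmc() accessors:

#include <stdint.h>
#include <stdio.h>

#define NREGS 8

static uint64_t hw_regs[NREGS];		/* stands in for the real PMD/PMC registers */

/* Restore every register whose bit is set in @mask from the saved values. */
static void restore_by_mask(const uint64_t *saved, uint64_t mask)
{
	for (int i = 0; mask; i++, mask >>= 1) {
		if (mask & 1)
			hw_regs[i] = saved[i];
	}
}

int main(void)
{
	uint64_t saved[NREGS] = { 10, 11, 12, 13, 14, 15, 16, 17 };

	/* partial reload: only registers 1 and 3 were touched since the last activation */
	restore_by_mask(saved, (1u << 1) | (1u << 3));

	/* full reload (context migrated): every implemented register */
	restore_by_mask(saved, (1u << NREGS) - 1);

	for (int i = 0; i < NREGS; i++)
		printf("reg%d = %llu\n", i, (unsigned long long)hw_regs[i]);
	return 0;
}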
- */ - if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { - /* - * reload pmc0 with the overflow information - * On McKinley PMU, this will trigger a PMU interrupt - */ - ia64_set_pmc(0, ctx->th_pmcs[0]); - ia64_srlz_d(); - ctx->th_pmcs[0] = 0UL; - - /* - * will replay the PMU interrupt - */ - if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); - - pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; - } - - /* - * we just did a reload, so we reset the partial reload fields - */ - ctx->ctx_reload_pmcs[0] = 0UL; - ctx->ctx_reload_pmds[0] = 0UL; - - SET_LAST_CPU(ctx, smp_processor_id()); - - /* - * dump activation value for this PMU - */ - INC_ACTIVATION(); - /* - * record current activation for this context - */ - SET_ACTIVATION(ctx); - - /* - * establish new ownership. - */ - SET_PMU_OWNER(task, ctx); - - /* - * restore the psr.up bit. measurement - * is active again. - * no PMU interrupt can happen at this point - * because we still have interrupts disabled. - */ - if (likely(psr_up)) pfm_set_psr_up(); - - /* - * allow concurrent access to context - */ - pfm_unprotect_ctx_ctxsw(ctx, flags); -} -#else /* !CONFIG_SMP */ -/* - * reload PMU state for UP kernels - * in 2.5 we come here with interrupts disabled - */ -void -pfm_load_regs (struct task_struct *task) -{ - pfm_context_t *ctx; - struct task_struct *owner; - unsigned long pmd_mask, pmc_mask; - u64 psr, psr_up; - int need_irq_resend; - - owner = GET_PMU_OWNER(); - ctx = PFM_GET_CTX(task); - psr = pfm_get_psr(); - - BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); - BUG_ON(psr & IA64_PSR_I); - - /* - * we restore ALL the debug registers to avoid picking up - * stale state. - * - * This must be done even when the task is still the owner - * as the registers may have been modified via ptrace() - * (not perfmon) by the previous task. - */ - if (ctx->ctx_fl_using_dbreg) { - pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); - pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); - } - - /* - * retrieved saved psr.up - */ - psr_up = ctx->ctx_saved_psr_up; - need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; - - /* - * short path, our state is still there, just - * need to restore psr and we go - * - * we do not touch either PMC nor PMD. the psr is not touched - * by the overflow_handler. So we are safe w.r.t. to interrupt - * concurrency even without interrupt masking. - */ - if (likely(owner == task)) { - if (likely(psr_up)) pfm_set_psr_up(); - return; - } - - /* - * someone else is still using the PMU, first push it out and - * then we'll be able to install our stuff ! - * - * Upon return, there will be no owner for the current PMU - */ - if (owner) pfm_lazy_save_regs(owner); - - /* - * To avoid leaking information to the user level when psr.sp=0, - * we must reload ALL implemented pmds (even the ones we don't use). - * In the kernel we only allow PFM_READ_PMDS on registers which - * we initialized or requested (sampling) so there is no risk there. - */ - pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; - - /* - * ALL accessible PMCs are systematically reloaded, unused registers - * get their default (from pfm_reset_pmu_state()) values to avoid picking - * up stale configuration. - * - * PMC0 is never in the mask. It is always restored separately - */ - pmc_mask = ctx->ctx_all_pmcs[0]; - - pfm_restore_pmds(ctx->th_pmds, pmd_mask); - pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); - - /* - * check for pending overflow at the time the state - * was saved. 
- */ - if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { - /* - * reload pmc0 with the overflow information - * On McKinley PMU, this will trigger a PMU interrupt - */ - ia64_set_pmc(0, ctx->th_pmcs[0]); - ia64_srlz_d(); - - ctx->th_pmcs[0] = 0UL; - - /* - * will replay the PMU interrupt - */ - if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR); - - pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; - } - - /* - * establish new ownership. - */ - SET_PMU_OWNER(task, ctx); - - /* - * restore the psr.up bit. measurement - * is active again. - * no PMU interrupt can happen at this point - * because we still have interrupts disabled. - */ - if (likely(psr_up)) pfm_set_psr_up(); -} -#endif /* CONFIG_SMP */ - -/* - * this function assumes monitoring is stopped - */ -static void -pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) -{ - u64 pmc0; - unsigned long mask2, val, pmd_val, ovfl_val; - int i, can_access_pmu = 0; - int is_self; - - /* - * is the caller the task being monitored (or which initiated the - * session for system wide measurements) - */ - is_self = ctx->ctx_task == task ? 1 : 0; - - /* - * can access PMU is task is the owner of the PMU state on the current CPU - * or if we are running on the CPU bound to the context in system-wide mode - * (that is not necessarily the task the context is attached to in this mode). - * In system-wide we always have can_access_pmu true because a task running on an - * invalid processor is flagged earlier in the call stack (see pfm_stop). - */ - can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id()); - if (can_access_pmu) { - /* - * Mark the PMU as not owned - * This will cause the interrupt handler to do nothing in case an overflow - * interrupt was in-flight - * This also guarantees that pmc0 will contain the final state - * It virtually gives us full control on overflow processing from that point - * on. - */ - SET_PMU_OWNER(NULL, NULL); - DPRINT(("releasing ownership\n")); - - /* - * read current overflow status: - * - * we are guaranteed to read the final stable state - */ - ia64_srlz_d(); - pmc0 = ia64_get_pmc(0); /* slow */ - - /* - * reset freeze bit, overflow status information destroyed - */ - pfm_unfreeze_pmu(); - } else { - pmc0 = ctx->th_pmcs[0]; - /* - * clear whatever overflow status bits there were - */ - ctx->th_pmcs[0] = 0; - } - ovfl_val = pmu_conf->ovfl_val; - /* - * we save all the used pmds - * we take care of overflows for counting PMDs - * - * XXX: sampling situation is not taken into account here - */ - mask2 = ctx->ctx_used_pmds[0]; - - DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2)); - - for (i = 0; mask2; i++, mask2>>=1) { - - /* skip non used pmds */ - if ((mask2 & 0x1) == 0) continue; - - /* - * can access PMU always true in system wide mode - */ - val = pmd_val = can_access_pmu ? 
ia64_get_pmd(i) : ctx->th_pmds[i]; - - if (PMD_IS_COUNTING(i)) { - DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", - task_pid_nr(task), - i, - ctx->ctx_pmds[i].val, - val & ovfl_val)); - - /* - * we rebuild the full 64 bit value of the counter - */ - val = ctx->ctx_pmds[i].val + (val & ovfl_val); - - /* - * now everything is in ctx_pmds[] and we need - * to clear the saved context from save_regs() such that - * pfm_read_pmds() gets the correct value - */ - pmd_val = 0UL; - - /* - * take care of overflow inline - */ - if (pmc0 & (1UL << i)) { - val += 1 + ovfl_val; - DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i)); - } - } - - DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val)); - - if (is_self) ctx->th_pmds[i] = pmd_val; - - ctx->ctx_pmds[i].val = val; - } -} - -static void -pfm_alt_save_pmu_state(void *data) -{ - struct pt_regs *regs; - - regs = task_pt_regs(current); - - DPRINT(("called\n")); - - /* - * should not be necessary but - * let's take not risk - */ - pfm_clear_psr_up(); - pfm_clear_psr_pp(); - ia64_psr(regs)->pp = 0; - - /* - * This call is required - * May cause a spurious interrupt on some processors - */ - pfm_freeze_pmu(); - - ia64_srlz_d(); -} - -void -pfm_alt_restore_pmu_state(void *data) -{ - struct pt_regs *regs; - - regs = task_pt_regs(current); - - DPRINT(("called\n")); - - /* - * put PMU back in state expected - * by perfmon - */ - pfm_clear_psr_up(); - pfm_clear_psr_pp(); - ia64_psr(regs)->pp = 0; - - /* - * perfmon runs with PMU unfrozen at all times - */ - pfm_unfreeze_pmu(); - - ia64_srlz_d(); -} - -int -pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) -{ - int ret, i; - int reserve_cpu; - - /* some sanity checks */ - if (hdl == NULL || hdl->handler == NULL) return -EINVAL; - - /* do the easy test first */ - if (pfm_alt_intr_handler) return -EBUSY; - - /* one at a time in the install or remove, just fail the others */ - if (!spin_trylock(&pfm_alt_install_check)) { - return -EBUSY; - } - - /* reserve our session */ - for_each_online_cpu(reserve_cpu) { - ret = pfm_reserve_session(NULL, 1, reserve_cpu); - if (ret) goto cleanup_reserve; - } - - /* save the current system wide pmu states */ - on_each_cpu(pfm_alt_save_pmu_state, NULL, 1); - - /* officially change to the alternate interrupt handler */ - pfm_alt_intr_handler = hdl; - - spin_unlock(&pfm_alt_install_check); - - return 0; - -cleanup_reserve: - for_each_online_cpu(i) { - /* don't unreserve more than we reserved */ - if (i >= reserve_cpu) break; - - pfm_unreserve_session(NULL, 1, i); - } - - spin_unlock(&pfm_alt_install_check); - - return ret; -} -EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt); - -int -pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl) -{ - int i; - - if (hdl == NULL) return -EINVAL; - - /* cannot remove someone else's handler! 
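pfm_flush_pmds() above rebuilds each 64-bit count from the software-maintained upper part plus the hardware counter's low bits (masked by ovfl_val), adding ovfl_val + 1 for an overflow that pmc0 still flags as unprocessed. A worked example of that arithmetic, assuming a hypothetical 47-bit hardware counter:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	const uint64_t ovfl_val = (1ULL << 47) - 1;	/* mask of bits the hardware really counts */
	uint64_t soft_val = 3ULL << 47;			/* upper bits accumulated in software */
	uint64_t hw_val   = 0x1234;			/* current low bits read from the PMD */
	int overflowed    = 1;				/* pmc0 bit for this counter was set */

	/* rebuild the full 64-bit value, as the flush path above does */
	uint64_t val = soft_val + (hw_val & ovfl_val);
	if (overflowed)
		val += 1 + ovfl_val;			/* account for one unprocessed wrap */

	printf("full count = %llu\n", (unsigned long long)val);
	assert(val == soft_val + 0x1234 + (1ULL << 47));
	return 0;
}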
*/ - if (pfm_alt_intr_handler != hdl) return -EINVAL; - - /* one at a time in the install or remove, just fail the others */ - if (!spin_trylock(&pfm_alt_install_check)) { - return -EBUSY; - } - - pfm_alt_intr_handler = NULL; - - on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1); - - for_each_online_cpu(i) { - pfm_unreserve_session(NULL, 1, i); - } - - spin_unlock(&pfm_alt_install_check); - - return 0; -} -EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt); - -/* - * perfmon initialization routine, called from the initcall() table - */ -static int init_pfm_fs(void); - -static int __init -pfm_probe_pmu(void) -{ - pmu_config_t **p; - int family; - - family = local_cpu_data->family; - p = pmu_confs; - - while(*p) { - if ((*p)->probe) { - if ((*p)->probe() == 0) goto found; - } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) { - goto found; - } - p++; - } - return -1; -found: - pmu_conf = *p; - return 0; -} - -int __init -pfm_init(void) -{ - unsigned int n, n_counters, i; - - printk("perfmon: version %u.%u IRQ %u\n", - PFM_VERSION_MAJ, - PFM_VERSION_MIN, - IA64_PERFMON_VECTOR); - - if (pfm_probe_pmu()) { - printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n", - local_cpu_data->family); - return -ENODEV; - } - - /* - * compute the number of implemented PMD/PMC from the - * description tables - */ - n = 0; - for (i=0; PMC_IS_LAST(i) == 0; i++) { - if (PMC_IS_IMPL(i) == 0) continue; - pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63); - n++; - } - pmu_conf->num_pmcs = n; - - n = 0; n_counters = 0; - for (i=0; PMD_IS_LAST(i) == 0; i++) { - if (PMD_IS_IMPL(i) == 0) continue; - pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63); - n++; - if (PMD_IS_COUNTING(i)) n_counters++; - } - pmu_conf->num_pmds = n; - pmu_conf->num_counters = n_counters; - - /* - * sanity checks on the number of debug registers - */ - if (pmu_conf->use_rr_dbregs) { - if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) { - printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs); - pmu_conf = NULL; - return -1; - } - if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) { - printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs); - pmu_conf = NULL; - return -1; - } - } - - printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n", - pmu_conf->pmu_name, - pmu_conf->num_pmcs, - pmu_conf->num_pmds, - pmu_conf->num_counters, - ffz(pmu_conf->ovfl_val)); - - /* sanity check */ - if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) { - printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n"); - pmu_conf = NULL; - return -1; - } - - /* - * create /proc/perfmon (mostly for debugging purposes) - */ - perfmon_dir = proc_create_seq("perfmon", S_IRUGO, NULL, &pfm_seq_ops); - if (perfmon_dir == NULL) { - printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); - pmu_conf = NULL; - return -1; - } - - /* - * create /proc/sys/kernel/perfmon (for debugging purposes) - */ - pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root); - - /* - * initialize all our spinlocks - */ - spin_lock_init(&pfm_sessions.pfs_lock); - spin_lock_init(&pfm_buffer_fmt_lock); - - init_pfm_fs(); - - for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL; - - return 0; -} - -__initcall(pfm_init); - -/* - * this function is called before pfm_init() - */ -void -pfm_init_percpu (void) -{ - static int first_time=1; - /* - * make sure no measurement is active - * (may inherit 
programmed PMCs from EFI). - */ - pfm_clear_psr_pp(); - pfm_clear_psr_up(); - - /* - * we run with the PMU not frozen at all times - */ - pfm_unfreeze_pmu(); - - if (first_time) { - register_percpu_irq(IA64_PERFMON_VECTOR, pfm_interrupt_handler, - 0, "perfmon"); - first_time=0; - } - - ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); - ia64_srlz_d(); -} - -/* - * used for debug purposes only - */ -void -dump_pmu_state(const char *from) -{ - struct task_struct *task; - struct pt_regs *regs; - pfm_context_t *ctx; - unsigned long psr, dcr, info, flags; - int i, this_cpu; - - local_irq_save(flags); - - this_cpu = smp_processor_id(); - regs = task_pt_regs(current); - info = PFM_CPUINFO_GET(); - dcr = ia64_getreg(_IA64_REG_CR_DCR); - - if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) { - local_irq_restore(flags); - return; - } - - printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", - this_cpu, - from, - task_pid_nr(current), - regs->cr_iip, - current->comm); - - task = GET_PMU_OWNER(); - ctx = GET_PMU_CTX(); - - printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx); - - psr = pfm_get_psr(); - - printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n", - this_cpu, - ia64_get_pmc(0), - psr & IA64_PSR_PP ? 1 : 0, - psr & IA64_PSR_UP ? 1 : 0, - dcr & IA64_DCR_PP ? 1 : 0, - info, - ia64_psr(regs)->up, - ia64_psr(regs)->pp); - - ia64_psr(regs)->up = 0; - ia64_psr(regs)->pp = 0; - - for (i=1; PMC_IS_LAST(i) == 0; i++) { - if (PMC_IS_IMPL(i) == 0) continue; - printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]); - } - - for (i=1; PMD_IS_LAST(i) == 0; i++) { - if (PMD_IS_IMPL(i) == 0) continue; - printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]); - } - - if (ctx) { - printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n", - this_cpu, - ctx->ctx_state, - ctx->ctx_smpl_vaddr, - ctx->ctx_smpl_hdr, - ctx->ctx_msgq_head, - ctx->ctx_msgq_tail, - ctx->ctx_saved_psr_up); - } - local_irq_restore(flags); -} - -/* - * called from process.c:copy_thread(). task is new child. - */ -void -pfm_inherit(struct task_struct *task, struct pt_regs *regs) -{ - struct thread_struct *thread; - - DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task))); - - thread = &task->thread; - - /* - * cut links inherited from parent (current) - */ - thread->pfm_context = NULL; - - PFM_SET_WORK_PENDING(task, 0); - - /* - * the psr bits are already set properly in copy_threads() - */ -} -#else /* !CONFIG_PERFMON */ -asmlinkage long -sys_perfmonctl (int fd, int cmd, void *arg, int count) -{ - return -ENOSYS; -} -#endif /* CONFIG_PERFMON */ diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index f19cb97c0098..f25f2f723196 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -51,10 +51,6 @@ #include "entry.h" -#ifdef CONFIG_PERFMON -# include <asm/perfmon.h> -#endif - #include "sigframe.h" void (*ia64_mark_idle)(int); @@ -174,15 +170,6 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) return; } -#ifdef CONFIG_PERFMON - if (current->thread.pfm_needs_checking) - /* - * Note: pfm_handle_work() allow us to call it with interrupts - * disabled, and may enable interrupts within the function. 
- */ - pfm_handle_work(); -#endif - /* deal with pending signal delivery */ if (test_thread_flag(TIF_SIGPENDING)) { local_irq_enable(); /* force interrupt enable */ @@ -264,41 +251,15 @@ void arch_cpu_idle(void) void ia64_save_extra (struct task_struct *task) { -#ifdef CONFIG_PERFMON - unsigned long info; -#endif - if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) ia64_save_debug_regs(&task->thread.dbr[0]); - -#ifdef CONFIG_PERFMON - if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) - pfm_save_regs(task); - - info = __this_cpu_read(pfm_syst_info); - if (info & PFM_CPUINFO_SYST_WIDE) - pfm_syst_wide_update_task(task, info, 0); -#endif } void ia64_load_extra (struct task_struct *task) { -#ifdef CONFIG_PERFMON - unsigned long info; -#endif - if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) ia64_load_debug_regs(&task->thread.dbr[0]); - -#ifdef CONFIG_PERFMON - if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) - pfm_load_regs(task); - - info = __this_cpu_read(pfm_syst_info); - if (info & PFM_CPUINFO_SYST_WIDE) - pfm_syst_wide_update_task(task, info, 1); -#endif } /* @@ -310,7 +271,7 @@ ia64_load_extra (struct task_struct *task) * * <clone syscall> <some kernel call frames> * sys_clone : - * _do_fork _do_fork + * kernel_clone kernel_clone * copy_thread copy_thread * * This means that the stack layout is as follows: @@ -432,11 +393,6 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base, */ child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET) & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP)); - -#ifdef CONFIG_PERFMON - if (current->thread.pfm_context) - pfm_inherit(p, child_ptregs); -#endif return retval; } @@ -455,7 +411,7 @@ asmlinkage long ia64_clone(unsigned long clone_flags, unsigned long stack_start, .tls = tls, }; - return _do_fork(&args); + return kernel_clone(&args); } static void @@ -563,15 +519,6 @@ exit_thread (struct task_struct *tsk) { ia64_drop_fpu(tsk); -#ifdef CONFIG_PERFMON - /* if needed, stop monitoring and flush state to perfmon context */ - if (tsk->thread.pfm_context) - pfm_exit_thread(tsk); - - /* free debug register resources */ - if (tsk->thread.flags & IA64_THREAD_DBG_VALID) - pfm_release_debug_registers(tsk); -#endif } unsigned long diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 33ca9fa0fbf5..75c070aed81e 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -30,9 +30,6 @@ #include <asm/rse.h> #include <linux/uaccess.h> #include <asm/unwind.h> -#ifdef CONFIG_PERFMON -#include <asm/perfmon.h> -#endif #include "entry.h" @@ -1951,27 +1948,6 @@ access_uarea(struct task_struct *child, unsigned long addr, "address 0x%lx\n", addr); return -1; } -#ifdef CONFIG_PERFMON - /* - * Check if debug registers are used by perfmon. This - * test must be done once we know that we can do the - * operation, i.e. the arguments are all valid, but - * before we start modifying the state. - * - * Perfmon needs to keep a count of how many processes - * are trying to modify the debug registers for system - * wide monitoring sessions. - * - * We also include read access here, because they may - * cause the PMU-installed debug register state - * (dbr[], ibr[]) to be reset. The two arrays are also - * used by perfmon, but we do not use - * IA64_THREAD_DBG_VALID. The registers are restored - * by the PMU context switch code. 
- */ - if (pfm_use_debug_registers(child)) - return -1; -#endif if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { child->thread.flags |= IA64_THREAD_DBG_VALID; diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index c29c600d7967..093040f7e626 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -355,10 +355,6 @@ smp_callin (void) extern void ia64_init_itm(void); extern volatile int time_keeper_id; -#ifdef CONFIG_PERFMON - extern void pfm_init_percpu(void); -#endif - cpuid = smp_processor_id(); phys_id = hard_smp_processor_id(); itc_master = time_keeper_id; @@ -389,10 +385,6 @@ smp_callin (void) ia64_mca_cmc_vector_setup(); /* Setup vector on AP */ -#ifdef CONFIG_PERFMON - pfm_init_percpu(); -#endif - local_irq_enable(); if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index f52a41f4c340..b96ed8b8a508 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -160,7 +160,7 @@ 148 common mmap2 sys_mmap2 149 common pciconfig_read sys_pciconfig_read 150 common pciconfig_write sys_pciconfig_write -151 common perfmonctl sys_perfmonctl +151 common perfmonctl sys_ni_syscall 152 common sigaltstack sys_sigaltstack 153 common rt_sigaction sys_rt_sigaction 154 common rt_sigpending sys_rt_sigpending @@ -360,3 +360,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index d259690eb91a..9b265783be6a 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -218,6 +218,7 @@ SECTIONS { STABS_DEBUG DWARF_DEBUG + ELF_DETAILS /* Default discards */ DISCARDS diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index 82118b38532f..081fcba01dc0 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile @@ -12,7 +12,6 @@ lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o -lib-$(CONFIG_PERFMON) += carta_random.o AFLAGS___divdi3.o = AFLAGS___udivdi3.o = -DUNSIGNED diff --git a/arch/ia64/lib/carta_random.S b/arch/ia64/lib/carta_random.S deleted file mode 100644 index 1a4a639dc42f..000000000000 --- a/arch/ia64/lib/carta_random.S +++ /dev/null @@ -1,55 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Fast, simple, yet decent quality random number generator based on - * a paper by David G. Carta ("Two Fast Implementations of the - * `Minimal Standard' Random Number Generator," Communications of the - * ACM, January, 1990). 
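carta_random32(), deleted here, implemented the Park-Miller "minimal standard" generator, seed' = 16807 * seed mod (2^31 - 1), using Carta's trick of reducing the 46-bit product with a split, an add and a conditional fold instead of a division. A plain C sketch of the same algorithm (not a line-for-line translation of the assembly):

#include <stdint.h>
#include <stdio.h>

/* Park-Miller / Carta minimal standard PRNG: seed' = 16807 * seed mod (2^31 - 1) */
static uint32_t carta_random32(uint32_t seed)
{
	const uint64_t a = 16807;
	const uint32_t m = 0x7fffffff;		/* 2^31 - 1 */
	uint64_t prod = a * (uint64_t)seed;	/* at most 46 bits */

	uint32_t lo = prod & m;			/* low 31 bits */
	uint32_t hi = prod >> 31;		/* high 15 bits */

	lo += hi;				/* Carta: add instead of dividing */
	if (lo > m)				/* fold a possible carry back in */
		lo = (lo & m) + 1;
	return lo;
}

int main(void)
{
	uint32_t s = 1;				/* seed 1 yields the well-known 16807, 282475249, ... */
	for (int i = 0; i < 5; i++) {
		s = carta_random32(s);
		printf("%u\n", s);
	}
	return 0;
}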
- * - * Copyright (C) 2002 Hewlett-Packard Co - * David Mosberger-Tang <davidm@hpl.hp.com> - */ - -#include <asm/asmmacro.h> - -#define a r2 -#define m r3 -#define lo r8 -#define hi r9 -#define t0 r16 -#define t1 r17 -#define seed r32 - -GLOBAL_ENTRY(carta_random32) - movl a = (16807 << 16) | 16807 - ;; - pmpyshr2.u t0 = a, seed, 0 - pmpyshr2.u t1 = a, seed, 16 - ;; - unpack2.l t0 = t1, t0 - dep m = -1, r0, 0, 31 - ;; - zxt4 lo = t0 - shr.u hi = t0, 32 - ;; - dep t0 = 0, hi, 15, 49 // t0 = (hi & 0x7fff) - ;; - shl t0 = t0, 16 // t0 = (hi & 0x7fff) << 16 - shr t1 = hi, 15 // t1 = (hi >> 15) - ;; - add lo = lo, t0 - ;; - cmp.gtu p6, p0 = lo, m - ;; -(p6) and lo = lo, m - ;; -(p6) add lo = 1, lo - ;; - add lo = lo, t1 - ;; - cmp.gtu p6, p0 = lo, m - ;; -(p6) and lo = lo, m - ;; -(p6) add lo = 1, lo - br.ret.sptk.many rp -END(carta_random32) diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c index 6e82e0be8040..917e3138b277 100644 --- a/arch/ia64/lib/csum_partial_copy.c +++ b/arch/ia64/lib/csum_partial_copy.c @@ -96,18 +96,3 @@ unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum) out: return result; } - -/* - * XXX Fixme - * - * This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS. - * But it's very tricky to get right even in C. - */ -__wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) -{ - memcpy(dst, src, len); - return csum_partial(dst, len, sum); -} - -EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 8e7b8c6c576e..ef12e097f318 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -8,7 +8,7 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/dmar.h> #include <linux/efi.h> #include <linux/elf.h> @@ -73,8 +73,7 @@ __ia64_sync_icache_dcache (pte_t pte) * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to * flush them when they get mapped into an executable vm-area. 
*/ -void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, - enum dma_data_direction dir) +void arch_dma_mark_clean(phys_addr_t paddr, size_t size) { unsigned long pfn = PHYS_PFN(paddr); @@ -538,7 +537,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), - MEMINIT_EARLY, NULL); + MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); return 0; } @@ -548,7 +547,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone, { if (!vmem_map) { memmap_init_zone(size, nid, zone, start_pfn, - MEMINIT_EARLY, NULL); + MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); } else { struct page *start; struct memmap_init_callback_data args; diff --git a/arch/ia64/oprofile/Makefile b/arch/ia64/oprofile/Makefile index cd134d6643bf..fc7944d462f4 100644 --- a/arch/ia64/oprofile/Makefile +++ b/arch/ia64/oprofile/Makefile @@ -8,4 +8,3 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \ timer_int.o ) oprofile-y := $(DRIVER_OBJS) init.o backtrace.o -oprofile-$(CONFIG_PERFMON) += perfmon.o diff --git a/arch/ia64/oprofile/init.c b/arch/ia64/oprofile/init.c index 31b545c35460..a692ba16a07b 100644 --- a/arch/ia64/oprofile/init.c +++ b/arch/ia64/oprofile/init.c @@ -18,21 +18,11 @@ extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth); int __init oprofile_arch_init(struct oprofile_operations *ops) { - int ret = -ENODEV; - -#ifdef CONFIG_PERFMON - /* perfmon_init() can fail, but we have no way to report it */ - ret = perfmon_init(ops); -#endif ops->backtrace = ia64_backtrace; - - return ret; + return -ENODEV; } void oprofile_arch_exit(void) { -#ifdef CONFIG_PERFMON - perfmon_exit(); -#endif } diff --git a/arch/ia64/oprofile/perfmon.c b/arch/ia64/oprofile/perfmon.c deleted file mode 100644 index 192d3e8e1f65..000000000000 --- a/arch/ia64/oprofile/perfmon.c +++ /dev/null @@ -1,99 +0,0 @@ -/** - * @file perfmon.c - * - * @remark Copyright 2003 OProfile authors - * @remark Read the file COPYING - * - * @author John Levon <levon@movementarian.org> - */ - -#include <linux/kernel.h> -#include <linux/oprofile.h> -#include <linux/sched.h> -#include <asm/perfmon.h> -#include <asm/ptrace.h> -#include <asm/errno.h> - -static int allow_ints; - -static int -perfmon_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, - struct pt_regs *regs, unsigned long stamp) -{ - int event = arg->pmd_eventid; - - arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; - - /* the owner of the oprofile event buffer may have exited - * without perfmon being shutdown (e.g. 
SIGSEGV) - */ - if (allow_ints) - oprofile_add_sample(regs, event); - return 0; -} - - -static int perfmon_start(void) -{ - allow_ints = 1; - return 0; -} - - -static void perfmon_stop(void) -{ - allow_ints = 0; -} - - -#define OPROFILE_FMT_UUID { \ - 0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c } - -static pfm_buffer_fmt_t oprofile_fmt = { - .fmt_name = "oprofile_format", - .fmt_uuid = OPROFILE_FMT_UUID, - .fmt_handler = perfmon_handler, -}; - - -static char *get_cpu_type(void) -{ - __u8 family = local_cpu_data->family; - - switch (family) { - case 0x07: - return "ia64/itanium"; - case 0x1f: - return "ia64/itanium2"; - default: - return "ia64/ia64"; - } -} - - -/* all the ops are handled via userspace for IA64 perfmon */ - -static int using_perfmon; - -int perfmon_init(struct oprofile_operations *ops) -{ - int ret = pfm_register_buffer_fmt(&oprofile_fmt); - if (ret) - return -ENODEV; - - ops->cpu_type = get_cpu_type(); - ops->start = perfmon_start; - ops->stop = perfmon_stop; - using_perfmon = 1; - printk(KERN_INFO "oprofile: using perfmon.\n"); - return 0; -} - - -void perfmon_exit(void) -{ - if (!using_perfmon) - return; - - pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid); -} diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 6f2f38d05772..c830705bd3ac 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -6,32 +6,33 @@ config M68K select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA + select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_NO_PREEMPT if !COLDFIRE + select ARCH_WANT_IPC_PARSE_VERSION select BINFMT_FLAT_ARGVP_ENVP_ON_STACK select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE - select HAVE_IDE - select HAVE_AOUT if MMU - select HAVE_ASM_MODVERSIONS - select HAVE_DEBUG_BUGVERBOSE - select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 - select NO_DMA if !MMU && !COLDFIRE - select HAVE_UID16 - select VIRT_TO_BUS - select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES select GENERIC_IOMAP + select GENERIC_IRQ_SHOW select GENERIC_STRNCPY_FROM_USER if MMU select GENERIC_STRNLEN_USER if MMU - select ARCH_WANT_IPC_PARSE_VERSION + select HAVE_AOUT if MMU + select HAVE_ASM_MODVERSIONS + select HAVE_DEBUG_BUGVERBOSE select HAVE_FUTEX_CMPXCHG if MMU && FUTEX + select HAVE_IDE select HAVE_MOD_ARCH_SPECIFIC + select HAVE_UID16 + select MMU_GATHER_NO_RANGE if MMU select MODULES_USE_ELF_REL select MODULES_USE_ELF_RELA - select OLD_SIGSUSPEND3 + select NO_DMA if !MMU && !COLDFIRE select OLD_SIGACTION - select MMU_GATHER_NO_RANGE if MMU + select OLD_SIGSUSPEND3 + select UACCESS_MEMCPY if !MMU + select VIRT_TO_BUS config CPU_BIG_ENDIAN def_bool y diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index 8f23b2fab64c..bee9f240f35d 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -214,7 +214,7 @@ static void __init amiga_identify(void) switch (amiga_model) { case AMI_UNKNOWN: - goto Generic; + break; case AMI_600: case AMI_1200: @@ -227,7 +227,7 @@ static void __init amiga_identify(void) case AMI_2000: case AMI_2500: AMIGAHW_SET(A2000_CLK); /* Is this correct for all models? 
*/ - goto Generic; + break; case AMI_3000: case AMI_3000T: @@ -238,7 +238,7 @@ static void __init amiga_identify(void) AMIGAHW_SET(A3000_SCSI); AMIGAHW_SET(A3000_CLK); AMIGAHW_SET(ZORRO3); - goto Generic; + break; case AMI_4000T: AMIGAHW_SET(A4000_SCSI); @@ -247,68 +247,12 @@ static void __init amiga_identify(void) AMIGAHW_SET(A4000_IDE); AMIGAHW_SET(A3000_CLK); AMIGAHW_SET(ZORRO3); - goto Generic; + break; case AMI_CDTV: case AMI_CD32: AMIGAHW_SET(CD_ROM); AMIGAHW_SET(A2000_CLK); /* Is this correct? */ - goto Generic; - - Generic: - AMIGAHW_SET(AMI_VIDEO); - AMIGAHW_SET(AMI_BLITTER); - AMIGAHW_SET(AMI_AUDIO); - AMIGAHW_SET(AMI_FLOPPY); - AMIGAHW_SET(AMI_KEYBOARD); - AMIGAHW_SET(AMI_MOUSE); - AMIGAHW_SET(AMI_SERIAL); - AMIGAHW_SET(AMI_PARALLEL); - AMIGAHW_SET(CHIP_RAM); - AMIGAHW_SET(PAULA); - - switch (amiga_chipset) { - case CS_OCS: - case CS_ECS: - case CS_AGA: - switch (amiga_custom.deniseid & 0xf) { - case 0x0c: - AMIGAHW_SET(DENISE_HR); - break; - case 0x08: - AMIGAHW_SET(LISA); - break; - } - break; - default: - AMIGAHW_SET(DENISE); - break; - } - switch ((amiga_custom.vposr>>8) & 0x7f) { - case 0x00: - AMIGAHW_SET(AGNUS_PAL); - break; - case 0x10: - AMIGAHW_SET(AGNUS_NTSC); - break; - case 0x20: - case 0x21: - AMIGAHW_SET(AGNUS_HR_PAL); - break; - case 0x30: - case 0x31: - AMIGAHW_SET(AGNUS_HR_NTSC); - break; - case 0x22: - case 0x23: - AMIGAHW_SET(ALICE_PAL); - break; - case 0x32: - case 0x33: - AMIGAHW_SET(ALICE_NTSC); - break; - } - AMIGAHW_SET(ZORRO); break; case AMI_DRACO: @@ -318,6 +262,60 @@ static void __init amiga_identify(void) panic("Unknown Amiga Model"); } + AMIGAHW_SET(AMI_VIDEO); + AMIGAHW_SET(AMI_BLITTER); + AMIGAHW_SET(AMI_AUDIO); + AMIGAHW_SET(AMI_FLOPPY); + AMIGAHW_SET(AMI_KEYBOARD); + AMIGAHW_SET(AMI_MOUSE); + AMIGAHW_SET(AMI_SERIAL); + AMIGAHW_SET(AMI_PARALLEL); + AMIGAHW_SET(CHIP_RAM); + AMIGAHW_SET(PAULA); + + switch (amiga_chipset) { + case CS_OCS: + case CS_ECS: + case CS_AGA: + switch (amiga_custom.deniseid & 0xf) { + case 0x0c: + AMIGAHW_SET(DENISE_HR); + break; + case 0x08: + AMIGAHW_SET(LISA); + break; + default: + AMIGAHW_SET(DENISE); + break; + } + break; + } + switch ((amiga_custom.vposr>>8) & 0x7f) { + case 0x00: + AMIGAHW_SET(AGNUS_PAL); + break; + case 0x10: + AMIGAHW_SET(AGNUS_NTSC); + break; + case 0x20: + case 0x21: + AMIGAHW_SET(AGNUS_HR_PAL); + break; + case 0x30: + case 0x31: + AMIGAHW_SET(AGNUS_HR_NTSC); + break; + case 0x22: + case 0x23: + AMIGAHW_SET(ALICE_PAL); + break; + case 0x32: + case 0x33: + AMIGAHW_SET(ALICE_NTSC); + break; + } + AMIGAHW_SET(ZORRO); + #define AMIGAHW_ANNOUNCE(name, str) \ if (AMIGAHW_PRESENT(name)) \ pr_cont(str) diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c index 9ef4ec0aea00..59f7dfe50a4d 100644 --- a/arch/m68k/coldfire/device.c +++ b/arch/m68k/coldfire/device.c @@ -554,7 +554,7 @@ static struct platform_device mcf_edma = { }; #endif /* IS_ENABLED(CONFIG_MCF_EDMA) */ -#if IS_ENABLED(CONFIG_MMC) +#ifdef MCFSDHC_BASE static struct mcf_esdhc_platform_data mcf_esdhc_data = { .max_bus_width = 4, .cd_type = ESDHC_CD_NONE, @@ -579,7 +579,7 @@ static struct platform_device mcf_esdhc = { .resource = mcf_esdhc_resources, .dev.platform_data = &mcf_esdhc_data, }; -#endif /* IS_ENABLED(CONFIG_MMC) */ +#endif /* MCFSDHC_BASE */ static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, @@ -613,7 +613,7 @@ static struct platform_device *mcf_devices[] __initdata = { #if IS_ENABLED(CONFIG_MCF_EDMA) &mcf_edma, #endif -#if IS_ENABLED(CONFIG_MMC) +#ifdef MCFSDHC_BASE &mcf_esdhc, #endif }; diff 
--git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 522dcf624aa5..3cd76bfaee03 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -317,6 +317,7 @@ CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_PLATFORM=y CONFIG_BLK_DEV_MAC_IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 2433409f4369..c3d6faa7894f 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -346,6 +346,7 @@ CONFIG_DUMMY_IRQ=m CONFIG_IDE=y CONFIG_IDE_GD_ATAPI=y CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_PLATFORM=y CONFIG_BLK_DEV_GAYLE=y CONFIG_BLK_DEV_BUDDHA=y CONFIG_BLK_DEV_FALCON_IDE=y diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h index 3f2c15d6f18c..692e7b6cc042 100644 --- a/arch/m68k/include/asm/checksum.h +++ b/arch/m68k/include/asm/checksum.h @@ -31,14 +31,13 @@ __wsum csum_partial(const void *buff, int len, __wsum sum); */ #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define _HAVE_ARCH_CSUM_AND_COPY extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, - int *csum_err); + int len); extern __wsum csum_partial_copy_nocheck(const void *src, - void *dst, int len, - __wsum sum); + void *dst, int len); /* * This is a version of ip_fast_csum() optimized for IP headers, diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 015f1ca38305..3689c6718c88 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h @@ -68,4 +68,12 @@ static inline struct thread_info *current_thread_info(void) #define TIF_MEMDIE 16 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */ +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_DELAYED_TRACE (1 << TIF_DELAYED_TRACE) +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_MEMDIE (1 << TIF_MEMDIE) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) + #endif /* _ASM_M68K_THREAD_INFO_H */ diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h index e896466a41a4..f98208ccbbcd 100644 --- a/arch/m68k/include/asm/uaccess.h +++ b/arch/m68k/include/asm/uaccess.h @@ -1,7 +1,397 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifdef __uClinux__ -#include <asm/uaccess_no.h> +#ifndef __M68K_UACCESS_H +#define __M68K_UACCESS_H + +#ifdef CONFIG_MMU + +/* + * User space memory access functions + */ +#include <linux/compiler.h> +#include <linux/types.h> +#include <asm/segment.h> +#include <asm/extable.h> + +/* We let the MMU do all checking */ +static inline int access_ok(const void __user *addr, + unsigned long size) +{ + return 1; +} + +/* + * Not all varients of the 68k family support the notion of address spaces. + * The traditional 680x0 parts do, and they use the sfc/dfc registers and + * the "moves" instruction to access user space from kernel space. Other + * family members like ColdFire don't support this, and only have a single + * address space, and use the usual "move" instruction for user space access. + * + * Outside of this difference the user space access functions are the same. + * So lets keep the code simple and just define in what we need to use. 
+ */ +#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES +#define MOVES "moves" #else -#include <asm/uaccess_mm.h> +#define MOVES "move" #endif -#include <asm/extable.h> + +extern int __put_user_bad(void); +extern int __get_user_bad(void); + +#define __put_user_asm(res, x, ptr, bwl, reg, err) \ +asm volatile ("\n" \ + "1: "MOVES"."#bwl" %2,%1\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: moveq.l %3,%0\n" \ + " jra 2b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .long 2b,10b\n" \ + " .previous" \ + : "+d" (res), "=m" (*(ptr)) \ + : #reg (x), "i" (err)) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + */ + +#define __put_user(x, ptr) \ +({ \ + typeof(*(ptr)) __pu_val = (x); \ + int __pu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (sizeof (*(ptr))) { \ + case 1: \ + __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \ + break; \ + case 2: \ + __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \ + break; \ + case 4: \ + __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ + break; \ + case 8: \ + { \ + const void __user *__pu_ptr = (ptr); \ + asm volatile ("\n" \ + "1: "MOVES".l %2,(%1)+\n" \ + "2: "MOVES".l %R2,(%1)\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: movel %3,%0\n" \ + " jra 3b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .long 2b,10b\n" \ + " .long 3b,10b\n" \ + " .previous" \ + : "+d" (__pu_err), "+a" (__pu_ptr) \ + : "r" (__pu_val), "i" (-EFAULT) \ + : "memory"); \ + break; \ + } \ + default: \ + __pu_err = __put_user_bad(); \ + break; \ + } \ + __pu_err; \ +}) +#define put_user(x, ptr) __put_user(x, ptr) + + +#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ + type __gu_val; \ + asm volatile ("\n" \ + "1: "MOVES"."#bwl" %2,%1\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: move.l %3,%0\n" \ + " sub.l %1,%1\n" \ + " jra 2b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .previous" \ + : "+d" (res), "=&" #reg (__gu_val) \ + : "m" (*(ptr)), "i" (err)); \ + (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \ +}) + +#define __get_user(x, ptr) \ +({ \ + int __gu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \ + break; \ + case 2: \ + __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \ + break; \ + case 4: \ + __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ + break; \ + case 8: { \ + const void __user *__gu_ptr = (ptr); \ + union { \ + u64 l; \ + __typeof__(*(ptr)) t; \ + } __gu_val; \ + asm volatile ("\n" \ + "1: "MOVES".l (%2)+,%1\n" \ + "2: "MOVES".l (%2),%R1\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: move.l %3,%0\n" \ + " sub.l %1,%1\n" \ + " sub.l %R1,%R1\n" \ + " jra 3b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .long 2b,10b\n" \ + " .previous" \ + : "+d" (__gu_err), "=&r" (__gu_val.l), \ + "+a" (__gu_ptr) \ + : "i" (-EFAULT) \ + : "memory"); \ + (x) = __gu_val.t; \ + break; \ + } \ + default: \ + __gu_err = __get_user_bad(); \ + break; \ + } \ + __gu_err; \ +}) +#define get_user(x, ptr) __get_user(x, ptr) + +unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); 
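get_user()/put_user() above and the copy helpers that follow are the primitives the rest of the kernel consumes; a typical user is a driver write method like the hypothetical one below, which bounds the length and treats any bytes copy_from_user() could not fetch as -EFAULT. None of these names come from this patch.

/* Hypothetical consumer of the uaccess primitives defined above. */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static char cmd_buf[64];

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	size_t len = min_t(size_t, count, sizeof(cmd_buf) - 1);

	/* copy_from_user() returns the number of bytes it could NOT copy */
	if (copy_from_user(cmd_buf, ubuf, len))
		return -EFAULT;

	cmd_buf[len] = '\0';
	return count;		/* consume everything so userspace does not resubmit */
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.write = demo_write,
};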
+unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n); + +#define __suffix0 +#define __suffix1 b +#define __suffix2 w +#define __suffix4 l + +#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ + asm volatile ("\n" \ + "1: "MOVES"."#s1" (%2)+,%3\n" \ + " move."#s1" %3,(%1)+\n" \ + " .ifnc \""#s2"\",\"\"\n" \ + "2: "MOVES"."#s2" (%2)+,%3\n" \ + " move."#s2" %3,(%1)+\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + "3: "MOVES"."#s3" (%2)+,%3\n" \ + " move."#s3" %3,(%1)+\n" \ + " .endif\n" \ + " .endif\n" \ + "4:\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10f\n" \ + " .ifnc \""#s2"\",\"\"\n" \ + " .long 2b,20f\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + " .long 3b,30f\n" \ + " .endif\n" \ + " .endif\n" \ + " .previous\n" \ + "\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: addq.l #"#n1",%0\n" \ + " .ifnc \""#s2"\",\"\"\n" \ + "20: addq.l #"#n2",%0\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + "30: addq.l #"#n3",%0\n" \ + " .endif\n" \ + " .endif\n" \ + " jra 4b\n" \ + " .previous\n" \ + : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ + : : "memory") + +#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ + ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3) +#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \ + ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \ + __suffix##n1, __suffix##n2, __suffix##n3) + +static __always_inline unsigned long +__constant_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned long res = 0, tmp; + + switch (n) { + case 1: + __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0); + break; + case 2: + __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0); + break; + case 3: + __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0); + break; + case 4: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0); + break; + case 5: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0); + break; + case 6: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0); + break; + case 7: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1); + break; + case 8: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0); + break; + case 9: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1); + break; + case 10: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2); + break; + case 12: + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4); + break; + default: + /* we limit the inlined version to 3 moves */ + return __generic_copy_from_user(to, from, n); + } + + return res; +} + +#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ + asm volatile ("\n" \ + " move."#s1" (%2)+,%3\n" \ + "11: "MOVES"."#s1" %3,(%1)+\n" \ + "12: move."#s2" (%2)+,%3\n" \ + "21: "MOVES"."#s2" %3,(%1)+\n" \ + "22:\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + " move."#s3" (%2)+,%3\n" \ + "31: "MOVES"."#s3" %3,(%1)+\n" \ + "32:\n" \ + " .endif\n" \ + "4:\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 11b,5f\n" \ + " .long 12b,5f\n" \ + " .long 21b,5f\n" \ + " .long 22b,5f\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + " .long 31b,5f\n" \ + " .long 32b,5f\n" \ + " .endif\n" \ + " .previous\n" \ + "\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "5: moveq.l #"#n",%0\n" \ + " jra 4b\n" \ + " .previous\n" \ + : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \ + : : "memory") + +static __always_inline unsigned long 
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + unsigned long res = 0, tmp; + + switch (n) { + case 1: + __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1); + break; + case 2: + __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2); + break; + case 3: + __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,); + break; + case 4: + __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); + break; + case 5: + __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,); + break; + case 6: + __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,); + break; + case 7: + __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b); + break; + case 8: + __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,); + break; + case 9: + __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b); + break; + case 10: + __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w); + break; + case 12: + __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l); + break; + default: + /* limit the inlined version to 3 moves */ + return __generic_copy_to_user(to, from, n); + } + + return res; +} + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (__builtin_constant_p(n)) + return __constant_copy_from_user(to, from, n); + return __generic_copy_from_user(to, from, n); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if (__builtin_constant_p(n)) + return __constant_copy_to_user(to, from, n); + return __generic_copy_to_user(to, from, n); +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +#define user_addr_max() \ + (uaccess_kernel() ? ~0UL : TASK_SIZE) + +extern long strncpy_from_user(char *dst, const char __user *src, long count); +extern __must_check long strnlen_user(const char __user *str, long n); + +unsigned long __clear_user(void __user *to, unsigned long n); + +#define clear_user __clear_user + +#else /* !CONFIG_MMU */ +#include <asm-generic/uaccess.h> +#endif + +#endif /* _M68K_UACCESS_H */ diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h deleted file mode 100644 index 9ae9f8d05925..000000000000 --- a/arch/m68k/include/asm/uaccess_mm.h +++ /dev/null @@ -1,390 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __M68K_UACCESS_H -#define __M68K_UACCESS_H - -/* - * User space memory access functions - */ -#include <linux/compiler.h> -#include <linux/types.h> -#include <asm/segment.h> - -/* We let the MMU do all checking */ -static inline int access_ok(const void __user *addr, - unsigned long size) -{ - return 1; -} - -/* - * Not all varients of the 68k family support the notion of address spaces. - * The traditional 680x0 parts do, and they use the sfc/dfc registers and - * the "moves" instruction to access user space from kernel space. Other - * family members like ColdFire don't support this, and only have a single - * address space, and use the usual "move" instruction for user space access. - * - * Outside of this difference the user space access functions are the same. - * So lets keep the code simple and just define in what we need to use. 
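raw_copy_from_user()/raw_copy_to_user() above rely on __builtin_constant_p() so that copies whose size is known at compile time take the fully unrolled inline path while everything else falls back to the out-of-line generic routine. The same dispatch pattern in stand-alone form, with illustrative names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* out-of-line fallback, analogous to __generic_copy_from_user() */
static unsigned long generic_copy(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;		/* 0 = nothing left uncopied, as in the kernel convention */
}

/* unrolled path for a few small constant sizes, analogous to __constant_copy_from_user() */
static inline unsigned long constant_copy(void *to, const void *from, unsigned long n)
{
	switch (n) {
	case 1: memcpy(to, from, 1); return 0;	/* compiler emits a single byte move */
	case 4: memcpy(to, from, 4); return 0;	/* single 32-bit move */
	case 8: memcpy(to, from, 8); return 0;
	default: return generic_copy(to, from, n);
	}
}

static inline unsigned long copy_dispatch(void *to, const void *from, unsigned long n)
{
	/* when n is a compile-time constant the whole switch folds away */
	if (__builtin_constant_p(n))
		return constant_copy(to, from, n);
	return generic_copy(to, from, n);
}

int main(void)
{
	char src[16] = "hello, world", dst[16];

	copy_dispatch(dst, src, 8);				/* constant size: inlined path */
	copy_dispatch(dst, src, (unsigned long)strlen(src) + 1);	/* runtime size: generic path */
	printf("%s\n", dst);
	return 0;
}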
- */ -#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES -#define MOVES "moves" -#else -#define MOVES "move" -#endif - -extern int __put_user_bad(void); -extern int __get_user_bad(void); - -#define __put_user_asm(res, x, ptr, bwl, reg, err) \ -asm volatile ("\n" \ - "1: "MOVES"."#bwl" %2,%1\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "10: moveq.l %3,%0\n" \ - " jra 2b\n" \ - " .previous\n" \ - "\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,10b\n" \ - " .long 2b,10b\n" \ - " .previous" \ - : "+d" (res), "=m" (*(ptr)) \ - : #reg (x), "i" (err)) - -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - */ - -#define __put_user(x, ptr) \ -({ \ - typeof(*(ptr)) __pu_val = (x); \ - int __pu_err = 0; \ - __chk_user_ptr(ptr); \ - switch (sizeof (*(ptr))) { \ - case 1: \ - __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \ - break; \ - case 2: \ - __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \ - break; \ - case 4: \ - __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ - break; \ - case 8: \ - { \ - const void __user *__pu_ptr = (ptr); \ - asm volatile ("\n" \ - "1: "MOVES".l %2,(%1)+\n" \ - "2: "MOVES".l %R2,(%1)\n" \ - "3:\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "10: movel %3,%0\n" \ - " jra 3b\n" \ - " .previous\n" \ - "\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,10b\n" \ - " .long 2b,10b\n" \ - " .long 3b,10b\n" \ - " .previous" \ - : "+d" (__pu_err), "+a" (__pu_ptr) \ - : "r" (__pu_val), "i" (-EFAULT) \ - : "memory"); \ - break; \ - } \ - default: \ - __pu_err = __put_user_bad(); \ - break; \ - } \ - __pu_err; \ -}) -#define put_user(x, ptr) __put_user(x, ptr) - - -#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ - type __gu_val; \ - asm volatile ("\n" \ - "1: "MOVES"."#bwl" %2,%1\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "10: move.l %3,%0\n" \ - " sub.l %1,%1\n" \ - " jra 2b\n" \ - " .previous\n" \ - "\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,10b\n" \ - " .previous" \ - : "+d" (res), "=&" #reg (__gu_val) \ - : "m" (*(ptr)), "i" (err)); \ - (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \ -}) - -#define __get_user(x, ptr) \ -({ \ - int __gu_err = 0; \ - __chk_user_ptr(ptr); \ - switch (sizeof(*(ptr))) { \ - case 1: \ - __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \ - break; \ - case 2: \ - __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \ - break; \ - case 4: \ - __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ - break; \ - case 8: { \ - const void __user *__gu_ptr = (ptr); \ - union { \ - u64 l; \ - __typeof__(*(ptr)) t; \ - } __gu_val; \ - asm volatile ("\n" \ - "1: "MOVES".l (%2)+,%1\n" \ - "2: "MOVES".l (%2),%R1\n" \ - "3:\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "10: move.l %3,%0\n" \ - " sub.l %1,%1\n" \ - " sub.l %R1,%R1\n" \ - " jra 3b\n" \ - " .previous\n" \ - "\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,10b\n" \ - " .long 2b,10b\n" \ - " .previous" \ - : "+d" (__gu_err), "=&r" (__gu_val.l), \ - "+a" (__gu_ptr) \ - : "i" (-EFAULT) \ - : "memory"); \ - (x) = __gu_val.t; \ - break; \ - } \ - default: \ - __gu_err = __get_user_bad(); \ - break; \ - } \ - __gu_err; \ -}) -#define get_user(x, ptr) __get_user(x, ptr) - -unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); -unsigned long __generic_copy_to_user(void __user *to, 
const void *from, unsigned long n); - -#define __suffix0 -#define __suffix1 b -#define __suffix2 w -#define __suffix4 l - -#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ - asm volatile ("\n" \ - "1: "MOVES"."#s1" (%2)+,%3\n" \ - " move."#s1" %3,(%1)+\n" \ - " .ifnc \""#s2"\",\"\"\n" \ - "2: "MOVES"."#s2" (%2)+,%3\n" \ - " move."#s2" %3,(%1)+\n" \ - " .ifnc \""#s3"\",\"\"\n" \ - "3: "MOVES"."#s3" (%2)+,%3\n" \ - " move."#s3" %3,(%1)+\n" \ - " .endif\n" \ - " .endif\n" \ - "4:\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,10f\n" \ - " .ifnc \""#s2"\",\"\"\n" \ - " .long 2b,20f\n" \ - " .ifnc \""#s3"\",\"\"\n" \ - " .long 3b,30f\n" \ - " .endif\n" \ - " .endif\n" \ - " .previous\n" \ - "\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "10: addq.l #"#n1",%0\n" \ - " .ifnc \""#s2"\",\"\"\n" \ - "20: addq.l #"#n2",%0\n" \ - " .ifnc \""#s3"\",\"\"\n" \ - "30: addq.l #"#n3",%0\n" \ - " .endif\n" \ - " .endif\n" \ - " jra 4b\n" \ - " .previous\n" \ - : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ - : : "memory") - -#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ - ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3) -#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \ - ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \ - __suffix##n1, __suffix##n2, __suffix##n3) - -static __always_inline unsigned long -__constant_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned long res = 0, tmp; - - switch (n) { - case 1: - __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0); - break; - case 2: - __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0); - break; - case 3: - __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0); - break; - case 4: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0); - break; - case 5: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0); - break; - case 6: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0); - break; - case 7: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1); - break; - case 8: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0); - break; - case 9: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1); - break; - case 10: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2); - break; - case 12: - __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4); - break; - default: - /* we limit the inlined version to 3 moves */ - return __generic_copy_from_user(to, from, n); - } - - return res; -} - -#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ - asm volatile ("\n" \ - " move."#s1" (%2)+,%3\n" \ - "11: "MOVES"."#s1" %3,(%1)+\n" \ - "12: move."#s2" (%2)+,%3\n" \ - "21: "MOVES"."#s2" %3,(%1)+\n" \ - "22:\n" \ - " .ifnc \""#s3"\",\"\"\n" \ - " move."#s3" (%2)+,%3\n" \ - "31: "MOVES"."#s3" %3,(%1)+\n" \ - "32:\n" \ - " .endif\n" \ - "4:\n" \ - "\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 11b,5f\n" \ - " .long 12b,5f\n" \ - " .long 21b,5f\n" \ - " .long 22b,5f\n" \ - " .ifnc \""#s3"\",\"\"\n" \ - " .long 31b,5f\n" \ - " .long 32b,5f\n" \ - " .endif\n" \ - " .previous\n" \ - "\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "5: moveq.l #"#n",%0\n" \ - " jra 4b\n" \ - " .previous\n" \ - : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \ - : : "memory") - -static __always_inline unsigned long -__constant_copy_to_user(void __user *to, const void 
*from, unsigned long n) -{ - unsigned long res = 0, tmp; - - switch (n) { - case 1: - __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1); - break; - case 2: - __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2); - break; - case 3: - __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,); - break; - case 4: - __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); - break; - case 5: - __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,); - break; - case 6: - __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,); - break; - case 7: - __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b); - break; - case 8: - __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,); - break; - case 9: - __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b); - break; - case 10: - __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w); - break; - case 12: - __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l); - break; - default: - /* limit the inlined version to 3 moves */ - return __generic_copy_to_user(to, from, n); - } - - return res; -} - -static inline unsigned long -raw_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (__builtin_constant_p(n)) - return __constant_copy_from_user(to, from, n); - return __generic_copy_from_user(to, from, n); -} - -static inline unsigned long -raw_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (__builtin_constant_p(n)) - return __constant_copy_to_user(to, from, n); - return __generic_copy_to_user(to, from, n); -} -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - -#define user_addr_max() \ - (uaccess_kernel() ? ~0UL : TASK_SIZE) - -extern long strncpy_from_user(char *dst, const char __user *src, long count); -extern __must_check long strnlen_user(const char __user *str, long n); - -unsigned long __clear_user(void __user *to, unsigned long n); - -#define clear_user __clear_user - -#endif /* _M68K_UACCESS_H */ diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h deleted file mode 100644 index dcfb69361408..000000000000 --- a/arch/m68k/include/asm/uaccess_no.h +++ /dev/null @@ -1,160 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __M68KNOMMU_UACCESS_H -#define __M68KNOMMU_UACCESS_H - -/* - * User space memory access functions - */ -#include <linux/string.h> - -#include <asm/segment.h> - -#define access_ok(addr,size) _access_ok((unsigned long)(addr),(size)) - -/* - * It is not enough to just have access_ok check for a real RAM address. - * This would disallow the case of code/ro-data running XIP in flash/rom. - * Ideally we would check the possible flash ranges too, but that is - * currently not so easy. - */ -static inline int _access_ok(unsigned long addr, unsigned long size) -{ - return 1; -} - -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. 
- */ - -#define put_user(x, ptr) \ -({ \ - int __pu_err = 0; \ - typeof(*(ptr)) __pu_val = (x); \ - switch (sizeof (*(ptr))) { \ - case 1: \ - __put_user_asm(__pu_err, __pu_val, ptr, b); \ - break; \ - case 2: \ - __put_user_asm(__pu_err, __pu_val, ptr, w); \ - break; \ - case 4: \ - __put_user_asm(__pu_err, __pu_val, ptr, l); \ - break; \ - case 8: \ - memcpy((void __force *)ptr, &__pu_val, sizeof(*(ptr))); \ - break; \ - default: \ - __pu_err = __put_user_bad(); \ - break; \ - } \ - __pu_err; \ -}) -#define __put_user(x, ptr) put_user(x, ptr) - -extern int __put_user_bad(void); - -/* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. - */ - -#define __ptr(x) ((unsigned long __user *)(x)) - -#define __put_user_asm(err,x,ptr,bwl) \ - __asm__ ("move" #bwl " %0,%1" \ - : /* no outputs */ \ - :"d" (x),"m" (*__ptr(ptr)) : "memory") - -#define get_user(x, ptr) \ -({ \ - int __gu_err = 0; \ - switch (sizeof(*(ptr))) { \ - case 1: \ - __get_user_asm(__gu_err, x, ptr, b, "=d"); \ - break; \ - case 2: \ - __get_user_asm(__gu_err, x, ptr, w, "=r"); \ - break; \ - case 4: \ - __get_user_asm(__gu_err, x, ptr, l, "=r"); \ - break; \ - case 8: { \ - union { \ - u64 l; \ - __typeof__(*(ptr)) t; \ - } __gu_val; \ - memcpy(&__gu_val.l, (const void __force *)ptr, sizeof(__gu_val.l)); \ - (x) = __gu_val.t; \ - break; \ - } \ - default: \ - __gu_err = __get_user_bad(); \ - break; \ - } \ - __gu_err; \ -}) -#define __get_user(x, ptr) get_user(x, ptr) - -extern int __get_user_bad(void); - -#define __get_user_asm(err,x,ptr,bwl,reg) \ - __asm__ ("move" #bwl " %1,%0" \ - : "=d" (x) \ - : "m" (*__ptr(ptr))) - -static inline unsigned long -raw_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - memcpy(to, (__force const void *)from, n); - return 0; -} - -static inline unsigned long -raw_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - memcpy((__force void *)to, from, n); - return 0; -} -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - -/* - * Copy a null terminated string from userspace. - */ - -static inline long -strncpy_from_user(char *dst, const char *src, long count) -{ - char *tmp; - strncpy(dst, src, count); - for (tmp = dst; *tmp && count > 0; tmp++, count--) - ; - return(tmp - dst); /* DAVIDM should we count a NUL ? check getname */ -} - -/* - * Return the size of a string (including the ending 0) - * - * Return 0 on exception, a value greater than N if too long - */ -static inline long strnlen_user(const char *src, long n) -{ - return(strlen(src) + 1); /* DAVIDM make safer */ -} - -/* - * Zero Userspace - */ - -static inline unsigned long -__clear_user(void *to, unsigned long n) -{ - memset(to, 0, n); - return 0; -} - -#define clear_user(to,n) __clear_user(to,n) - -#endif /* _M68KNOMMU_UACCESS_H */ diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index b1ca3522eccc..1c1b875fadc1 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -6,7 +6,7 @@ #undef DEBUG -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/platform_device.h> diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index 29de2b3108ea..493c95db0e51 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S @@ -57,7 +57,7 @@ * Of course, readability is a subjective issue, so it will never be * argued that that goal was accomplished. It was merely a goal. 
* A key way to help make code more readable is to give good - * documentation. So, the first thing you will find is exaustive + * documentation. So, the first thing you will find is exhaustive * write-ups on the structure of the file, and the features of the * functional subroutines. * @@ -1304,7 +1304,7 @@ L(mmu_fixup_done): * mmu_engage * * This chunk of code performs the gruesome task of engaging the MMU. - * The reason its gruesome is because when the MMU becomes engaged it + * The reason it's gruesome is because when the MMU becomes engaged it * maps logical addresses to physical addresses. The Program Counter * register is then passed through the MMU before the next instruction * is fetched (the instruction following the engage MMU instruction). @@ -1369,7 +1369,7 @@ L(mmu_fixup_done): /* * After this point no new memory is allocated and * the start of available memory is stored in availmem. - * (The bootmem allocator requires now the physicall address.) + * (The bootmem allocator requires now the physical address.) */ movel L(memory_start),availmem @@ -1547,7 +1547,7 @@ func_return get_bi_record * seven bits of the logical address (LA) are used as an * index into the "root table." Each entry in the root * table has a bit which specifies if it's a valid pointer to a - * pointer table. Each entry defines a 32KMeg range of memory. + * pointer table. Each entry defines a 32Meg range of memory. * If an entry is invalid then that logical range of 32M is * invalid and references to that range of memory (when the MMU * is enabled) will fault. If the entry is valid, then it does @@ -1584,7 +1584,7 @@ func_return get_bi_record * bits 17..12 - index into the Page Table * bits 11..0 - offset into a particular 4K page * - * The algorithms which follows do one thing: they abstract + * The algorithms which follow do one thing: they abstract * the MMU hardware. For example, there are three kinds of * cache settings that are relevant. Either, memory is * being mapped in which case it is either Kernel Code (or @@ -2082,7 +2082,7 @@ func_return mmu_map_tt * mmu_map * * This routine will map a range of memory using a pointer - * table and allocating the pages on the fly from the kernel. + * table and allocate the pages on the fly from the kernel. * The pointer table does not have to be already linked into * the root table, this routine will do that if necessary. * @@ -2528,7 +2528,7 @@ func_start mmu_get_root_table_entry,%d0/%a1 /* Find the start of free memory, get_bi_record does this for us, * as the bootinfo structure is located directly behind the kernel - * and and we simply search for the last entry. + * we simply search for the last entry. */ get_bi_record BI_LAST addw #PAGESIZE-1,%a0 @@ -2654,7 +2654,7 @@ func_start mmu_get_page_table_entry,%d0/%a1 jne 2f /* If the page table entry doesn't exist, we allocate a complete new - * page and use it as one continues big page table which can cover + * page and use it as one continuous big page table which can cover * 4MB of memory, nearly almost all mappings have that alignment. */ get_new_page diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 6492a2c54dbc..08359a6e058f 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -107,10 +107,10 @@ void flush_thread(void) * on top of pt_regs, which means that sys_clone() arguments would be * buried. We could, of course, copy them, but it's too costly for no * good reason - generic clone() would have to copy them *again* for - * _do_fork() anyway. 
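As an aside (illustrative sketch, assuming the 7/7/6/12 address split the head.S comment above describes; the seven bits between the root index and the page-table index select the pointer table), the translation indices for a 32-bit logical address work out to:

	/* hypothetical sketch of the index extraction described above */
	static inline void mmu_index_example(unsigned long la)
	{
		unsigned int root_idx = (la >> 25) & 0x7f;	/* bits 31..25: 128 root entries, 32M each */
		unsigned int ptr_idx  = (la >> 18) & 0x7f;	/* bits 24..18: pointer-table index */
		unsigned int page_idx = (la >> 12) & 0x3f;	/* bits 17..12: page-table index */
		unsigned int offset   = la & 0xfff;		/* bits 11..0: offset into a 4K page */

		(void)root_idx; (void)ptr_idx; (void)page_idx; (void)offset;
	}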
So in this case it's actually better to pass pt_regs * - * and extract arguments for _do_fork() from there. Eventually we might - * go for calling _do_fork() directly from the wrapper, but only after we - * are finished with _do_fork() prototype conversion. + * kernel_clone() anyway. So in this case it's actually better to pass pt_regs * + * and extract arguments for kernel_clone() from there. Eventually we might + * go for calling kernel_clone() directly from the wrapper, but only after we + * are finished with kernel_clone() prototype conversion. */ asmlinkage int m68k_clone(struct pt_regs *regs) { @@ -125,7 +125,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs) .tls = regs->d5, }; - return _do_fork(&args); + return kernel_clone(&args); } /* diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index a98fca977073..a85f59bc1c35 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -920,7 +920,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, err |= __put_user(0x70004e40 + (__NR_sigreturn << 16), (long __user *)(frame->retcode)); #else - err |= __put_user((void *) ret_from_user_signal, &frame->pretcode); + err |= __put_user((long) ret_from_user_signal, + (long __user *) &frame->pretcode); #endif if (err) @@ -1004,7 +1005,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4)); #endif #else - err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode); + err |= __put_user((long) ret_from_user_rt_signal, + (long __user *) &frame->pretcode); #endif /* CONFIG_MMU */ if (err) diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 81fc799d8392..625fb6d32842 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -439,3 +439,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 546e81935fe8..9e1261462bcc 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -845,7 +845,6 @@ static void show_trace(unsigned long *stack, const char *loglvl) void show_registers(struct pt_regs *regs) { struct frame *fp = (struct frame *)regs; - mm_segment_t old_fs = get_fs(); u16 c, *cp; unsigned long addr; int i; @@ -918,10 +917,9 @@ void show_registers(struct pt_regs *regs) show_stack(NULL, (unsigned long *)addr, KERN_INFO); pr_info("Code:"); - set_fs(KERNEL_DS); cp = (u16 *)regs->pc; for (i = -8; i < 16; i++) { - if (get_user(c, cp + i) && i >= 0) { + if (get_kernel_nofault(c, cp + i) && i >= 0) { pr_cont(" Bad PC value."); break; } @@ -930,7 +928,6 @@ void show_registers(struct pt_regs *regs) else pr_cont(" <%04x>", c); } - set_fs(old_fs); pr_cont("\n"); } diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c index 31797be9a3dc..7e6afeae6217 100644 --- a/arch/m68k/lib/checksum.c +++ b/arch/m68k/lib/checksum.c @@ -129,8 +129,7 @@ EXPORT_SYMBOL(csum_partial); */ __wsum -csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *csum_err) +csum_and_copy_from_user(const void __user *src, void *dst, int len) { /* * GCC doesn't like more than 10 operands for the asm @@ -138,6 +137,7 @@ csum_and_copy_from_user(const void __user *src, void *dst, * code. 
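As an aside (a minimal C sketch of the new calling convention, not the m68k implementation itself): the error pointer is gone, a fault is reported by returning 0, and seeding the sum with ~0U keeps a successful result from ever being 0.

	/* hypothetical generic fallback; the asm below does copy+sum in one pass */
	static __wsum csum_and_copy_from_user_sketch(const void __user *src,
						     void *dst, int len)
	{
		__wsum sum = ~0U;		/* seed: a successful sum is never 0 */

		if (copy_from_user(dst, src, len))
			return 0;		/* 0 now signals a faulting access */
		return csum_partial(dst, len, sum);
	}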
*/ unsigned long tmp1, tmp2; + __wsum sum = ~0U; __asm__("movel %2,%4\n\t" "btst #1,%4\n\t" /* Check alignment */ @@ -236,84 +236,33 @@ csum_and_copy_from_user(const void __user *src, void *dst, "clrl %5\n\t" "addxl %5,%0\n\t" /* add X bit */ "7:\t" - "clrl %5\n" /* no error - clear return value */ - "8:\n" ".section .fixup,\"ax\"\n" ".even\n" - /* If any exception occurs zero out the rest. - Similarities with the code above are intentional :-) */ + /* If any exception occurs, return 0 */ "90:\t" - "clrw %3@+\n\t" - "movel %1,%4\n\t" - "lsrl #5,%1\n\t" - "jeq 1f\n\t" - "subql #1,%1\n" - "91:\t" - "clrl %3@+\n" - "92:\t" - "clrl %3@+\n" - "93:\t" - "clrl %3@+\n" - "94:\t" - "clrl %3@+\n" - "95:\t" - "clrl %3@+\n" - "96:\t" - "clrl %3@+\n" - "97:\t" - "clrl %3@+\n" - "98:\t" - "clrl %3@+\n\t" - "dbra %1,91b\n\t" - "clrw %1\n\t" - "subql #1,%1\n\t" - "jcc 91b\n" - "1:\t" - "movel %4,%1\n\t" - "andw #0x1c,%4\n\t" - "jeq 1f\n\t" - "lsrw #2,%4\n\t" - "subqw #1,%4\n" - "99:\t" - "clrl %3@+\n\t" - "dbra %4,99b\n\t" - "1:\t" - "andw #3,%1\n\t" - "jeq 9f\n" - "100:\t" - "clrw %3@+\n\t" - "tstw %1\n\t" - "jeq 9f\n" - "101:\t" - "clrb %3@+\n" - "9:\t" -#define STR(X) STR1(X) -#define STR1(X) #X - "moveq #-" STR(EFAULT) ",%5\n\t" - "jra 8b\n" + "clrl %0\n" + "jra 7b\n" ".previous\n" ".section __ex_table,\"a\"\n" ".long 10b,90b\n" - ".long 11b,91b\n" - ".long 12b,92b\n" - ".long 13b,93b\n" - ".long 14b,94b\n" - ".long 15b,95b\n" - ".long 16b,96b\n" - ".long 17b,97b\n" - ".long 18b,98b\n" - ".long 19b,99b\n" - ".long 20b,100b\n" - ".long 21b,101b\n" + ".long 11b,90b\n" + ".long 12b,90b\n" + ".long 13b,90b\n" + ".long 14b,90b\n" + ".long 15b,90b\n" + ".long 16b,90b\n" + ".long 17b,90b\n" + ".long 18b,90b\n" + ".long 19b,90b\n" + ".long 20b,90b\n" + ".long 21b,90b\n" ".previous" : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst), "=&d" (tmp1), "=d" (tmp2) : "0" (sum), "1" (len), "2" (src), "3" (dst) ); - *csum_err = tmp2; - - return(sum); + return sum; } EXPORT_SYMBOL(csum_and_copy_from_user); @@ -324,9 +273,10 @@ EXPORT_SYMBOL(csum_and_copy_from_user); */ __wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len) { unsigned long tmp1, tmp2; + __wsum sum = 0; __asm__("movel %2,%4\n\t" "btst #1,%4\n\t" /* Check alignment */ "jeq 2f\n\t" diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index a621fcc1a576..0ac53d87493c 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/vt_kern.h> #include <linux/platform_device.h> +#include <linux/ata_platform.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> @@ -940,6 +941,26 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = { }, }; +static const struct resource mac_ide_quadra_rsrc[] __initconst = { + DEFINE_RES_MEM(0x50F1A000, 0x104), + DEFINE_RES_IRQ(IRQ_NUBUS_F), +}; + +static const struct resource mac_ide_pb_rsrc[] __initconst = { + DEFINE_RES_MEM(0x50F1A000, 0x104), + DEFINE_RES_IRQ(IRQ_NUBUS_C), +}; + +static const struct resource mac_pata_baboon_rsrc[] __initconst = { + DEFINE_RES_MEM(0x50F1A000, 0x38), + DEFINE_RES_MEM(0x50F1A038, 0x04), + DEFINE_RES_IRQ(IRQ_BABOON_1), +}; + +static const struct pata_platform_info mac_pata_baboon_data __initconst = { + .ioport_shift = 2, +}; + int __init mac_platform_init(void) { phys_addr_t swim_base = 0; @@ -1049,6 +1070,26 @@ int __init mac_platform_init(void) } /* + * IDE device + */ + + switch (macintosh_config->ide_type) { 
+ case MAC_IDE_QUADRA: + platform_device_register_simple("mac_ide", -1, + mac_ide_quadra_rsrc, ARRAY_SIZE(mac_ide_quadra_rsrc)); + break; + case MAC_IDE_PB: + platform_device_register_simple("mac_ide", -1, + mac_ide_pb_rsrc, ARRAY_SIZE(mac_ide_pb_rsrc)); + break; + case MAC_IDE_BABOON: + platform_device_register_resndata(NULL, "pata_platform", -1, + mac_pata_baboon_rsrc, ARRAY_SIZE(mac_pata_baboon_rsrc), + &mac_pata_baboon_data, sizeof(mac_pata_baboon_data)); + break; + } + + /* * Ethernet device */ diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c index 388780797f7d..4de6229c7bfd 100644 --- a/arch/m68k/mac/macboing.c +++ b/arch/m68k/mac/macboing.c @@ -116,7 +116,7 @@ static void mac_init_asc( void ) * support 16-bit stereo output, but only mono input." * * Technical Information Library (TIL) article number 16405. - * http://support.apple.com/kb/TA32601 + * https://support.apple.com/kb/TA32601 * * --David Kilzer */ diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 2b9cb4a62281..eac9dde65193 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -42,7 +42,7 @@ void __init paging_init(void) unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; int i; - empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE); + empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!empty_zero_page) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 2bb006bdc31c..3a653f0a4188 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -226,8 +226,8 @@ static pte_t * __init kernel_page_table(void) { pte_t *pte_table = last_pte_table; - if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) { - pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (PAGE_ALIGNED(last_pte_table)) { + pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); if (!pte_table) { panic("%s: Failed to allocate %lu bytes align=%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); @@ -274,9 +274,8 @@ static pmd_t * __init kernel_ptr_table(void) } last_pmd_table += PTRS_PER_PMD; - if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) { - last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE, - PAGE_SIZE); + if (PAGE_ALIGNED(last_pmd_table)) { + last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); if (!last_pmd_table) panic("%s: Failed to allocate %lu bytes align=%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d262ac0c8714..37bd6a5f38fb 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -26,6 +26,7 @@ config MICROBLAZE select GENERIC_SCHED_CLOCK select HAVE_ARCH_HASH select HAVE_ARCH_KGDB + select HAVE_ARCH_SECCOMP select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE @@ -120,23 +121,6 @@ config CMDLINE_FORCE Set this to have arguments from the default kernel command string override those passed by the boot loader. -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on PROC_FS - default y - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. 
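As an aside on the mm/motorola.c and mm/mcfmmu.c hunks above (illustrative only): PAGE_ALIGNED() is a readability helper, so the conversion is behaviour-preserving, and memblock_alloc() already returns void *, which is why the casts can go.

	/* both forms test the same low bits, since ~PAGE_MASK == PAGE_SIZE - 1 */
	static inline bool old_check(const void *p)
	{
		return (((unsigned long)p) & ~PAGE_MASK) == 0;
	}

	static inline bool new_check(const void *p)
	{
		return PAGE_ALIGNED(p);	/* IS_ALIGNED((unsigned long)p, PAGE_SIZE) */
	}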
Once seccomp is - enabled via /proc/<pid>/seccomp, it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. - endmenu menu "Kernel features" diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2e87a9b6d312..63bce836b9f1 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generated-y += syscall_table.h generic-y += extable.h -generic-y += hw_irq.h generic-y += kvm_para.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index d7bebd04247b..04d091ade417 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -8,9 +8,8 @@ */ #include <linux/device.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/gfp.h> -#include <linux/dma-debug.h> #include <linux/export.h> #include <linux/bug.h> #include <asm/cacheflush.h> diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index b4e263916f41..aae729c95cf9 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -445,3 +445,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index e09b66e43cb6..81dffe43b18c 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -11,7 +11,7 @@ #include <linux/types.h> #include <linux/mm.h> #include <linux/init.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <asm/cpuinfo.h> #include <asm/cacheflush.h> diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 3344d4a1fe89..45da639bd22c 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -7,7 +7,7 @@ * for more details. 
*/ -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/memblock.h> #include <linux/init.h> #include <linux/kernel.h> @@ -108,15 +108,15 @@ static void __init paging_init(void) void __init setup_memory(void) { - struct memblock_region *reg; - #ifndef CONFIG_MMU u32 kernel_align_start, kernel_align_size; + phys_addr_t start, end; + u64 i; /* Find main memory where is the kernel */ - for_each_memblock(memory, reg) { - memory_start = (u32)reg->base; - lowmem_size = reg->size; + for_each_mem_range(i, &start, &end) { + memory_start = start; + lowmem_size = end - start; if ((memory_start <= (u32)_text) && ((u32)_text <= (memory_start + lowmem_size - 1))) { memory_size = lowmem_size; @@ -164,17 +164,6 @@ void __init setup_memory(void) pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); - /* Add active regions with valid PFNs */ - for_each_memblock(memory, reg) { - unsigned long start_pfn, end_pfn; - - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - memblock_set_node(start_pfn << PAGE_SHIFT, - (end_pfn - start_pfn) << PAGE_SHIFT, - &memblock.memory, 0); - } - paging_init(); } diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms index a13c4cf6e608..5483e38b5dc7 100644 --- a/arch/mips/Kbuild.platforms +++ b/arch/mips/Kbuild.platforms @@ -13,7 +13,6 @@ platform-$(CONFIG_MIPS_COBALT) += cobalt/ platform-$(CONFIG_MACH_DECSTATION) += dec/ platform-$(CONFIG_MIPS_GENERIC) += generic/ platform-$(CONFIG_MACH_JAZZ) += jazz/ -platform-$(CONFIG_MACH_INGENIC) += jz4740/ platform-$(CONFIG_LANTIQ) += lantiq/ platform-$(CONFIG_MACH_LOONGSON2EF) += loongson2ef/ platform-$(CONFIG_MACH_LOONGSON32) += loongson32/ @@ -22,7 +21,6 @@ platform-$(CONFIG_MIPS_MALTA) += mti-malta/ platform-$(CONFIG_NLM_COMMON) += netlogic/ platform-$(CONFIG_PIC32MZDA) += pic32/ platform-$(CONFIG_MACH_PISTACHIO) += pistachio/ -platform-$(CONFIG_SOC_PNX833X) += pnx833x/ platform-$(CONFIG_RALINK) += ralink/ platform-$(CONFIG_MIKROTIK_RB532) += rb532/ platform-$(CONFIG_SGI_IP22) += sgi-ip22/ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8f328298f8cc..bc04cf000e94 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -86,6 +86,7 @@ config MIPS select MODULES_USE_ELF_REL if MODULES select MODULES_USE_ELF_RELA if MODULES && 64BIT select PERF_USE_VMALLOC + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select RTC_LIB select SYSCTL_EXCEPTION_TRACE select VIRT_TO_BUS @@ -93,14 +94,34 @@ config MIPS config MIPS_FIXUP_BIGPHYS_ADDR bool +config MIPS_GENERIC + bool + +config MACH_INGENIC + bool + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_SUPPORTS_LITTLE_ENDIAN + select SYS_SUPPORTS_ZBOOT + select DMA_NONCOHERENT + select IRQ_MIPS_CPU + select PINCTRL + select GPIOLIB + select COMMON_CLK + select GENERIC_IRQ_CHIP + select BUILTIN_DTB if MIPS_NO_APPENDED_DTB + select USE_OF + select CPU_SUPPORTS_CPUFREQ + select MIPS_EXTERNAL_TIMER + menu "Machine selection" choice prompt "System type" - default MIPS_GENERIC + default MIPS_GENERIC_KERNEL -config MIPS_GENERIC +config MIPS_GENERIC_KERNEL bool "Generic board-agnostic MIPS kernel" + select MIPS_GENERIC select BOOT_RAW select BUILTIN_DTB select CEVT_R4K @@ -137,6 +158,7 @@ config MIPS_GENERIC select SYS_SUPPORTS_MULTITHREADING select SYS_SUPPORTS_RELOCATABLE select SYS_SUPPORTS_SMARTMIPS + select SYS_SUPPORTS_ZBOOT select UHI_BOOT select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN @@ -389,20 
+411,11 @@ config MACH_JAZZ Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and Olivetti M700-10 workstations. -config MACH_INGENIC +config MACH_INGENIC_SOC bool "Ingenic SoC based machines" - select SYS_SUPPORTS_32BIT_KERNEL - select SYS_SUPPORTS_LITTLE_ENDIAN + select MIPS_GENERIC + select MACH_INGENIC select SYS_SUPPORTS_ZBOOT_UART16550 - select CPU_SUPPORTS_HUGEPAGES - select DMA_NONCOHERENT - select IRQ_MIPS_CPU - select PINCTRL - select GPIOLIB - select COMMON_CLK - select GENERIC_IRQ_CHIP - select BUILTIN_DTB if MIPS_NO_APPENDED_DTB - select USE_OF config LANTIQ bool "Lantiq based platforms" @@ -475,6 +488,7 @@ config MACH_LOONGSON64 select SYS_SUPPORTS_ZBOOT select ZONE_DMA32 select NUMA + select SMP select COMMON_CLK select USE_OF select BUILTIN_DTB @@ -568,6 +582,7 @@ config MIPS_MALTA select SYS_SUPPORTS_VPE_LOADER select SYS_SUPPORTS_ZBOOT select USE_OF + select WAR_ICACHE_REFILLS select ZONE_DMA32 if 64BIT help This enables support for the MIPS Technologies Malta evaluation @@ -589,19 +604,6 @@ config MACH_VR41XX select SYS_SUPPORTS_MIPS16 select GPIOLIB -config NXP_STB220 - bool "NXP STB220 board" - select SOC_PNX833X - help - Support for NXP Semiconductors STB220 Development Board. - -config NXP_STB225 - bool "NXP 225 board" - select SOC_PNX833X - select SOC_PNX8335 - help - Support for NXP Semiconductors STB225 Development Board. - config RALINK bool "Ralink based machines" select CEVT_R4K @@ -615,6 +617,7 @@ config RALINK select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_MIPS16 + select SYS_SUPPORTS_ZBOOT select SYS_HAS_EARLY_PRINTK select CLKDEV_LOOKUP select ARCH_HAS_RESET_CONTROLLER @@ -651,6 +654,9 @@ config SGI_IP22 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN + select WAR_R4600_V1_INDEX_ICACHEOP + select WAR_R4600_V1_HIT_CACHEOP + select WAR_R4600_V2_HIT_CACHEOP select MIPS_L1_CACHE_SHIFT_7 help This are the SGI Indy, Challenge S and Indigo2, as well as certain @@ -678,6 +684,7 @@ config SGI_IP27 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_SMP + select WAR_R10000_LLSC select MIPS_L1_CACHE_SHIFT_7 select NUMA help @@ -713,6 +720,7 @@ config SGI_IP28 select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN + select WAR_R10000_LLSC select MIPS_L1_CACHE_SHIFT_7 help This is the SGI Indigo2 with R10000 processor. To compile a Linux @@ -739,6 +747,7 @@ config SGI_IP30 select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_SMP + select WAR_R10000_LLSC select MIPS_L1_CACHE_SHIFT_7 select ARC_MEMORY help @@ -766,6 +775,7 @@ config SGI_IP32 select SYS_HAS_CPU_NEVADA select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN + select WAR_ICACHE_REFILLS help If you want this kernel to run on SGI O2 workstation, say Y here. 
@@ -889,6 +899,7 @@ config SNI_RM select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_HIGHMEM select SYS_SUPPORTS_LITTLE_ENDIAN + select WAR_R4600_V2_HIT_CACHEOP help The SNI RM200/300/400 are MIPS-based machines manufactured by Siemens Nixdorf Informationssysteme (SNI), parent company of Pyramid @@ -900,6 +911,7 @@ config MACH_TX39XX config MACH_TX49XX bool "Toshiba TX49 series based machines" + select WAR_TX49XX_ICACHE_INDEX_INV config MIKROTIK_RB532 bool "Mikrotik RB532 boards" @@ -1025,8 +1037,8 @@ source "arch/mips/bcm47xx/Kconfig" source "arch/mips/bcm63xx/Kconfig" source "arch/mips/bmips/Kconfig" source "arch/mips/generic/Kconfig" +source "arch/mips/ingenic/Kconfig" source "arch/mips/jazz/Kconfig" -source "arch/mips/jz4740/Kconfig" source "arch/mips/lantiq/Kconfig" source "arch/mips/pic32/Kconfig" source "arch/mips/pistachio/Kconfig" @@ -1135,7 +1147,6 @@ config DMA_NONCOHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_DMA_SET_UNCACHED select DMA_NONCOHERENT_MMAP - select DMA_NONCOHERENT_CACHE_SYNC select NEED_DMA_MAP_STATE config SYS_HAS_EARLY_PRINTK @@ -1267,23 +1278,6 @@ config PCI_XTALK_BRIDGE config NO_EXCEPT_FILL bool -config SOC_PNX833X - bool - select CEVT_R4K - select CSRC_R4K - select IRQ_MIPS_CPU - select DMA_NONCOHERENT - select SYS_HAS_CPU_MIPS32_R2 - select SYS_SUPPORTS_32BIT_KERNEL - select SYS_SUPPORTS_LITTLE_ENDIAN - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_MIPS16 - select CPU_MIPSR2_IRQ_VI - -config SOC_PNX8335 - bool - select SOC_PNX833X - config MIPS_SPRAM bool @@ -1620,7 +1614,6 @@ config CPU_P5600 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA - select CPU_SUPPORTS_UNCACHED_ACCELERATED select CPU_SUPPORTS_CPUFREQ select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI @@ -1891,6 +1884,7 @@ config SYS_SUPPORTS_ZBOOT select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZO select HAVE_KERNEL_XZ + select HAVE_KERNEL_ZSTD config SYS_SUPPORTS_ZBOOT_UART16550 bool @@ -2272,7 +2266,7 @@ config FORCE_MAX_ZONEORDER default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB - range 11 64 + range 0 64 default "11" help The kernel memory allocator divides physically contiguous memory @@ -2638,6 +2632,76 @@ config MIPS_ASID_BITS_VARIABLE config MIPS_CRC_SUPPORT bool +# R4600 erratum. Due to the lack of errata information the exact +# technical details aren't known. I've experimentally found that disabling +# interrupts during indexed I-cache flushes seems to be sufficient to deal +# with the issue. +config WAR_R4600_V1_INDEX_ICACHEOP + bool + +# Pleasures of the R4600 V1.x. Cite from the IDT R4600 V1.7 errata: +# +# 18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D, +# Hit_Invalidate_D and Create_Dirty_Excl_D should only be +# executed if there is no other dcache activity. If the dcache is +# accessed for another instruction immeidately preceding when these +# cache instructions are executing, it is possible that the dcache +# tag match outputs used by these cache instructions will be +# incorrect. These cache instructions should be preceded by at least +# four instructions that are not any kind of load or store +# instruction. 
+# +# This is not allowed: lw +# nop +# nop +# nop +# cache Hit_Writeback_Invalidate_D +# +# This is allowed: lw +# nop +# nop +# nop +# nop +# cache Hit_Writeback_Invalidate_D +config WAR_R4600_V1_HIT_CACHEOP + bool + +# Writeback and invalidate the primary cache dcache before DMA. +# +# R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D, +# Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only +# operate correctly if the internal data cache refill buffer is empty. These +# CACHE instructions should be separated from any potential data cache miss +# by a load instruction to an uncached address to empty the response buffer." +# (Revision 2.0 device errata from IDT available on https://www.idt.com/ +# in .pdf format.) +config WAR_R4600_V2_HIT_CACHEOP + bool + +# From TX49/H2 manual: "If the instruction (i.e. CACHE) is issued for +# the line which this instruction itself exists, the following +# operation is not guaranteed." +# +# Workaround: do two phase flushing for Index_Invalidate_I +config WAR_TX49XX_ICACHE_INDEX_INV + bool + +# The RM7000 processors and the E9000 cores have a bug (though PMC-Sierra +# opposes it being called that) where invalid instructions in the same +# I-cache line worth of instructions being fetched may case spurious +# exceptions. +config WAR_ICACHE_REFILLS + bool + +# On the R10000 up to version 2.6 (not sure about 2.7) there is a bug that +# may cause ll / sc and lld / scd sequences to execute non-atomically. +config WAR_R10000_LLSC + bool + +# 34K core erratum: "Problems Executing the TLBR Instruction" +config WAR_MIPS34K_MISSED_ITLB + bool + # # - Highmem only makes sense for the 32-bit kernel. # - The current highmem code will only work properly on physically indexed @@ -3005,23 +3069,6 @@ config PHYSICAL_START specified in the "crashkernel=YM@XM" command line boot parameter passed to the panic-ed kernel). -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on PROC_FS - default y - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via /proc/<pid>/seccomp, it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. 
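As a userspace aside (illustrative, not part of the patch): with the per-arch prompt gone, the behaviour the removed help text describes is unchanged; strict mode is still entered via prctl() (or the seccomp() syscall) and cannot be left again.

	/* build with a normal libc; assumes CONFIG_SECCOMP=y in the running kernel */
	#include <linux/seccomp.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0)
			return 1;		/* kernel without CONFIG_SECCOMP */

		/* only read(), write(), _exit() (not exit_group()) and sigreturn() are allowed now */
		write(1, "sandboxed\n", 10);
		syscall(SYS_exit, 0);		/* raw exit; glibc's exit() would use
						   exit_group() and be killed */
		return 0;			/* not reached */
	}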
- config MIPS_O32_FP64_SUPPORT bool "Support for O32 binaries using 64-bit FP" if !CPU_MIPSR6 depends on 32BIT || MIPS32_O32 diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig index 83b288b95b16..69734120ada1 100644 --- a/arch/mips/alchemy/Kconfig +++ b/arch/mips/alchemy/Kconfig @@ -1,12 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -# au1000-style gpio and interrupt controllers -config ALCHEMY_GPIOINT_AU1000 - bool - -# au1300-style GPIO/INT controller -config ALCHEMY_GPIOINT_AU1300 - bool - choice prompt "Machine type" depends on MIPS_ALCHEMY @@ -15,7 +7,6 @@ choice config MIPS_MTX1 bool "4G Systems MTX-1 board" select HAVE_PCI - select ALCHEMY_GPIOINT_AU1000 select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK @@ -33,13 +24,11 @@ config MIPS_DB1XXX config MIPS_XXS1500 bool "MyCable XXS1500 board" - select ALCHEMY_GPIOINT_AU1000 select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK config MIPS_GPR bool "Trapeze ITS GPR board" - select ALCHEMY_GPIOINT_AU1000 select HAVE_PCI select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c index 6c47318946e4..f587c40b6d00 100644 --- a/arch/mips/alchemy/board-gpr.c +++ b/arch/mips/alchemy/board-gpr.c @@ -31,23 +31,6 @@ const char *get_system_type(void) return "GPR"; } -void __init prom_init(void) -{ - unsigned char *memsize_str; - unsigned long memsize; - - prom_argc = fw_arg0; - prom_argv = (char **)fw_arg1; - prom_envp = (char **)fw_arg2; - - prom_init_cmdline(); - - memsize_str = prom_getenv("memsize"); - if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) - memsize = 0x04000000; - add_memory_region(0, memsize, BOOT_MEM_RAM); -} - void prom_putchar(char c) { alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c index 23093535399f..68ea57511629 100644 --- a/arch/mips/alchemy/board-mtx1.c +++ b/arch/mips/alchemy/board-mtx1.c @@ -30,23 +30,6 @@ const char *get_system_type(void) return "MTX-1"; } -void __init prom_init(void) -{ - unsigned char *memsize_str; - unsigned long memsize; - - prom_argc = fw_arg0; - prom_argv = (char **)fw_arg1; - prom_envp = (char **)fw_arg2; - - prom_init_cmdline(); - - memsize_str = prom_getenv("memsize"); - if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) - memsize = 0x04000000; - add_memory_region(0, memsize, BOOT_MEM_RAM); -} - void prom_putchar(char c) { alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c index c67dfe1f4997..b184baa4e56a 100644 --- a/arch/mips/alchemy/board-xxs1500.c +++ b/arch/mips/alchemy/board-xxs1500.c @@ -25,24 +25,6 @@ const char *get_system_type(void) return "XXS1500"; } -void __init prom_init(void) -{ - unsigned char *memsize_str; - unsigned long memsize; - - prom_argc = fw_arg0; - prom_argv = (char **)fw_arg1; - prom_envp = (char **)fw_arg2; - - prom_init_cmdline(); - - memsize_str = prom_getenv("memsize"); - if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) - memsize = 0x04000000; - - add_memory_region(0, memsize, BOOT_MEM_RAM); -} - void prom_putchar(char c) { alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c index af312b5e33f6..d910c0a64de9 100644 --- a/arch/mips/alchemy/common/prom.c +++ b/arch/mips/alchemy/common/prom.c @@ -34,6 +34,9 @@ */ #include <linux/init.h> +#include <linux/kernel.h> +#include <linux/memblock.h> +#include 
<linux/sizes.h> #include <linux/string.h> #include <asm/bootinfo.h> @@ -76,6 +79,24 @@ char *prom_getenv(char *envname) return NULL; } +void __init prom_init(void) +{ + unsigned char *memsize_str; + unsigned long memsize; + + prom_argc = (int)fw_arg0; + prom_argv = (char **)fw_arg1; + prom_envp = (char **)fw_arg2; + + prom_init_cmdline(); + + memsize_str = prom_getenv("memsize"); + if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) + memsize = SZ_64M; /* minimum memsize is 64MB RAM */ + + memblock_add(0, memsize); +} + static inline unsigned char str2hexnum(unsigned char c) { if (c >= '0' && c <= '9') diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c index 8ac1f56ee57d..cd72eaa1168f 100644 --- a/arch/mips/alchemy/devboards/db1300.c +++ b/arch/mips/alchemy/devboards/db1300.c @@ -731,6 +731,7 @@ static struct platform_device db1300_lcd_dev = { /**********************************************************************/ +#if IS_ENABLED(CONFIG_TOUCHSCREEN_WM97XX) static void db1300_wm97xx_irqen(struct wm97xx *wm, int enable) { if (enable) @@ -762,6 +763,12 @@ static int db1300_wm97xx_probe(struct platform_device *pdev) return wm97xx_register_mach_ops(wm, &db1300_wm97xx_ops); } +#else +static int db1300_wm97xx_probe(struct platform_device *pdev) +{ + return -ENODEV; +} +#endif static struct platform_driver db1300_wm97xx_driver = { .driver.name = "wm97xx-touch", diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c index 8d4b65c3268a..754bdd2ca630 100644 --- a/arch/mips/alchemy/devboards/platform.c +++ b/arch/mips/alchemy/devboards/platform.c @@ -20,23 +20,6 @@ #include <prom.h> -void __init prom_init(void) -{ - unsigned char *memsize_str; - unsigned long memsize; - - prom_argc = (int)fw_arg0; - prom_argv = (char **)fw_arg1; - prom_envp = (char **)fw_arg2; - - prom_init_cmdline(); - memsize_str = prom_getenv("memsize"); - if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) - memsize = 64 << 20; /* all devboards have at least 64MB RAM */ - - add_memory_region(0, memsize, BOOT_MEM_RAM); -} - void prom_putchar(char c) { if (alchemy_get_cputype() == ALCHEMY_CPU_AU1300) diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c index ad6efb36ebfe..787716c5e946 100644 --- a/arch/mips/ar7/memory.c +++ b/arch/mips/ar7/memory.c @@ -47,7 +47,7 @@ void __init prom_meminit(void) unsigned long pages; pages = memsize() >> PAGE_SHIFT; - add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM); + memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT); } void __init prom_free_prom_memory(void) diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c index e7b53e3960c8..9dbed7b5ea76 100644 --- a/arch/mips/ath25/ar2315.c +++ b/arch/mips/ath25/ar2315.c @@ -19,6 +19,7 @@ #include <linux/bitops.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> +#include <linux/memblock.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <asm/bootinfo.h> @@ -266,7 +267,7 @@ void __init ar2315_plat_mem_setup(void) memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_COL_WIDTH); memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_ROW_WIDTH); memsize <<= 3; - add_memory_region(0, memsize, BOOT_MEM_RAM); + memblock_add(0, memsize); iounmap(sdram_base); ar2315_rst_base = ioremap(AR2315_RST_BASE, AR2315_RST_SIZE); diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c index 42bf2afb4765..23c879f4b734 100644 --- a/arch/mips/ath25/ar5312.c +++ b/arch/mips/ath25/ar5312.c @@ -19,6 +19,7 @@ #include 
<linux/bitops.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> +#include <linux/memblock.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/reboot.h> @@ -363,7 +364,7 @@ void __init ar5312_plat_mem_setup(void) memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) + (bank1_ac ? (1 << (bank1_ac + 1)) : 0); memsize <<= 20; - add_memory_region(0, memsize, BOOT_MEM_RAM); + memblock_add(0, memsize); iounmap(sdram_base); ar5312_rst_base = ioremap(AR5312_RST_BASE, AR5312_RST_SIZE); diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c index 135a5407f015..3e2a8166377f 100644 --- a/arch/mips/bcm47xx/prom.c +++ b/arch/mips/bcm47xx/prom.c @@ -27,6 +27,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> +#include <linux/memblock.h> #include <linux/spinlock.h> #include <linux/ssb/ssb_driver_chipcommon.h> #include <linux/ssb/ssb_regs.h> @@ -97,7 +98,7 @@ static __init void prom_init_mem(void) */ if (c->cputype == CPU_74K && (mem == (128 << 20))) mem -= 0x1000; - add_memory_region(0, mem, BOOT_MEM_RAM); + memblock_add(0, mem); } /* diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 01427bde2397..94bf839576c1 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -141,7 +141,7 @@ static void __init bcm47xx_register_bcma(void) /* * Memory setup is done in the early part of MIPS's arch_mem_init. It's supposed - * to detect memory and record it with add_memory_region. + * to detect memory and record it with memblock_add. * Any extra initializaion performed here must not use kmalloc or bootmem. */ void __init plat_mem_setup(void) diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index 230bf27c1fb8..01aff80a5967 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org> */ @@ -32,7 +29,6 @@ #include <uapi/linux/bcm933xx_hcs.h> - #define HCS_OFFSET_128K 0x20000 static struct board_info board; @@ -42,30 +38,28 @@ static struct board_info board; */ #ifdef CONFIG_BCM63XX_CPU_3368 static struct board_info __initdata board_cvg834g = { - .name = "CVG834G_E15R3921", - .expected_cpu_id = 0x3368, - - .has_uart0 = 1, - .has_uart1 = 1, + .name = "CVG834G_E15R3921", + .expected_cpu_id = 0x3368, - .has_enet0 = 1, - .has_pci = 1, + .ephy_reset_gpio = 36, + .ephy_reset_gpio_flags = GPIOF_INIT_HIGH, + .has_pci = 1, + .has_uart0 = 1, + .has_uart1 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, .leds = { { - .name = "CVG834G:green:power", - .gpio = 37, + .name = "CVG834G:green:power", + .gpio = 37, .default_trigger= "default-on", }, }, - - .ephy_reset_gpio = 36, - .ephy_reset_gpio_flags = GPIOF_INIT_HIGH, }; #endif /* CONFIG_BCM63XX_CPU_3368 */ @@ -74,44 +68,44 @@ static struct board_info __initdata board_cvg834g = { */ #ifdef CONFIG_BCM63XX_CPU_6328 static struct board_info __initdata board_96328avng = { - .name = "96328avng", - .expected_cpu_id = 0x6328, + .name = "96328avng", + .expected_cpu_id = 0x6328, - .has_uart0 = 1, - .has_pci = 1, - .has_usbd = 0, + .has_pci = 1, + .has_uart0 = 1, + .has_usbd = 0, .usbd = { - .use_fullspeed = 0, - .port_no = 0, + .use_fullspeed = 0, + .port_no = 0, }, .leds = { { - .name = "96328avng::ppp-fail", - .gpio = 2, - .active_low = 1, + .name = "96328avng::ppp-fail", + .gpio = 2, + .active_low = 1, }, { - .name = "96328avng::power", - .gpio = 4, - .active_low = 1, + .name = "96328avng::power", + .gpio = 4, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "96328avng::power-fail", - .gpio = 8, - .active_low = 1, + .name = "96328avng::power-fail", + .gpio = 8, + .active_low = 1, }, { - .name = "96328avng::wps", - .gpio = 9, - .active_low = 1, + .name = "96328avng::wps", + .gpio = 9, + .active_low = 1, }, { - .name = "96328avng::ppp", - .gpio = 11, - .active_low = 1, + .name = "96328avng::ppp", + .gpio = 11, + .active_low = 1, }, }, }; @@ -122,85 +116,86 @@ static struct board_info __initdata board_96328avng = { */ #ifdef CONFIG_BCM63XX_CPU_6338 static struct board_info __initdata board_96338gw = { - .name = "96338GW", - .expected_cpu_id = 0x6338, + .name = "96338GW", + .expected_cpu_id = 0x6338, - .has_uart0 = 1, - .has_enet0 = 1, + .has_ohci0 = 1, + .has_uart0 = 1, + + .has_enet0 = 1, .enet0 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - .has_ohci0 = 1, - .leds = { { - .name = "adsl", - .gpio = 3, - .active_low = 1, + .name = "adsl", + .gpio = 3, + .active_low = 1, }, { - .name = "ses", - .gpio = 5, - .active_low = 1, + .name = "ses", + .gpio = 5, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 4, - .active_low = 1, + .name = "ppp-fail", + .gpio = 4, + .active_low = 1, }, { - .name = "power", - .gpio = 0, - .active_low = 1, + .name = "power", + .gpio = 0, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 1, - .active_low = 1, + .name = "stop", + .gpio = 1, + .active_low = 1, } }, }; static struct board_info __initdata board_96338w = { - .name = "96338W", - .expected_cpu_id = 0x6338, + .name = "96338W", + .expected_cpu_id = 0x6338, + + .has_uart0 = 1, - .has_uart0 = 1, - .has_enet0 = 1, + .has_enet0 = 1, .enet0 = { - .force_speed_100 = 
1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, .leds = { { - .name = "adsl", - .gpio = 3, - .active_low = 1, + .name = "adsl", + .gpio = 3, + .active_low = 1, }, { - .name = "ses", - .gpio = 5, - .active_low = 1, + .name = "ses", + .gpio = 5, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 4, - .active_low = 1, + .name = "ppp-fail", + .gpio = 4, + .active_low = 1, }, { - .name = "power", - .gpio = 0, - .active_low = 1, + .name = "power", + .gpio = 0, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 1, - .active_low = 1, + .name = "stop", + .gpio = 1, + .active_low = 1, }, }, }; @@ -211,10 +206,10 @@ static struct board_info __initdata board_96338w = { */ #ifdef CONFIG_BCM63XX_CPU_6345 static struct board_info __initdata board_96345gw2 = { - .name = "96345GW2", - .expected_cpu_id = 0x6345, + .name = "96345GW2", + .expected_cpu_id = 0x6345, - .has_uart0 = 1, + .has_uart0 = 1, }; #endif /* CONFIG_BCM63XX_CPU_6345 */ @@ -223,286 +218,282 @@ static struct board_info __initdata board_96345gw2 = { */ #ifdef CONFIG_BCM63XX_CPU_6348 static struct board_info __initdata board_96348r = { - .name = "96348R", - .expected_cpu_id = 0x6348, + .name = "96348R", + .expected_cpu_id = 0x6348, - .has_uart0 = 1, - .has_enet0 = 1, - .has_pci = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, .leds = { { - .name = "adsl-fail", - .gpio = 2, - .active_low = 1, + .name = "adsl-fail", + .gpio = 2, + .active_low = 1, }, { - .name = "ppp", - .gpio = 3, - .active_low = 1, + .name = "ppp", + .gpio = 3, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 4, - .active_low = 1, + .name = "ppp-fail", + .gpio = 4, + .active_low = 1, }, { - .name = "power", - .gpio = 0, - .active_low = 1, + .name = "power", + .gpio = 0, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 1, - .active_low = 1, + .name = "stop", + .gpio = 1, + .active_low = 1, }, }, }; static struct board_info __initdata board_96348gw_10 = { - .name = "96348GW-10", - .expected_cpu_id = 0x6348, + .name = "96348GW-10", + .expected_cpu_id = 0x6348, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ohci0 = 1, + .has_pccard = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - .has_ohci0 = 1, - .has_pccard = 1, - .has_ehci0 = 1, - .leds = { { - .name = "adsl-fail", - .gpio = 2, - .active_low = 1, + .name = "adsl-fail", + .gpio = 2, + .active_low = 1, }, { - .name = "ppp", - .gpio = 3, - .active_low = 1, + .name = "ppp", + .gpio = 3, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 4, - .active_low = 1, + .name = "ppp-fail", + .gpio = 4, + .active_low = 1, }, { - .name = "power", - .gpio = 0, - .active_low = 1, + .name = "power", + .gpio = 0, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 1, - .active_low = 1, + .name = "stop", + .gpio = 1, + .active_low = 1, }, }, }; static struct board_info __initdata board_96348gw_11 = { - .name = "96348GW-11", - .expected_cpu_id = 0x6348, + .name = "96348GW-11", + .expected_cpu_id = 0x6348, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ohci0 = 1, + .has_pccard = 1, + .has_pci = 1, + 
.has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - - .has_ohci0 = 1, - .has_pccard = 1, - .has_ehci0 = 1, - .leds = { { - .name = "adsl-fail", - .gpio = 2, - .active_low = 1, + .name = "adsl-fail", + .gpio = 2, + .active_low = 1, }, { - .name = "ppp", - .gpio = 3, - .active_low = 1, + .name = "ppp", + .gpio = 3, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 4, - .active_low = 1, + .name = "ppp-fail", + .gpio = 4, + .active_low = 1, }, { - .name = "power", - .gpio = 0, - .active_low = 1, + .name = "power", + .gpio = 0, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 1, - .active_low = 1, + .name = "stop", + .gpio = 1, + .active_low = 1, }, }, }; static struct board_info __initdata board_96348gw = { - .name = "96348GW", - .expected_cpu_id = 0x6348, + .name = "96348GW", + .expected_cpu_id = 0x6348, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ohci0 = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - .has_ohci0 = 1, - .leds = { { - .name = "adsl-fail", - .gpio = 2, - .active_low = 1, + .name = "adsl-fail", + .gpio = 2, + .active_low = 1, }, { - .name = "ppp", - .gpio = 3, - .active_low = 1, + .name = "ppp", + .gpio = 3, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 4, - .active_low = 1, + .name = "ppp-fail", + .gpio = 4, + .active_low = 1, }, { - .name = "power", - .gpio = 0, - .active_low = 1, + .name = "power", + .gpio = 0, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 1, - .active_low = 1, + .name = "stop", + .gpio = 1, + .active_low = 1, }, }, }; static struct board_info __initdata board_FAST2404 = { - .name = "F@ST2404", - .expected_cpu_id = 0x6348, + .name = "F@ST2404", + .expected_cpu_id = 0x6348, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ohci0 = 1, + .has_pccard = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - - .has_ohci0 = 1, - .has_pccard = 1, - .has_ehci0 = 1, }; static struct board_info __initdata board_rta1025w_16 = { - .name = "RTA1025W_16", - .expected_cpu_id = 0x6348, + .name = "RTA1025W_16", + .expected_cpu_id = 0x6348, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_pci = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, }; static struct board_info __initdata board_DV201AMR = { - .name = "DV201AMR", - .expected_cpu_id = 0x6348, + .name = "DV201AMR", + .expected_cpu_id = 0x6348, - .has_uart0 = 1, - .has_pci = 1, - .has_ohci0 = 1, + .has_ohci0 = 1, + .has_pci = 1, + .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + + 
.has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, }; static struct board_info __initdata board_96348gw_a = { - .name = "96348GW-A", - .expected_cpu_id = 0x6348, + .name = "96348GW-A", + .expected_cpu_id = 0x6348, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ohci0 = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - - .has_ohci0 = 1, }; #endif /* CONFIG_BCM63XX_CPU_6348 */ @@ -511,146 +502,142 @@ static struct board_info __initdata board_96348gw_a = { */ #ifdef CONFIG_BCM63XX_CPU_6358 static struct board_info __initdata board_96358vw = { - .name = "96358VW", - .expected_cpu_id = 0x6358, + .name = "96358VW", + .expected_cpu_id = 0x6358, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ehci0 = 1, + .has_ohci0 = 1, + .has_pccard = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - .has_ohci0 = 1, - .has_pccard = 1, - .has_ehci0 = 1, - .leds = { { - .name = "adsl-fail", - .gpio = 15, - .active_low = 1, + .name = "adsl-fail", + .gpio = 15, + .active_low = 1, }, { - .name = "ppp", - .gpio = 22, - .active_low = 1, + .name = "ppp", + .gpio = 22, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 23, - .active_low = 1, + .name = "ppp-fail", + .gpio = 23, + .active_low = 1, }, { - .name = "power", - .gpio = 4, + .name = "power", + .gpio = 4, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 5, + .name = "stop", + .gpio = 5, }, }, }; static struct board_info __initdata board_96358vw2 = { - .name = "96358VW2", - .expected_cpu_id = 0x6358, + .name = "96358VW2", + .expected_cpu_id = 0x6358, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ehci0 = 1, + .has_ohci0 = 1, + .has_pccard = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - - .has_ohci0 = 1, - .has_pccard = 1, - .has_ehci0 = 1, - .leds = { { - .name = "adsl", - .gpio = 22, - .active_low = 1, + .name = "adsl", + .gpio = 22, + .active_low = 1, }, { - .name = "ppp-fail", - .gpio = 23, + .name = "ppp-fail", + .gpio = 23, }, { - .name = "power", - .gpio = 5, - .active_low = 1, + .name = "power", + .gpio = 5, + .active_low = 1, .default_trigger = "default-on", }, { - .name = "stop", - .gpio = 4, - .active_low = 1, + .name = "stop", + .gpio = 4, + .active_low = 1, }, }, }; static struct board_info __initdata board_AGPFS0 = { - .name = "AGPF-S0", - .expected_cpu_id = 0x6358, + .name = "AGPF-S0", + .expected_cpu_id = 0x6358, - .has_uart0 = 1, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ehci0 = 1, + .has_ohci0 = 1, + .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + 
.force_duplex_full = 1, }, - - .has_ohci0 = 1, - .has_ehci0 = 1, }; static struct board_info __initdata board_DWVS0 = { - .name = "DWV-S0", - .expected_cpu_id = 0x6358, + .name = "DWV-S0", + .expected_cpu_id = 0x6358, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_ehci0 = 1, + .has_ohci0 = 1, + .has_pci = 1, + .has_enet0 = 1, .enet0 = { - .has_phy = 1, - .use_internal_phy = 1, + .has_phy = 1, + .use_internal_phy = 1, }, + .has_enet1 = 1, .enet1 = { - .force_speed_100 = 1, - .force_duplex_full = 1, + .force_speed_100 = 1, + .force_duplex_full = 1, }, - - .has_ohci0 = 1, }; #endif /* CONFIG_BCM63XX_CPU_6358 */ diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index e28ee9a7cc7e..d811e3e03f81 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -146,7 +146,7 @@ void __init plat_time_init(void) void __init plat_mem_setup(void) { - add_memory_region(0, bcm63xx_get_memory_size(), BOOT_MEM_RAM); + memblock_add(0, bcm63xx_get_memory_size()); _machine_halt = bcm63xx_machine_halt; _machine_restart = __bcm63xx_machine_reboot; diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c index df56bf4179e3..49061b870680 100644 --- a/arch/mips/bmips/dma.c +++ b/arch/mips/bmips/dma.c @@ -40,7 +40,7 @@ static struct bmips_dma_range *bmips_dma_ranges; #define FLUSH_RAC 0x100 -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa) { struct bmips_dma_range *r; @@ -52,7 +52,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t pa) return pa; } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) { struct bmips_dma_range *r; diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 6e56caef69f0..d66511825fe1 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -22,7 +22,12 @@ KBUILD_CFLAGS := $(filter-out -pg, $(KBUILD_CFLAGS)) KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) -KBUILD_CFLAGS := $(KBUILD_CFLAGS) -D__KERNEL__ \ +# Disable lq/sq in zboot +ifdef CONFIG_CPU_LOONGSON64 +KBUILD_CFLAGS := $(filter-out -march=loongson3a, $(KBUILD_CFLAGS)) -march=mips64r2 +endif + +KBUILD_CFLAGS := $(KBUILD_CFLAGS) -D__KERNEL__ -D__DISABLE_EXPORTS \ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ @@ -70,6 +75,7 @@ tool_$(CONFIG_KERNEL_LZ4) = lz4 tool_$(CONFIG_KERNEL_LZMA) = lzma tool_$(CONFIG_KERNEL_LZO) = lzo tool_$(CONFIG_KERNEL_XZ) = xzkern +tool_$(CONFIG_KERNEL_ZSTD) = zstd22 targets += vmlinux.bin.z $(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index 88f5d637b1c4..c61c641674e6 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c @@ -72,6 +72,10 @@ void error(char *x) #include "../../../../lib/decompress_unxz.c" #endif +#ifdef CONFIG_KERNEL_ZSTD +#include "../../../../lib/decompress_unzstd.c" +#endif + const unsigned long __stack_chk_guard = 0x000a0dff; void __stack_chk_fail(void) diff --git a/arch/mips/boot/compressed/string.c b/arch/mips/boot/compressed/string.c index 43beecc3587c..0b593b709228 100644 --- a/arch/mips/boot/compressed/string.c +++ b/arch/mips/boot/compressed/string.c @@ -5,6 +5,7 @@ * Very small subset of simple string routines */ +#include <linux/compiler_attributes.h> #include 
<linux/types.h>
 
 void *memcpy(void *dest, const void *src, size_t n)
@@ -27,3 +28,19 @@ void *memset(void *s, int c, size_t n)
 		ss[i] = c;
 	return s;
 }
+
+void * __weak memmove(void *dest, const void *src, size_t n)
+{
+	unsigned int i;
+	const char *s = src;
+	char *d = dest;
+
+	if ((uintptr_t)dest < (uintptr_t)src) {
+		for (i = 0; i < n; i++)
+			d[i] = s[i];
+	} else {
+		for (i = n; i > 0; i--)
+			d[i - 1] = s[i - 1];
+	}
+	return dest;
+}
diff --git a/arch/mips/boot/dts/ingenic/jz4725b.dtsi b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
index a8fca560878d..a1f0b71c9223 100644
--- a/arch/mips/boot/dts/ingenic/jz4725b.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
@@ -7,6 +7,20 @@
 	#size-cells = <1>;
 	compatible = "ingenic,jz4725b";
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "ingenic,xburst-mxu1.0";
+			reg = <0>;
+
+			clocks = <&cgu JZ4725B_CLK_CCLK>;
+			clock-names = "cpu";
+		};
+	};
+
 	cpuintc: interrupt-controller {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 1520585c235c..eee523678ce5 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -7,6 +7,20 @@
 	#size-cells = <1>;
 	compatible = "ingenic,jz4740";
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "ingenic,xburst-mxu1.0";
+			reg = <0>;
+
+			clocks = <&cgu JZ4740_CLK_CCLK>;
+			clock-names = "cpu";
+		};
+	};
+
 	cpuintc: interrupt-controller {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
diff --git a/arch/mips/boot/dts/ingenic/jz4770.dtsi b/arch/mips/boot/dts/ingenic/jz4770.dtsi
index fa11ac950499..018721a9eea9 100644
--- a/arch/mips/boot/dts/ingenic/jz4770.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4770.dtsi
@@ -1,5 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0
-
 #include <dt-bindings/clock/jz4770-cgu.h>
 #include <dt-bindings/clock/ingenic,tcu.h>
 
@@ -8,6 +7,20 @@
 	#size-cells = <1>;
 	compatible = "ingenic,jz4770";
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "ingenic,xburst-fpu1.0-mxu1.1";
+			reg = <0>;
+
+			clocks = <&cgu JZ4770_CLK_CCLK>;
+			clock-names = "cpu";
+		};
+	};
+
 	cpuintc: interrupt-controller {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index b7f409a7cf5d..dfb5a7e1bb21 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -8,6 +8,29 @@
 	#size-cells = <1>;
 	compatible = "ingenic,jz4780";
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "ingenic,xburst-fpu1.0-mxu1.1";
+			reg = <0>;
+
+			clocks = <&cgu JZ4780_CLK_CPU>;
+			clock-names = "cpu";
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "ingenic,xburst-fpu1.0-mxu1.1";
+			reg = <1>;
+
+			clocks = <&cgu JZ4780_CLK_CORE1>;
+			clock-names = "cpu";
+		};
+	};
+
 	cpuintc: interrupt-controller {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts
index bf298268f1a1..ba0218971572 100644
--- a/arch/mips/boot/dts/ingenic/qi_lb60.dts
+++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts
@@ -109,74 +109,73 @@
 		debounce-delay-ms = <10>;
 		wakeup-source;
 
-		row-gpios = <&gpd 18 0 &gpd 19 0 &gpd 20 0 &gpd 21 0
-			     &gpd 22 0 &gpd 23 0 &gpd 24 0 &gpd 26 0>;
-		col-gpios = <&gpc 10 0 &gpc 11 0 &gpc 12 0 &gpc 
13 0 - &gpc 14 0 &gpc 15 0 &gpc 16 0 &gpc 17 0>; + row-gpios = <&gpd 18 0>, <&gpd 19 0>, <&gpd 20 0>, <&gpd 21 0>, + <&gpd 22 0>, <&gpd 23 0>, <&gpd 24 0>, <&gpd 26 0>; + col-gpios = <&gpc 10 0>, <&gpc 11 0>, <&gpc 12 0>, <&gpc 13 0>, + <&gpc 14 0>, <&gpc 15 0>, <&gpc 16 0>, <&gpc 17 0>; gpio-activelow; - linux,keymap = < - MATRIX_KEY(0, 0, KEY_F1) /* S2 */ - MATRIX_KEY(0, 1, KEY_F2) /* S3 */ - MATRIX_KEY(0, 2, KEY_F3) /* S4 */ - MATRIX_KEY(0, 3, KEY_F4) /* S5 */ - MATRIX_KEY(0, 4, KEY_F5) /* S6 */ - MATRIX_KEY(0, 5, KEY_F6) /* S7 */ - MATRIX_KEY(0, 6, KEY_F7) /* S8 */ - - MATRIX_KEY(1, 0, KEY_Q) /* S10 */ - MATRIX_KEY(1, 1, KEY_W) /* S11 */ - MATRIX_KEY(1, 2, KEY_E) /* S12 */ - MATRIX_KEY(1, 3, KEY_R) /* S13 */ - MATRIX_KEY(1, 4, KEY_T) /* S14 */ - MATRIX_KEY(1, 5, KEY_Y) /* S15 */ - MATRIX_KEY(1, 6, KEY_U) /* S16 */ - MATRIX_KEY(1, 7, KEY_I) /* S17 */ - MATRIX_KEY(2, 0, KEY_A) /* S18 */ - MATRIX_KEY(2, 1, KEY_S) /* S19 */ - MATRIX_KEY(2, 2, KEY_D) /* S20 */ - MATRIX_KEY(2, 3, KEY_F) /* S21 */ - MATRIX_KEY(2, 4, KEY_G) /* S22 */ - MATRIX_KEY(2, 5, KEY_H) /* S23 */ - MATRIX_KEY(2, 6, KEY_J) /* S24 */ - MATRIX_KEY(2, 7, KEY_K) /* S25 */ - MATRIX_KEY(3, 0, KEY_ESC) /* S26 */ - MATRIX_KEY(3, 1, KEY_Z) /* S27 */ - MATRIX_KEY(3, 2, KEY_X) /* S28 */ - MATRIX_KEY(3, 3, KEY_C) /* S29 */ - MATRIX_KEY(3, 4, KEY_V) /* S30 */ - MATRIX_KEY(3, 5, KEY_B) /* S31 */ - MATRIX_KEY(3, 6, KEY_N) /* S32 */ - MATRIX_KEY(3, 7, KEY_M) /* S33 */ - MATRIX_KEY(4, 0, KEY_TAB) /* S34 */ - MATRIX_KEY(4, 1, KEY_CAPSLOCK) /* S35 */ - MATRIX_KEY(4, 2, KEY_BACKSLASH) /* S36 */ - MATRIX_KEY(4, 3, KEY_APOSTROPHE) /* S37 */ - MATRIX_KEY(4, 4, KEY_COMMA) /* S38 */ - MATRIX_KEY(4, 5, KEY_DOT) /* S39 */ - MATRIX_KEY(4, 6, KEY_SLASH) /* S40 */ - MATRIX_KEY(4, 7, KEY_UP) /* S41 */ - MATRIX_KEY(5, 0, KEY_O) /* S42 */ - MATRIX_KEY(5, 1, KEY_L) /* S43 */ - MATRIX_KEY(5, 2, KEY_EQUAL) /* S44 */ - MATRIX_KEY(5, 3, KEY_QI_UPRED) /* S45 */ - MATRIX_KEY(5, 4, KEY_SPACE) /* S46 */ - MATRIX_KEY(5, 5, KEY_QI_QI) /* S47 */ - MATRIX_KEY(5, 6, KEY_RIGHTCTRL) /* S48 */ - MATRIX_KEY(5, 7, KEY_LEFT) /* S49 */ - MATRIX_KEY(6, 0, KEY_F8) /* S50 */ - MATRIX_KEY(6, 1, KEY_P) /* S51 */ - MATRIX_KEY(6, 2, KEY_BACKSPACE)/* S52 */ - MATRIX_KEY(6, 3, KEY_ENTER) /* S53 */ - MATRIX_KEY(6, 4, KEY_QI_VOLUP) /* S54 */ - MATRIX_KEY(6, 5, KEY_QI_VOLDOWN) /* S55 */ - MATRIX_KEY(6, 6, KEY_DOWN) /* S56 */ - MATRIX_KEY(6, 7, KEY_RIGHT) /* S57 */ - - MATRIX_KEY(7, 0, KEY_LEFTSHIFT) /* S58 */ - MATRIX_KEY(7, 1, KEY_LEFTALT) /* S59 */ - MATRIX_KEY(7, 2, KEY_QI_FN) /* S60 */ - >; + linux,keymap = + <MATRIX_KEY(0, 0, KEY_F1)>, /* S2 */ + <MATRIX_KEY(0, 1, KEY_F2)>, /* S3 */ + <MATRIX_KEY(0, 2, KEY_F3)>, /* S4 */ + <MATRIX_KEY(0, 3, KEY_F4)>, /* S5 */ + <MATRIX_KEY(0, 4, KEY_F5)>, /* S6 */ + <MATRIX_KEY(0, 5, KEY_F6)>, /* S7 */ + <MATRIX_KEY(0, 6, KEY_F7)>, /* S8 */ + + <MATRIX_KEY(1, 0, KEY_Q)>, /* S10 */ + <MATRIX_KEY(1, 1, KEY_W)>, /* S11 */ + <MATRIX_KEY(1, 2, KEY_E)>, /* S12 */ + <MATRIX_KEY(1, 3, KEY_R)>, /* S13 */ + <MATRIX_KEY(1, 4, KEY_T)>, /* S14 */ + <MATRIX_KEY(1, 5, KEY_Y)>, /* S15 */ + <MATRIX_KEY(1, 6, KEY_U)>, /* S16 */ + <MATRIX_KEY(1, 7, KEY_I)>, /* S17 */ + <MATRIX_KEY(2, 0, KEY_A)>, /* S18 */ + <MATRIX_KEY(2, 1, KEY_S)>, /* S19 */ + <MATRIX_KEY(2, 2, KEY_D)>, /* S20 */ + <MATRIX_KEY(2, 3, KEY_F)>, /* S21 */ + <MATRIX_KEY(2, 4, KEY_G)>, /* S22 */ + <MATRIX_KEY(2, 5, KEY_H)>, /* S23 */ + <MATRIX_KEY(2, 6, KEY_J)>, /* S24 */ + <MATRIX_KEY(2, 7, KEY_K)>, /* S25 */ + <MATRIX_KEY(3, 0, KEY_ESC)>, /* S26 */ + <MATRIX_KEY(3, 1, KEY_Z)>, /* S27 */ + 
<MATRIX_KEY(3, 2, KEY_X)>, /* S28 */ + <MATRIX_KEY(3, 3, KEY_C)>, /* S29 */ + <MATRIX_KEY(3, 4, KEY_V)>, /* S30 */ + <MATRIX_KEY(3, 5, KEY_B)>, /* S31 */ + <MATRIX_KEY(3, 6, KEY_N)>, /* S32 */ + <MATRIX_KEY(3, 7, KEY_M)>, /* S33 */ + <MATRIX_KEY(4, 0, KEY_TAB)>, /* S34 */ + <MATRIX_KEY(4, 1, KEY_CAPSLOCK)>, /* S35 */ + <MATRIX_KEY(4, 2, KEY_BACKSLASH)>, /* S36 */ + <MATRIX_KEY(4, 3, KEY_APOSTROPHE)>, /* S37 */ + <MATRIX_KEY(4, 4, KEY_COMMA)>, /* S38 */ + <MATRIX_KEY(4, 5, KEY_DOT)>, /* S39 */ + <MATRIX_KEY(4, 6, KEY_SLASH)>, /* S40 */ + <MATRIX_KEY(4, 7, KEY_UP)>, /* S41 */ + <MATRIX_KEY(5, 0, KEY_O)>, /* S42 */ + <MATRIX_KEY(5, 1, KEY_L)>, /* S43 */ + <MATRIX_KEY(5, 2, KEY_EQUAL)>, /* S44 */ + <MATRIX_KEY(5, 3, KEY_QI_UPRED)>, /* S45 */ + <MATRIX_KEY(5, 4, KEY_SPACE)>, /* S46 */ + <MATRIX_KEY(5, 5, KEY_QI_QI)>, /* S47 */ + <MATRIX_KEY(5, 6, KEY_RIGHTCTRL)>, /* S48 */ + <MATRIX_KEY(5, 7, KEY_LEFT)>, /* S49 */ + <MATRIX_KEY(6, 0, KEY_F8)>, /* S50 */ + <MATRIX_KEY(6, 1, KEY_P)>, /* S51 */ + <MATRIX_KEY(6, 2, KEY_BACKSPACE)>,/* S52 */ + <MATRIX_KEY(6, 3, KEY_ENTER)>, /* S53 */ + <MATRIX_KEY(6, 4, KEY_QI_VOLUP)>, /* S54 */ + <MATRIX_KEY(6, 5, KEY_QI_VOLDOWN)>, /* S55 */ + <MATRIX_KEY(6, 6, KEY_DOWN)>, /* S56 */ + <MATRIX_KEY(6, 7, KEY_RIGHT)>, /* S57 */ + + <MATRIX_KEY(7, 0, KEY_LEFTSHIFT)>, /* S58 */ + <MATRIX_KEY(7, 1, KEY_LEFTALT)>, /* S59 */ + <MATRIX_KEY(7, 2, KEY_QI_FN)>; /* S60 */ }; spi { @@ -261,12 +260,12 @@ #address-cells = <1>; #size-cells = <0>; - ingenic,bch-controller = <&ecc>; + ecc-engine = <&ecc>; pinctrl-names = "default"; pinctrl-0 = <&pins_nemc>; - rb-gpios = <&gpc 30 GPIO_ACTIVE_LOW>; + rb-gpios = <&gpc 30 GPIO_ACTIVE_HIGH>; nand@1 { reg = <1>; @@ -324,7 +323,7 @@ pins_nemc: nemc { function = "nand"; - groups = "nand-cs1"; + groups = "nand-fre-fwe", "nand-cs1"; }; pins_uart0: uart0 { diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi index 9de9e7c2d523..1f1f896dd1f7 100644 --- a/arch/mips/boot/dts/ingenic/x1000.dtsi +++ b/arch/mips/boot/dts/ingenic/x1000.dtsi @@ -8,6 +8,20 @@ #size-cells = <1>; compatible = "ingenic,x1000", "ingenic,x1000e"; + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "ingenic,xburst-fpu1.0-mxu1.1"; + reg = <0>; + + clocks = <&cgu X1000_CLK_CPU>; + clock-names = "cpu"; + }; + }; + cpuintc: interrupt-controller { #address-cells = <0>; #interrupt-cells = <1>; diff --git a/arch/mips/boot/dts/ingenic/x1830.dtsi b/arch/mips/boot/dts/ingenic/x1830.dtsi index eb1214481a33..b05dac3ae308 100644 --- a/arch/mips/boot/dts/ingenic/x1830.dtsi +++ b/arch/mips/boot/dts/ingenic/x1830.dtsi @@ -8,6 +8,20 @@ #size-cells = <1>; compatible = "ingenic,x1830"; + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "ingenic,xburst-fpu2.0-mxu2.0"; + reg = <0>; + + clocks = <&cgu X1830_CLK_CPU>; + clock-names = "cpu"; + }; + }; + cpuintc: interrupt-controller { #address-cells = <0>; #interrupt-cells = <1>; diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi index e574a062dfae..f99a7a11fded 100644 --- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi +++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi @@ -19,6 +19,45 @@ #interrupt-cells = <2>; }; + ls7a_uart0: serial@10080000 { + compatible = "ns16550a"; + reg = <0 0x10080000 0 0x100>; + clock-frequency = <50000000>; + interrupt-parent = <&pic>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + no-loopback-test; + }; + + ls7a_uart1: 
serial@10080100 { + status = "disabled"; + compatible = "ns16550a"; + reg = <0 0x10080100 0 0x100>; + clock-frequency = <50000000>; + interrupt-parent = <&pic>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + no-loopback-test; + }; + + ls7a_uart2: serial@10080200 { + status = "disabled"; + compatible = "ns16550a"; + reg = <0 0x10080200 0 0x100>; + clock-frequency = <50000000>; + interrupt-parent = <&pic>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + no-loopback-test; + }; + + ls7a_uart3: serial@10080300 { + status = "disabled"; + compatible = "ns16550a"; + reg = <0 0x10080300 0 0x100>; + clock-frequency = <50000000>; + interrupt-parent = <&pic>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + no-loopback-test; + }; + pci@1a000000 { compatible = "loongson,ls7a-pci"; device_type = "pci"; diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi index f94e8a02ed06..535a98284dcb 100644 --- a/arch/mips/boot/dts/mscc/ocelot.dtsi +++ b/arch/mips/boot/dts/mscc/ocelot.dtsi @@ -134,11 +134,13 @@ <0x1280000 0x100>, <0x1800000 0x80000>, <0x1880000 0x10000>, + <0x1040000 0x10000>, + <0x1050000 0x10000>, <0x1060000 0x10000>; reg-names = "sys", "rew", "qs", "ptp", "port0", "port1", "port2", "port3", "port4", "port5", "port6", "port7", "port8", "port9", "port10", "qsys", - "ana", "s2"; + "ana", "s0", "s1", "s2"; interrupts = <18 21 22>; interrupt-names = "ptp_rdy", "xtr", "inj"; diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 14ea680d180e..df70308db0e6 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -168,7 +168,7 @@ void __init octeon_pci_dma_init(void) } #endif /* CONFIG_PCI */ -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { #ifdef CONFIG_PCI if (dev && dev_is_pci(dev)) @@ -177,7 +177,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) return paddr; } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { #ifdef CONFIG_PCI if (dev && dev_is_pci(dev)) @@ -190,25 +190,25 @@ char *octeon_swiotlb; void __init plat_swiotlb_setup(void) { - struct memblock_region *mem; + phys_addr_t start, end; phys_addr_t max_addr; phys_addr_t addr_size; size_t swiotlbsize; unsigned long swiotlb_nslabs; + u64 i; max_addr = 0; addr_size = 0; - for_each_memblock(memory, mem) { + for_each_mem_range(i, &start, &end) { /* These addresses map low for PCI. 
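The plat_swiotlb_setup() hunk in dma-octeon.c switches from the old for_each_memblock() walk to for_each_mem_range(), which hands back bare start/end physical addresses instead of a struct memblock_region pointer. A minimal sketch of the new iteration pattern, assuming only the generic memblock API; the helper name and the size accounting below are illustrative, not part of this patch:

#include <linux/memblock.h>

/* Sum up all memory registered with memblock, using the range-based
 * iterator that replaces for_each_memblock(). */
static phys_addr_t __init example_total_memory(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_mem_range(i, &start, &end)
		total += end - start;

	return total;
}
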
*/ - if (mem->base > 0x410000000ull && !OCTEON_IS_OCTEON2()) + if (start > 0x410000000ull && !OCTEON_IS_OCTEON2()) continue; - addr_size += mem->size; - - if (max_addr < mem->base + mem->size) - max_addr = mem->base + mem->size; + addr_size += (end - start); + if (max_addr < end) + max_addr = end; } swiotlbsize = PAGE_SIZE; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 4f34d92b52f9..561389d3fadb 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/memblock.h> #include <linux/serial.h> #include <linux/smp.h> #include <linux/types.h> @@ -930,7 +931,7 @@ static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size) { if (addr > *mem && addr < *mem + *size) { u64 inc = addr - *mem; - add_memory_region(*mem, inc, BOOT_MEM_RAM); + memblock_add(*mem, inc); *mem += inc; *size -= inc; } @@ -992,19 +993,18 @@ void __init plat_mem_setup(void) /* Crashkernel ignores bootmem list. It relies on mem=X@Y option */ #ifdef CONFIG_CRASH_DUMP - add_memory_region(reserve_low_mem, max_memory, BOOT_MEM_RAM); + memblock_add(reserve_low_mem, max_memory); total += max_memory; #else #ifdef CONFIG_KEXEC if (crashk_size > 0) { - add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM); + memblock_add(crashk_base, crashk_size); crashk_end = crashk_base + crashk_size; } #endif /* - * When allocating memory, we want incrementing addresses from - * bootmem_alloc so the code in add_memory_region can merge - * regions next to each other. + * When allocating memory, we want incrementing addresses, + * which is handled by memblock */ cvmx_bootmem_lock(); while (total < max_memory) { @@ -1039,13 +1039,9 @@ void __init plat_mem_setup(void) */ if (memory < crashk_base && end > crashk_end) { /* region is fully in */ - add_memory_region(memory, - crashk_base - memory, - BOOT_MEM_RAM); + memblock_add(memory, crashk_base - memory); total += crashk_base - memory; - add_memory_region(crashk_end, - end - crashk_end, - BOOT_MEM_RAM); + memblock_add(crashk_end, end - crashk_end); total += end - crashk_end; continue; } @@ -1073,7 +1069,7 @@ void __init plat_mem_setup(void) */ mem_alloc_size -= end - crashk_base; #endif - add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM); + memblock_add(memory, mem_alloc_size); total += mem_alloc_size; /* Recovering mem_alloc_size */ mem_alloc_size = 4 << 20; @@ -1088,7 +1084,7 @@ void __init plat_mem_setup(void) /* Adjust for physical offset. */ kernel_start &= ~0xffffffff80000000ULL; - add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM); + memblock_add(kernel_start, kernel_size); #endif /* CONFIG_CRASH_DUMP */ #ifdef CONFIG_CAVIUM_RESERVE32 @@ -1126,7 +1122,7 @@ EXPORT_SYMBOL(prom_putchar); void __init prom_free_prom_memory(void) { - if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) { + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* Check for presence of Core-14449 fix. 
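The Octeon plat_mem_setup() changes above drop the old add_memory_region(..., BOOT_MEM_RAM) helper in favour of calling memblock directly. A minimal sketch of the replacement pattern, assuming the generic memblock interface; the 256 MiB RAM size and the 1 MiB firmware reservation are made-up values for illustration only:

#include <linux/memblock.h>
#include <linux/sizes.h>

/* Register 256 MiB of RAM at physical address 0, then keep the first
 * 1 MiB (e.g. firmware working storage) away from the page allocator. */
static void __init example_plat_mem_setup(void)
{
	memblock_add(0, SZ_256M);
	memblock_reserve(0, SZ_1M);
}
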
*/ u32 insn; u32 *foo; diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c index c136a18c7221..46581e686882 100644 --- a/arch/mips/cobalt/setup.c +++ b/arch/mips/cobalt/setup.c @@ -13,6 +13,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> +#include <linux/memblock.h> #include <linux/pm.h> #include <asm/bootinfo.h> @@ -112,7 +113,7 @@ void __init prom_init(void) strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); } - add_memory_region(0x0, memsz, BOOT_MEM_RAM); + memblock_add(0, memsz); setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0); } diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index 0a46199fdc3f..052c5ad0f2b1 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -22,7 +22,7 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y -CONFIG_MACH_INGENIC=y +CONFIG_MACH_INGENIC_SOC=y CONFIG_JZ4780_CI20=y CONFIG_HIGHMEM=y CONFIG_HZ_100=y @@ -42,7 +42,7 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_DEVTMPFS=y -# CONFIG_FW_LOADER is not set +CONFIG_FW_LOADER=m # CONFIG_ALLOW_DEV_COREDUMP is not set CONFIG_MTD=y CONFIG_MTD_RAW_NAND=y diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig index e924c817f73d..55d0690a3ffe 100644 --- a/arch/mips/configs/cu1000-neo_defconfig +++ b/arch/mips/configs/cu1000-neo_defconfig @@ -1,5 +1,3 @@ -CONFIG_LOCALVERSION_AUTO=y -CONFIG_KERNEL_GZIP=y CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y @@ -9,7 +7,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_CGROUPS=y CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y CONFIG_CGROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y @@ -22,7 +19,7 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y -CONFIG_MACH_INGENIC=y +CONFIG_MACH_INGENIC_SOC=y CONFIG_X1000_CU1000_NEO=y CONFIG_HIGHMEM=y CONFIG_HZ_100=y @@ -31,7 +28,6 @@ CONFIG_HZ_100=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_COMPACTION is not set CONFIG_CMA=y -CONFIG_CMA_AREAS=7 CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -40,19 +36,16 @@ CONFIG_CFG80211=y CONFIG_UEVENT_HELPER=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y -# CONFIG_FW_LOADER is not set # CONFIG_ALLOW_DEV_COREDUMP is not set CONFIG_NETDEVICES=y CONFIG_STMMAC_ETH=y CONFIG_SMSC_PHY=y CONFIG_BRCMFMAC=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_LEGACY_PTY_COUNT=2 -CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=3 @@ -66,8 +59,6 @@ CONFIG_GPIO_SYSFS=y CONFIG_SENSORS_ADS7828=y CONFIG_WATCHDOG=y CONFIG_JZ4740_WDT=y -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_BACKLIGHT_CLASS_DEVICE is not set # CONFIG_VGA_CONSOLE is not set # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set @@ -82,8 +73,6 @@ CONFIG_RTC_DRV_JZ4740=y CONFIG_DMADEVICES=y CONFIG_DMA_JZ4780=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_AUTOFS_FS=y @@ -108,8 +97,8 @@ CONFIG_CONSOLE_LOGLEVEL_QUIET=15 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7 CONFIG_DEBUG_INFO=y CONFIG_STRIP_ASM_SYMS=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set diff --git 
a/arch/mips/configs/cu1830-neo_defconfig b/arch/mips/configs/cu1830-neo_defconfig index cbfb62900273..e7064851a47a 100644 --- a/arch/mips/configs/cu1830-neo_defconfig +++ b/arch/mips/configs/cu1830-neo_defconfig @@ -1,5 +1,3 @@ -CONFIG_LOCALVERSION_AUTO=y -CONFIG_KERNEL_GZIP=y CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y @@ -9,7 +7,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_CGROUPS=y CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y CONFIG_CGROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_DEVICE=y @@ -22,7 +19,7 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y -CONFIG_MACH_INGENIC=y +CONFIG_MACH_INGENIC_SOC=y CONFIG_X1830_CU1830_NEO=y CONFIG_HIGHMEM=y CONFIG_HZ_100=y @@ -31,7 +28,6 @@ CONFIG_HZ_100=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_COMPACTION is not set CONFIG_CMA=y -CONFIG_CMA_AREAS=7 CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -40,7 +36,6 @@ CONFIG_CFG80211=y CONFIG_UEVENT_HELPER=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y -# CONFIG_FW_LOADER is not set # CONFIG_ALLOW_DEV_COREDUMP is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y @@ -49,13 +44,11 @@ CONFIG_NETDEVICES=y CONFIG_STMMAC_ETH=y CONFIG_ICPLUS_PHY=y CONFIG_BRCMFMAC=y -# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_LEGACY_PTY_COUNT=2 -CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 @@ -69,8 +62,6 @@ CONFIG_GPIO_SYSFS=y CONFIG_SENSORS_ADS7828=y CONFIG_WATCHDOG=y CONFIG_JZ4740_WDT=y -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_BACKLIGHT_CLASS_DEVICE is not set # CONFIG_VGA_CONSOLE is not set # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set @@ -85,8 +76,6 @@ CONFIG_RTC_DRV_JZ4740=y CONFIG_DMADEVICES=y CONFIG_DMA_JZ4780=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_AUTOFS_FS=y @@ -111,8 +100,8 @@ CONFIG_CONSOLE_LOGLEVEL_QUIET=15 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7 CONFIG_DEBUG_INFO=y CONFIG_STRIP_ASM_SYMS=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_TIMEOUT=10 # CONFIG_SCHED_DEBUG is not set diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig index 4994749b9eaa..7e28a4fe9d84 100644 --- a/arch/mips/configs/gcw0_defconfig +++ b/arch/mips/configs/gcw0_defconfig @@ -4,7 +4,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_EMBEDDED=y CONFIG_PROFILING=y -CONFIG_MACH_INGENIC=y +CONFIG_MACH_INGENIC_SOC=y CONFIG_JZ4770_GCW0=y CONFIG_HIGHMEM=y # CONFIG_SECCOMP is not set diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index a65b08de4098..38a817ead8e7 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -30,7 +30,6 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y CONFIG_MACH_LOONGSON64=y CONFIG_CPU_HAS_MSA=y -CONFIG_SMP=y CONFIG_NR_CPUS=16 CONFIG_HZ_256=y CONFIG_KEXEC=y @@ -403,7 +402,6 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m CONFIG_PRINTK_TIME=y -CONFIG_FRAME_WARN=1024 CONFIG_STRIP_ASM_SYMS=y CONFIG_MAGIC_SYSRQ=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig deleted file mode 100644 index d06db6b87959..000000000000 --- a/arch/mips/configs/pnx8335_stb225_defconfig +++ /dev/null @@ 
-1,77 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -# CONFIG_SWAP is not set -CONFIG_SYSVIPC=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_EXPERT=y -CONFIG_SLAB=y -CONFIG_NXP_STB225=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_HZ_128=y -# CONFIG_SECCOMP is not set -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_INET_AH=y -# CONFIG_IPV6 is not set -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_ADV_OPTIONS=y -CONFIG_MTD_CFI_LE_BYTE_SWAP=y -CONFIG_MTD_CFI_GEOMETRY=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_SD=y -# CONFIG_SCSI_LOWLEVEL is not set -CONFIG_ATA=y -CONFIG_NETDEVICES=y -CONFIG_INPUT_EVDEV=m -CONFIG_INPUT_EVBUG=m -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_VT_CONSOLE is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_PNX8XXX=y -CONFIG_SERIAL_PNX8XXX_CONSOLE=y -CONFIG_HW_RANDOM=y -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -# CONFIG_HWMON is not set -CONFIG_FB=y -# CONFIG_VGA_CONSOLE is not set -CONFIG_SOUND=m -CONFIG_SND=m -CONFIG_SND_VERBOSE_PRINTK=y -CONFIG_SND_DEBUG=y -CONFIG_SND_SEQUENCER=m -CONFIG_EXT2_FS=m -# CONFIG_DNOTIFY is not set -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_TMPFS=y -CONFIG_JFFS2_FS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=m -CONFIG_NFSD_V3=y -CONFIG_NLS=y -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_UTF8=m diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig index 81bfbee72b0c..b4448d0876d5 100644 --- a/arch/mips/configs/qi_lb60_defconfig +++ b/arch/mips/configs/qi_lb60_defconfig @@ -7,7 +7,8 @@ CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y -CONFIG_MACH_INGENIC=y +CONFIG_MACH_INGENIC_SOC=y +CONFIG_JZ4740_QI_LB60=y CONFIG_HZ_100=y # CONFIG_SECCOMP is not set CONFIG_MODULES=y @@ -72,9 +73,7 @@ CONFIG_DRM=y CONFIG_DRM_FBDEV_OVERALLOC=200 CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_INGENIC=y -# CONFIG_LCD_CLASS_DEVICE is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_GENERIC is not set # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y @@ -170,9 +169,9 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_STRIP_ASM_SYMS=y CONFIG_READABLE_ASM=y +CONFIG_KGDB=y CONFIG_DEBUG_KMEMLEAK=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_PANIC_ON_OOPS=y # CONFIG_FTRACE is not set -CONFIG_KGDB=y diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig index de6752051ecc..dfbb9fed9a42 100644 --- a/arch/mips/configs/rs90_defconfig +++ b/arch/mips/configs/rs90_defconfig @@ -19,7 +19,7 @@ CONFIG_EMBEDDED=y # CONFIG_PERF_EVENTS is not set CONFIG_SLAB=y CONFIG_PROFILING=y -CONFIG_MACH_INGENIC=y +CONFIG_MACH_INGENIC_SOC=y CONFIG_JZ4740_RS90=y CONFIG_PAGE_SIZE_16KB=y CONFIG_HZ_100=y @@ -80,8 +80,8 @@ CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=2 -# CONFIG_DEVMEM is not set # CONFIG_HW_RANDOM is not set +# CONFIG_DEVMEM is not set # CONFIG_I2C_COMPAT is not set # CONFIG_I2C_HELPER_AUTO is not set CONFIG_POWER_SUPPLY=y diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c 
index 5073d2ed78bb..44490c30d63b 100644 --- a/arch/mips/dec/prom/memory.c +++ b/arch/mips/dec/prom/memory.c @@ -12,7 +12,6 @@ #include <linux/types.h> #include <asm/addrspace.h> -#include <asm/bootinfo.h> #include <asm/dec/machtype.h> #include <asm/dec/prom.h> #include <asm/page.h> @@ -28,7 +27,7 @@ volatile unsigned long mem_err; /* So we know an error occurred */ #define CHUNK_SIZE 0x400000 -static inline void pmax_setup_memory_region(void) +static __init void pmax_setup_memory_region(void) { volatile unsigned char *memory_page, dummy; char old_handler[0x80]; @@ -50,15 +49,14 @@ static inline void pmax_setup_memory_region(void) } memcpy((void *)(CKSEG0 + 0x80), &old_handler, 0x80); - add_memory_region(0, (unsigned long)memory_page - CKSEG1 - CHUNK_SIZE, - BOOT_MEM_RAM); + memblock_add(0, (unsigned long)memory_page - CKSEG1 - CHUNK_SIZE); } /* * Use the REX prom calls to get hold of the memory bitmap, and thence * determine memory size. */ -static inline void rex_setup_memory_region(void) +static __init void rex_setup_memory_region(void) { int i, bitmap_size; unsigned long mem_start = 0, mem_size = 0; @@ -76,13 +74,13 @@ static inline void rex_setup_memory_region(void) else if (!mem_size) mem_start += (8 * bm->pagesize); else { - add_memory_region(mem_start, mem_size, BOOT_MEM_RAM); + memblock_add(mem_start, mem_size); mem_start += mem_size + (8 * bm->pagesize); mem_size = 0; } } if (mem_size) - add_memory_region(mem_start, mem_size, BOOT_MEM_RAM); + memblock_add(mem_start, mem_size); } void __init prom_meminit(u32 magic) diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index d4e868b828e5..eaad0ed4b523 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -6,7 +6,7 @@ * for more details. * * Copyright (C) 1998 Harald Koerfgen - * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki + * Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020 Maciej W. Rozycki */ #include <linux/console.h> #include <linux/export.h> @@ -15,6 +15,7 @@ #include <linux/ioport.h> #include <linux/irq.h> #include <linux/irqnr.h> +#include <linux/memblock.h> #include <linux/param.h> #include <linux/percpu-defs.h> #include <linux/sched.h> @@ -22,6 +23,7 @@ #include <linux/types.h> #include <linux/pm.h> +#include <asm/addrspace.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/cpu-features.h> @@ -29,7 +31,9 @@ #include <asm/irq.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> +#include <asm/page.h> #include <asm/reboot.h> +#include <asm/sections.h> #include <asm/time.h> #include <asm/traps.h> #include <asm/wbflush.h> @@ -146,6 +150,9 @@ void __init plat_mem_setup(void) ioport_resource.start = ~0UL; ioport_resource.end = 0UL; + + /* Stay away from the firmware working memory area for now. 
*/ + memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET); } /* diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c index da0712ad85f5..37625ae5e35d 100644 --- a/arch/mips/fw/arc/memory.c +++ b/arch/mips/fw/arc/memory.c @@ -68,20 +68,24 @@ static char *arc_mtypes[8] = { : arc_mtypes[a.arc] #endif +enum { + mem_free, mem_prom_used, mem_reserved +}; + static inline int memtype_classify_arcs(union linux_memtypes type) { switch (type.arcs) { case arcs_fcontig: case arcs_free: - return BOOT_MEM_RAM; + return mem_free; case arcs_atmp: - return BOOT_MEM_ROM_DATA; + return mem_prom_used; case arcs_eblock: case arcs_rvpage: case arcs_bmem: case arcs_prog: case arcs_aperm: - return BOOT_MEM_RESERVED; + return mem_reserved; default: BUG(); } @@ -93,15 +97,15 @@ static inline int memtype_classify_arc(union linux_memtypes type) switch (type.arc) { case arc_free: case arc_fcontig: - return BOOT_MEM_RAM; + return mem_free; case arc_atmp: - return BOOT_MEM_ROM_DATA; + return mem_prom_used; case arc_eblock: case arc_rvpage: case arc_bmem: case arc_prog: case arc_aperm: - return BOOT_MEM_RESERVED; + return mem_reserved; default: BUG(); } @@ -143,9 +147,17 @@ void __weak __init prom_meminit(void) size = p->pages << ARC_PAGE_SHIFT; type = prom_memtype_classify(p->type); - add_memory_region(base, size, type); + /* ignore mirrored RAM on IP28/IP30 */ + if (base < PHYS_OFFSET) + continue; + + memblock_add(base, size); + + if (type == mem_reserved) + memblock_reserve(base, size); - if (type == BOOT_MEM_ROM_DATA) { + if (type == mem_prom_used) { + memblock_reserve(base, size); if (nr_prom_mem >= 5) { pr_err("Too many ROM DATA regions"); continue; diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c index 80112f2298b6..8f6730376a42 100644 --- a/arch/mips/fw/sni/sniprom.c +++ b/arch/mips/fw/sni/sniprom.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/init.h> +#include <linux/memblock.h> #include <linux/string.h> #include <linux/console.h> @@ -131,8 +132,7 @@ static void __init sni_mem_init(void) } pr_debug("Bank%d: %08x @ %08x\n", i, memconf[i].size, memconf[i].base); - add_memory_region(memconf[i].base, memconf[i].size, - BOOT_MEM_RAM); + memblock_add(memconf[i].base, memconf[i].size); } } diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig index fd6019802657..55d9aed7ced9 100644 --- a/arch/mips/generic/Kconfig +++ b/arch/mips/generic/Kconfig @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -if MIPS_GENERIC +if MIPS_GENERIC_KERNEL config LEGACY_BOARDS bool @@ -73,6 +73,12 @@ config FIT_IMAGE_FDT_OCELOT from Microsemi in the FIT kernel image. This requires u-boot on the platform. +config BOARD_INGENIC + bool "Support boards based on Ingenic SoCs" + select MACH_INGENIC_GENERIC + help + Enable support for boards based on Ingenic SoCs. 
+ config VIRT_BOARD_RANCHU bool "Support Ranchu platform for Android emulator" help diff --git a/arch/mips/generic/Makefile b/arch/mips/generic/Makefile index 2384a6b09e4c..e37a59bae0a6 100644 --- a/arch/mips/generic/Makefile +++ b/arch/mips/generic/Makefile @@ -11,4 +11,5 @@ obj-y += proc.o obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o obj-$(CONFIG_LEGACY_BOARD_OCELOT) += board-ocelot.o +obj-$(CONFIG_MACH_INGENIC) += board-ingenic.o obj-$(CONFIG_VIRT_BOARD_RANCHU) += board-ranchu.o diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform index 53c33cb72974..f8ef2f9d107e 100644 --- a/arch/mips/generic/Platform +++ b/arch/mips/generic/Platform @@ -8,8 +8,12 @@ # option) any later version. # +# Note: order matters, keep the asm/mach-generic include last. +cflags-$(CONFIG_MACH_INGENIC_SOC) += -I$(srctree)/arch/mips/include/asm/mach-ingenic cflags-$(CONFIG_MIPS_GENERIC) += -I$(srctree)/arch/mips/include/asm/mach-generic + load-$(CONFIG_MIPS_GENERIC) += 0xffffffff80100000 +zload-$(CONFIG_MIPS_GENERIC) += 0xffffffff81000000 all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb its-y := vmlinux.its.S diff --git a/arch/mips/generic/board-ingenic.c b/arch/mips/generic/board-ingenic.c new file mode 100644 index 000000000000..0cec0bea13d6 --- /dev/null +++ b/arch/mips/generic/board-ingenic.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Support for Ingenic SoCs + * + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * Copyright (C) 2011, Maarten ter Huurne <maarten@treewalker.org> + * Copyright (C) 2020 Paul Cercueil <paul@crapouillou.net> + */ + +#include <linux/of_address.h> +#include <linux/of_fdt.h> +#include <linux/pm.h> +#include <linux/sizes.h> +#include <linux/suspend.h> +#include <linux/types.h> + +#include <asm/bootinfo.h> +#include <asm/machine.h> +#include <asm/reboot.h> + +static __init char *ingenic_get_system_type(unsigned long machtype) +{ + switch (machtype) { + case MACH_INGENIC_X2000E: + return "X2000E"; + case MACH_INGENIC_X2000: + return "X2000"; + case MACH_INGENIC_X1830: + return "X1830"; + case MACH_INGENIC_X1000E: + return "X1000E"; + case MACH_INGENIC_X1000: + return "X1000"; + case MACH_INGENIC_JZ4780: + return "JZ4780"; + case MACH_INGENIC_JZ4775: + return "JZ4775"; + case MACH_INGENIC_JZ4770: + return "JZ4770"; + case MACH_INGENIC_JZ4725B: + return "JZ4725B"; + default: + return "JZ4740"; + } +} + +static __init const void *ingenic_fixup_fdt(const void *fdt, const void *match_data) +{ + /* + * Old devicetree files for the qi,lb60 board did not have a /memory + * node. Hardcode the memory info here. 
+ */ + if (!fdt_node_check_compatible(fdt, 0, "qi,lb60") && + fdt_path_offset(fdt, "/memory") < 0) + early_init_dt_add_memory_arch(0, SZ_32M); + + mips_machtype = (unsigned long)match_data; + system_type = ingenic_get_system_type(mips_machtype); + + return fdt; +} + +static const struct of_device_id ingenic_of_match[] __initconst = { + { .compatible = "ingenic,jz4740", .data = (void *)MACH_INGENIC_JZ4740 }, + { .compatible = "ingenic,jz4725b", .data = (void *)MACH_INGENIC_JZ4725B }, + { .compatible = "ingenic,jz4770", .data = (void *)MACH_INGENIC_JZ4770 }, + { .compatible = "ingenic,jz4775", .data = (void *)MACH_INGENIC_JZ4775 }, + { .compatible = "ingenic,jz4780", .data = (void *)MACH_INGENIC_JZ4780 }, + { .compatible = "ingenic,x1000", .data = (void *)MACH_INGENIC_X1000 }, + { .compatible = "ingenic,x1000e", .data = (void *)MACH_INGENIC_X1000E }, + { .compatible = "ingenic,x1830", .data = (void *)MACH_INGENIC_X1830 }, + { .compatible = "ingenic,x2000", .data = (void *)MACH_INGENIC_X2000 }, + { .compatible = "ingenic,x2000e", .data = (void *)MACH_INGENIC_X2000E }, + {} +}; + +MIPS_MACHINE(ingenic) = { + .matches = ingenic_of_match, + .fixup_fdt = ingenic_fixup_fdt, +}; + +static void ingenic_wait_instr(void) +{ + __asm__(".set push;\n" + ".set mips3;\n" + "wait;\n" + ".set pop;\n" + ); +} + +static void ingenic_halt(void) +{ + for (;;) + ingenic_wait_instr(); +} + +static int __maybe_unused ingenic_pm_enter(suspend_state_t state) +{ + ingenic_wait_instr(); + + return 0; +} + +static const struct platform_suspend_ops ingenic_pm_ops __maybe_unused = { + .valid = suspend_valid_only_mem, + .enter = ingenic_pm_enter, +}; + +static int __init ingenic_pm_init(void) +{ + if (boot_cpu_type() == CPU_XBURST) { + if (IS_ENABLED(CONFIG_PM_SLEEP)) + suspend_set_ops(&ingenic_pm_ops); + _machine_halt = ingenic_halt; + } + + return 0; + +} +late_initcall(ingenic_pm_init); diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c index 805d0135a9f4..66a19337d2ab 100644 --- a/arch/mips/generic/init.c +++ b/arch/mips/generic/init.c @@ -39,12 +39,11 @@ void __init *plat_get_fdt(void) /* Already set up */ return (void *)fdt; - if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_passed_dtb)) { + if (fw_passed_dtb && !fdt_check_header((void *)fw_passed_dtb)) { /* - * We booted using the UHI boot protocol, so we have been - * provided with the appropriate device tree for the board. - * Make use of it & search for any machine struct based upon - * the root compatible string. + * We have been provided with the appropriate device tree for + * the board. Make use of it & search for any machine struct + * based upon the root compatible string. 
*/ fdt = (void *)fw_passed_dtb; @@ -106,7 +105,7 @@ void __init plat_mem_setup(void) if (mach && mach->fixup_fdt) fdt = mach->fixup_fdt(fdt, mach_match_data); - strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); + fw_init_cmdline(); __dt_setup_arch((void *)fdt); } diff --git a/arch/mips/generic/proc.c b/arch/mips/generic/proc.c index 4c992809cc3f..cce2fde219a3 100644 --- a/arch/mips/generic/proc.c +++ b/arch/mips/generic/proc.c @@ -8,11 +8,16 @@ #include <asm/bootinfo.h> +char *system_type; + const char *get_system_type(void) { const char *str; int err; + if (system_type) + return system_type; + err = of_property_read_string(of_root, "model", &str); if (!err) return str; diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index 147c9327ce04..aa03b1237155 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -79,8 +79,10 @@ enum ingenic_machine_type { MACH_INGENIC_JZ4775, MACH_INGENIC_JZ4780, MACH_INGENIC_X1000, + MACH_INGENIC_X1000E, MACH_INGENIC_X1830, MACH_INGENIC_X2000, + MACH_INGENIC_X2000E, }; extern char *system_type; @@ -88,13 +90,6 @@ const char *get_system_type(void); extern unsigned long mips_machtype; -#define BOOT_MEM_RAM 1 -#define BOOT_MEM_ROM_DATA 2 -#define BOOT_MEM_RESERVED 3 -#define BOOT_MEM_INIT_RAM 4 -#define BOOT_MEM_NOMAP 5 - -extern void add_memory_region(phys_addr_t start, phys_addr_t size, long type); extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max); extern void prom_init(void); diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 181f7d14efb9..5f80c28f5253 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -34,42 +34,17 @@ */ __wsum csum_partial(const void *buff, int len, __wsum sum); -__wsum __csum_partial_copy_kernel(const void *src, void *dst, - int len, __wsum sum, int *err_ptr); - -__wsum __csum_partial_copy_from_user(const void *src, void *dst, - int len, __wsum sum, int *err_ptr); -__wsum __csum_partial_copy_to_user(const void *src, void *dst, - int len, __wsum sum, int *err_ptr); -/* - * this is a new version of the above that records errors it finds in *errp, - * but continues and zeros the rest of the buffer. 
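The checksum.h rework in this hunk trims csum_and_copy_from_user() and csum_and_copy_to_user() down to three arguments: the sum seed and the err_ptr are gone, and a fault during the copy is now reported by returning 0. A hedged sketch of how a caller adapts, assuming only the new MIPS prototypes shown here; the helper and its error handling are illustrative, not taken from the kernel:

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>

/* Copy len bytes from user space while checksumming them. With the new
 * calling convention a return value of 0 means "the copy faulted"
 * rather than being a checksum result. */
static int example_copy_and_csum(const void __user *src, void *dst,
				 int len, __wsum *csump)
{
	__wsum csum = csum_and_copy_from_user(src, dst, len);

	if (!csum)
		return -EFAULT;

	*csump = csum;
	return 0;
}
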
- */ -static inline -__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, - __wsum sum, int *err_ptr) -{ - might_fault(); - if (uaccess_kernel()) - return __csum_partial_copy_kernel((__force void *)src, dst, - len, sum, err_ptr); - else - return __csum_partial_copy_from_user((__force void *)src, dst, - len, sum, err_ptr); -} +__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len); +__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER static inline -__wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len) { - if (access_ok(src, len)) - return csum_partial_copy_from_user(src, dst, len, sum, - err_ptr); - if (len) - *err_ptr = -EFAULT; - - return sum; + might_fault(); + if (!access_ok(src, len)) + return 0; + return __csum_partial_copy_from_user(src, dst, len); } /* @@ -77,33 +52,24 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, */ #define HAVE_CSUM_COPY_USER static inline -__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, - __wsum sum, int *err_ptr) +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len) { might_fault(); - if (access_ok(dst, len)) { - if (uaccess_kernel()) - return __csum_partial_copy_kernel(src, - (__force void *)dst, - len, sum, err_ptr); - else - return __csum_partial_copy_to_user(src, - (__force void *)dst, - len, sum, err_ptr); - } - if (len) - *err_ptr = -EFAULT; - - return (__force __wsum)-1; /* invalid checksum */ + if (!access_ok(dst, len)) + return 0; + return __csum_partial_copy_to_user(src, dst, len); } /* * the same as csum_partial, but copies from user space (but on MIPS * we have just one address space, so this is identical to the above) */ -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum); -#define csum_partial_copy_nocheck csum_partial_copy_nocheck +#define _HAVE_ARCH_CSUM_AND_COPY +__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len); +static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) +{ + return __csum_partial_copy_nocheck(src, dst, len); +} /* * Fold a partial checksum without adding pseudo headers diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 255afcdd79c9..65975712a22d 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -26,8 +26,6 @@ typedef s32 compat_caddr_t; typedef struct { s32 val[2]; } compat_fsid_t; -typedef s64 compat_s64; -typedef u64 compat_u64; struct compat_stat { compat_dev_t st_dev; diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 78cf7e300f12..f2e216eef7da 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -171,9 +171,6 @@ #ifndef cpu_has_llsc #define cpu_has_llsc __isa_ge_or_opt(1, MIPS_CPU_LLSC) #endif -#ifndef cpu_has_bp_ghist -#define cpu_has_bp_ghist __opt(MIPS_CPU_BP_GHIST) -#endif #ifndef kernel_uses_llsc #define kernel_uses_llsc cpu_has_llsc #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 388a82f28a87..c9222cc2244f 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -398,7 +398,6 @@ enum cpu_type_enum { #define MIPS_CPU_RW_LLB BIT_ULL(32) /* LLADDR/LLB writes are allowed */ #define MIPS_CPU_LPA BIT_ULL(33) /* CPU supports 
Large Physical Addressing */ #define MIPS_CPU_CDMM BIT_ULL(34) /* CPU has Common Device Memory Map */ -#define MIPS_CPU_BP_GHIST BIT_ULL(35) /* R12K+ Branch Prediction Global History */ #define MIPS_CPU_SP BIT_ULL(36) /* Small (1KB) page support */ #define MIPS_CPU_FTLB BIT_ULL(37) /* CPU has Fixed-page-size TLB */ #define MIPS_CPU_NAN_LEGACY BIT_ULL(38) /* Legacy NaN implemented */ diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h index 14e352651ce9..9a640118316c 100644 --- a/arch/mips/include/asm/dma-direct.h +++ b/arch/mips/include/asm/dma-direct.h @@ -2,7 +2,7 @@ #ifndef _MIPS_DMA_DIRECT_H #define _MIPS_DMA_DIRECT_H 1 -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr); -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr); +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); #endif /* _MIPS_DMA_DIRECT_H */ diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index 2bf8f6014579..d85248404c52 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -21,7 +21,7 @@ #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ { \ - if (cpu_has_llsc && R10000_LLSC_WAR) { \ + if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ @@ -133,7 +133,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (!access_ok(uaddr, sizeof(u32))) return -EFAULT; - if (cpu_has_llsc && R10000_LLSC_WAR) { + if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { __asm__ __volatile__( "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h index 655a6dbc861a..0992cad9c632 100644 --- a/arch/mips/include/asm/idle.h +++ b/arch/mips/include/asm/idle.h @@ -15,6 +15,8 @@ static inline int using_rollback_handler(void) return cpu_wait == r4k_wait; } +extern void __init check_wait(void); + extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h index d13f940022d5..c831da7fa898 100644 --- a/arch/mips/include/asm/jazzdma.h +++ b/arch/mips/include/asm/jazzdma.h @@ -10,8 +10,6 @@ */ extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size); extern int vdma_free(unsigned long laddr); -extern int vdma_remap(unsigned long laddr, unsigned long paddr, - unsigned long size); extern unsigned long vdma_phys2log(unsigned long paddr); extern unsigned long vdma_log2phys(unsigned long laddr); extern void vdma_stats(void); /* for debugging only */ diff --git a/arch/mips/include/asm/llsc.h b/arch/mips/include/asm/llsc.h index c49738bc3bda..ec09fe5d6d6c 100644 --- a/arch/mips/include/asm/llsc.h +++ b/arch/mips/include/asm/llsc.h @@ -28,7 +28,7 @@ * works around a bug present in R10000 CPUs prior to revision 3.0 that could * cause ll-sc sequences to execute non-atomically. 
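The futex.h and local.h hunks above, and the llsc.h hunk quoted here, move the R10000 ll/sc erratum test from the old R10000_LLSC_WAR macro to a Kconfig symbol, tested with IS_ENABLED(CONFIG_WAR_R10000_LLSC) in C code and #ifdef in headers. A toy sketch of why the IS_ENABLED() form is preferred for the C-level tests; the helper below is illustrative and not part of the patch:

#include <linux/kconfig.h>

/* Pick the store-conditional retry branch: branch-likely (beqzl) for
 * kernels built with the R10000 workaround, plain beqz otherwise.
 * Because IS_ENABLED() is an ordinary C expression, both arms stay
 * visible to the compiler and the dead one is optimised away. */
static inline const char *example_sc_retry_branch(void)
{
	if (IS_ENABLED(CONFIG_WAR_R10000_LLSC))
		return "beqzl";
	return "beqz";
}
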
*/ -#if R10000_LLSC_WAR +#ifdef CONFIG_WAR_R10000_LLSC # define __SC_BEQZ "beqzl " #elif MIPS_ISA_REV >= 6 # define __SC_BEQZ "beqzc " diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index fef0fda8f82f..ecda7295ddcd 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h @@ -31,7 +31,7 @@ static __inline__ long local_add_return(long i, local_t * l) { unsigned long result; - if (kernel_uses_llsc && R10000_LLSC_WAR) { + if (kernel_uses_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { unsigned long temp; __asm__ __volatile__( @@ -80,7 +80,7 @@ static __inline__ long local_sub_return(long i, local_t * l) { unsigned long result; - if (kernel_uses_llsc && R10000_LLSC_WAR) { + if (kernel_uses_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { unsigned long temp; __asm__ __volatile__( diff --git a/arch/mips/include/asm/m48t37.h b/arch/mips/include/asm/m48t37.h deleted file mode 100644 index 3687a02e692b..000000000000 --- a/arch/mips/include/asm/m48t37.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Registers for the SGS-Thomson M48T37 Timekeeper RAM chip - */ -#ifndef _ASM_M48T37_H -#define _ASM_M48T37_H - -#include <linux/spinlock.h> - -extern spinlock_t rtc_lock; - -struct m48t37_rtc { - volatile u8 pad[0x7ff0]; /* NVRAM */ - volatile u8 flags; - volatile u8 century; - volatile u8 alarm_sec; - volatile u8 alarm_min; - volatile u8 alarm_hour; - volatile u8 alarm_data; - volatile u8 interrupts; - volatile u8 watchdog; - volatile u8 control; - volatile u8 sec; - volatile u8 min; - volatile u8 hour; - volatile u8 day; - volatile u8 date; - volatile u8 month; - volatile u8 year; -}; - -#define M48T37_RTC_SET 0x80 -#define M48T37_RTC_STOPPED 0x80 -#define M48T37_RTC_READ 0x40 - -#endif /* _ASM_M48T37_H */ diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h index ecfbb5aeada3..e6e527224a15 100644 --- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h @@ -39,7 +39,6 @@ #define cpu_has_guestctl2 0 #define cpu_has_guestid 0 #define cpu_has_drg 0 -#define cpu_has_bp_ghist 0 #define cpu_has_mips16 0 #define cpu_has_mips16e2 0 #define cpu_has_mdmx 0 diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h index d25846a1291f..d16add7ba49d 100644 --- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h +++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h @@ -120,141 +120,4 @@ static inline int au1300_gpio_getinitlvl(unsigned int gpio) return (v >> gpio) & 1; } -/**********************************************************************/ - -/* Linux gpio framework integration. -* -* 4 use cases of Alchemy GPIOS: -*(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y: -* Board must register gpiochips. -*(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n: -* A gpiochip for the 75 GPIOs is registered. -* -*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y: -* the boards' gpio.h must provide the linux gpio wrapper functions, -* -*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n: -* inlinable gpio functions are provided which enable access to the -* Au1300 gpios only by using the numbers straight out of the data- -* sheets. - -* Cases 1 and 3 are intended for boards which want to provide their own -* GPIO namespace and -operations (i.e. 
for example you have 8 GPIOs -* which are in part provided by spare Au1300 GPIO pins and in part by -* an external FPGA but you still want them to be accessible in linux -* as gpio0-7. The board can of course use the alchemy_gpioX_* functions -* as required). -*/ - -#ifndef CONFIG_GPIOLIB - -#ifdef CONFIG_ALCHEMY_GPIOINT_AU1300 - -#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (4) */ - -static inline int gpio_direction_input(unsigned int gpio) -{ - return au1300_gpio_direction_input(gpio); -} - -static inline int gpio_direction_output(unsigned int gpio, int v) -{ - return au1300_gpio_direction_output(gpio, v); -} - -static inline int gpio_get_value(unsigned int gpio) -{ - return au1300_gpio_get_value(gpio); -} - -static inline void gpio_set_value(unsigned int gpio, int v) -{ - au1300_gpio_set_value(gpio, v); -} - -static inline int gpio_get_value_cansleep(unsigned gpio) -{ - return gpio_get_value(gpio); -} - -static inline void gpio_set_value_cansleep(unsigned gpio, int value) -{ - gpio_set_value(gpio, value); -} - -static inline int gpio_is_valid(unsigned int gpio) -{ - return au1300_gpio_is_valid(gpio); -} - -static inline int gpio_cansleep(unsigned int gpio) -{ - return au1300_gpio_cansleep(gpio); -} - -static inline int gpio_to_irq(unsigned int gpio) -{ - return au1300_gpio_to_irq(gpio); -} - -static inline int irq_to_gpio(unsigned int irq) -{ - return au1300_irq_to_gpio(irq); -} - -static inline int gpio_request(unsigned int gpio, const char *label) -{ - return 0; -} - -static inline int gpio_request_one(unsigned gpio, - unsigned long flags, const char *label) -{ - return 0; -} - -static inline int gpio_request_array(struct gpio *array, size_t num) -{ - return 0; -} - -static inline void gpio_free(unsigned gpio) -{ -} - -static inline void gpio_free_array(struct gpio *array, size_t num) -{ -} - -static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) -{ - return -ENOSYS; -} - -static inline void gpio_unexport(unsigned gpio) -{ -} - -static inline int gpio_export(unsigned gpio, bool direction_may_change) -{ - return -ENOSYS; -} - -static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) -{ - return -ENOSYS; -} - -static inline int gpio_export_link(struct device *dev, const char *name, - unsigned gpio) -{ - return -ENOSYS; -} - -#endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ - -#endif /* CONFIG_ALCHEMY_GPIOINT_AU1300 */ - -#endif /* CONFIG GPIOLIB */ - #endif /* _GPIO_AU1300_H_ */ diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h index d7f1ef246d5c..93817bfb7fb2 100644 --- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h @@ -10,6 +10,7 @@ #include <linux/bcma/bcma.h> #include <linux/bcma/bcma_soc.h> #include <linux/bcm47xx_nvram.h> +#include <linux/bcm47xx_sprom.h> enum bcm47xx_bus_type { #ifdef CONFIG_BCM47XX_SSB @@ -32,9 +33,6 @@ union bcm47xx_bus { extern union bcm47xx_bus bcm47xx_bus; extern enum bcm47xx_bus_type bcm47xx_bus_type; -void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix, - bool fallback); - void bcm47xx_set_system_type(u16 chip_id); #endif /* __ASM_BCM47XX_H */ diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h deleted file mode 100644 index 2421411b7636..000000000000 --- a/arch/mips/include/asm/mach-cavium-octeon/war.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. 
See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - * Copyright (C) 2008 Cavium Networks <support@caviumnetworks.com> - */ -#ifndef __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H -#define __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \ - OCTEON_IS_MODEL(OCTEON_CN6XXX) - -#endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */ diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h index 72ac2c202c55..079889ced4f3 100644 --- a/arch/mips/include/asm/mach-generic/irq.h +++ b/arch/mips/include/asm/mach-generic/irq.h @@ -9,7 +9,7 @@ #define __ASM_MACH_GENERIC_IRQ_H #ifndef NR_IRQS -#define NR_IRQS 128 +#define NR_IRQS 256 #endif #ifdef CONFIG_I8259 diff --git a/arch/mips/include/asm/mach-generic/war.h b/arch/mips/include/asm/mach-generic/war.h deleted file mode 100644 index f0f4a35d0870..000000000000 --- a/arch/mips/include/asm/mach-generic/war.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MACH_GENERIC_WAR_H -#define __ASM_MACH_GENERIC_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MACH_GENERIC_WAR_H */ diff --git a/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ingenic/cpu-feature-overrides.h index 7c5e576f9d96..7c5e576f9d96 100644 --- a/arch/mips/include/asm/mach-jz4740/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ingenic/cpu-feature-overrides.h diff --git a/arch/mips/include/asm/mach-ip22/war.h b/arch/mips/include/asm/mach-ip22/war.h deleted file mode 100644 index b48eb4ac362d..000000000000 --- a/arch/mips/include/asm/mach-ip22/war.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_IP22_WAR_H -#define __ASM_MIPS_MACH_IP22_WAR_H - -/* - * R4600 CPU modules for the Indy come with both V1.7 and V2.0 processors. 
- */ - -#define R4600_V1_INDEX_ICACHEOP_WAR 1 -#define R4600_V1_HIT_CACHEOP_WAR 1 -#define R4600_V2_HIT_CACHEOP_WAR 1 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_IP22_WAR_H */ diff --git a/arch/mips/include/asm/mach-ip27/kmalloc.h b/arch/mips/include/asm/mach-ip27/kmalloc.h deleted file mode 100644 index 82c23ce2afa7..000000000000 --- a/arch/mips/include/asm/mach-ip27/kmalloc.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_MACH_IP27_KMALLOC_H -#define __ASM_MACH_IP27_KMALLOC_H - -/* - * All happy, no need to define ARCH_DMA_MINALIGN - */ - -#endif /* __ASM_MACH_IP27_KMALLOC_H */ diff --git a/arch/mips/include/asm/mach-ip27/war.h b/arch/mips/include/asm/mach-ip27/war.h deleted file mode 100644 index ef3efce0094a..000000000000 --- a/arch/mips/include/asm/mach-ip27/war.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_IP27_WAR_H -#define __ASM_MIPS_MACH_IP27_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 1 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_IP27_WAR_H */ diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h index ba8b4e30b3e2..613bbc10c1f2 100644 --- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h @@ -25,7 +25,7 @@ #define cpu_has_mcheck 0 #define cpu_has_ejtag 0 -#define cpu_has_llsc 1 +#define cpu_has_llsc 0 #define cpu_has_vtag_icache 0 #define cpu_has_dc_aliases 0 /* see probe_pcache() */ #define cpu_has_ic_fills_f_dc 0 diff --git a/arch/mips/include/asm/mach-ip28/war.h b/arch/mips/include/asm/mach-ip28/war.h deleted file mode 100644 index 61cd67354829..000000000000 --- a/arch/mips/include/asm/mach-ip28/war.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_IP28_WAR_H -#define __ASM_MIPS_MACH_IP28_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 1 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_IP28_WAR_H */ diff --git a/arch/mips/include/asm/mach-ip30/irq.h b/arch/mips/include/asm/mach-ip30/irq.h deleted file mode 100644 index 27ba899c95be..000000000000 --- a/arch/mips/include/asm/mach-ip30/irq.h +++ /dev/null @@ -1,87 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * HEART IRQ defines - * - * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de> - * 2014-2016 Joshua Kinard <kumba@gentoo.org> - * - */ - -#ifndef __ASM_MACH_IP30_IRQ_H -#define __ASM_MACH_IP30_IRQ_H - -/* - * HEART has 64 hardware interrupts, but use 128 to leave room for a few - * software interrupts as well (such as the CPU timer interrupt. - */ -#define NR_IRQS 128 - -extern void __init ip30_install_ipi(void); - -/* - * HEART has 64 interrupt vectors available to it, subdivided into five - * priority levels. They are numbered 0 to 63. - */ -#define HEART_NUM_IRQS 64 - -/* - * These are the five interrupt priority levels and their corresponding - * CPU IPx interrupt pins. - * - * Level 4 - Error Interrupts. - * Level 3 - HEART timer interrupt. - * Level 2 - CPU IPI, CPU debug, power putton, general device interrupts. - * Level 1 - General device interrupts. - * Level 0 - General device GFX flow control interrupts. - */ -#define HEART_L4_INT_MASK 0xfff8000000000000ULL /* IP6 */ -#define HEART_L3_INT_MASK 0x0004000000000000ULL /* IP5 */ -#define HEART_L2_INT_MASK 0x0003ffff00000000ULL /* IP4 */ -#define HEART_L1_INT_MASK 0x00000000ffff0000ULL /* IP3 */ -#define HEART_L0_INT_MASK 0x000000000000ffffULL /* IP2 */ - -/* HEART L0 Interrupts (Low Priority) */ -#define HEART_L0_INT_GENERIC 0 -#define HEART_L0_INT_FLOW_CTRL_HWTR_0 1 -#define HEART_L0_INT_FLOW_CTRL_HWTR_1 2 - -/* HEART L2 Interrupts (High Priority) */ -#define HEART_L2_INT_RESCHED_CPU_0 46 -#define HEART_L2_INT_RESCHED_CPU_1 47 -#define HEART_L2_INT_CALL_CPU_0 48 -#define HEART_L2_INT_CALL_CPU_1 49 - -/* HEART L3 Interrupts (Compare/Counter Timer) */ -#define HEART_L3_INT_TIMER 50 - -/* HEART L4 Interrupts (Errors) */ -#define HEART_L4_INT_XWID_ERR_9 51 -#define HEART_L4_INT_XWID_ERR_A 52 -#define HEART_L4_INT_XWID_ERR_B 53 -#define HEART_L4_INT_XWID_ERR_C 54 -#define HEART_L4_INT_XWID_ERR_D 55 -#define HEART_L4_INT_XWID_ERR_E 56 -#define HEART_L4_INT_XWID_ERR_F 57 -#define HEART_L4_INT_XWID_ERR_XBOW 58 -#define HEART_L4_INT_CPU_BUS_ERR_0 59 -#define HEART_L4_INT_CPU_BUS_ERR_1 60 -#define HEART_L4_INT_CPU_BUS_ERR_2 61 -#define HEART_L4_INT_CPU_BUS_ERR_3 62 -#define HEART_L4_INT_HEART_EXCP 63 - -/* - * Power Switch is wired via BaseIO BRIDGE slot #6. - * - * ACFail is wired via BaseIO BRIDGE slot #7. 
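A small standalone sketch (not from the kernel sources): the HEART level masks deleted here, and re-added under asm/sgi/heart.h later in this diff, partition the 64 interrupt vectors into five priority levels, one per CPU IPx pin. Using the mask values quoted above, a vector number can be mapped back to its level; the helper below is illustrative only.

#include <stdio.h>
#include <stdint.h>

#define HEART_L4_INT_MASK 0xfff8000000000000ULL /* IP6: vectors 51..63 */
#define HEART_L3_INT_MASK 0x0004000000000000ULL /* IP5: vector 50 (timer) */
#define HEART_L2_INT_MASK 0x0003ffff00000000ULL /* IP4: vectors 32..49 */
#define HEART_L1_INT_MASK 0x00000000ffff0000ULL /* IP3: vectors 16..31 */
#define HEART_L0_INT_MASK 0x000000000000ffffULL /* IP2: vectors 0..15  */

static int heart_irq_level(unsigned int vec)    /* illustrative helper */
{
    uint64_t bit = 1ULL << vec;

    if (bit & HEART_L4_INT_MASK) return 4;
    if (bit & HEART_L3_INT_MASK) return 3;
    if (bit & HEART_L2_INT_MASK) return 2;
    if (bit & HEART_L1_INT_MASK) return 1;
    return 0;
}

int main(void)
{
    /* HEART_L3_INT_TIMER is vector 50, so this prints level 3 */
    printf("vector 50 -> level %d\n", heart_irq_level(50));
    return 0;
}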
- */ -#define IP30_POWER_IRQ HEART_L2_INT_POWER_BTN - -#include <asm/mach-generic/irq.h> - -#define IP30_HEART_L0_IRQ (MIPS_CPU_IRQ_BASE + 2) -#define IP30_HEART_L1_IRQ (MIPS_CPU_IRQ_BASE + 3) -#define IP30_HEART_L2_IRQ (MIPS_CPU_IRQ_BASE + 4) -#define IP30_HEART_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 5) -#define IP30_HEART_ERR_IRQ (MIPS_CPU_IRQ_BASE + 6) - -#endif /* __ASM_MACH_IP30_IRQ_H */ diff --git a/arch/mips/include/asm/mach-ip30/war.h b/arch/mips/include/asm/mach-ip30/war.h deleted file mode 100644 index a1fa0c1f5300..000000000000 --- a/arch/mips/include/asm/mach-ip30/war.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_IP30_WAR_H -#define __ASM_MIPS_MACH_IP30_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#ifdef CONFIG_CPU_R10000 -#define R10000_LLSC_WAR 1 -#else -#define R10000_LLSC_WAR 0 -#endif -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_IP30_WAR_H */ diff --git a/arch/mips/include/asm/mach-ip32/war.h b/arch/mips/include/asm/mach-ip32/war.h deleted file mode 100644 index e77b9d1b6c96..000000000000 --- a/arch/mips/include/asm/mach-ip32/war.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_IP32_WAR_H -#define __ASM_MIPS_MACH_IP32_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 1 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_IP32_WAR_H */ diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h deleted file mode 100644 index 27c543bd340f..000000000000 --- a/arch/mips/include/asm/mach-jz4740/irq.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> - * JZ4740 IRQ definitions - */ - -#ifndef __ASM_MACH_JZ4740_IRQ_H__ -#define __ASM_MACH_JZ4740_IRQ_H__ - -#define MIPS_CPU_IRQ_BASE 0 -#define NR_IRQS 256 - -#endif diff --git a/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h b/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h deleted file mode 100644 index 00d602629a55..000000000000 --- a/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998, 2001, 03, 07 by Ralf Baechle (ralf@linux-mips.org) - * - * RTC routines for PC style attached Dallas chip. 
- */ -#ifndef __ASM_MACH_LOONGSON2EF_MC146818RTC_H -#define __ASM_MACH_LOONGSON2EF_MC146818RTC_H - -#include <linux/io.h> - -#define RTC_PORT(x) (0x70 + (x)) -#define RTC_IRQ 8 - -static inline unsigned char CMOS_READ(unsigned long addr) -{ - outb_p(addr, RTC_PORT(0)); - return inb_p(RTC_PORT(1)); -} - -static inline void CMOS_WRITE(unsigned char data, unsigned long addr) -{ - outb_p(addr, RTC_PORT(0)); - outb_p(data, RTC_PORT(1)); -} - -#define RTC_ALWAYS_BCD 0 - -#ifndef mc146818_decode_year -#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1970) -#endif - -#endif /* __ASM_MACH_LOONGSON2EF_MC146818RTC_H */ diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h index bf2480923154..98ea977cf0b8 100644 --- a/arch/mips/include/asm/mach-loongson64/irq.h +++ b/arch/mips/include/asm/mach-loongson64/irq.h @@ -5,7 +5,8 @@ /* cpu core interrupt numbers */ #define NR_IRQS_LEGACY 16 #define NR_MIPS_CPU_IRQS 8 -#define NR_IRQS (NR_IRQS_LEGACY + NR_MIPS_CPU_IRQS + 256) +#define NR_MAX_CHAINED_IRQS 40 /* Chained IRQs means those not directly used by devices */ +#define NR_IRQS (NR_IRQS_LEGACY + NR_MIPS_CPU_IRQS + NR_MAX_CHAINED_IRQS + 256) #define MIPS_CPU_IRQ_BASE NR_IRQS_LEGACY diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index 5eaca4fe3f92..ebb1deaa77b9 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -10,13 +10,9 @@ #define _ASM_MACH_LOONGSON64_MMZONE_H #define NODE_ADDRSPACE_SHIFT 44 -#define NODE0_ADDRSPACE_OFFSET 0x000000000000UL -#define NODE1_ADDRSPACE_OFFSET 0x100000000000UL -#define NODE2_ADDRSPACE_OFFSET 0x200000000000UL -#define NODE3_ADDRSPACE_OFFSET 0x300000000000UL #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT) -#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT) +#define nid_to_addrbase(nid) ((unsigned long)(nid) << NODE_ADDRSPACE_SHIFT) extern struct pglist_data *__node_data[]; diff --git a/arch/mips/include/asm/mach-malta/malta-dtshim.h b/arch/mips/include/asm/mach-malta/malta-dtshim.h deleted file mode 100644 index 7c97b710121d..000000000000 --- a/arch/mips/include/asm/mach-malta/malta-dtshim.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2015 Imagination Technologies - * Author: Paul Burton <paul.burton@mips.com> - */ - -#ifndef __MIPS_MALTA_DTSHIM_H__ -#define __MIPS_MALTA_DTSHIM_H__ - -#include <linux/init.h> - -#ifdef CONFIG_MIPS_MALTA - -extern void __init *malta_dt_shim(void *fdt); - -#else /* !CONFIG_MIPS_MALTA */ - -static inline void *malta_dt_shim(void *fdt) -{ - return fdt; -} - -#endif /* !CONFIG_MIPS_MALTA */ - -#endif /* __MIPS_MALTA_DTSHIM_H__ */ diff --git a/arch/mips/include/asm/mach-malta/malta-pm.h b/arch/mips/include/asm/mach-malta/malta-pm.h deleted file mode 100644 index 2a5146d79313..000000000000 --- a/arch/mips/include/asm/mach-malta/malta-pm.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2014 Imagination Technologies - * Author: Paul Burton <paul.burton@mips.com> - */ - -#ifndef __ASM_MIPS_MACH_MALTA_PM_H__ -#define __ASM_MIPS_MACH_MALTA_PM_H__ - -#include <asm/mips-boards/piix4.h> - -#ifdef CONFIG_MIPS_MALTA_PM - -/** - * mips_pm_suspend - enter a suspend state - * @state: the state to enter, one of PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_* - * - * Enters a suspend state via the Malta's PIIX4. 
If the state to be entered - * is one which loses context (eg. SOFF) then this function will never - * return. - */ -extern int mips_pm_suspend(unsigned state); - -#else /* !CONFIG_MIPS_MALTA_PM */ - -static inline int mips_pm_suspend(unsigned state) -{ - return -EINVAL; -} - -#endif /* !CONFIG_MIPS_MALTA_PM */ - -#endif /* __ASM_MIPS_MACH_MALTA_PM_H__ */ diff --git a/arch/mips/include/asm/mach-malta/war.h b/arch/mips/include/asm/mach-malta/war.h deleted file mode 100644 index d62d2ffe515e..000000000000 --- a/arch/mips/include/asm/mach-malta/war.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_MIPS_WAR_H -#define __ASM_MIPS_MACH_MIPS_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 1 -#define MIPS_CACHE_SYNC_WAR 1 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 1 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_MIPS_WAR_H */ diff --git a/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h deleted file mode 100644 index 23ecf816daa7..000000000000 --- a/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2013 Cavium, Inc. - */ -#ifndef __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H -#define __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H - -#define cpu_has_4kex 1 -#define cpu_has_3k_cache 0 -#define cpu_has_tx39_cache 0 -#define cpu_has_counter 1 -#define cpu_has_llsc 1 -/* - * We Disable LL/SC on non SMP systems as it is faster to disable - * interrupts for atomic access than a LL/SC. - */ -#ifdef CONFIG_SMP -# define kernel_uses_llsc 1 -#else -# define kernel_uses_llsc 0 -#endif - -#ifdef CONFIG_CPU_CAVIUM_OCTEON -#define cpu_dcache_line_size() 128 -#define cpu_icache_line_size() 128 -#define cpu_has_octeon_cache 1 -#define cpu_has_4k_cache 0 -#else -#define cpu_has_4k_cache 1 -#endif - -#endif /* __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-paravirt/irq.h b/arch/mips/include/asm/mach-paravirt/irq.h deleted file mode 100644 index 9b4d35eca977..000000000000 --- a/arch/mips/include/asm/mach-paravirt/irq.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2013 Cavium, Inc. 
- */ -#ifndef __ASM_MACH_PARAVIRT_IRQ_H__ -#define __ASM_MACH_PARAVIRT_IRQ_H__ - -#define NR_IRQS 64 -#define MIPS_CPU_IRQ_BASE 1 - -#define MIPS_IRQ_PCIA (MIPS_CPU_IRQ_BASE + 8) - -#define MIPS_IRQ_MBOX0 (MIPS_CPU_IRQ_BASE + 32) -#define MIPS_IRQ_MBOX1 (MIPS_CPU_IRQ_BASE + 33) - -#endif /* __ASM_MACH_PARAVIRT_IRQ_H__ */ diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h deleted file mode 100644 index c9f5769dfc8f..000000000000 --- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2013 Cavium, Inc - */ -#ifndef __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H -#define __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H - -#define CP0_EBASE $15, 1 - - .macro kernel_entry_setup -#ifdef CONFIG_SMP - mfc0 t0, CP0_EBASE - andi t0, t0, 0x3ff # CPUNum - beqz t0, 1f - # CPUs other than zero goto smp_bootstrap - j smp_bootstrap -#endif /* CONFIG_SMP */ - -1: - .endm - -/* - * Do SMP slave processor setup necessary before we can safely execute - * C code. - */ - .macro smp_slave_setup - mfc0 t0, CP0_EBASE - andi t0, t0, 0x3ff # CPUNum - slti t1, t0, NR_CPUS - bnez t1, 1f -2: - di - wait - b 2b # Unknown CPU, loop forever. -1: - PTR_LA t1, paravirt_smp_sp - PTR_SLL t0, PTR_SCALESHIFT - PTR_ADDU t1, t1, t0 -3: - PTR_L sp, 0(t1) - beqz sp, 3b # Spin until told to proceed. - - PTR_LA t1, paravirt_smp_gp - PTR_ADDU t1, t1, t0 - sync - PTR_L gp, 0(t1) - .endm - -#endif /* __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-pnx833x/gpio.h b/arch/mips/include/asm/mach-pnx833x/gpio.h deleted file mode 100644 index 85b5b8e26118..000000000000 --- a/arch/mips/include/asm/mach-pnx833x/gpio.h +++ /dev/null @@ -1,159 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * gpio.h: GPIO Support for PNX833X. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - */ -#ifndef __ASM_MIPS_MACH_PNX833X_GPIO_H -#define __ASM_MIPS_MACH_PNX833X_GPIO_H - -/* BIG FAT WARNING: races danger! - No protections exist here. Current users are only early init code, - when locking is not needed because no concurrency yet exists there, - and GPIO IRQ dispatcher, which does locking. 
- However, if many uses will ever happen, proper locking will be needed - - including locking between different uses -*/ - -#include <asm/mach-pnx833x/pnx833x.h> - -#define SET_REG_BIT(reg, bit) do { (reg |= (1 << (bit))); } while (0) -#define CLEAR_REG_BIT(reg, bit) do { (reg &= ~(1 << (bit))); } while (0) - -/* Initialize GPIO to a known state */ -static inline void pnx833x_gpio_init(void) -{ - PNX833X_PIO_DIR = 0; - PNX833X_PIO_DIR2 = 0; - PNX833X_PIO_SEL = 0; - PNX833X_PIO_SEL2 = 0; - PNX833X_PIO_INT_EDGE = 0; - PNX833X_PIO_INT_HI = 0; - PNX833X_PIO_INT_LO = 0; - - /* clear any GPIO interrupt requests */ - PNX833X_PIO_INT_CLEAR = 0xffff; - PNX833X_PIO_INT_CLEAR = 0; - PNX833X_PIO_INT_ENABLE = 0; -} - -/* Select GPIO direction for a pin */ -static inline void pnx833x_gpio_select_input(unsigned int pin) -{ - if (pin < 32) - CLEAR_REG_BIT(PNX833X_PIO_DIR, pin); - else - CLEAR_REG_BIT(PNX833X_PIO_DIR2, pin & 31); -} -static inline void pnx833x_gpio_select_output(unsigned int pin) -{ - if (pin < 32) - SET_REG_BIT(PNX833X_PIO_DIR, pin); - else - SET_REG_BIT(PNX833X_PIO_DIR2, pin & 31); -} - -/* Select GPIO or alternate function for a pin */ -static inline void pnx833x_gpio_select_function_io(unsigned int pin) -{ - if (pin < 32) - CLEAR_REG_BIT(PNX833X_PIO_SEL, pin); - else - CLEAR_REG_BIT(PNX833X_PIO_SEL2, pin & 31); -} -static inline void pnx833x_gpio_select_function_alt(unsigned int pin) -{ - if (pin < 32) - SET_REG_BIT(PNX833X_PIO_SEL, pin); - else - SET_REG_BIT(PNX833X_PIO_SEL2, pin & 31); -} - -/* Read GPIO pin */ -static inline int pnx833x_gpio_read(unsigned int pin) -{ - if (pin < 32) - return (PNX833X_PIO_IN >> pin) & 1; - else - return (PNX833X_PIO_IN2 >> (pin & 31)) & 1; -} - -/* Write GPIO pin */ -static inline void pnx833x_gpio_write(unsigned int val, unsigned int pin) -{ - if (pin < 32) { - if (val) - SET_REG_BIT(PNX833X_PIO_OUT, pin); - else - CLEAR_REG_BIT(PNX833X_PIO_OUT, pin); - } else { - if (val) - SET_REG_BIT(PNX833X_PIO_OUT2, pin & 31); - else - CLEAR_REG_BIT(PNX833X_PIO_OUT2, pin & 31); - } -} - -/* Configure GPIO interrupt */ -#define GPIO_INT_NONE 0 -#define GPIO_INT_LEVEL_LOW 1 -#define GPIO_INT_LEVEL_HIGH 2 -#define GPIO_INT_EDGE_RISING 3 -#define GPIO_INT_EDGE_FALLING 4 -#define GPIO_INT_EDGE_BOTH 5 -static inline void pnx833x_gpio_setup_irq(int when, unsigned int pin) -{ - switch (when) { - case GPIO_INT_LEVEL_LOW: - CLEAR_REG_BIT(PNX833X_PIO_INT_EDGE, pin); - CLEAR_REG_BIT(PNX833X_PIO_INT_HI, pin); - SET_REG_BIT(PNX833X_PIO_INT_LO, pin); - break; - case GPIO_INT_LEVEL_HIGH: - CLEAR_REG_BIT(PNX833X_PIO_INT_EDGE, pin); - SET_REG_BIT(PNX833X_PIO_INT_HI, pin); - CLEAR_REG_BIT(PNX833X_PIO_INT_LO, pin); - break; - case GPIO_INT_EDGE_RISING: - SET_REG_BIT(PNX833X_PIO_INT_EDGE, pin); - SET_REG_BIT(PNX833X_PIO_INT_HI, pin); - CLEAR_REG_BIT(PNX833X_PIO_INT_LO, pin); - break; - case GPIO_INT_EDGE_FALLING: - SET_REG_BIT(PNX833X_PIO_INT_EDGE, pin); - CLEAR_REG_BIT(PNX833X_PIO_INT_HI, pin); - SET_REG_BIT(PNX833X_PIO_INT_LO, pin); - break; - case GPIO_INT_EDGE_BOTH: - SET_REG_BIT(PNX833X_PIO_INT_EDGE, pin); - SET_REG_BIT(PNX833X_PIO_INT_HI, pin); - SET_REG_BIT(PNX833X_PIO_INT_LO, pin); - break; - default: - CLEAR_REG_BIT(PNX833X_PIO_INT_EDGE, pin); - CLEAR_REG_BIT(PNX833X_PIO_INT_HI, pin); - CLEAR_REG_BIT(PNX833X_PIO_INT_LO, pin); - break; - } -} - -/* Enable/disable GPIO interrupt */ -static inline void pnx833x_gpio_enable_irq(unsigned int pin) -{ - SET_REG_BIT(PNX833X_PIO_INT_ENABLE, pin); -} -static inline void pnx833x_gpio_disable_irq(unsigned int pin) -{ - 
CLEAR_REG_BIT(PNX833X_PIO_INT_ENABLE, pin); -} - -/* Clear GPIO interrupt request */ -static inline void pnx833x_gpio_clear_irq(unsigned int pin) -{ - SET_REG_BIT(PNX833X_PIO_INT_CLEAR, pin); - CLEAR_REG_BIT(PNX833X_PIO_INT_CLEAR, pin); -} - -#endif diff --git a/arch/mips/include/asm/mach-pnx833x/irq-mapping.h b/arch/mips/include/asm/mach-pnx833x/irq-mapping.h deleted file mode 100644 index 32d8063c1bbc..000000000000 --- a/arch/mips/include/asm/mach-pnx833x/irq-mapping.h +++ /dev/null @@ -1,112 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -/* - * irq.h: IRQ mappings for PNX833X. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - */ - -#ifndef __ASM_MIPS_MACH_PNX833X_IRQ_MAPPING_H -#define __ASM_MIPS_MACH_PNX833X_IRQ_MAPPING_H -/* - * The "IRQ numbers" are completely virtual. - * - * In PNX8330/1, we have 48 interrupt lines, numbered from 1 to 48. - * Let's use numbers 1..48 for PIC interrupts, number 0 for timer interrupt, - * numbers 49..64 for (virtual) GPIO interrupts. - * - * In PNX8335, we have 57 interrupt lines, numbered from 1 to 57, - * connected to PIC, which uses core hardware interrupt 2, and also - * a timer interrupt through hardware interrupt 5. - * Let's use numbers 1..64 for PIC interrupts, number 0 for timer interrupt, - * numbers 65..80 for (virtual) GPIO interrupts. - * - */ -#include <irq.h> - -#define PNX833X_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) - -/* Interrupts supported by PIC */ -#define PNX833X_PIC_I2C0_INT (PNX833X_PIC_IRQ_BASE + 1) -#define PNX833X_PIC_I2C1_INT (PNX833X_PIC_IRQ_BASE + 2) -#define PNX833X_PIC_UART0_INT (PNX833X_PIC_IRQ_BASE + 3) -#define PNX833X_PIC_UART1_INT (PNX833X_PIC_IRQ_BASE + 4) -#define PNX833X_PIC_TS_IN0_DV_INT (PNX833X_PIC_IRQ_BASE + 5) -#define PNX833X_PIC_TS_IN0_DMA_INT (PNX833X_PIC_IRQ_BASE + 6) -#define PNX833X_PIC_GPIO_INT (PNX833X_PIC_IRQ_BASE + 7) -#define PNX833X_PIC_AUDIO_DEC_INT (PNX833X_PIC_IRQ_BASE + 8) -#define PNX833X_PIC_VIDEO_DEC_INT (PNX833X_PIC_IRQ_BASE + 9) -#define PNX833X_PIC_CONFIG_INT (PNX833X_PIC_IRQ_BASE + 10) -#define PNX833X_PIC_AOI_INT (PNX833X_PIC_IRQ_BASE + 11) -#define PNX833X_PIC_SYNC_INT (PNX833X_PIC_IRQ_BASE + 12) -#define PNX8330_PIC_SPU_INT (PNX833X_PIC_IRQ_BASE + 13) -#define PNX8335_PIC_SATA_INT (PNX833X_PIC_IRQ_BASE + 13) -#define PNX833X_PIC_OSD_INT (PNX833X_PIC_IRQ_BASE + 14) -#define PNX833X_PIC_DISP1_INT (PNX833X_PIC_IRQ_BASE + 15) -#define PNX833X_PIC_DEINTERLACER_INT (PNX833X_PIC_IRQ_BASE + 16) -#define PNX833X_PIC_DISPLAY2_INT (PNX833X_PIC_IRQ_BASE + 17) -#define PNX833X_PIC_VC_INT (PNX833X_PIC_IRQ_BASE + 18) -#define PNX833X_PIC_SC_INT (PNX833X_PIC_IRQ_BASE + 19) -#define PNX833X_PIC_IDE_INT (PNX833X_PIC_IRQ_BASE + 20) -#define PNX833X_PIC_IDE_DMA_INT (PNX833X_PIC_IRQ_BASE + 21) -#define PNX833X_PIC_TS_IN1_DV_INT (PNX833X_PIC_IRQ_BASE + 22) -#define PNX833X_PIC_TS_IN1_DMA_INT (PNX833X_PIC_IRQ_BASE + 23) -#define PNX833X_PIC_SGDX_DMA_INT (PNX833X_PIC_IRQ_BASE + 24) -#define PNX833X_PIC_TS_OUT_INT (PNX833X_PIC_IRQ_BASE + 25) -#define PNX833X_PIC_IR_INT (PNX833X_PIC_IRQ_BASE + 26) -#define PNX833X_PIC_VMSP1_INT (PNX833X_PIC_IRQ_BASE + 27) -#define PNX833X_PIC_VMSP2_INT (PNX833X_PIC_IRQ_BASE + 28) -#define PNX833X_PIC_PIBC_INT (PNX833X_PIC_IRQ_BASE + 29) -#define PNX833X_PIC_TS_IN0_TRD_INT (PNX833X_PIC_IRQ_BASE + 30) -#define PNX833X_PIC_SGDX_TPD_INT (PNX833X_PIC_IRQ_BASE + 31) -#define PNX833X_PIC_USB_INT (PNX833X_PIC_IRQ_BASE + 32) -#define PNX833X_PIC_TS_IN1_TRD_INT (PNX833X_PIC_IRQ_BASE + 33) -#define 
PNX833X_PIC_CLOCK_INT (PNX833X_PIC_IRQ_BASE + 34) -#define PNX833X_PIC_SGDX_PARSER_INT (PNX833X_PIC_IRQ_BASE + 35) -#define PNX833X_PIC_VMSP_DMA_INT (PNX833X_PIC_IRQ_BASE + 36) - -#if defined(CONFIG_SOC_PNX8335) -#define PNX8335_PIC_MIU_INT (PNX833X_PIC_IRQ_BASE + 37) -#define PNX8335_PIC_AVCHIP_IRQ_INT (PNX833X_PIC_IRQ_BASE + 38) -#define PNX8335_PIC_SYNC_HD_INT (PNX833X_PIC_IRQ_BASE + 39) -#define PNX8335_PIC_DISP_HD_INT (PNX833X_PIC_IRQ_BASE + 40) -#define PNX8335_PIC_DISP_SCALER_INT (PNX833X_PIC_IRQ_BASE + 41) -#define PNX8335_PIC_OSD_HD1_INT (PNX833X_PIC_IRQ_BASE + 42) -#define PNX8335_PIC_DTL_WRITER_Y_INT (PNX833X_PIC_IRQ_BASE + 43) -#define PNX8335_PIC_DTL_WRITER_C_INT (PNX833X_PIC_IRQ_BASE + 44) -#define PNX8335_PIC_DTL_EMULATOR_Y_IR_INT (PNX833X_PIC_IRQ_BASE + 45) -#define PNX8335_PIC_DTL_EMULATOR_C_IR_INT (PNX833X_PIC_IRQ_BASE + 46) -#define PNX8335_PIC_DENC_TTX_INT (PNX833X_PIC_IRQ_BASE + 47) -#define PNX8335_PIC_MMI_SIF0_INT (PNX833X_PIC_IRQ_BASE + 48) -#define PNX8335_PIC_MMI_SIF1_INT (PNX833X_PIC_IRQ_BASE + 49) -#define PNX8335_PIC_MMI_CDMMU_INT (PNX833X_PIC_IRQ_BASE + 50) -#define PNX8335_PIC_PIBCS_INT (PNX833X_PIC_IRQ_BASE + 51) -#define PNX8335_PIC_ETHERNET_INT (PNX833X_PIC_IRQ_BASE + 52) -#define PNX8335_PIC_VMSP1_0_INT (PNX833X_PIC_IRQ_BASE + 53) -#define PNX8335_PIC_VMSP1_1_INT (PNX833X_PIC_IRQ_BASE + 54) -#define PNX8335_PIC_VMSP1_DMA_INT (PNX833X_PIC_IRQ_BASE + 55) -#define PNX8335_PIC_TDGR_DE_INT (PNX833X_PIC_IRQ_BASE + 56) -#define PNX8335_PIC_IR1_IRQ_INT (PNX833X_PIC_IRQ_BASE + 57) -#endif - -/* GPIO interrupts */ -#define PNX833X_GPIO_0_INT (PNX833X_GPIO_IRQ_BASE + 0) -#define PNX833X_GPIO_1_INT (PNX833X_GPIO_IRQ_BASE + 1) -#define PNX833X_GPIO_2_INT (PNX833X_GPIO_IRQ_BASE + 2) -#define PNX833X_GPIO_3_INT (PNX833X_GPIO_IRQ_BASE + 3) -#define PNX833X_GPIO_4_INT (PNX833X_GPIO_IRQ_BASE + 4) -#define PNX833X_GPIO_5_INT (PNX833X_GPIO_IRQ_BASE + 5) -#define PNX833X_GPIO_6_INT (PNX833X_GPIO_IRQ_BASE + 6) -#define PNX833X_GPIO_7_INT (PNX833X_GPIO_IRQ_BASE + 7) -#define PNX833X_GPIO_8_INT (PNX833X_GPIO_IRQ_BASE + 8) -#define PNX833X_GPIO_9_INT (PNX833X_GPIO_IRQ_BASE + 9) -#define PNX833X_GPIO_10_INT (PNX833X_GPIO_IRQ_BASE + 10) -#define PNX833X_GPIO_11_INT (PNX833X_GPIO_IRQ_BASE + 11) -#define PNX833X_GPIO_12_INT (PNX833X_GPIO_IRQ_BASE + 12) -#define PNX833X_GPIO_13_INT (PNX833X_GPIO_IRQ_BASE + 13) -#define PNX833X_GPIO_14_INT (PNX833X_GPIO_IRQ_BASE + 14) -#define PNX833X_GPIO_15_INT (PNX833X_GPIO_IRQ_BASE + 15) - -#endif diff --git a/arch/mips/include/asm/mach-pnx833x/irq.h b/arch/mips/include/asm/mach-pnx833x/irq.h deleted file mode 100644 index b7a6dab5b9f7..000000000000 --- a/arch/mips/include/asm/mach-pnx833x/irq.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * irq.h: IRQ mappings for PNX833X. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - */ - -#ifndef __ASM_MIPS_MACH_PNX833X_IRQ_H -#define __ASM_MIPS_MACH_PNX833X_IRQ_H -/* - * The "IRQ numbers" are completely virtual. - * - * In PNX8330/1, we have 48 interrupt lines, numbered from 1 to 48. - * Let's use numbers 1..48 for PIC interrupts, number 0 for timer interrupt, - * numbers 49..64 for (virtual) GPIO interrupts. - * - * In PNX8335, we have 57 interrupt lines, numbered from 1 to 57, - * connected to PIC, which uses core hardware interrupt 2, and also - * a timer interrupt through hardware interrupt 5. 
- * Let's use numbers 1..64 for PIC interrupts, number 0 for timer interrupt, - * numbers 65..80 for (virtual) GPIO interrupts. - * - */ -#if defined(CONFIG_SOC_PNX8335) - #define PNX833X_PIC_NUM_IRQ 58 -#else - #define PNX833X_PIC_NUM_IRQ 37 -#endif - -#define MIPS_CPU_NUM_IRQ 8 -#define PNX833X_GPIO_NUM_IRQ 16 - -#define MIPS_CPU_IRQ_BASE 0 -#define PNX833X_PIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + MIPS_CPU_NUM_IRQ) -#define PNX833X_GPIO_IRQ_BASE (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ) -#define NR_IRQS (MIPS_CPU_NUM_IRQ + PNX833X_PIC_NUM_IRQ + PNX833X_GPIO_NUM_IRQ) - -#endif diff --git a/arch/mips/include/asm/mach-pnx833x/pnx833x.h b/arch/mips/include/asm/mach-pnx833x/pnx833x.h deleted file mode 100644 index 00bb67a36386..000000000000 --- a/arch/mips/include/asm/mach-pnx833x/pnx833x.h +++ /dev/null @@ -1,189 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * pnx833x.h: Register mappings for PNX833X. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - */ -#ifndef __ASM_MIPS_MACH_PNX833X_PNX833X_H -#define __ASM_MIPS_MACH_PNX833X_PNX833X_H - -/* All regs are accessed in KSEG1 */ -#define PNX833X_BASE (0xa0000000ul + 0x17E00000ul) - -#define PNX833X_REG(offs) (*((volatile unsigned long *)(PNX833X_BASE + offs))) - -/* Registers are named exactly as in PNX833X docs, just with PNX833X_ prefix */ - -/* Read access to multibit fields */ -#define PNX833X_BIT(val, reg, field) ((val) & PNX833X_##reg##_##field) -#define PNX833X_REGBIT(reg, field) PNX833X_BIT(PNX833X_##reg, reg, field) - -/* Use PNX833X_FIELD to extract a field from val */ -#define PNX_FIELD(cpu, val, reg, field) \ - (((val) & PNX##cpu##_##reg##_##field##_MASK) >> \ - PNX##cpu##_##reg##_##field##_SHIFT) -#define PNX833X_FIELD(val, reg, field) PNX_FIELD(833X, val, reg, field) -#define PNX8330_FIELD(val, reg, field) PNX_FIELD(8330, val, reg, field) -#define PNX8335_FIELD(val, reg, field) PNX_FIELD(8335, val, reg, field) - -/* Use PNX833X_REGFIELD to extract a field from a register */ -#define PNX833X_REGFIELD(reg, field) PNX833X_FIELD(PNX833X_##reg, reg, field) -#define PNX8330_REGFIELD(reg, field) PNX8330_FIELD(PNX8330_##reg, reg, field) -#define PNX8335_REGFIELD(reg, field) PNX8335_FIELD(PNX8335_##reg, reg, field) - - -#define PNX_WRITEFIELD(cpu, val, reg, field) \ - (PNX##cpu##_##reg = (PNX##cpu##_##reg & ~(PNX##cpu##_##reg##_##field##_MASK)) | \ - ((val) << PNX##cpu##_##reg##_##field##_SHIFT)) -#define PNX833X_WRITEFIELD(val, reg, field) \ - PNX_WRITEFIELD(833X, val, reg, field) -#define PNX8330_WRITEFIELD(val, reg, field) \ - PNX_WRITEFIELD(8330, val, reg, field) -#define PNX8335_WRITEFIELD(val, reg, field) \ - PNX_WRITEFIELD(8335, val, reg, field) - - -/* Macros to detect CPU type */ - -#define PNX833X_CONFIG_MODULE_ID PNX833X_REG(0x7FFC) -#define PNX833X_CONFIG_MODULE_ID_MAJREV_MASK 0x0000f000 -#define PNX833X_CONFIG_MODULE_ID_MAJREV_SHIFT 12 -#define PNX8330_CONFIG_MODULE_MAJREV 4 -#define PNX8335_CONFIG_MODULE_MAJREV 5 -#define CPU_IS_PNX8330 (PNX833X_REGFIELD(CONFIG_MODULE_ID, MAJREV) == \ - PNX8330_CONFIG_MODULE_MAJREV) -#define CPU_IS_PNX8335 (PNX833X_REGFIELD(CONFIG_MODULE_ID, MAJREV) == \ - PNX8335_CONFIG_MODULE_MAJREV) - - - -#define PNX833X_RESET_CONTROL PNX833X_REG(0x8004) -#define PNX833X_RESET_CONTROL_2 PNX833X_REG(0x8014) - -#define PNX833X_PIC_REG(offs) PNX833X_REG(0x01000 + (offs)) -#define PNX833X_PIC_INT_PRIORITY PNX833X_PIC_REG(0x0) -#define PNX833X_PIC_INT_SRC PNX833X_PIC_REG(0x4) -#define PNX833X_PIC_INT_SRC_INT_SRC_MASK 
0x00000FF8ul /* bits 11:3 */ -#define PNX833X_PIC_INT_SRC_INT_SRC_SHIFT 3 -#define PNX833X_PIC_INT_REG(irq) PNX833X_PIC_REG(0x10 + 4*(irq)) - -#define PNX833X_CLOCK_CPUCP_CTL PNX833X_REG(0x9228) -#define PNX833X_CLOCK_CPUCP_CTL_EXIT_RESET 0x00000002ul /* bit 1 */ -#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_MASK 0x00000018ul /* bits 4:3 */ -#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT 3 - -#define PNX8335_CLOCK_PLL_CPU_CTL PNX833X_REG(0x9020) -#define PNX8335_CLOCK_PLL_CPU_CTL_FREQ_MASK 0x1f -#define PNX8335_CLOCK_PLL_CPU_CTL_FREQ_SHIFT 0 - -#define PNX833X_CONFIG_MUX PNX833X_REG(0x7004) -#define PNX833X_CONFIG_MUX_IDE_MUX 0x00000080 /* bit 7 */ - -#define PNX8330_CONFIG_POLYFUSE_7 PNX833X_REG(0x7040) -#define PNX8330_CONFIG_POLYFUSE_7_BOOT_MODE_MASK 0x00180000 -#define PNX8330_CONFIG_POLYFUSE_7_BOOT_MODE_SHIFT 19 - -#define PNX833X_PIO_IN PNX833X_REG(0xF000) -#define PNX833X_PIO_OUT PNX833X_REG(0xF004) -#define PNX833X_PIO_DIR PNX833X_REG(0xF008) -#define PNX833X_PIO_SEL PNX833X_REG(0xF014) -#define PNX833X_PIO_INT_EDGE PNX833X_REG(0xF020) -#define PNX833X_PIO_INT_HI PNX833X_REG(0xF024) -#define PNX833X_PIO_INT_LO PNX833X_REG(0xF028) -#define PNX833X_PIO_INT_STATUS PNX833X_REG(0xFFE0) -#define PNX833X_PIO_INT_ENABLE PNX833X_REG(0xFFE4) -#define PNX833X_PIO_INT_CLEAR PNX833X_REG(0xFFE8) -#define PNX833X_PIO_IN2 PNX833X_REG(0xF05C) -#define PNX833X_PIO_OUT2 PNX833X_REG(0xF060) -#define PNX833X_PIO_DIR2 PNX833X_REG(0xF064) -#define PNX833X_PIO_SEL2 PNX833X_REG(0xF068) - -#define PNX833X_UART0_PORTS_START (PNX833X_BASE + 0xB000) -#define PNX833X_UART0_PORTS_END (PNX833X_BASE + 0xBFFF) -#define PNX833X_UART1_PORTS_START (PNX833X_BASE + 0xC000) -#define PNX833X_UART1_PORTS_END (PNX833X_BASE + 0xCFFF) - -#define PNX833X_USB_PORTS_START (PNX833X_BASE + 0x19000) -#define PNX833X_USB_PORTS_END (PNX833X_BASE + 0x19FFF) - -#define PNX833X_CONFIG_USB PNX833X_REG(0x7008) - -#define PNX833X_I2C0_PORTS_START (PNX833X_BASE + 0xD000) -#define PNX833X_I2C0_PORTS_END (PNX833X_BASE + 0xDFFF) -#define PNX833X_I2C1_PORTS_START (PNX833X_BASE + 0xE000) -#define PNX833X_I2C1_PORTS_END (PNX833X_BASE + 0xEFFF) - -#define PNX833X_IDE_PORTS_START (PNX833X_BASE + 0x1A000) -#define PNX833X_IDE_PORTS_END (PNX833X_BASE + 0x1AFFF) -#define PNX833X_IDE_MODULE_ID PNX833X_REG(0x1AFFC) - -#define PNX833X_IDE_MODULE_ID_MODULE_ID_MASK 0xFFFF0000 -#define PNX833X_IDE_MODULE_ID_MODULE_ID_SHIFT 16 -#define PNX833X_IDE_MODULE_ID_VALUE 0xA009 - - -#define PNX833X_MIU_SEL0 PNX833X_REG(0x2004) -#define PNX833X_MIU_SEL0_TIMING PNX833X_REG(0x2008) -#define PNX833X_MIU_SEL1 PNX833X_REG(0x200C) -#define PNX833X_MIU_SEL1_TIMING PNX833X_REG(0x2010) -#define PNX833X_MIU_SEL2 PNX833X_REG(0x2014) -#define PNX833X_MIU_SEL2_TIMING PNX833X_REG(0x2018) -#define PNX833X_MIU_SEL3 PNX833X_REG(0x201C) -#define PNX833X_MIU_SEL3_TIMING PNX833X_REG(0x2020) - -#define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_MASK (1 << 14) -#define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_SHIFT 14 - -#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_MASK (1 << 7) -#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_SHIFT 7 - -#define PNX833X_MIU_SEL0_BURST_PAGE_LEN_MASK (0xF << 9) -#define PNX833X_MIU_SEL0_BURST_PAGE_LEN_SHIFT 9 - -#define PNX833X_MIU_CONFIG_SPI PNX833X_REG(0x2000) - -#define PNX833X_MIU_CONFIG_SPI_OPCODE_MASK (0xFF << 3) -#define PNX833X_MIU_CONFIG_SPI_OPCODE_SHIFT 3 - -#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_MASK (1 << 2) -#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_SHIFT 2 - -#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_MASK (1 << 1) -#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_SHIFT 
1 - -#define PNX833X_MIU_CONFIG_SPI_SYNC_MASK (1 << 0) -#define PNX833X_MIU_CONFIG_SPI_SYNC_SHIFT 0 - -#define PNX833X_WRITE_CONFIG_SPI(opcode, data_enable, addr_enable, sync) \ - (PNX833X_MIU_CONFIG_SPI = \ - ((opcode) << PNX833X_MIU_CONFIG_SPI_OPCODE_SHIFT) | \ - ((data_enable) << PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_SHIFT) | \ - ((addr_enable) << PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_SHIFT) | \ - ((sync) << PNX833X_MIU_CONFIG_SPI_SYNC_SHIFT)) - -#define PNX8335_IP3902_PORTS_START (PNX833X_BASE + 0x2F000) -#define PNX8335_IP3902_PORTS_END (PNX833X_BASE + 0x2FFFF) -#define PNX8335_IP3902_MODULE_ID PNX833X_REG(0x2FFFC) - -#define PNX8335_IP3902_MODULE_ID_MODULE_ID_MASK 0xFFFF0000 -#define PNX8335_IP3902_MODULE_ID_MODULE_ID_SHIFT 16 -#define PNX8335_IP3902_MODULE_ID_VALUE 0x3902 - - /* I/O location(gets remapped)*/ -#define PNX8335_NAND_BASE 0x18000000 -/* I/O location with CLE high */ -#define PNX8335_NAND_CLE_MASK 0x00100000 -/* I/O location with ALE high */ -#define PNX8335_NAND_ALE_MASK 0x00010000 - -#define PNX8335_SATA_PORTS_START (PNX833X_BASE + 0x2E000) -#define PNX8335_SATA_PORTS_END (PNX833X_BASE + 0x2EFFF) -#define PNX8335_SATA_MODULE_ID PNX833X_REG(0x2EFFC) - -#define PNX8335_SATA_MODULE_ID_MODULE_ID_MASK 0xFFFF0000 -#define PNX8335_SATA_MODULE_ID_MODULE_ID_SHIFT 16 -#define PNX8335_SATA_MODULE_ID_VALUE 0xA099 - -#endif diff --git a/arch/mips/include/asm/mach-rc32434/war.h b/arch/mips/include/asm/mach-rc32434/war.h deleted file mode 100644 index af430d26f713..000000000000 --- a/arch/mips/include/asm/mach-rc32434/war.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_MIPS_WAR_H -#define __ASM_MIPS_MACH_MIPS_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 1 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_MIPS_WAR_H */ diff --git a/arch/mips/include/asm/mach-rm/war.h b/arch/mips/include/asm/mach-rm/war.h deleted file mode 100644 index eca16d167c2f..000000000000 --- a/arch/mips/include/asm/mach-rm/war.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_RM_WAR_H -#define __ASM_MIPS_MACH_RM_WAR_H - -/* - * The RM200C seems to have been shipped only with V2.0 R4600s - */ - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 1 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_RM_WAR_H */ diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h deleted file mode 100644 index 4755b6116807..000000000000 --- a/arch/mips/include/asm/mach-sibyte/war.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_SIBYTE_WAR_H -#define __ASM_MIPS_MACH_SIBYTE_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 - -#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS) - -#ifndef __ASSEMBLY__ -extern int sb1250_m3_workaround_needed(void); -#endif - -#define BCM1250_M3_WAR sb1250_m3_workaround_needed() -#define SIBYTE_1956_WAR 1 - -#else - -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 - -#endif - -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_SIBYTE_WAR_H */ diff --git a/arch/mips/include/asm/mach-tx49xx/war.h b/arch/mips/include/asm/mach-tx49xx/war.h deleted file mode 100644 index 445abb4eb769..000000000000 --- a/arch/mips/include/asm/mach-tx49xx/war.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - */ -#ifndef __ASM_MIPS_MACH_TX49XX_WAR_H -#define __ASM_MIPS_MACH_TX49XX_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 1 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_TX49XX_WAR_H */ diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h index 65de4fb06096..254be3d62519 100644 --- a/arch/mips/include/asm/mips-boards/malta.h +++ b/arch/mips/include/asm/mips-boards/malta.h @@ -92,4 +92,6 @@ static inline unsigned long get_msc_port_base(unsigned long reg) #define MALTA_JMPRS_REG 0x1f000210 +extern void __init *malta_dt_shim(void *fdt); + #endif /* __ASM_MIPS_BOARDS_MALTA_H */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 4ddc12e4444a..a0e8ae5497b6 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -389,6 +389,13 @@ #define ST0_CU3 0x80000000 #define ST0_XX 0x80000000 /* MIPS IV naming */ +/* in-kernel enabled CUs */ +#ifdef CONFIG_CPU_LOONGSON64 +#define ST0_KERNEL_CUMASK (ST0_CU0 | ST0_CU2) +#else +#define ST0_KERNEL_CUMASK ST0_CU0 +#endif + /* * Bitfields and bit numbers in the coprocessor 0 IntCtl register. (MIPSR2) */ @@ -1706,12 +1713,6 @@ do { \ #define read_c0_count() __read_32bit_c0_register($9, 0) #define write_c0_count(val) __write_32bit_c0_register($9, 0, val) -#define read_c0_count2() __read_32bit_c0_register($9, 6) /* pnx8550 */ -#define write_c0_count2(val) __write_32bit_c0_register($9, 6, val) - -#define read_c0_count3() __read_32bit_c0_register($9, 7) /* pnx8550 */ -#define write_c0_count3(val) __write_32bit_c0_register($9, 7, val) - #define read_c0_entryhi() __read_ulong_c0_register($10, 0) #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val) @@ -1730,12 +1731,6 @@ do { \ #define read_c0_guestctl0ext() __read_32bit_c0_register($11, 4) #define write_c0_guestctl0ext(val) __write_32bit_c0_register($11, 4, val) -#define read_c0_compare2() __read_32bit_c0_register($11, 6) /* pnx8550 */ -#define write_c0_compare2(val) __write_32bit_c0_register($11, 6, val) - -#define read_c0_compare3() __read_32bit_c0_register($11, 7) /* pnx8550 */ -#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) - #define read_c0_status() __read_32bit_c0_register($12, 0) #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) @@ -2728,7 +2723,7 @@ static inline void tlb_probe(void) static inline void tlb_read(void) { -#if MIPS34K_MISSED_ITLB_WAR +#ifdef CONFIG_WAR_MIPS34K_MISSED_ITLB int res = 0; __asm__ __volatile__( @@ -2750,7 +2745,7 @@ static inline void tlb_read(void) "tlbr\n\t" ".set reorder"); -#if MIPS34K_MISSED_ITLB_WAR +#ifdef CONFIG_WAR_MIPS34K_MISSED_ITLB if ((res & _ULCAST_(1))) __asm__ __volatile__( " .set push \n" diff --git a/arch/mips/include/asm/netlogic/psb-bootinfo.h b/arch/mips/include/asm/netlogic/psb-bootinfo.h index 6878307f0ee6..c716e9397113 100644 --- a/arch/mips/include/asm/netlogic/psb-bootinfo.h +++ b/arch/mips/include/asm/netlogic/psb-bootinfo.h @@ -77,21 +77,6 @@ struct psb_info { uint64_t avail_mem_map; }; -enum { - NETLOGIC_IO_SPACE = 0x10, - PCIX_IO_SPACE, - PCIX_CFG_SPACE, - PCIX_MEMORY_SPACE, - HT_IO_SPACE, - HT_CFG_SPACE, 
- HT_MEMORY_SPACE, - SRAM_SPACE, - FLASH_CONTROLLER_SPACE -}; - -#define NLM_MAX_ARGS 64 -#define NLM_MAX_ENVS 32 - /* This is what netlboot passes and linux boot_mem_map is subtly different */ #define NLM_BOOT_MEM_MAP_MAX 32 struct nlm_boot_mem_map { @@ -102,6 +87,7 @@ struct nlm_boot_mem_map { uint32_t type; /* type of memory segment */ } map[NLM_BOOT_MEM_MAP_MAX]; }; +#define NLM_BOOT_MEM_RAM 1 /* Pointer to saved boot loader info */ extern struct psb_info nlm_prom_info; diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index 62787765575e..c114a7ba0bad 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -295,6 +295,8 @@ enum cvmx_board_types_enum { */ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001, CVMX_BOARD_TYPE_UBNT_E100 = 20002, + CVMX_BOARD_TYPE_UBNT_E200 = 20003, + CVMX_BOARD_TYPE_UBNT_E220 = 20005, CVMX_BOARD_TYPE_CUST_DSR1000N = 20006, CVMX_BOARD_TYPE_KONTRON_S1901 = 21901, CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000, @@ -396,6 +398,8 @@ static inline const char *cvmx_board_type_to_string(enum /* Customer private range */ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E200) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E220) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX) diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index e26dc41a8a68..2362842ee2b5 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -249,11 +249,6 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val) #define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT) -#elif defined(CONFIG_MACH_INGENIC) - -/* Ingenic uses the WA bit to achieve write-combine memory writes */ -#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT) - #endif #ifndef _CACHE_CACHABLE_NO_WA diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index dd7a0f552cac..e5ef0fdd4838 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -37,8 +37,6 @@ struct vm_area_struct; _PAGE_GLOBAL | _page_cachable_default) #define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT) -#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \ - _page_cachable_default) #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED) diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 856e12f6063d..7834e7c0c78a 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -29,6 +29,7 @@ */ extern unsigned int vced_count, vcei_count; +extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #ifdef CONFIG_32BIT #ifdef CONFIG_KVM_GUEST diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h index afe9e0e03fe9..6e7361629348 100644 --- a/arch/mips/include/asm/r4k-timer.h +++ b/arch/mips/include/asm/r4k-timer.h @@ -5,8 +5,8 @@ * * Copyright (C) 2008 by Ralf Baechle (ralf@linux-mips.org) */ -#ifndef __ASM_R4K_TYPES_H -#define __ASM_R4K_TYPES_H +#ifndef __ASM_R4K_TIMER_H +#define __ASM_R4K_TIMER_H #include <linux/compiler.h> @@ -27,4 +27,4 @@ static inline void synchronise_count_slave(int cpu) #endif -#endif 
/* __ASM_R4K_TYPES_H */ +#endif /* __ASM_R4K_TIMER_H */ diff --git a/arch/mips/include/asm/sgi/heart.h b/arch/mips/include/asm/sgi/heart.h index c423221b4792..0d03751955c4 100644 --- a/arch/mips/include/asm/sgi/heart.h +++ b/arch/mips/include/asm/sgi/heart.h @@ -264,6 +264,57 @@ struct ip30_heart_regs { /* 0x0ff00000 */ #define HC_NCOR_MEM_ERR BIT(1) #define HC_COR_MEM_ERR BIT(0) +/* + * HEART has 64 interrupt vectors available to it, subdivided into five + * priority levels. They are numbered 0 to 63. + */ +#define HEART_NUM_IRQS 64 + +/* + * These are the five interrupt priority levels and their corresponding + * CPU IPx interrupt pins. + * + * Level 4 - Error Interrupts. + * Level 3 - HEART timer interrupt. + * Level 2 - CPU IPI, CPU debug, power button, general device interrupts. + * Level 1 - General device interrupts. + * Level 0 - General device GFX flow control interrupts. + */ +#define HEART_L4_INT_MASK 0xfff8000000000000ULL /* IP6 */ +#define HEART_L3_INT_MASK 0x0004000000000000ULL /* IP5 */ +#define HEART_L2_INT_MASK 0x0003ffff00000000ULL /* IP4 */ +#define HEART_L1_INT_MASK 0x00000000ffff0000ULL /* IP3 */ +#define HEART_L0_INT_MASK 0x000000000000ffffULL /* IP2 */ + +/* HEART L0 Interrupts (Low Priority) */ +#define HEART_L0_INT_GENERIC 0 +#define HEART_L0_INT_FLOW_CTRL_HWTR_0 1 +#define HEART_L0_INT_FLOW_CTRL_HWTR_1 2 + +/* HEART L2 Interrupts (High Priority) */ +#define HEART_L2_INT_RESCHED_CPU_0 46 +#define HEART_L2_INT_RESCHED_CPU_1 47 +#define HEART_L2_INT_CALL_CPU_0 48 +#define HEART_L2_INT_CALL_CPU_1 49 + +/* HEART L3 Interrupts (Compare/Counter Timer) */ +#define HEART_L3_INT_TIMER 50 + +/* HEART L4 Interrupts (Errors) */ +#define HEART_L4_INT_XWID_ERR_9 51 +#define HEART_L4_INT_XWID_ERR_A 52 +#define HEART_L4_INT_XWID_ERR_B 53 +#define HEART_L4_INT_XWID_ERR_C 54 +#define HEART_L4_INT_XWID_ERR_D 55 +#define HEART_L4_INT_XWID_ERR_E 56 +#define HEART_L4_INT_XWID_ERR_F 57 +#define HEART_L4_INT_XWID_ERR_XBOW 58 +#define HEART_L4_INT_CPU_BUS_ERR_0 59 +#define HEART_L4_INT_CPU_BUS_ERR_1 60 +#define HEART_L4_INT_CPU_BUS_ERR_2 61 +#define HEART_L4_INT_CPU_BUS_ERR_3 62 +#define HEART_L4_INT_HEART_EXCP 63 + extern struct ip30_heart_regs __iomem *heart_regs; #define heart_read ____raw_readq diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 3e8d2aaf96af..aa430a6c68b2 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -450,7 +450,7 @@ */ .macro CLI mfc0 t0, CP0_STATUS - li t1, ST0_CU0 | STATMASK + li t1, ST0_KERNEL_CUMASK | STATMASK or t0, t1 xori t0, STATMASK mtc0 t0, CP0_STATUS @@ -463,7 +463,7 @@ */ .macro STI mfc0 t0, CP0_STATUS - li t1, ST0_CU0 | STATMASK + li t1, ST0_KERNEL_CUMASK | STATMASK or t0, t1 xori t0, STATMASK & ~1 mtc0 t0, CP0_STATUS @@ -477,7 +477,7 @@ */ .macro KMODE mfc0 t0, CP0_STATUS - li t1, ST0_CU0 | (STATMASK & ~1) + li t1, ST0_KERNEL_CUMASK | (STATMASK & ~1) #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) andi t2, t0, ST0_IEP srl t2, 2 diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 0b0a93bf83cd..a4374b4cb88f 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -117,6 +117,8 @@ do { \ __restore_dsp(next); \ } \ if (cop2_present) { \ + u32 status = read_c0_status(); \ + \ set_c0_status(ST0_CU2); \ if ((KSTK_STATUS(prev) & ST0_CU2)) { \ if (cop2_lazy_restore) \ @@ -127,7 +129,7 @@ do { \ !cop2_lazy_restore) { \ cop2_restore(next); \ } \ - clear_c0_status(ST0_CU2); \ + write_c0_status(status);
\ } \ __clear_r5_hw_ll_bit(); \ __clear_software_ll_bit(); \ diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h index 00805ac6e9fc..abf980af9ef4 100644 --- a/arch/mips/include/asm/txx9/tx4939.h +++ b/arch/mips/include/asm/txx9/tx4939.h @@ -498,7 +498,6 @@ struct tx4939_vpc_desc { ((((mst) + 245/2) / 245UL * 429 * 16 + 19) / 19 / 2) void tx4939_wdt_init(void); -void tx4939_add_memory_regions(void); void tx4939_setup(void); void tx4939_time_init(unsigned int tmrnr); void tx4939_sio_init(unsigned int sclk, unsigned int cts_mask); diff --git a/arch/mips/include/asm/war.h b/arch/mips/include/asm/war.h index e43f800e662d..21443f096238 100644 --- a/arch/mips/include/asm/war.h +++ b/arch/mips/include/asm/war.h @@ -9,8 +9,6 @@ #ifndef _ASM_WAR_H #define _ASM_WAR_H -#include <war.h> - /* * Work around certain R4000 CPU errata (as implemented by GCC): * @@ -72,152 +70,4 @@ #define DADDI_WAR 0 #endif -/* - * Another R4600 erratum. Due to the lack of errata information the exact - * technical details aren't known. I've experimentally found that disabling - * interrupts during indexed I-cache flushes seems to be sufficient to deal - * with the issue. - */ -#ifndef R4600_V1_INDEX_ICACHEOP_WAR -#error Check setting of R4600_V1_INDEX_ICACHEOP_WAR for your platform -#endif - -/* - * Pleasures of the R4600 V1.x. Cite from the IDT R4600 V1.7 errata: - * - * 18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D, - * Hit_Invalidate_D and Create_Dirty_Excl_D should only be - * executed if there is no other dcache activity. If the dcache is - * accessed for another instruction immeidately preceding when these - * cache instructions are executing, it is possible that the dcache - * tag match outputs used by these cache instructions will be - * incorrect. These cache instructions should be preceded by at least - * four instructions that are not any kind of load or store - * instruction. - * - * This is not allowed: lw - * nop - * nop - * nop - * cache Hit_Writeback_Invalidate_D - * - * This is allowed: lw - * nop - * nop - * nop - * nop - * cache Hit_Writeback_Invalidate_D - */ -#ifndef R4600_V1_HIT_CACHEOP_WAR -#error Check setting of R4600_V1_HIT_CACHEOP_WAR for your platform -#endif - - -/* - * Writeback and invalidate the primary cache dcache before DMA. - * - * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D, - * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only - * operate correctly if the internal data cache refill buffer is empty. These - * CACHE instructions should be separated from any potential data cache miss - * by a load instruction to an uncached address to empty the response buffer." - * (Revision 2.0 device errata from IDT available on https://www.idt.com/ - * in .pdf format.) - */ -#ifndef R4600_V2_HIT_CACHEOP_WAR -#error Check setting of R4600_V2_HIT_CACHEOP_WAR for your platform -#endif - -/* - * Workaround for the Sibyte M3 errata the text of which can be found at - * - * http://sibyte.broadcom.com/hw/bcm1250/docs/pass2errata.txt - * - * This will enable the use of a special TLB refill handler which does a - * consistency check on the information in c0_badvaddr and c0_entryhi and - * will just return and take the exception again if the information was - * found to be inconsistent. 
- */ -#ifndef BCM1250_M3_WAR -#error Check setting of BCM1250_M3_WAR for your platform -#endif - -/* - * This is a DUART workaround related to glitches around register accesses - */ -#ifndef SIBYTE_1956_WAR -#error Check setting of SIBYTE_1956_WAR for your platform -#endif - -/* - * Fill buffers not flushed on CACHE instructions - * - * Hit_Invalidate_I cacheops invalidate an icache line but the refill - * for that line can get stale data from the fill buffer instead of - * accessing memory if the previous icache miss was also to that line. - * - * Workaround: generate an icache refill from a different line - * - * Affects: - * MIPS 4K RTL revision <3.0, PRID revision <4 - */ -#ifndef MIPS4K_ICACHE_REFILL_WAR -#error Check setting of MIPS4K_ICACHE_REFILL_WAR for your platform -#endif - -/* - * Missing implicit forced flush of evictions caused by CACHE - * instruction - * - * Evictions caused by a CACHE instructions are not forced on to the - * bus. The BIU gives higher priority to fetches than to the data from - * the eviction buffer and no collision detection is performed between - * fetches and pending data from the eviction buffer. - * - * Workaround: Execute a SYNC instruction after the cache instruction - * - * Affects: - * MIPS 5Kc,5Kf RTL revision <2.3, PRID revision <8 - * MIPS 20Kc RTL revision <4.0, PRID revision <? - */ -#ifndef MIPS_CACHE_SYNC_WAR -#error Check setting of MIPS_CACHE_SYNC_WAR for your platform -#endif - -/* - * From TX49/H2 manual: "If the instruction (i.e. CACHE) is issued for - * the line which this instruction itself exists, the following - * operation is not guaranteed." - * - * Workaround: do two phase flushing for Index_Invalidate_I - */ -#ifndef TX49XX_ICACHE_INDEX_INV_WAR -#error Check setting of TX49XX_ICACHE_INDEX_INV_WAR for your platform -#endif - -/* - * The RM7000 processors and the E9000 cores have a bug (though PMC-Sierra - * opposes it being called that) where invalid instructions in the same - * I-cache line worth of instructions being fetched may case spurious - * exceptions. - */ -#ifndef ICACHE_REFILLS_WORKAROUND_WAR -#error Check setting of ICACHE_REFILLS_WORKAROUND_WAR for your platform -#endif - -/* - * On the R10000 up to version 2.6 (not sure about 2.7) there is a bug that - * may cause ll / sc and lld / scd sequences to execute non-atomically. 
- */ -#ifndef R10000_LLSC_WAR -#error Check setting of R10000_LLSC_WAR for your platform -#endif - -/* - * 34K core erratum: "Problems Executing the TLBR Instruction" - */ -#ifndef MIPS34K_MISSED_ITLB_WAR -#error Check setting of MIPS34K_MISSED_ITLB_WAR for your platform -#endif - #endif /* _ASM_WAR_H */ diff --git a/arch/mips/jz4740/Kconfig b/arch/mips/ingenic/Kconfig index c2a6fbf8e411..3238e16febd5 100644 --- a/arch/mips/jz4740/Kconfig +++ b/arch/mips/ingenic/Kconfig @@ -1,15 +1,21 @@ # SPDX-License-Identifier: GPL-2.0 + +config MACH_INGENIC_GENERIC + bool + select MACH_INGENIC + select MACH_JZ4740 + select MACH_JZ4770 + select MACH_JZ4780 + select MACH_X1000 + choice prompt "Machine type" - depends on MACH_INGENIC + depends on MACH_INGENIC_SOC default INGENIC_GENERIC_BOARD config INGENIC_GENERIC_BOARD bool "Generic board" - select MACH_JZ4740 - select MACH_JZ4770 - select MACH_JZ4780 - select MACH_X1000 + select MACH_INGENIC_GENERIC config JZ4740_QI_LB60 bool "Qi Hardware Ben NanoNote" diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 014773f0bfcd..461457b28982 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -16,8 +16,7 @@ #include <linux/memblock.h> #include <linux/spinlock.h> #include <linux/gfp.h> -#include <linux/dma-direct.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <asm/mipsregs.h> #include <asm/jazz.h> #include <asm/io.h> @@ -210,76 +209,6 @@ int vdma_free(unsigned long laddr) EXPORT_SYMBOL(vdma_free); /* - * Map certain page(s) to another physical address. - * Caller must have allocated the page(s) before. - */ -int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) -{ - int first, pages; - - if (laddr > 0xffffff) { - if (vdma_debug) - printk - ("vdma_map: Invalid logical address: %08lx\n", - laddr); - return -EINVAL; /* invalid logical address */ - } - if (paddr > 0x1fffffff) { - if (vdma_debug) - printk - ("vdma_map: Invalid physical address: %08lx\n", - paddr); - return -EINVAL; /* invalid physical address */ - } - - pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; - first = laddr >> 12; - if (vdma_debug) - printk("vdma_remap: first=%x, pages=%x\n", first, pages); - if (first + pages > VDMA_PGTBL_ENTRIES) { - if (vdma_debug) - printk("vdma_alloc: Invalid size: %08lx\n", size); - return -EINVAL; - } - - paddr &= ~(VDMA_PAGESIZE - 1); - while (pages > 0 && first < VDMA_PGTBL_ENTRIES) { - if (pgtbl[first].owner != laddr) { - if (vdma_debug) - printk("Trying to remap other's pages.\n"); - return -EPERM; /* not owner */ - } - pgtbl[first].frame = paddr; - paddr += VDMA_PAGESIZE; - first++; - pages--; - } - - /* - * Update translation table - */ - r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); - - if (vdma_debug > 2) { - int i; - pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; - first = laddr >> 12; - printk("LADDR: "); - for (i = first; i < first + pages; i++) - printk("%08x ", i << 12); - printk("\nPADDR: "); - for (i = first; i < first + pages; i++) - printk("%08x ", pgtbl[i].frame); - printk("\nOWNER: "); - for (i = first; i < first + pages; i++) - printk("%08x ", pgtbl[i].owner); - printk("\n"); - } - - return 0; -} - -/* * Translate a physical address to a logical address. * This will return the logical address of the first * match. 
@@ -562,26 +491,34 @@ int vdma_get_enable(int channel) static void *jazz_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { + struct page *page; void *ret; - ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); - if (!ret) - return NULL; + if (attrs & DMA_ATTR_NO_WARN) + gfp |= __GFP_NOWARN; - *dma_handle = vdma_alloc(virt_to_phys(ret), size); - if (*dma_handle == DMA_MAPPING_ERROR) { - dma_direct_free_pages(dev, size, ret, *dma_handle, attrs); + size = PAGE_ALIGN(size); + page = alloc_pages(gfp, get_order(size)); + if (!page) return NULL; - } - - return ret; + ret = page_address(page); + memset(ret, 0, size); + *dma_handle = vdma_alloc(virt_to_phys(ret), size); + if (*dma_handle == DMA_MAPPING_ERROR) + goto out_free_pages; + arch_dma_prep_coherent(page, size); + return (void *)(UNCAC_BASE + __pa(ret)); + +out_free_pages: + __free_pages(page, get_order(size)); + return NULL; } static void jazz_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { vdma_free(dma_handle); - dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs); + __free_pages(virt_to_page(vaddr), get_order(size)); } static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page, @@ -678,9 +615,9 @@ const struct dma_map_ops jazz_dma_ops = { .sync_single_for_device = jazz_dma_sync_single_for_device, .sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu, .sync_sg_for_device = jazz_dma_sync_sg_for_device, - .dma_supported = dma_direct_supported, - .cache_sync = arch_dma_cache_sync, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, }; EXPORT_SYMBOL(jazz_dma_ops); diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile deleted file mode 100644 index f96c0f5eca44..000000000000 --- a/arch/mips/jz4740/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for the Ingenic JZ4740. -# - -# Object file lists. 
-obj-y += setup.o - -CFLAGS_setup.o = -I$(src)/../../../scripts/dtc/libfdt diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform deleted file mode 100644 index bd35d0621b13..000000000000 --- a/arch/mips/jz4740/Platform +++ /dev/null @@ -1,3 +0,0 @@ -cflags-$(CONFIG_MACH_INGENIC) += -I$(srctree)/arch/mips/include/asm/mach-jz4740 -load-$(CONFIG_MACH_INGENIC) += 0xffffffff80010000 -zload-$(CONFIG_MACH_INGENIC) += 0xffffffff81000000 diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c deleted file mode 100644 index 51d906325ce6..000000000000 --- a/arch/mips/jz4740/setup.c +++ /dev/null @@ -1,145 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> - * Copyright (C) 2011, Maarten ter Huurne <maarten@treewalker.org> - * JZ4740 setup code - */ - -#include <linux/clocksource.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/irqchip.h> -#include <linux/kernel.h> -#include <linux/libfdt.h> -#include <linux/of_clk.h> -#include <linux/of_fdt.h> -#include <linux/pm.h> -#include <linux/sizes.h> -#include <linux/suspend.h> - -#include <asm/bootinfo.h> -#include <asm/fw/fw.h> -#include <asm/prom.h> -#include <asm/reboot.h> -#include <asm/time.h> - -static unsigned long __init get_board_mach_type(const void *fdt) -{ - if (!fdt_node_check_compatible(fdt, 0, "ingenic,x2000")) - return MACH_INGENIC_X2000; - if (!fdt_node_check_compatible(fdt, 0, "ingenic,x1830")) - return MACH_INGENIC_X1830; - if (!fdt_node_check_compatible(fdt, 0, "ingenic,x1000")) - return MACH_INGENIC_X1000; - if (!fdt_node_check_compatible(fdt, 0, "ingenic,jz4780")) - return MACH_INGENIC_JZ4780; - if (!fdt_node_check_compatible(fdt, 0, "ingenic,jz4770")) - return MACH_INGENIC_JZ4770; - if (!fdt_node_check_compatible(fdt, 0, "ingenic,jz4725b")) - return MACH_INGENIC_JZ4725B; - - return MACH_INGENIC_JZ4740; -} - -void __init plat_mem_setup(void) -{ - void *dtb = (void *)fw_passed_dtb; - - __dt_setup_arch(dtb); - - /* - * Old devicetree files for the qi,lb60 board did not have a /memory - * node. Hardcode the memory info here. 
- */ - if (!fdt_node_check_compatible(dtb, 0, "qi,lb60") && - fdt_path_offset(dtb, "/memory") < 0) - early_init_dt_add_memory_arch(0, SZ_32M); - - mips_machtype = get_board_mach_type(dtb); -} - -void __init device_tree_init(void) -{ - if (!initial_boot_params) - return; - - unflatten_and_copy_device_tree(); -} - -const char *get_system_type(void) -{ - switch (mips_machtype) { - case MACH_INGENIC_X2000: - return "X2000"; - case MACH_INGENIC_X1830: - return "X1830"; - case MACH_INGENIC_X1000: - return "X1000"; - case MACH_INGENIC_JZ4780: - return "JZ4780"; - case MACH_INGENIC_JZ4770: - return "JZ4770"; - case MACH_INGENIC_JZ4725B: - return "JZ4725B"; - default: - return "JZ4740"; - } -} - -void __init arch_init_irq(void) -{ - irqchip_init(); -} - -void __init plat_time_init(void) -{ - of_clk_init(NULL); - timer_probe(); -} - -void __init prom_init(void) -{ - fw_init_cmdline(); -} - -void __init prom_free_prom_memory(void) -{ -} - -static void jz4740_wait_instr(void) -{ - __asm__(".set push;\n" - ".set mips3;\n" - "wait;\n" - ".set pop;\n" - ); -} - -static void jz4740_halt(void) -{ - for (;;) - jz4740_wait_instr(); -} - -static int __maybe_unused jz4740_pm_enter(suspend_state_t state) -{ - jz4740_wait_instr(); - - return 0; -} - -static const struct platform_suspend_ops jz4740_pm_ops __maybe_unused = { - .valid = suspend_valid_only_mem, - .enter = jz4740_pm_enter, -}; - -static int __init jz4740_pm_init(void) -{ - if (IS_ENABLED(CONFIG_PM_SLEEP)) - suspend_set_ops(&jz4740_pm_ops); - _machine_halt = jz4740_halt; - - return 0; - -} -late_initcall(jz4740_pm_init); diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 13a26d254829..2a05b923f579 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -5,11 +5,17 @@ extra-y := head.o vmlinux.lds -obj-y += cmpxchg.o cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \ +obj-y += branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \ process.o prom.o ptrace.o reset.o setup.o signal.o \ syscall.o time.o topology.o traps.o unaligned.o watch.o \ vdso.o cacheinfo.o +ifdef CONFIG_CPU_R3K_TLB +obj-y += cpu-r3k-probe.o +else +obj-y += cpu-probe.o +endif + ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg @@ -42,6 +48,7 @@ sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o obj-y += $(sw-y) +obj-$(CONFIG_MIPS_FP_SUPPORT) += fpu-probe.o obj-$(CONFIG_CPU_R2300_FPU) += r2300_fpu.o obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index fb3e203698ea..0216ff24c392 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -20,6 +20,8 @@ #include <asm/ptrace.h> #include <linux/uaccess.h> +#include "probes-common.h" + /* * Calculate and return exception PC in case of branch delay slot * for microMIPS and MIPS16e. It does not clear the ISA mode bit. diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index e2955f1f6316..e6853697a056 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -28,336 +28,14 @@ #include <asm/spram.h> #include <linux/uaccess.h> +#include "fpu-probe.h" + #include <asm/mach-loongson64/cpucfg-emul.h> /* Hardware capabilities */ unsigned int elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); -#ifdef CONFIG_MIPS_FP_SUPPORT - -/* - * Get the FPU Implementation/Revision. 
- */ -static inline unsigned long cpu_get_fpu_id(void) -{ - unsigned long tmp, fpu_id; - - tmp = read_c0_status(); - __enable_fpu(FPU_AS_IS); - fpu_id = read_32bit_cp1_register(CP1_REVISION); - write_c0_status(tmp); - return fpu_id; -} - -/* - * Check if the CPU has an external FPU. - */ -static inline int __cpu_has_fpu(void) -{ - return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; -} - -/* - * Determine the FCSR mask for FPU hardware. - */ -static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) -{ - unsigned long sr, mask, fcsr, fcsr0, fcsr1; - - fcsr = c->fpu_csr31; - mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; - - sr = read_c0_status(); - __enable_fpu(FPU_AS_IS); - - fcsr0 = fcsr & mask; - write_32bit_cp1_register(CP1_STATUS, fcsr0); - fcsr0 = read_32bit_cp1_register(CP1_STATUS); - - fcsr1 = fcsr | ~mask; - write_32bit_cp1_register(CP1_STATUS, fcsr1); - fcsr1 = read_32bit_cp1_register(CP1_STATUS); - - write_32bit_cp1_register(CP1_STATUS, fcsr); - - write_c0_status(sr); - - c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask; -} - -/* - * Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes - * supported by FPU hardware. - */ -static void cpu_set_fpu_2008(struct cpuinfo_mips *c) -{ - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | - MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | - MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | - MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { - unsigned long sr, fir, fcsr, fcsr0, fcsr1; - - sr = read_c0_status(); - __enable_fpu(FPU_AS_IS); - - fir = read_32bit_cp1_register(CP1_REVISION); - if (fir & MIPS_FPIR_HAS2008) { - fcsr = read_32bit_cp1_register(CP1_STATUS); - - /* - * MAC2008 toolchain never landed in real world, so we're only - * testing wether it can be disabled and don't try to enabled - * it. - */ - fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | FPU_CSR_MAC2008); - write_32bit_cp1_register(CP1_STATUS, fcsr0); - fcsr0 = read_32bit_cp1_register(CP1_STATUS); - - fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008; - write_32bit_cp1_register(CP1_STATUS, fcsr1); - fcsr1 = read_32bit_cp1_register(CP1_STATUS); - - write_32bit_cp1_register(CP1_STATUS, fcsr); - - if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2)) { - /* - * The bit for MAC2008 might be reused by R6 in future, - * so we only test for R2-R5. - */ - if (fcsr0 & FPU_CSR_MAC2008) - c->options |= MIPS_CPU_MAC_2008_ONLY; - } - - if (!(fcsr0 & FPU_CSR_NAN2008)) - c->options |= MIPS_CPU_NAN_LEGACY; - if (fcsr1 & FPU_CSR_NAN2008) - c->options |= MIPS_CPU_NAN_2008; - - if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008) - c->fpu_msk31 &= ~FPU_CSR_ABS2008; - else - c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008; - - if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008) - c->fpu_msk31 &= ~FPU_CSR_NAN2008; - else - c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008; - } else { - c->options |= MIPS_CPU_NAN_LEGACY; - } - - write_c0_status(sr); - } else { - c->options |= MIPS_CPU_NAN_LEGACY; - } -} - -/* - * IEEE 754 conformance mode to use. Affects the NaN encoding and the - * ABS.fmt/NEG.fmt execution mode. - */ -static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT; - -/* - * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes - * to support by the FPU emulator according to the IEEE 754 conformance - * mode selected. Note that "relaxed" straps the emulator so that it - * allows 2008-NaN binaries even for legacy processors. 
- */ -static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) -{ - c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY); - c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); - c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); - - switch (ieee754) { - case STRICT: - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | - MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | - MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | - MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { - c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; - } else { - c->options |= MIPS_CPU_NAN_LEGACY; - c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; - } - break; - case LEGACY: - c->options |= MIPS_CPU_NAN_LEGACY; - c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; - break; - case STD2008: - c->options |= MIPS_CPU_NAN_2008; - c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; - c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; - break; - case RELAXED: - c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; - break; - } -} - -/* - * Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode - * according to the "ieee754=" parameter. - */ -static void cpu_set_nan_2008(struct cpuinfo_mips *c) -{ - switch (ieee754) { - case STRICT: - mips_use_nan_legacy = !!cpu_has_nan_legacy; - mips_use_nan_2008 = !!cpu_has_nan_2008; - break; - case LEGACY: - mips_use_nan_legacy = !!cpu_has_nan_legacy; - mips_use_nan_2008 = !cpu_has_nan_legacy; - break; - case STD2008: - mips_use_nan_legacy = !cpu_has_nan_2008; - mips_use_nan_2008 = !!cpu_has_nan_2008; - break; - case RELAXED: - mips_use_nan_legacy = true; - mips_use_nan_2008 = true; - break; - } -} - -/* - * IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override - * settings: - * - * strict: accept binaries that request a NaN encoding supported by the FPU - * legacy: only accept legacy-NaN binaries - * 2008: only accept 2008-NaN binaries - * relaxed: accept any binaries regardless of whether supported by the FPU - */ -static int __init ieee754_setup(char *s) -{ - if (!s) - return -1; - else if (!strcmp(s, "strict")) - ieee754 = STRICT; - else if (!strcmp(s, "legacy")) - ieee754 = LEGACY; - else if (!strcmp(s, "2008")) - ieee754 = STD2008; - else if (!strcmp(s, "relaxed")) - ieee754 = RELAXED; - else - return -1; - - if (!(boot_cpu_data.options & MIPS_CPU_FPU)) - cpu_set_nofpu_2008(&boot_cpu_data); - cpu_set_nan_2008(&boot_cpu_data); - - return 0; -} - -early_param("ieee754", ieee754_setup); - -/* - * Set the FIR feature flags for the FPU emulator. - */ -static void cpu_set_nofpu_id(struct cpuinfo_mips *c) -{ - u32 value; - - value = 0; - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | - MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | - MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | - MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) - value |= MIPS_FPIR_D | MIPS_FPIR_S; - if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | - MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | - MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) - value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W; - if (c->options & MIPS_CPU_NAN_2008) - value |= MIPS_FPIR_HAS2008; - c->fpu_id = value; -} - -/* Determined FPU emulator mask to use for the boot CPU with "nofpu". */ -static unsigned int mips_nofpu_msk31; - -/* - * Set options for FPU hardware. 
- */ -static void cpu_set_fpu_opts(struct cpuinfo_mips *c) -{ - c->fpu_id = cpu_get_fpu_id(); - mips_nofpu_msk31 = c->fpu_msk31; - - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | - MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | - MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | - MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { - if (c->fpu_id & MIPS_FPIR_3D) - c->ases |= MIPS_ASE_MIPS3D; - if (c->fpu_id & MIPS_FPIR_UFRP) - c->options |= MIPS_CPU_UFR; - if (c->fpu_id & MIPS_FPIR_FREP) - c->options |= MIPS_CPU_FRE; - } - - cpu_set_fpu_fcsr_mask(c); - cpu_set_fpu_2008(c); - cpu_set_nan_2008(c); -} - -/* - * Set options for the FPU emulator. - */ -static void cpu_set_nofpu_opts(struct cpuinfo_mips *c) -{ - c->options &= ~MIPS_CPU_FPU; - c->fpu_msk31 = mips_nofpu_msk31; - - cpu_set_nofpu_2008(c); - cpu_set_nan_2008(c); - cpu_set_nofpu_id(c); -} - -static int mips_fpu_disabled; - -static int __init fpu_disable(char *s) -{ - cpu_set_nofpu_opts(&boot_cpu_data); - mips_fpu_disabled = 1; - - return 1; -} - -__setup("nofpu", fpu_disable); - -#else /* !CONFIG_MIPS_FP_SUPPORT */ - -#define mips_fpu_disabled 1 - -static inline unsigned long cpu_get_fpu_id(void) -{ - return FPIR_IMP_NONE; -} - -static inline int __cpu_has_fpu(void) -{ - return 0; -} - -static void cpu_set_fpu_opts(struct cpuinfo_mips *c) -{ - /* no-op */ -} - -static void cpu_set_nofpu_opts(struct cpuinfo_mips *c) -{ - /* no-op */ -} - -#endif /* CONFIG_MIPS_FP_SUPPORT */ - static inline unsigned long cpu_get_msa_id(void) { unsigned long status, msa_id; @@ -1600,8 +1278,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | - MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST; + MIPS_CPU_LLSC; c->tlbsize = 64; + write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST); break; case PRID_IMP_R14000: if (((c->processor_id >> 4) & 0x0f) > 2) { @@ -1615,8 +1294,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | - MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST; + MIPS_CPU_LLSC; c->tlbsize = 64; + write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST); break; case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */ switch (c->processor_id & PRID_REV_MASK) { @@ -2123,7 +1803,10 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) /* XBurst does not implement the CP0 counter. 
*/ c->options &= ~MIPS_CPU_COUNTER; - BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter); + BUG_ON(__builtin_constant_p(cpu_has_counter) && cpu_has_counter); + + /* XBurst has virtually tagged icache */ + c->icache.flags |= MIPS_CACHE_VTAG; switch (c->processor_id & PRID_IMP_MASK) { @@ -2169,8 +1852,9 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) /* XBurst®1 with MXU2.0 SIMD ISA */ case PRID_IMP_XBURST_REV2: + /* Ingenic uses the WA bit to achieve write-combine memory writes */ + c->writecombine = _CACHE_CACHABLE_WA; c->cputype = CPU_XBURST; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "Ingenic XBurst"; break; @@ -2372,10 +2056,6 @@ void cpu_probe(void) else cpu_set_nofpu_opts(c); - if (cpu_has_bp_ghist) - write_c0_r10k_diag(read_c0_r10k_diag() | - R10K_DIAG_E_GHIST); - if (cpu_has_mips_r2_r6) { c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; /* R2 has Performance Counter Interrupt indicator */ diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c new file mode 100644 index 000000000000..abdbbe8c5a43 --- /dev/null +++ b/arch/mips/kernel/cpu-r3k-probe.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Processor capabilities determination functions. + * + * Copyright (C) xxxx the Anonymous + * Copyright (C) 1994 - 2006 Ralf Baechle + * Copyright (C) 2003, 2004 Maciej W. Rozycki + * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ptrace.h> +#include <linux/smp.h> +#include <linux/stddef.h> +#include <linux/export.h> + +#include <asm/bugs.h> +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/cpu-type.h> +#include <asm/fpu.h> +#include <asm/mipsregs.h> +#include <asm/elf.h> + +#include "fpu-probe.h" + +/* Hardware capabilities */ +unsigned int elf_hwcap __read_mostly; +EXPORT_SYMBOL_GPL(elf_hwcap); + +void __init check_bugs32(void) +{ + +} + +/* + * Probe whether cpu has config register by trying to play with + * alternate cache bit and see whether it matters. + * It's used by cpu_probe to distinguish between R3000A and R3081. 
+ */ +static inline int cpu_has_confreg(void) +{ +#ifdef CONFIG_CPU_R3000 + extern unsigned long r3k_cache_size(unsigned long); + unsigned long size1, size2; + unsigned long cfg = read_c0_conf(); + + size1 = r3k_cache_size(ST0_ISC); + write_c0_conf(cfg ^ R30XX_CONF_AC); + size2 = r3k_cache_size(ST0_ISC); + write_c0_conf(cfg); + return size1 != size2; +#else + return 0; +#endif +} + +static inline void set_elf_platform(int cpu, const char *plat) +{ + if (cpu == 0) + __elf_platform = plat; +} + +const char *__cpu_name[NR_CPUS]; +const char *__elf_platform; +const char *__elf_base_platform; + +void cpu_probe(void) +{ + struct cpuinfo_mips *c = &current_cpu_data; + unsigned int cpu = smp_processor_id(); + + /* + * Set a default elf platform, cpu probe may later + * overwrite it with a more precise value + */ + set_elf_platform(cpu, "mips"); + + c->processor_id = PRID_IMP_UNKNOWN; + c->fpu_id = FPIR_IMP_NONE; + c->cputype = CPU_UNKNOWN; + c->writecombine = _CACHE_UNCACHED; + + c->fpu_csr31 = FPU_CSR_RN; + c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | + FPU_CSR_CONDX | FPU_CSR_FS; + + c->srsets = 1; + + c->processor_id = read_c0_prid(); + switch (c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) { + case PRID_COMP_LEGACY | PRID_IMP_R2000: + c->cputype = CPU_R2000; + __cpu_name[cpu] = "R2000"; + c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | + MIPS_CPU_NOFPUEX; + if (__cpu_has_fpu()) + c->options |= MIPS_CPU_FPU; + c->tlbsize = 64; + break; + case PRID_COMP_LEGACY | PRID_IMP_R3000: + if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) { + if (cpu_has_confreg()) { + c->cputype = CPU_R3081E; + __cpu_name[cpu] = "R3081"; + } else { + c->cputype = CPU_R3000A; + __cpu_name[cpu] = "R3000A"; + } + } else { + c->cputype = CPU_R3000; + __cpu_name[cpu] = "R3000"; + } + c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | + MIPS_CPU_NOFPUEX; + if (__cpu_has_fpu()) + c->options |= MIPS_CPU_FPU; + c->tlbsize = 64; + break; + case PRID_COMP_LEGACY | PRID_IMP_TX39: + c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; + + if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { + c->cputype = CPU_TX3927; + __cpu_name[cpu] = "TX3927"; + c->tlbsize = 64; + } else { + switch (c->processor_id & PRID_REV_MASK) { + case PRID_REV_TX3912: + c->cputype = CPU_TX3912; + __cpu_name[cpu] = "TX3912"; + c->tlbsize = 32; + break; + case PRID_REV_TX3922: + c->cputype = CPU_TX3922; + __cpu_name[cpu] = "TX3922"; + c->tlbsize = 64; + break; + } + } + break; + } + + BUG_ON(!__cpu_name[cpu]); + BUG_ON(c->cputype == CPU_UNKNOWN); + + /* + * Platform code can force the cpu type to optimize code + * generation. In that case be sure the cpu type is correctly + * manually setup otherwise it could trigger some nasty bugs. + */ + BUG_ON(current_cpu_type() != c->cputype); + + if (mips_fpu_disabled) + c->options &= ~MIPS_CPU_FPU; + + if (c->options & MIPS_CPU_FPU) + cpu_set_fpu_opts(c); + else + cpu_set_nofpu_opts(c); +} + +void cpu_report(void) +{ + struct cpuinfo_mips *c = &current_cpu_data; + + pr_info("CPU%d revision is: %08x (%s)\n", + smp_processor_id(), c->processor_id, cpu_name_string()); + if (c->options & MIPS_CPU_FPU) + pr_info("FPU revision is: %08x\n", c->fpu_id); +} diff --git a/arch/mips/kernel/fpu-probe.c b/arch/mips/kernel/fpu-probe.c new file mode 100644 index 000000000000..e689d6a83234 --- /dev/null +++ b/arch/mips/kernel/fpu-probe.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Processor capabilities determination functions.
+ * + * Copyright (C) xxxx the Anonymous + * Copyright (C) 1994 - 2006 Ralf Baechle + * Copyright (C) 2003, 2004 Maciej W. Rozycki + * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. + */ + +#include <linux/init.h> +#include <linux/kernel.h> + +#include <asm/bugs.h> +#include <asm/cpu.h> +#include <asm/cpu-features.h> +#include <asm/cpu-type.h> +#include <asm/elf.h> +#include <asm/fpu.h> +#include <asm/mipsregs.h> + +#include "fpu-probe.h" + +/* + * Get the FPU Implementation/Revision. + */ +static inline unsigned long cpu_get_fpu_id(void) +{ + unsigned long tmp, fpu_id; + + tmp = read_c0_status(); + __enable_fpu(FPU_AS_IS); + fpu_id = read_32bit_cp1_register(CP1_REVISION); + write_c0_status(tmp); + return fpu_id; +} + +/* + * Check if the CPU has an external FPU. + */ +int __cpu_has_fpu(void) +{ + return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; +} + +/* + * Determine the FCSR mask for FPU hardware. + */ +static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) +{ + unsigned long sr, mask, fcsr, fcsr0, fcsr1; + + fcsr = c->fpu_csr31; + mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; + + sr = read_c0_status(); + __enable_fpu(FPU_AS_IS); + + fcsr0 = fcsr & mask; + write_32bit_cp1_register(CP1_STATUS, fcsr0); + fcsr0 = read_32bit_cp1_register(CP1_STATUS); + + fcsr1 = fcsr | ~mask; + write_32bit_cp1_register(CP1_STATUS, fcsr1); + fcsr1 = read_32bit_cp1_register(CP1_STATUS); + + write_32bit_cp1_register(CP1_STATUS, fcsr); + + write_c0_status(sr); + + c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask; +} + +/* + * Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes + * supported by FPU hardware. + */ +static void cpu_set_fpu_2008(struct cpuinfo_mips *c) +{ + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { + unsigned long sr, fir, fcsr, fcsr0, fcsr1; + + sr = read_c0_status(); + __enable_fpu(FPU_AS_IS); + + fir = read_32bit_cp1_register(CP1_REVISION); + if (fir & MIPS_FPIR_HAS2008) { + fcsr = read_32bit_cp1_register(CP1_STATUS); + + /* + * MAC2008 toolchain never landed in real world, so + * we're only testing whether it can be disabled and + * don't try to enabled it. + */ + fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | + FPU_CSR_MAC2008); + write_32bit_cp1_register(CP1_STATUS, fcsr0); + fcsr0 = read_32bit_cp1_register(CP1_STATUS); + + fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008; + write_32bit_cp1_register(CP1_STATUS, fcsr1); + fcsr1 = read_32bit_cp1_register(CP1_STATUS); + + write_32bit_cp1_register(CP1_STATUS, fcsr); + + if (c->isa_level & (MIPS_CPU_ISA_M32R2 | + MIPS_CPU_ISA_M64R2)) { + /* + * The bit for MAC2008 might be reused by R6 + * in future, so we only test for R2-R5. + */ + if (fcsr0 & FPU_CSR_MAC2008) + c->options |= MIPS_CPU_MAC_2008_ONLY; + } + + if (!(fcsr0 & FPU_CSR_NAN2008)) + c->options |= MIPS_CPU_NAN_LEGACY; + if (fcsr1 & FPU_CSR_NAN2008) + c->options |= MIPS_CPU_NAN_2008; + + if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008) + c->fpu_msk31 &= ~FPU_CSR_ABS2008; + else + c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008; + + if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008) + c->fpu_msk31 &= ~FPU_CSR_NAN2008; + else + c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008; + } else { + c->options |= MIPS_CPU_NAN_LEGACY; + } + + write_c0_status(sr); + } else { + c->options |= MIPS_CPU_NAN_LEGACY; + } +} + +/* + * IEEE 754 conformance mode to use. 
Affects the NaN encoding and the + * ABS.fmt/NEG.fmt execution mode. + */ +static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT; + +/* + * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes + * to support by the FPU emulator according to the IEEE 754 conformance + * mode selected. Note that "relaxed" straps the emulator so that it + * allows 2008-NaN binaries even for legacy processors. + */ +static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) +{ + c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY); + c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); + c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); + + switch (ieee754) { + case STRICT: + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { + c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; + } else { + c->options |= MIPS_CPU_NAN_LEGACY; + c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; + } + break; + case LEGACY: + c->options |= MIPS_CPU_NAN_LEGACY; + c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; + break; + case STD2008: + c->options |= MIPS_CPU_NAN_2008; + c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; + c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; + break; + case RELAXED: + c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; + break; + } +} + +/* + * Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode + * according to the "ieee754=" parameter. + */ +static void cpu_set_nan_2008(struct cpuinfo_mips *c) +{ + switch (ieee754) { + case STRICT: + mips_use_nan_legacy = !!cpu_has_nan_legacy; + mips_use_nan_2008 = !!cpu_has_nan_2008; + break; + case LEGACY: + mips_use_nan_legacy = !!cpu_has_nan_legacy; + mips_use_nan_2008 = !cpu_has_nan_legacy; + break; + case STD2008: + mips_use_nan_legacy = !cpu_has_nan_2008; + mips_use_nan_2008 = !!cpu_has_nan_2008; + break; + case RELAXED: + mips_use_nan_legacy = true; + mips_use_nan_2008 = true; + break; + } +} + +/* + * IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override + * settings: + * + * strict: accept binaries that request a NaN encoding supported by the FPU + * legacy: only accept legacy-NaN binaries + * 2008: only accept 2008-NaN binaries + * relaxed: accept any binaries regardless of whether supported by the FPU + */ +static int __init ieee754_setup(char *s) +{ + if (!s) + return -1; + else if (!strcmp(s, "strict")) + ieee754 = STRICT; + else if (!strcmp(s, "legacy")) + ieee754 = LEGACY; + else if (!strcmp(s, "2008")) + ieee754 = STD2008; + else if (!strcmp(s, "relaxed")) + ieee754 = RELAXED; + else + return -1; + + if (!(boot_cpu_data.options & MIPS_CPU_FPU)) + cpu_set_nofpu_2008(&boot_cpu_data); + cpu_set_nan_2008(&boot_cpu_data); + + return 0; +} + +early_param("ieee754", ieee754_setup); + +/* + * Set the FIR feature flags for the FPU emulator. 
+ */ +static void cpu_set_nofpu_id(struct cpuinfo_mips *c) +{ + u32 value; + + value = 0; + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) + value |= MIPS_FPIR_D | MIPS_FPIR_S; + if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) + value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W; + if (c->options & MIPS_CPU_NAN_2008) + value |= MIPS_FPIR_HAS2008; + c->fpu_id = value; +} + +/* Determined FPU emulator mask to use for the boot CPU with "nofpu". */ +static unsigned int mips_nofpu_msk31; + +/* + * Set options for FPU hardware. + */ +void cpu_set_fpu_opts(struct cpuinfo_mips *c) +{ + c->fpu_id = cpu_get_fpu_id(); + mips_nofpu_msk31 = c->fpu_msk31; + + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { + if (c->fpu_id & MIPS_FPIR_3D) + c->ases |= MIPS_ASE_MIPS3D; + if (c->fpu_id & MIPS_FPIR_UFRP) + c->options |= MIPS_CPU_UFR; + if (c->fpu_id & MIPS_FPIR_FREP) + c->options |= MIPS_CPU_FRE; + } + + cpu_set_fpu_fcsr_mask(c); + cpu_set_fpu_2008(c); + cpu_set_nan_2008(c); +} + +/* + * Set options for the FPU emulator. + */ +void cpu_set_nofpu_opts(struct cpuinfo_mips *c) +{ + c->options &= ~MIPS_CPU_FPU; + c->fpu_msk31 = mips_nofpu_msk31; + + cpu_set_nofpu_2008(c); + cpu_set_nan_2008(c); + cpu_set_nofpu_id(c); +} + +int mips_fpu_disabled; + +static int __init fpu_disable(char *s) +{ + cpu_set_nofpu_opts(&boot_cpu_data); + mips_fpu_disabled = 1; + + return 1; +} + +__setup("nofpu", fpu_disable); + diff --git a/arch/mips/kernel/fpu-probe.h b/arch/mips/kernel/fpu-probe.h new file mode 100644 index 000000000000..951ce50890d0 --- /dev/null +++ b/arch/mips/kernel/fpu-probe.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <linux/kernel.h> + +#include <asm/cpu.h> +#include <asm/cpu-info.h> + +#ifdef CONFIG_MIPS_FP_SUPPORT + +extern int mips_fpu_disabled; + +int __cpu_has_fpu(void); +void cpu_set_fpu_opts(struct cpuinfo_mips *c); +void cpu_set_nofpu_opts(struct cpuinfo_mips *c); + +#else /* !CONFIG_MIPS_FP_SUPPORT */ + +#define mips_fpu_disabled 1 + +static inline unsigned long cpu_get_fpu_id(void) +{ + return FPIR_IMP_NONE; +} + +static inline int __cpu_has_fpu(void) +{ + return 0; +} + +static inline void cpu_set_fpu_opts(struct cpuinfo_mips *c) +{ + /* no-op */ +} + +static inline void cpu_set_nofpu_opts(struct cpuinfo_mips *c) +{ + /* no-op */ +} + +#endif /* CONFIG_MIPS_FP_SUPPORT */ diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 2625232bfe52..f57e68f40a34 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -37,10 +37,6 @@ void arch_ftrace_update_code(int command) ftrace_modify_all_code(command); } -#endif - -#ifdef CONFIG_DYNAMIC_FTRACE - #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ #define JUMP_RANGE_MASK ((1UL << 28) - 1) diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 7dd234e788e6..61b73580b877 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -35,7 +35,7 @@ .macro setup_c0_status set clr .set push mfc0 t0, CP0_STATUS - or t0, ST0_CU0|\set|0x1f|\clr + or t0, ST0_KERNEL_CUMASK|\set|0x1f|\clr xor t0, 0x1f|\clr 
mtc0 t0, CP0_STATUS .set noreorder diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index d043c2f897fc..54dfba8fa77c 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -477,6 +477,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->regs[31]; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->regs[31] = (unsigned long)kretprobe_trampoline; @@ -488,57 +489,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)kretprobe_trampoline; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - instruction_pointer(regs) = orig_ret_address; - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, + kretprobe_trampoline, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 1a08428eedcf..6c590ef27648 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -167,7 +167,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, return -EINVAL; get_online_cpus(); - read_lock(&tasklist_lock); + rcu_read_lock(); retval = -ESRCH; p = find_process_by_pid(pid); @@ -181,7 +181,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, cpumask_and(&mask, &allowed, cpu_active_mask); out_unlock: - read_unlock(&tasklist_lock); + rcu_read_unlock(); put_online_cpus(); if (retval) return retval; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index f5dc316a826a..75ebd8d7bd5d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -52,6 +52,7 @@ #include <asm/inst.h> #include <asm/stacktrace.h> #include <asm/irq_regs.h> +#include <asm/exec.h> #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) @@ -68,7 +69,7 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) unsigned long status; /* New thread loses kernel privileges. */ - status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); + status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK); status |= KU_USER; regs->cp0_status = status; lose_fpu(0); @@ -133,7 +134,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs = (struct pt_regs *) childksp - 1; /* Put the stack after the struct pt_regs. 
*/ childksp = (unsigned long) childregs; - p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); + p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK; if (unlikely(p->flags & PF_KTHREAD)) { /* kernel thread */ unsigned long status = p->thread.cp0_status; @@ -279,7 +280,21 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) *poff = ip->i_format.simmediate / sizeof(ulong); return 1; } - +#ifdef CONFIG_CPU_LOONGSON64 + if ((ip->loongson3_lswc2_format.opcode == swc2_op) && + (ip->loongson3_lswc2_format.ls == 1) && + (ip->loongson3_lswc2_format.fr == 0) && + (ip->loongson3_lswc2_format.base == 29)) { + if (ip->loongson3_lswc2_format.rt == 31) { + *poff = ip->loongson3_lswc2_format.offset << 1; + return 1; + } + if (ip->loongson3_lswc2_format.rq == 31) { + *poff = (ip->loongson3_lswc2_format.offset << 1) + 1; + return 1; + } + } +#endif return 0; #endif } diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 9e50dc8df2f6..6abebd57b218 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -36,31 +36,6 @@ char *mips_get_machine_name(void) } #ifdef CONFIG_USE_OF -void __init early_init_dt_add_memory_arch(u64 base, u64 size) -{ - if (base >= PHYS_ADDR_MAX) { - pr_warn("Trying to add an invalid memory region, skipped\n"); - return; - } - - /* Truncate the passed memory region instead of type casting */ - if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) { - pr_warn("Truncate memory region %llx @ %llx to size %llx\n", - size, base, PHYS_ADDR_MAX - base); - size = PHYS_ADDR_MAX - base; - } - - add_memory_region(base, size, BOOT_MEM_RAM); -} - -int __init early_init_dt_reserve_memory_arch(phys_addr_t base, - phys_addr_t size, bool nomap) -{ - add_memory_region(base, size, - nomap ? BOOT_MEM_NOMAP : BOOT_MEM_RESERVED); - - return 0; -} void __init __dt_setup_arch(void *bph) { diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index bf5f5acab0a8..fccdbe2e7c2b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -24,7 +24,7 @@ #include <linux/kexec.h> #include <linux/sizes.h> #include <linux/device.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/decompress/generic.h> #include <linux/of_fdt.h> #include <linux/of_reserved_mem.h> @@ -91,45 +91,6 @@ unsigned long ARCH_PFN_OFFSET; EXPORT_SYMBOL(ARCH_PFN_OFFSET); #endif -void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) -{ - /* - * Note: This function only exists for historical reason, - * new code should use memblock_add or memblock_add_node instead. - */ - - /* - * If the region reaches the top of the physical address space, adjust - * the size slightly so that (start + size) doesn't overflow - */ - if (start + size - 1 == PHYS_ADDR_MAX) - --size; - - /* Sanity check */ - if (start + size < start) { - pr_warn("Trying to add an invalid memory region, skipped\n"); - return; - } - - if (start < PHYS_OFFSET) - return; - - memblock_add(start, size); - /* Reserve any memory except the ordinary RAM ranges. */ - switch (type) { - case BOOT_MEM_RAM: - break; - - case BOOT_MEM_NOMAP: /* Discard the range from the system. 
*/ - memblock_remove(start, size); - break; - - default: /* Reserve the rest of the memory types at boot time */ - memblock_reserve(start, size); - break; - } -} - void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) { void *dm = &detect_magic; @@ -146,7 +107,7 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add ((unsigned long long) sz_min) / SZ_1M, ((unsigned long long) sz_max) / SZ_1M); - add_memory_region(start, size, BOOT_MEM_RAM); + memblock_add(start, size); } /* @@ -300,8 +261,9 @@ static void __init bootmem_init(void) static void __init bootmem_init(void) { - struct memblock_region *mem; phys_addr_t ramstart, ramend; + phys_addr_t start, end; + u64 i; ramstart = memblock_start_of_DRAM(); ramend = memblock_end_of_DRAM(); @@ -338,18 +300,13 @@ static void __init bootmem_init(void) min_low_pfn = ARCH_PFN_OFFSET; max_pfn = PFN_DOWN(ramend); - for_each_memblock(memory, mem) { - unsigned long start = memblock_region_memory_base_pfn(mem); - unsigned long end = memblock_region_memory_end_pfn(mem); - + for_each_mem_range(i, &start, &end) { /* * Skip highmem here so we get an accurate max_low_pfn if low * memory stops short of high memory. * If the region overlaps HIGHMEM_START, end is clipped so * max_pfn excludes the highmem portion. */ - if (memblock_is_nomap(mem)) - continue; if (start >= PFN_DOWN(HIGHMEM_START)) continue; if (end > PFN_DOWN(HIGHMEM_START)) @@ -400,7 +357,7 @@ static int __init early_parse_mem(char *p) if (*p == '@') start = memparse(p + 1, &p); - add_memory_region(start, size, BOOT_MEM_RAM); + memblock_add(start, size); return 0; } @@ -426,13 +383,14 @@ static int __init early_parse_memmap(char *p) if (*p == '@') { start_at = memparse(p+1, &p); - add_memory_region(start_at, mem_size, BOOT_MEM_RAM); + memblock_add(start_at, mem_size); } else if (*p == '#') { pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n"); return -EINVAL; } else if (*p == '$') { start_at = memparse(p+1, &p); - add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED); + memblock_add(start_at, mem_size); + memblock_reserve(start_at, mem_size); } else { pr_err("\"memmap\" invalid format!\n"); return -EINVAL; @@ -447,16 +405,15 @@ static int __init early_parse_memmap(char *p) early_param("memmap", early_parse_memmap); #ifdef CONFIG_PROC_VMCORE -unsigned long setup_elfcorehdr, setup_elfcorehdr_size; +static unsigned long setup_elfcorehdr, setup_elfcorehdr_size; static int __init early_parse_elfcorehdr(char *p) { - struct memblock_region *mem; + phys_addr_t start, end; + u64 i; setup_elfcorehdr = memparse(p, &p); - for_each_memblock(memory, mem) { - unsigned long start = mem->base; - unsigned long end = start + mem->size; + for_each_mem_range(i, &start, &end) { if (setup_elfcorehdr >= start && setup_elfcorehdr < end) { /* * Reserve from the elf core header to the end of @@ -477,6 +434,11 @@ early_param("elfcorehdr", early_parse_elfcorehdr); #endif #ifdef CONFIG_KEXEC + +/* 64M alignment for crash kernel regions */ +#define CRASH_ALIGN SZ_64M +#define CRASH_ADDR_MAX SZ_512M + static void __init mips_parse_crashkernel(void) { unsigned long long total_mem; @@ -489,9 +451,22 @@ static void __init mips_parse_crashkernel(void) if (ret != 0 || crash_size <= 0) return; - if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) { - pr_warn("Invalid memory region reserved for crash kernel\n"); - return; + if (crash_base <= 0) { + crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX, + crash_size, 
CRASH_ALIGN); + if (!crash_base) { + pr_warn("crashkernel reservation failed - No suitable area found.\n"); + return; + } + } else { + unsigned long long start; + + start = memblock_find_in_range(crash_base, crash_base + crash_size, + crash_size, 1); + if (start != crash_base) { + pr_warn("Invalid memory region reserved for crash kernel\n"); + return; + } } crashk_res.start = crash_base; @@ -626,7 +601,7 @@ static void __init bootcmdline_init(void) * arch_mem_init - initialize memory management subsystem * * o plat_mem_setup() detects the memory configuration and will record detected - * memory areas using add_memory_region. + * memory areas using memblock_add. * * At this stage the memory configuration of the system is known to the * kernel but generic memory management system is still entirely uninitialized. @@ -720,7 +695,8 @@ static void __init arch_mem_init(char **cmdline_p) static void __init resource_init(void) { - struct memblock_region *region; + phys_addr_t start, end; + u64 i; if (UNCAC_BASE != IO_BASE) return; @@ -732,9 +708,7 @@ static void __init resource_init(void) bss_resource.start = __pa_symbol(&__bss_start); bss_resource.end = __pa_symbol(&__bss_stop) - 1; - for_each_memblock(memory, region) { - phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region)); - phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1; + for_each_mem_range(i, &start, &end) { struct resource *res; res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); @@ -743,7 +717,12 @@ static void __init resource_init(void) sizeof(struct resource)); res->start = start; - res->end = end; + /* + * In memblock, end points to the first byte after the + * range while in resources, end points to the last byte in + * the range. + */ + res->end = end - 1; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; res->name = "System RAM"; diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index a0262729cd4c..f44265025281 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -545,6 +545,12 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) return err ?: protected_restore_fp_context(sc); } +#ifdef CONFIG_WAR_ICACHE_REFILLS +#define SIGMASK ~(cpu_icache_line_size()-1) +#else +#define SIGMASK ALMASK +#endif + void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size) { @@ -565,7 +571,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, sp = sigsp(sp, ksig); - return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ?
~(cpu_icache_line_size()-1) : ALMASK)); + return (void __user *)((sp - frame_size) & SIGMASK); } /* diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index c333e5788664..2afa3eef486a 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -106,7 +106,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) if (unlikely(!access_ok((const void __user *)addr, 4))) return -EINVAL; - if (cpu_has_llsc && R10000_LLSC_WAR) { + if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { __asm__ __volatile__ ( " .set push \n" " .set arch=r4000 \n" diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index f9df9edb67a4..32817c954435 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -25,8 +25,8 @@ 15 n32 ioctl compat_sys_ioctl 16 n32 pread64 sys_pread64 17 n32 pwrite64 sys_pwrite64 -18 n32 readv compat_sys_readv -19 n32 writev compat_sys_writev +18 n32 readv sys_readv +19 n32 writev sys_writev 20 n32 access sys_access 21 n32 pipe sysm_pipe 22 n32 _newselect compat_sys_select @@ -167,7 +167,7 @@ 157 n32 sync sys_sync 158 n32 acct sys_acct 159 n32 settimeofday compat_sys_settimeofday -160 n32 mount compat_sys_mount +160 n32 mount sys_mount 161 n32 umount2 sys_umount 162 n32 swapon sys_swapon 163 n32 swapoff sys_swapoff @@ -278,7 +278,7 @@ 267 n32 splice sys_splice 268 n32 sync_file_range sys_sync_file_range 269 n32 tee sys_tee -270 n32 vmsplice compat_sys_vmsplice +270 n32 vmsplice sys_vmsplice 271 n32 move_pages compat_sys_move_pages 272 n32 set_robust_list compat_sys_set_robust_list 273 n32 get_robust_list compat_sys_get_robust_list @@ -317,8 +317,8 @@ 306 n32 syncfs sys_syncfs 307 n32 sendmmsg compat_sys_sendmmsg 308 n32 setns sys_setns -309 n32 process_vm_readv compat_sys_process_vm_readv -310 n32 process_vm_writev compat_sys_process_vm_writev +309 n32 process_vm_readv sys_process_vm_readv +310 n32 process_vm_writev sys_process_vm_writev 311 n32 kcmp sys_kcmp 312 n32 finit_module sys_finit_module 313 n32 sched_setattr sys_sched_setattr @@ -378,3 +378,4 @@ 437 n32 openat2 sys_openat2 438 n32 pidfd_getfd sys_pidfd_getfd 439 n32 faccessat2 sys_faccessat2 +440 n32 process_madvise sys_process_madvise diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 557f9954a2b9..9e4ea3c31b1c 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -354,3 +354,4 @@ 437 n64 openat2 sys_openat2 438 n64 pidfd_getfd sys_pidfd_getfd 439 n64 faccessat2 sys_faccessat2 +440 n64 process_madvise sys_process_madvise diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 195b43cf27c8..29f5f28cf5ce 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -29,7 +29,7 @@ 18 o32 unused18 sys_ni_syscall 19 o32 lseek sys_lseek 20 o32 getpid sys_getpid -21 o32 mount sys_mount compat_sys_mount +21 o32 mount sys_mount 22 o32 umount sys_oldumount 23 o32 setuid sys_setuid 24 o32 getuid sys_getuid @@ -156,8 +156,8 @@ 142 o32 _newselect sys_select compat_sys_select 143 o32 flock sys_flock 144 o32 msync sys_msync -145 o32 readv sys_readv compat_sys_readv -146 o32 writev sys_writev compat_sys_writev +145 o32 readv sys_readv +146 o32 writev sys_writev 147 o32 cacheflush sys_cacheflush 148 o32 cachectl sys_cachectl 149 o32 sysmips __sys_sysmips @@ -318,7 +318,7 @@ 304 o32 splice sys_splice 305 
o32 sync_file_range sys_sync_file_range sys32_sync_file_range 306 o32 tee sys_tee -307 o32 vmsplice sys_vmsplice compat_sys_vmsplice +307 o32 vmsplice sys_vmsplice 308 o32 move_pages sys_move_pages compat_sys_move_pages 309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list 310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list @@ -356,8 +356,8 @@ 342 o32 syncfs sys_syncfs 343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg 344 o32 setns sys_setns -345 o32 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -346 o32 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +345 o32 process_vm_readv sys_process_vm_readv +346 o32 process_vm_writev sys_process_vm_writev 347 o32 kcmp sys_kcmp 348 o32 finit_module sys_finit_module 349 o32 sched_setattr sys_sched_setattr @@ -427,3 +427,4 @@ 437 o32 openat2 sys_openat2 438 o32 pidfd_getfd sys_pidfd_getfd 439 o32 faccessat2 sys_faccessat2 +440 o32 process_madvise sys_process_madvise diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index cf788591f091..e0352958e2f7 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2204,7 +2204,7 @@ static void configure_status(void) * flag that some firmware may have left set and the TS bit (for * IP27). Set XX for ISA IV code to work. */ - unsigned int status_set = ST0_CU0; + unsigned int status_set = ST0_KERNEL_CUMASK; #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index f185a85a27c1..5e97e9d02f98 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -202,6 +202,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS /* These must appear regardless of . */ .gptab.sdata : { diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index b10342018d19..917fac1636b7 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -112,11 +112,15 @@ static u32 pmu_clk_cr_b[] = { #define PMU_PPE_DP BIT(23) #define PMU_PPE_DPLUS BIT(24) #define PMU_USB1_P BIT(26) +#define PMU_GPHY3 BIT(26) /* grx390 */ #define PMU_USB1 BIT(27) #define PMU_SWITCH BIT(28) #define PMU_PPE_TOP BIT(29) +#define PMU_GPHY0 BIT(29) /* ar10, xrx390 */ #define PMU_GPHY BIT(30) +#define PMU_GPHY1 BIT(30) /* ar10, xrx390 */ #define PMU_PCIE_CLK BIT(31) +#define PMU_GPHY2 BIT(31) /* ar10, xrx390 */ #define PMU1_PCIE_PHY BIT(0) /* vr9-specific,moved in ar10/grx390 */ #define PMU1_PCIE_CTL BIT(1) @@ -465,6 +469,9 @@ void __init ltq_soc_init(void) if (of_machine_is_compatible("lantiq,grx390") || of_machine_is_compatible("lantiq,ar10")) { + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY0); + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY1); + clkdev_add_pmu("1e108000.switch", "gphy2", 0, 0, PMU_GPHY2); clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB0_P); clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB1_P); /* rc 0 */ @@ -496,6 +503,7 @@ void __init ltq_soc_init(void) } else if (of_machine_is_compatible("lantiq,grx390")) { clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(), ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz()); + clkdev_add_pmu("1e108000.switch", "gphy3", 0, 0, PMU_GPHY3); clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); /* rc 2 */ @@ -514,8 +522,6 @@ void __init ltq_soc_init(void) clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP | PMU_PPE_TC); 
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); - clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 87fda0713b84..a46db0807195 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -308,8 +308,8 @@ EXPORT_SYMBOL(csum_partial) /* * checksum and copy routines based on memcpy.S * - * csum_partial_copy_nocheck(src, dst, len, sum) - * __csum_partial_copy_kernel(src, dst, len, sum, errp) + * csum_partial_copy_nocheck(src, dst, len) + * __csum_partial_copy_kernel(src, dst, len) * * See "Spec" in memcpy.S for details. Unlike __copy_user, all * function in this file use the standard calling convention. @@ -318,26 +318,11 @@ EXPORT_SYMBOL(csum_partial) #define src a0 #define dst a1 #define len a2 -#define psum a3 #define sum v0 #define odd t8 -#define errptr t9 /* - * The exception handler for loads requires that: - * 1- AT contain the address of the byte just past the end of the source - * of the copy, - * 2- src_entry <= src < AT, and - * 3- (dst - src) == (dst_entry - src_entry), - * The _entry suffix denotes values when __copy_user was called. - * - * (1) is set up up by __csum_partial_copy_from_user and maintained by - * not writing AT in __csum_partial_copy - * (2) is met by incrementing src by the number of bytes copied - * (3) is met by not doing loads between a pair of increments of dst and src - * - * The exception handlers for stores stores -EFAULT to errptr and return. - * These handlers do not need to overwrite any data. + * All exception handlers simply return 0. 
*/ /* Instruction type */ @@ -358,11 +343,11 @@ EXPORT_SYMBOL(csum_partial) * addr : Address * handler : Exception handler */ -#define EXC(insn, type, reg, addr, handler) \ +#define EXC(insn, type, reg, addr) \ .if \mode == LEGACY_MODE; \ 9: insn reg, addr; \ .section __ex_table,"a"; \ - PTR 9b, handler; \ + PTR 9b, .L_exc; \ .previous; \ /* This is enabled in EVA mode */ \ .else; \ @@ -371,7 +356,7 @@ EXPORT_SYMBOL(csum_partial) ((\to == USEROP) && (type == ST_INSN)); \ 9: __BUILD_EVA_INSN(insn##e, reg, addr); \ .section __ex_table,"a"; \ - PTR 9b, handler; \ + PTR 9b, .L_exc; \ .previous; \ .else; \ /* EVA without exception */ \ @@ -384,14 +369,14 @@ EXPORT_SYMBOL(csum_partial) #ifdef USE_DOUBLE #define LOADK ld /* No exception */ -#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler) -#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) -#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler) -#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler) -#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) -#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler) -#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler) -#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler) +#define LOAD(reg, addr) EXC(ld, LD_INSN, reg, addr) +#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr) +#define LOADL(reg, addr) EXC(ldl, LD_INSN, reg, addr) +#define LOADR(reg, addr) EXC(ldr, LD_INSN, reg, addr) +#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr) +#define STOREL(reg, addr) EXC(sdl, ST_INSN, reg, addr) +#define STORER(reg, addr) EXC(sdr, ST_INSN, reg, addr) +#define STORE(reg, addr) EXC(sd, ST_INSN, reg, addr) #define ADD daddu #define SUB dsubu #define SRL dsrl @@ -404,14 +389,14 @@ EXPORT_SYMBOL(csum_partial) #else #define LOADK lw /* No exception */ -#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler) -#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) -#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler) -#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler) -#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) -#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler) -#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler) -#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler) +#define LOAD(reg, addr) EXC(lw, LD_INSN, reg, addr) +#define LOADBU(reg, addr) EXC(lbu, LD_INSN, reg, addr) +#define LOADL(reg, addr) EXC(lwl, LD_INSN, reg, addr) +#define LOADR(reg, addr) EXC(lwr, LD_INSN, reg, addr) +#define STOREB(reg, addr) EXC(sb, ST_INSN, reg, addr) +#define STOREL(reg, addr) EXC(swl, ST_INSN, reg, addr) +#define STORER(reg, addr) EXC(swr, ST_INSN, reg, addr) +#define STORE(reg, addr) EXC(sw, ST_INSN, reg, addr) #define ADD addu #define SUB subu #define SRL srl @@ -450,22 +435,9 @@ EXPORT_SYMBOL(csum_partial) .set at=v1 #endif - .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck + .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to - PTR_ADDU AT, src, len /* See (1) above. 
*/ - /* initialize __nocheck if this the first time we execute this - * macro - */ -#ifdef CONFIG_64BIT - move errptr, a4 -#else - lw errptr, 16(sp) -#endif - .if \__nocheck == 1 - FEXPORT(csum_partial_copy_nocheck) - EXPORT_SYMBOL(csum_partial_copy_nocheck) - .endif - move sum, zero + li sum, -1 move odd, zero /* * Note: dst & src may be unaligned, len may be 0 @@ -497,31 +469,31 @@ EXPORT_SYMBOL(csum_partial) SUB len, 8*NBYTES # subtract here for bgez loop .align 4 1: - LOAD(t0, UNIT(0)(src), .Ll_exc\@) - LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) - LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) - LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) - LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) - LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@) - LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@) - LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@) + LOAD(t0, UNIT(0)(src)) + LOAD(t1, UNIT(1)(src)) + LOAD(t2, UNIT(2)(src)) + LOAD(t3, UNIT(3)(src)) + LOAD(t4, UNIT(4)(src)) + LOAD(t5, UNIT(5)(src)) + LOAD(t6, UNIT(6)(src)) + LOAD(t7, UNIT(7)(src)) SUB len, len, 8*NBYTES ADD src, src, 8*NBYTES - STORE(t0, UNIT(0)(dst), .Ls_exc\@) + STORE(t0, UNIT(0)(dst)) ADDC(t0, t1) - STORE(t1, UNIT(1)(dst), .Ls_exc\@) + STORE(t1, UNIT(1)(dst)) ADDC(sum, t0) - STORE(t2, UNIT(2)(dst), .Ls_exc\@) + STORE(t2, UNIT(2)(dst)) ADDC(t2, t3) - STORE(t3, UNIT(3)(dst), .Ls_exc\@) + STORE(t3, UNIT(3)(dst)) ADDC(sum, t2) - STORE(t4, UNIT(4)(dst), .Ls_exc\@) + STORE(t4, UNIT(4)(dst)) ADDC(t4, t5) - STORE(t5, UNIT(5)(dst), .Ls_exc\@) + STORE(t5, UNIT(5)(dst)) ADDC(sum, t4) - STORE(t6, UNIT(6)(dst), .Ls_exc\@) + STORE(t6, UNIT(6)(dst)) ADDC(t6, t7) - STORE(t7, UNIT(7)(dst), .Ls_exc\@) + STORE(t7, UNIT(7)(dst)) ADDC(sum, t6) .set reorder /* DADDI_WAR */ ADD dst, dst, 8*NBYTES @@ -541,19 +513,19 @@ EXPORT_SYMBOL(csum_partial) /* * len >= 4*NBYTES */ - LOAD(t0, UNIT(0)(src), .Ll_exc\@) - LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) - LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) - LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) + LOAD(t0, UNIT(0)(src)) + LOAD(t1, UNIT(1)(src)) + LOAD(t2, UNIT(2)(src)) + LOAD(t3, UNIT(3)(src)) SUB len, len, 4*NBYTES ADD src, src, 4*NBYTES - STORE(t0, UNIT(0)(dst), .Ls_exc\@) + STORE(t0, UNIT(0)(dst)) ADDC(t0, t1) - STORE(t1, UNIT(1)(dst), .Ls_exc\@) + STORE(t1, UNIT(1)(dst)) ADDC(sum, t0) - STORE(t2, UNIT(2)(dst), .Ls_exc\@) + STORE(t2, UNIT(2)(dst)) ADDC(t2, t3) - STORE(t3, UNIT(3)(dst), .Ls_exc\@) + STORE(t3, UNIT(3)(dst)) ADDC(sum, t2) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES @@ -566,10 +538,10 @@ EXPORT_SYMBOL(csum_partial) beq rem, len, .Lcopy_bytes\@ nop 1: - LOAD(t0, 0(src), .Ll_exc\@) + LOAD(t0, 0(src)) ADD src, src, NBYTES SUB len, len, NBYTES - STORE(t0, 0(dst), .Ls_exc\@) + STORE(t0, 0(dst)) ADDC(sum, t0) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES @@ -592,10 +564,10 @@ EXPORT_SYMBOL(csum_partial) ADD t1, dst, len # t1 is just past last byte of dst li bits, 8*NBYTES SLL rem, len, 3 # rem = number of bits to keep - LOAD(t0, 0(src), .Ll_exc\@) + LOAD(t0, 0(src)) SUB bits, bits, rem # bits = number of bits to discard SHIFT_DISCARD t0, t0, bits - STREST(t0, -1(t1), .Ls_exc\@) + STREST(t0, -1(t1)) SHIFT_DISCARD_REVERT t0, t0, bits .set reorder ADDC(sum, t0) @@ -612,12 +584,12 @@ EXPORT_SYMBOL(csum_partial) * Set match = (src and dst have same alignment) */ #define match rem - LDFIRST(t3, FIRST(0)(src), .Ll_exc\@) + LDFIRST(t3, FIRST(0)(src)) ADD t2, zero, NBYTES - LDREST(t3, REST(0)(src), .Ll_exc_copy\@) + LDREST(t3, REST(0)(src)) SUB t2, t2, t1 # t2 = number of bytes copied xor match, t0, t1 - STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) + STFIRST(t3, 
FIRST(0)(dst)) SLL t4, t1, 3 # t4 = number of bits to discard SHIFT_DISCARD t3, t3, t4 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ @@ -639,26 +611,26 @@ EXPORT_SYMBOL(csum_partial) * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). */ - LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) - LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@) + LDFIRST(t0, FIRST(0)(src)) + LDFIRST(t1, FIRST(1)(src)) SUB len, len, 4*NBYTES - LDREST(t0, REST(0)(src), .Ll_exc_copy\@) - LDREST(t1, REST(1)(src), .Ll_exc_copy\@) - LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@) - LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@) - LDREST(t2, REST(2)(src), .Ll_exc_copy\@) - LDREST(t3, REST(3)(src), .Ll_exc_copy\@) + LDREST(t0, REST(0)(src)) + LDREST(t1, REST(1)(src)) + LDFIRST(t2, FIRST(2)(src)) + LDFIRST(t3, FIRST(3)(src)) + LDREST(t2, REST(2)(src)) + LDREST(t3, REST(3)(src)) ADD src, src, 4*NBYTES #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif - STORE(t0, UNIT(0)(dst), .Ls_exc\@) + STORE(t0, UNIT(0)(dst)) ADDC(t0, t1) - STORE(t1, UNIT(1)(dst), .Ls_exc\@) + STORE(t1, UNIT(1)(dst)) ADDC(sum, t0) - STORE(t2, UNIT(2)(dst), .Ls_exc\@) + STORE(t2, UNIT(2)(dst)) ADDC(t2, t3) - STORE(t3, UNIT(3)(dst), .Ls_exc\@) + STORE(t3, UNIT(3)(dst)) ADDC(sum, t2) .set reorder /* DADDI_WAR */ ADD dst, dst, 4*NBYTES @@ -671,11 +643,11 @@ EXPORT_SYMBOL(csum_partial) beq rem, len, .Lcopy_bytes\@ nop 1: - LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) - LDREST(t0, REST(0)(src), .Ll_exc_copy\@) + LDFIRST(t0, FIRST(0)(src)) + LDREST(t0, REST(0)(src)) ADD src, src, NBYTES SUB len, len, NBYTES - STORE(t0, 0(dst), .Ls_exc\@) + STORE(t0, 0(dst)) ADDC(sum, t0) .set reorder /* DADDI_WAR */ ADD dst, dst, NBYTES @@ -696,11 +668,10 @@ EXPORT_SYMBOL(csum_partial) #endif move t2, zero # partial word li t3, SHIFT_START # shift -/* use .Ll_exc_copy here to return correct sum on fault */ #define COPY_BYTE(N) \ - LOADBU(t0, N(src), .Ll_exc_copy\@); \ + LOADBU(t0, N(src)); \ SUB len, len, 1; \ - STOREB(t0, N(dst), .Ls_exc\@); \ + STOREB(t0, N(dst)); \ SLLV t0, t0, t3; \ addu t3, SHIFT_INC; \ beqz len, .Lcopy_bytes_done\@; \ @@ -714,9 +685,9 @@ EXPORT_SYMBOL(csum_partial) COPY_BYTE(4) COPY_BYTE(5) #endif - LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@) + LOADBU(t0, NBYTES-2(src)) SUB len, len, 1 - STOREB(t0, NBYTES-2(dst), .Ls_exc\@) + STOREB(t0, NBYTES-2(dst)) SLLV t0, t0, t3 or t2, t0 .Lcopy_bytes_done\@: @@ -753,97 +724,31 @@ EXPORT_SYMBOL(csum_partial) #endif .set pop .set reorder - ADDC32(sum, psum) jr ra .set noreorder + .endm -.Ll_exc_copy\@: - /* - * Copy bytes from src until faulting load address (or until a - * lb faults) - * - * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) - * may be more than a byte beyond the last address. - * Hence, the lb below may get an exception. - * - * Assumes src < THREAD_BUADDR($28) - */ - LOADK t0, TI_TASK($28) - li t2, SHIFT_START - LOADK t0, THREAD_BUADDR(t0) -1: - LOADBU(t1, 0(src), .Ll_exc\@) - ADD src, src, 1 - sb t1, 0(dst) # can't fault -- we're copy_from_user - SLLV t1, t1, t2 - addu t2, SHIFT_INC - ADDC(sum, t1) - .set reorder /* DADDI_WAR */ - ADD dst, dst, 1 - bne src, t0, 1b - .set noreorder -.Ll_exc\@: - LOADK t0, TI_TASK($28) - nop - LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address - nop - SUB len, AT, t0 # len number of uncopied bytes - /* - * Here's where we rely on src and dst being incremented in tandem, - * See (3) above. 
- * dst += (fault addr - src) to put dst at first byte to clear - */ - ADD dst, t0 # compute start address in a1 - SUB dst, src - /* - * Clear len bytes starting at dst. Can't call __bzero because it - * might modify len. An inefficient loop for these rare times... - */ - .set reorder /* DADDI_WAR */ - SUB src, len, 1 - beqz len, .Ldone\@ - .set noreorder -1: sb zero, 0(dst) - ADD dst, dst, 1 - .set push - .set noat -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS - bnez src, 1b - SUB src, src, 1 -#else - li v1, 1 - bnez src, 1b - SUB src, src, v1 -#endif - li v1, -EFAULT - b .Ldone\@ - sw v1, (errptr) - -.Ls_exc\@: - li v0, -1 /* invalid checksum */ - li v1, -EFAULT + .set noreorder +.L_exc: jr ra - sw v1, (errptr) - .set pop - .endm + li v0, 0 -LEAF(__csum_partial_copy_kernel) -EXPORT_SYMBOL(__csum_partial_copy_kernel) +FEXPORT(__csum_partial_copy_nocheck) +EXPORT_SYMBOL(__csum_partial_copy_nocheck) #ifndef CONFIG_EVA FEXPORT(__csum_partial_copy_to_user) EXPORT_SYMBOL(__csum_partial_copy_to_user) FEXPORT(__csum_partial_copy_from_user) EXPORT_SYMBOL(__csum_partial_copy_from_user) #endif -__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1 -END(__csum_partial_copy_kernel) +__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP #ifdef CONFIG_EVA LEAF(__csum_partial_copy_to_user) -__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0 +__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP END(__csum_partial_copy_to_user) LEAF(__csum_partial_copy_from_user) -__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0 +__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP END(__csum_partial_copy_from_user) #endif diff --git a/arch/mips/loongson2ef/common/mem.c b/arch/mips/loongson2ef/common/mem.c index ae21f1c62baa..057d58bb470e 100644 --- a/arch/mips/loongson2ef/common/mem.c +++ b/arch/mips/loongson2ef/common/mem.c @@ -17,10 +17,7 @@ u32 memsize, highmemsize; void __init prom_init_memory(void) { - add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM); - - add_memory_region(memsize << 20, LOONGSON_PCI_MEM_START - (memsize << - 20), BOOT_MEM_RESERVED); + memblock_add(0x0, (memsize << 20)); #ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG { @@ -41,12 +38,7 @@ void __init prom_init_memory(void) #ifdef CONFIG_64BIT if (highmemsize > 0) - add_memory_region(LOONGSON_HIGHMEM_START, - highmemsize << 20, BOOT_MEM_RAM); - - add_memory_region(LOONGSON_PCI_MEM_END + 1, LOONGSON_HIGHMEM_START - - LOONGSON_PCI_MEM_END - 1, BOOT_MEM_RESERVED); - + memblock_add(LOONGSON_HIGHMEM_START, highmemsize << 20); #endif /* !CONFIG_64BIT */ } diff --git a/arch/mips/loongson2ef/fuloong-2e/dma.c b/arch/mips/loongson2ef/fuloong-2e/dma.c index e122292bf666..cea167d8aba8 100644 --- a/arch/mips/loongson2ef/fuloong-2e/dma.c +++ b/arch/mips/loongson2ef/fuloong-2e/dma.c @@ -1,12 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/dma-direct.h> -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { return paddr | 0x80000000; } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) { return dma_addr & 0x7fffffff; } diff --git a/arch/mips/loongson2ef/lemote-2f/dma.c b/arch/mips/loongson2ef/lemote-2f/dma.c index abf0e39d7e46..3c9e99456357 100644 --- a/arch/mips/loongson2ef/lemote-2f/dma.c +++ b/arch/mips/loongson2ef/lemote-2f/dma.c @@ -1,12 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/dma-direct.h> -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) 
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { return paddr | 0x80000000; } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) { if (dma_addr > 0x8fffffff) return dma_addr; diff --git a/arch/mips/loongson32/common/prom.c b/arch/mips/loongson32/common/prom.c index fd76114fa3b0..c133b5adf34e 100644 --- a/arch/mips/loongson32/common/prom.c +++ b/arch/mips/loongson32/common/prom.c @@ -7,8 +7,8 @@ #include <linux/io.h> #include <linux/init.h> +#include <linux/memblock.h> #include <linux/serial_reg.h> -#include <asm/bootinfo.h> #include <asm/fw/fw.h> #include <loongson1.h> @@ -42,5 +42,5 @@ void __init prom_free_prom_memory(void) void __init plat_mem_setup(void) { - add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM); + memblock_add(0x0, (memsize << 20)); } diff --git a/arch/mips/loongson64/dma.c b/arch/mips/loongson64/dma.c index dbfe6e82fddd..364f2f27c872 100644 --- a/arch/mips/loongson64/dma.c +++ b/arch/mips/loongson64/dma.c @@ -4,7 +4,7 @@ #include <linux/swiotlb.h> #include <boot_param.h> -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from * Loongson-3's 48bit address space and embed it into 40bit */ @@ -13,7 +13,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) return ((nid << 44) ^ paddr) | (nid << node_id_offset); } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { /* We extract 2bit node id (bit 44~47, only bit 44~45 used now) from * Loongson-3's 48bit address space and embed it into 40bit */ diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index ea8bb1bc667e..cf9459f79f9b 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -98,27 +98,6 @@ static void __init init_topology_matrix(void) } } -static unsigned long nid_to_addroffset(unsigned int nid) -{ - unsigned long result; - switch (nid) { - case 0: - default: - result = NODE0_ADDRSPACE_OFFSET; - break; - case 1: - result = NODE1_ADDRSPACE_OFFSET; - break; - case 2: - result = NODE2_ADDRSPACE_OFFSET; - break; - case 3: - result = NODE3_ADDRSPACE_OFFSET; - break; - } - return result; -} - static void __init szmem(unsigned int node) { u32 i, mem_type; @@ -146,7 +125,7 @@ static void __init szmem(unsigned int node) pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", start_pfn, end_pfn, num_physpages); memblock_add_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), node); + PFN_PHYS(node_psize), node); break; case SYSTEM_RAM_HIGH: start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; @@ -158,7 +137,7 @@ static void __init szmem(unsigned int node) pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", start_pfn, end_pfn, num_physpages); memblock_add_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), node); + PFN_PHYS(node_psize), node); break; case SYSTEM_RAM_RESERVED: pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", @@ -175,7 +154,7 @@ static void __init node_mem_init(unsigned int node) unsigned long node_addrspace_offset; unsigned long start_pfn, end_pfn; - node_addrspace_offset = nid_to_addroffset(node); + node_addrspace_offset = nid_to_addrbase(node); pr_info("Node%d's addrspace_offset is 0x%lx\n", node, node_addrspace_offset); @@ -242,9 +221,7 @@ void __init paging_init(void) 
unsigned long zones_size[MAX_NR_ZONES] = {0, }; pagetable_init(); -#ifdef CONFIG_ZONE_DMA32 zones_size[ZONE_DMA32] = MAX_DMA32_PFN; -#endif zones_size[ZONE_NORMAL] = max_low_pfn; free_area_init(zones_size); } diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c index bc7671079f0c..3bb8a1ed9348 100644 --- a/arch/mips/loongson64/reset.c +++ b/arch/mips/loongson64/reset.c @@ -15,11 +15,6 @@ #include <loongson.h> #include <boot_param.h> -static inline void loongson_reboot(void) -{ - ((void (*)(void))ioremap(LOONGSON_BOOT_BASE, 4)) (); -} - static void loongson_restart(char *command) { diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 0ef717093262..9cede7ce37e6 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -130,9 +130,10 @@ struct bcache_ops *bcops = &no_sc_ops; #define R4600_HIT_CACHEOP_WAR_IMPL \ do { \ - if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \ + if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && \ + cpu_is_r4600_v2_x()) \ *(volatile unsigned long *)CKSEG1; \ - if (R4600_V1_HIT_CACHEOP_WAR) \ + if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP)) \ __asm__ __volatile__("nop;nop;nop;nop"); \ } while (0) @@ -238,7 +239,7 @@ static void r4k_blast_dcache_setup(void) r4k_blast_dcache = blast_dcache128; } -/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ +/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */ #define JUMP_TO_ALIGN(order) \ __asm__ __volatile__( \ "b\t1f\n\t" \ @@ -366,10 +367,11 @@ static void r4k_blast_icache_page_indexed_setup(void) else if (ic_lsize == 16) r4k_blast_icache_page_indexed = blast_icache16_page_indexed; else if (ic_lsize == 32) { - if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x()) + if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) && + cpu_is_r4600_v1_x()) r4k_blast_icache_page_indexed = blast_icache32_r4600_v1_page_indexed; - else if (TX49XX_ICACHE_INDEX_INV_WAR) + else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV)) r4k_blast_icache_page_indexed = tx49_blast_icache32_page_indexed; else if (current_cpu_type() == CPU_LOONGSON2EF) @@ -394,9 +396,10 @@ static void r4k_blast_icache_setup(void) else if (ic_lsize == 16) r4k_blast_icache = blast_icache16; else if (ic_lsize == 32) { - if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x()) + if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) && + cpu_is_r4600_v1_x()) r4k_blast_icache = blast_r4600_v1_icache32; - else if (TX49XX_ICACHE_INDEX_INV_WAR) + else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV)) r4k_blast_icache = tx49_blast_icache32; else if (current_cpu_type() == CPU_LOONGSON2EF) r4k_blast_icache = loongson2_blast_icache32; diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 563c2c0d0c81..38d3d9143b47 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -5,8 +5,7 @@ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. 
*/ #include <linux/dma-direct.h> -#include <linux/dma-noncoherent.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/highmem.h> #include <asm/cache.h> @@ -55,22 +54,34 @@ void *arch_dma_set_uncached(void *addr, size_t size) return (void *)(__pa(addr) + UNCAC_BASE); } -static inline void dma_sync_virt(void *addr, size_t size, +static inline void dma_sync_virt_for_device(void *addr, size_t size, enum dma_data_direction dir) { switch (dir) { case DMA_TO_DEVICE: dma_cache_wback((unsigned long)addr, size); break; - case DMA_FROM_DEVICE: dma_cache_inv((unsigned long)addr, size); break; - case DMA_BIDIRECTIONAL: dma_cache_wback_inv((unsigned long)addr, size); break; + default: + BUG(); + } +} +static inline void dma_sync_virt_for_cpu(void *addr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + break; + case DMA_FROM_DEVICE: + case DMA_BIDIRECTIONAL: + dma_cache_inv((unsigned long)addr, size); + break; default: BUG(); } @@ -82,7 +93,7 @@ static inline void dma_sync_virt(void *addr, size_t size, * configured then the bulk of this loop gets optimized out. */ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, - enum dma_data_direction dir) + enum dma_data_direction dir, bool for_device) { struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); unsigned long offset = paddr & ~PAGE_MASK; @@ -90,18 +101,20 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, do { size_t len = left; + void *addr; if (PageHighMem(page)) { - void *addr; - if (offset + len > PAGE_SIZE) len = PAGE_SIZE - offset; + } + + addr = kmap_atomic(page); + if (for_device) + dma_sync_virt_for_device(addr + offset, len, dir); + else + dma_sync_virt_for_cpu(addr + offset, len, dir); + kunmap_atomic(addr); - addr = kmap_atomic(page); - dma_sync_virt(addr + offset, len, dir); - kunmap_atomic(addr); - } else - dma_sync_virt(page_address(page) + offset, size, dir); offset = 0; page++; left -= len; @@ -111,7 +124,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - dma_sync_phys(paddr, size, dir); + dma_sync_phys(paddr, size, dir, true); } #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU @@ -119,18 +132,10 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { if (cpu_needs_post_dma_flush()) - dma_sync_phys(paddr, size, dir); + dma_sync_phys(paddr, size, dir, false); } #endif -void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - dma_sync_virt(vaddr, size, direction); -} - #ifdef CONFIG_DMA_PERDEV_COHERENT void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index cd805b005509..504bc4047c4c 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -250,14 +250,16 @@ static inline void build_clear_pref(u32 **buf, int off) if (cpu_has_cache_cdex_s) { uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); } else if (cpu_has_cache_cdex_p) { - if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) { + if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && + cpu_is_r4600_v1_x()) { uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); } - if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) + if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && + cpu_is_r4600_v2_x()) uasm_i_lw(buf, ZERO, ZERO, AT); 
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); @@ -302,7 +304,7 @@ void build_clear_page(void) else uasm_i_ori(&buf, A2, A0, off); - if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) + if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size) @@ -402,14 +404,16 @@ static inline void build_copy_store_pref(u32 **buf, int off) if (cpu_has_cache_cdex_s) { uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); } else if (cpu_has_cache_cdex_p) { - if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) { + if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && + cpu_is_r4600_v1_x()) { uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); uasm_i_nop(buf); } - if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) + if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && + cpu_is_r4600_v2_x()) uasm_i_lw(buf, ZERO, ZERO, AT); uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); @@ -453,7 +457,7 @@ void build_copy_page(void) else uasm_i_ori(&buf, A2, A0, off); - if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) + if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) * diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 97dc0511e63f..dd0a5becaabd 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -228,6 +228,7 @@ static inline int __init mips_sc_probe(void) * contradicted by all documentation. */ case MACH_INGENIC_JZ4770: + case MACH_INGENIC_JZ4775: c->scache.ways = 4; break; @@ -236,6 +237,7 @@ static inline int __init mips_sc_probe(void) * but that is contradicted by all documentation. */ case MACH_INGENIC_X1000: + case MACH_INGENIC_X1000E: c->scache.sets = 256; c->scache.ways = 4; break; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 14f8ba93367f..a7521b8f7658 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -83,14 +83,18 @@ static inline int r4k_250MHZhwbug(void) return 0; } +extern int sb1250_m3_workaround_needed(void); + static inline int __maybe_unused bcm1250_m3_war(void) { - return BCM1250_M3_WAR; + if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS)) + return sb1250_m3_workaround_needed(); + return 0; } static inline int __maybe_unused r10000_llsc_war(void) { - return R10000_LLSC_WAR; + return IS_ENABLED(CONFIG_WAR_R10000_LLSC); } static int use_bbit_insns(void) diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index c56f129c9a4b..81dd226d6b6b 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -394,7 +394,7 @@ I_u2u1u3(_lddir) void uasm_i_pref(u32 **buf, unsigned int a, signed int b, unsigned int c) { - if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5) + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && a <= 24 && a != 5) /* * As per erratum Core-14449, replace prefetches 0-4, * 6-24 with 'pref 28'. 
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index c4ad5a9b4bc1..e1fb8b534944 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -16,7 +16,6 @@ #include <asm/dma-coherence.h> #include <asm/fw/fw.h> -#include <asm/mach-malta/malta-dtshim.h> #include <asm/mips-cps.h> #include <asm/mips-boards/generic.h> #include <asm/mips-boards/malta.h> diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 1a0fc5b62ba4..9adc0c1b4ffc 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -70,7 +70,7 @@ static void nlm_fixup_mem(void) const int pref_backup = 512; struct memblock_region *mem; - for_each_memblock(memory, mem) { + for_each_mem_region(mem) { memblock_remove(mem->base + mem->size - pref_backup, pref_backup); } @@ -89,7 +89,7 @@ static void __init xlp_init_mem_from_bars(void) if (map[i] > 0x10000000 && map[i] < 0x20000000) map[i] = 0x20000000; - add_memory_region(map[i], map[i+1] - map[i], BOOT_MEM_RAM); + memblock_add(map[i], map[i+1] - map[i]); } } diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 72ceddc9a03f..627e88101316 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -34,6 +34,7 @@ #include <linux/kernel.h> #include <linux/serial_8250.h> +#include <linux/memblock.h> #include <linux/pm.h> #include <asm/idle.h> @@ -149,7 +150,7 @@ static void prom_add_memory(void) bootm = (void *)(long)nlm_prom_info.psb_mem_map; for (i = 0; i < bootm->nr_map; i++) { - if (bootm->map[i].type != BOOT_MEM_RAM) + if (bootm->map[i].type != NLM_BOOT_MEM_RAM) continue; start = bootm->map[i].addr; size = bootm->map[i].size; @@ -158,7 +159,7 @@ static void prom_add_memory(void) if (i == 0 && start == 0 && size == 0x0c000000) size = 0x0ff00000; - add_memory_region(start, size - pref_backup, BOOT_MEM_RAM); + memblock_add(start, size - pref_backup); } } diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index 490953f51528..0b15730cef88 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -170,12 +170,12 @@ static inline dma_addr_t ar2315_dev_offset(struct device *dev) return 0; } -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { return paddr + ar2315_dev_offset(dev); } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) { return dma_addr - ar2315_dev_offset(dev); } @@ -423,9 +423,8 @@ static int ar2315_pci_probe(struct platform_device *pdev) return -EINVAL; apc->irq = irq; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "ar2315-pci-ctrl"); - apc->mmr_mem = devm_ioremap_resource(dev, res); + apc->mmr_mem = devm_platform_ioremap_resource_byname(pdev, + "ar2315-pci-ctrl"); if (IS_ERR(apc->mmr_mem)) return PTR_ERR(apc->mmr_mem); diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index a9f8e7c881bd..118760b3fa82 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c @@ -336,8 +336,8 @@ static int ar71xx_pci_probe(struct platform_device *pdev) if (!apc) return -ENOMEM; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); - apc->cfg_base = devm_ioremap_resource(&pdev->dev, res); + apc->cfg_base = devm_platform_ioremap_resource_byname(pdev, + "cfg_base"); if (IS_ERR(apc->cfg_base)) return PTR_ERR(apc->cfg_base); diff --git a/arch/mips/pci/pci-ar724x.c 
b/arch/mips/pci/pci-ar724x.c index 869d5c9a2f8d..807558b251ef 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -372,18 +372,15 @@ static int ar724x_pci_probe(struct platform_device *pdev) if (!apc) return -ENOMEM; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base"); - apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res); + apc->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl_base"); if (IS_ERR(apc->ctrl_base)) return PTR_ERR(apc->ctrl_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); - apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res); + apc->devcfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg_base"); if (IS_ERR(apc->devcfg_base)) return PTR_ERR(apc->devcfg_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base"); - apc->crp_base = devm_ioremap_resource(&pdev->dev, res); + apc->crp_base = devm_platform_ioremap_resource_byname(pdev, "crp_base"); if (IS_ERR(apc->crp_base)) return PTR_ERR(apc->crp_base); diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c index 9b3cc775c55e..50f7d42cca5a 100644 --- a/arch/mips/pci/pci-xtalk-bridge.c +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -25,7 +25,7 @@ /* * Common phys<->dma mapping for platforms using pci xtalk bridge */ -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { struct pci_dev *pdev = to_pci_dev(dev); struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus); @@ -33,7 +33,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) return bc->baddr + paddr; } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) { return dma_addr & ~(0xffUL << 56); } diff --git a/arch/mips/pnx833x/Makefile b/arch/mips/pnx833x/Makefile deleted file mode 100644 index 927268a58237..000000000000 --- a/arch/mips/pnx833x/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_SOC_PNX833X) += common/ -obj-$(CONFIG_NXP_STB220) += stb22x/ -obj-$(CONFIG_NXP_STB225) += stb22x/ diff --git a/arch/mips/pnx833x/Platform b/arch/mips/pnx833x/Platform deleted file mode 100644 index e5286a49fc3e..000000000000 --- a/arch/mips/pnx833x/Platform +++ /dev/null @@ -1,4 +0,0 @@ -# NXP STB225 -cflags-$(CONFIG_SOC_PNX833X) += -I$(srctree)/arch/mips/include/asm/mach-pnx833x -load-$(CONFIG_NXP_STB220) += 0xffffffff80001000 -load-$(CONFIG_NXP_STB225) += 0xffffffff80001000 diff --git a/arch/mips/pnx833x/common/Makefile b/arch/mips/pnx833x/common/Makefile deleted file mode 100644 index 9b4d394112b0..000000000000 --- a/arch/mips/pnx833x/common/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-y := interrupts.o platform.o prom.o setup.o reset.o diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c deleted file mode 100644 index 2fbbabcac386..000000000000 --- a/arch/mips/pnx833x/common/interrupts.c +++ /dev/null @@ -1,303 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * interrupts.c: Interrupt mappings for PNX833X. 
- * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - */ -#include <linux/kernel.h> -#include <linux/irq.h> -#include <linux/hardirq.h> -#include <linux/interrupt.h> -#include <asm/mipsregs.h> -#include <asm/irq_cpu.h> -#include <asm/setup.h> -#include <irq.h> -#include <irq-mapping.h> -#include <gpio.h> - -static int mips_cpu_timer_irq; - -static const unsigned int irq_prio[PNX833X_PIC_NUM_IRQ] = -{ - 0, /* unused */ - 4, /* PNX833X_PIC_I2C0_INT 1 */ - 4, /* PNX833X_PIC_I2C1_INT 2 */ - 1, /* PNX833X_PIC_UART0_INT 3 */ - 1, /* PNX833X_PIC_UART1_INT 4 */ - 6, /* PNX833X_PIC_TS_IN0_DV_INT 5 */ - 6, /* PNX833X_PIC_TS_IN0_DMA_INT 6 */ - 7, /* PNX833X_PIC_GPIO_INT 7 */ - 4, /* PNX833X_PIC_AUDIO_DEC_INT 8 */ - 5, /* PNX833X_PIC_VIDEO_DEC_INT 9 */ - 4, /* PNX833X_PIC_CONFIG_INT 10 */ - 4, /* PNX833X_PIC_AOI_INT 11 */ - 9, /* PNX833X_PIC_SYNC_INT 12 */ - 9, /* PNX8335_PIC_SATA_INT 13 */ - 4, /* PNX833X_PIC_OSD_INT 14 */ - 9, /* PNX833X_PIC_DISP1_INT 15 */ - 4, /* PNX833X_PIC_DEINTERLACER_INT 16 */ - 9, /* PNX833X_PIC_DISPLAY2_INT 17 */ - 4, /* PNX833X_PIC_VC_INT 18 */ - 4, /* PNX833X_PIC_SC_INT 19 */ - 9, /* PNX833X_PIC_IDE_INT 20 */ - 9, /* PNX833X_PIC_IDE_DMA_INT 21 */ - 6, /* PNX833X_PIC_TS_IN1_DV_INT 22 */ - 6, /* PNX833X_PIC_TS_IN1_DMA_INT 23 */ - 4, /* PNX833X_PIC_SGDX_DMA_INT 24 */ - 4, /* PNX833X_PIC_TS_OUT_INT 25 */ - 4, /* PNX833X_PIC_IR_INT 26 */ - 3, /* PNX833X_PIC_VMSP1_INT 27 */ - 3, /* PNX833X_PIC_VMSP2_INT 28 */ - 4, /* PNX833X_PIC_PIBC_INT 29 */ - 4, /* PNX833X_PIC_TS_IN0_TRD_INT 30 */ - 4, /* PNX833X_PIC_SGDX_TPD_INT 31 */ - 5, /* PNX833X_PIC_USB_INT 32 */ - 4, /* PNX833X_PIC_TS_IN1_TRD_INT 33 */ - 4, /* PNX833X_PIC_CLOCK_INT 34 */ - 4, /* PNX833X_PIC_SGDX_PARSER_INT 35 */ - 4, /* PNX833X_PIC_VMSP_DMA_INT 36 */ -#if defined(CONFIG_SOC_PNX8335) - 4, /* PNX8335_PIC_MIU_INT 37 */ - 4, /* PNX8335_PIC_AVCHIP_IRQ_INT 38 */ - 9, /* PNX8335_PIC_SYNC_HD_INT 39 */ - 9, /* PNX8335_PIC_DISP_HD_INT 40 */ - 9, /* PNX8335_PIC_DISP_SCALER_INT 41 */ - 4, /* PNX8335_PIC_OSD_HD1_INT 42 */ - 4, /* PNX8335_PIC_DTL_WRITER_Y_INT 43 */ - 4, /* PNX8335_PIC_DTL_WRITER_C_INT 44 */ - 4, /* PNX8335_PIC_DTL_EMULATOR_Y_IR_INT 45 */ - 4, /* PNX8335_PIC_DTL_EMULATOR_C_IR_INT 46 */ - 4, /* PNX8335_PIC_DENC_TTX_INT 47 */ - 4, /* PNX8335_PIC_MMI_SIF0_INT 48 */ - 4, /* PNX8335_PIC_MMI_SIF1_INT 49 */ - 4, /* PNX8335_PIC_MMI_CDMMU_INT 50 */ - 4, /* PNX8335_PIC_PIBCS_INT 51 */ - 12, /* PNX8335_PIC_ETHERNET_INT 52 */ - 3, /* PNX8335_PIC_VMSP1_0_INT 53 */ - 3, /* PNX8335_PIC_VMSP1_1_INT 54 */ - 4, /* PNX8335_PIC_VMSP1_DMA_INT 55 */ - 4, /* PNX8335_PIC_TDGR_DE_INT 56 */ - 4, /* PNX8335_PIC_IR1_IRQ_INT 57 */ -#endif -}; - -static void pnx833x_timer_dispatch(void) -{ - do_IRQ(mips_cpu_timer_irq); -} - -static void pic_dispatch(void) -{ - unsigned int irq = PNX833X_REGFIELD(PIC_INT_SRC, INT_SRC); - - if ((irq >= 1) && (irq < (PNX833X_PIC_NUM_IRQ))) { - unsigned long priority = PNX833X_PIC_INT_PRIORITY; - PNX833X_PIC_INT_PRIORITY = irq_prio[irq]; - - if (irq == PNX833X_PIC_GPIO_INT) { - unsigned long mask = PNX833X_PIO_INT_STATUS & PNX833X_PIO_INT_ENABLE; - int pin; - while ((pin = ffs(mask & 0xffff))) { - pin -= 1; - do_IRQ(PNX833X_GPIO_IRQ_BASE + pin); - mask &= ~(1 << pin); - } - } else { - do_IRQ(irq + PNX833X_PIC_IRQ_BASE); - } - - PNX833X_PIC_INT_PRIORITY = priority; - } else { - printk(KERN_ERR "plat_irq_dispatch: unexpected irq %u\n", irq); - } -} - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_status() & 
read_c0_cause(); - - if (pending & STATUSF_IP4) - pic_dispatch(); - else if (pending & STATUSF_IP7) - do_IRQ(PNX833X_TIMER_IRQ); - else - spurious_interrupt(); -} - -static inline void pnx833x_hard_enable_pic_irq(unsigned int irq) -{ - /* Currently we do this by setting IRQ priority to 1. - If priority support is being implemented, 1 should be repalced - by a better value. */ - PNX833X_PIC_INT_REG(irq) = irq_prio[irq]; -} - -static inline void pnx833x_hard_disable_pic_irq(unsigned int irq) -{ - /* Disable IRQ by writing setting it's priority to 0 */ - PNX833X_PIC_INT_REG(irq) = 0; -} - -static DEFINE_RAW_SPINLOCK(pnx833x_irq_lock); - -static unsigned int pnx833x_startup_pic_irq(unsigned int irq) -{ - unsigned long flags; - unsigned int pic_irq = irq - PNX833X_PIC_IRQ_BASE; - - raw_spin_lock_irqsave(&pnx833x_irq_lock, flags); - pnx833x_hard_enable_pic_irq(pic_irq); - raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags); - return 0; -} - -static void pnx833x_enable_pic_irq(struct irq_data *d) -{ - unsigned long flags; - unsigned int pic_irq = d->irq - PNX833X_PIC_IRQ_BASE; - - raw_spin_lock_irqsave(&pnx833x_irq_lock, flags); - pnx833x_hard_enable_pic_irq(pic_irq); - raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags); -} - -static void pnx833x_disable_pic_irq(struct irq_data *d) -{ - unsigned long flags; - unsigned int pic_irq = d->irq - PNX833X_PIC_IRQ_BASE; - - raw_spin_lock_irqsave(&pnx833x_irq_lock, flags); - pnx833x_hard_disable_pic_irq(pic_irq); - raw_spin_unlock_irqrestore(&pnx833x_irq_lock, flags); -} - -static DEFINE_RAW_SPINLOCK(pnx833x_gpio_pnx833x_irq_lock); - -static void pnx833x_enable_gpio_irq(struct irq_data *d) -{ - int pin = d->irq - PNX833X_GPIO_IRQ_BASE; - unsigned long flags; - raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags); - pnx833x_gpio_enable_irq(pin); - raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags); -} - -static void pnx833x_disable_gpio_irq(struct irq_data *d) -{ - int pin = d->irq - PNX833X_GPIO_IRQ_BASE; - unsigned long flags; - raw_spin_lock_irqsave(&pnx833x_gpio_pnx833x_irq_lock, flags); - pnx833x_gpio_disable_irq(pin); - raw_spin_unlock_irqrestore(&pnx833x_gpio_pnx833x_irq_lock, flags); -} - -static int pnx833x_set_type_gpio_irq(struct irq_data *d, unsigned int flow_type) -{ - int pin = d->irq - PNX833X_GPIO_IRQ_BASE; - int gpio_mode; - - switch (flow_type) { - case IRQ_TYPE_EDGE_RISING: - gpio_mode = GPIO_INT_EDGE_RISING; - break; - case IRQ_TYPE_EDGE_FALLING: - gpio_mode = GPIO_INT_EDGE_FALLING; - break; - case IRQ_TYPE_EDGE_BOTH: - gpio_mode = GPIO_INT_EDGE_BOTH; - break; - case IRQ_TYPE_LEVEL_HIGH: - gpio_mode = GPIO_INT_LEVEL_HIGH; - break; - case IRQ_TYPE_LEVEL_LOW: - gpio_mode = GPIO_INT_LEVEL_LOW; - break; - default: - gpio_mode = GPIO_INT_NONE; - break; - } - - pnx833x_gpio_setup_irq(gpio_mode, pin); - - return 0; -} - -static struct irq_chip pnx833x_pic_irq_type = { - .name = "PNX-PIC", - .irq_enable = pnx833x_enable_pic_irq, - .irq_disable = pnx833x_disable_pic_irq, -}; - -static struct irq_chip pnx833x_gpio_irq_type = { - .name = "PNX-GPIO", - .irq_enable = pnx833x_enable_gpio_irq, - .irq_disable = pnx833x_disable_gpio_irq, - .irq_set_type = pnx833x_set_type_gpio_irq, -}; - -void __init arch_init_irq(void) -{ - unsigned int irq; - - /* setup standard internal cpu irqs */ - mips_cpu_irq_init(); - - /* Set IRQ information in irq_desc */ - for (irq = PNX833X_PIC_IRQ_BASE; irq < (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ); irq++) { - pnx833x_hard_disable_pic_irq(irq); - irq_set_chip_and_handler(irq, 
&pnx833x_pic_irq_type, - handle_simple_irq); - } - - for (irq = PNX833X_GPIO_IRQ_BASE; irq < (PNX833X_GPIO_IRQ_BASE + PNX833X_GPIO_NUM_IRQ); irq++) - irq_set_chip_and_handler(irq, &pnx833x_gpio_irq_type, - handle_simple_irq); - - /* Set PIC priority limiter register to 0 */ - PNX833X_PIC_INT_PRIORITY = 0; - - /* Setup GPIO IRQ dispatching */ - pnx833x_startup_pic_irq(PNX833X_PIC_GPIO_INT); - - /* Enable PIC IRQs (HWIRQ2) */ - if (cpu_has_vint) - set_vi_handler(4, pic_dispatch); - - write_c0_status(read_c0_status() | IE_IRQ2); -} - -unsigned int get_c0_compare_int(void) -{ - if (cpu_has_vint) - set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch); - - mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; - return mips_cpu_timer_irq; -} - -void __init plat_time_init(void) -{ - /* calculate mips_hpt_frequency based on PNX833X_CLOCK_CPUCP_CTL reg */ - - extern unsigned long mips_hpt_frequency; - unsigned long reg = PNX833X_CLOCK_CPUCP_CTL; - - if (!(PNX833X_BIT(reg, CLOCK_CPUCP_CTL, EXIT_RESET))) { - /* Functional clock is disabled so use crystal frequency */ - mips_hpt_frequency = 25; - } else { -#if defined(CONFIG_SOC_PNX8335) - /* Functional clock is enabled, so get clock multiplier */ - mips_hpt_frequency = 90 + (10 * PNX8335_REGFIELD(CLOCK_PLL_CPU_CTL, FREQ)); -#else - static const unsigned long int freq[4] = {240, 160, 120, 80}; - mips_hpt_frequency = freq[PNX833X_FIELD(reg, CLOCK_CPUCP_CTL, DIV_CLOCK)]; -#endif - } - - printk(KERN_INFO "CPU clock is %ld MHz\n", mips_hpt_frequency); - - mips_hpt_frequency *= 500000; -} diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c deleted file mode 100644 index 5fa0373f1c9e..000000000000 --- a/arch/mips/pnx833x/common/platform.c +++ /dev/null @@ -1,224 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * platform.c: platform support for PNX833X. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - * - * Based on software written by: - * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. 
- */ -#include <linux/device.h> -#include <linux/dma-mapping.h> -#include <linux/platform_device.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/resource.h> -#include <linux/serial.h> -#include <linux/serial_pnx8xxx.h> -#include <linux/mtd/platnand.h> - -#include <irq.h> -#include <irq-mapping.h> -#include <pnx833x.h> - -static u64 uart_dmamask = DMA_BIT_MASK(32); - -static struct resource pnx833x_uart_resources[] = { - [0] = { - .start = PNX833X_UART0_PORTS_START, - .end = PNX833X_UART0_PORTS_END, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = PNX833X_PIC_UART0_INT, - .end = PNX833X_PIC_UART0_INT, - .flags = IORESOURCE_IRQ, - }, - [2] = { - .start = PNX833X_UART1_PORTS_START, - .end = PNX833X_UART1_PORTS_END, - .flags = IORESOURCE_MEM, - }, - [3] = { - .start = PNX833X_PIC_UART1_INT, - .end = PNX833X_PIC_UART1_INT, - .flags = IORESOURCE_IRQ, - }, -}; - -struct pnx8xxx_port pnx8xxx_ports[] = { - [0] = { - .port = { - .type = PORT_PNX8XXX, - .iotype = UPIO_MEM, - .membase = (void __iomem *)PNX833X_UART0_PORTS_START, - .mapbase = PNX833X_UART0_PORTS_START, - .irq = PNX833X_PIC_UART0_INT, - .uartclk = 3692300, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .line = 0, - }, - }, - [1] = { - .port = { - .type = PORT_PNX8XXX, - .iotype = UPIO_MEM, - .membase = (void __iomem *)PNX833X_UART1_PORTS_START, - .mapbase = PNX833X_UART1_PORTS_START, - .irq = PNX833X_PIC_UART1_INT, - .uartclk = 3692300, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .line = 1, - }, - }, -}; - -static struct platform_device pnx833x_uart_device = { - .name = "pnx8xxx-uart", - .id = -1, - .dev = { - .dma_mask = &uart_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(32), - .platform_data = pnx8xxx_ports, - }, - .num_resources = ARRAY_SIZE(pnx833x_uart_resources), - .resource = pnx833x_uart_resources, -}; - -static u64 ehci_dmamask = DMA_BIT_MASK(32); - -static struct resource pnx833x_usb_ehci_resources[] = { - [0] = { - .start = PNX833X_USB_PORTS_START, - .end = PNX833X_USB_PORTS_END, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = PNX833X_PIC_USB_INT, - .end = PNX833X_PIC_USB_INT, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device pnx833x_usb_ehci_device = { - .name = "pnx833x-ehci", - .id = -1, - .dev = { - .dma_mask = &ehci_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, - .num_resources = ARRAY_SIZE(pnx833x_usb_ehci_resources), - .resource = pnx833x_usb_ehci_resources, -}; - -static u64 ethernet_dmamask = DMA_BIT_MASK(32); - -static struct resource pnx833x_ethernet_resources[] = { - [0] = { - .start = PNX8335_IP3902_PORTS_START, - .end = PNX8335_IP3902_PORTS_END, - .flags = IORESOURCE_MEM, - }, -#ifdef CONFIG_SOC_PNX8335 - [1] = { - .start = PNX8335_PIC_ETHERNET_INT, - .end = PNX8335_PIC_ETHERNET_INT, - .flags = IORESOURCE_IRQ, - }, -#endif -}; - -static struct platform_device pnx833x_ethernet_device = { - .name = "ip3902-eth", - .id = -1, - .dev = { - .dma_mask = ðernet_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, - .num_resources = ARRAY_SIZE(pnx833x_ethernet_resources), - .resource = pnx833x_ethernet_resources, -}; - -static struct resource pnx833x_sata_resources[] = { - [0] = { - .start = PNX8335_SATA_PORTS_START, - .end = PNX8335_SATA_PORTS_END, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = PNX8335_PIC_SATA_INT, - .end = PNX8335_PIC_SATA_INT, - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device pnx833x_sata_device = { - .name = "pnx833x-sata", - .id = -1, - .num_resources = ARRAY_SIZE(pnx833x_sata_resources), - .resource = 
pnx833x_sata_resources, -}; - -static void -pnx833x_flash_nand_cmd_ctrl(struct nand_chip *this, int cmd, unsigned int ctrl) -{ - unsigned long nandaddr = (unsigned long)this->legacy.IO_ADDR_W; - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_CLE) - writeb(cmd, (void __iomem *)(nandaddr + PNX8335_NAND_CLE_MASK)); - else - writeb(cmd, (void __iomem *)(nandaddr + PNX8335_NAND_ALE_MASK)); -} - -static struct platform_nand_data pnx833x_flash_nand_data = { - .chip = { - .nr_chips = 1, - .chip_delay = 25, - }, - .ctrl = { - .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl - } -}; - -/* - * Set start to be the correct address (PNX8335_NAND_BASE with no 0xb!!), - * 12 bytes more seems to be the standard that allows for NAND access. - */ -static struct resource pnx833x_flash_nand_resource = { - .start = PNX8335_NAND_BASE, - .end = PNX8335_NAND_BASE + 12, - .flags = IORESOURCE_MEM, -}; - -static struct platform_device pnx833x_flash_nand = { - .name = "gen_nand", - .id = -1, - .num_resources = 1, - .resource = &pnx833x_flash_nand_resource, - .dev = { - .platform_data = &pnx833x_flash_nand_data, - }, -}; - -static struct platform_device *pnx833x_platform_devices[] __initdata = { - &pnx833x_uart_device, - &pnx833x_usb_ehci_device, - &pnx833x_ethernet_device, - &pnx833x_sata_device, - &pnx833x_flash_nand, -}; - -static int __init pnx833x_platform_init(void) -{ - return platform_add_devices(pnx833x_platform_devices, - ARRAY_SIZE(pnx833x_platform_devices)); -} - -arch_initcall(pnx833x_platform_init); diff --git a/arch/mips/pnx833x/common/prom.c b/arch/mips/pnx833x/common/prom.c deleted file mode 100644 index 12733ef25782..000000000000 --- a/arch/mips/pnx833x/common/prom.c +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * prom.c: - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - * - * Based on software written by: - * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. - */ -#include <linux/init.h> -#include <asm/bootinfo.h> -#include <linux/string.h> - -void __init prom_init_cmdline(void) -{ - int argc = fw_arg0; - char **argv = (char **)fw_arg1; - char *c = &(arcs_cmdline[0]); - int i; - - for (i = 1; i < argc; i++) { - strcpy(c, argv[i]); - c += strlen(argv[i]); - if (i < argc-1) - *c++ = ' '; - } - *c = 0; -} - -char __init *prom_getenv(char *envname) -{ - extern char **prom_envp; - char **env = prom_envp; - int i; - - i = strlen(envname); - - while (*env) { - if (strncmp(envname, *env, i) == 0 && *(*env+i) == '=') - return *env + i + 1; - env++; - } - - return 0; -} - -void __init prom_free_prom_memory(void) -{ -} diff --git a/arch/mips/pnx833x/common/reset.c b/arch/mips/pnx833x/common/reset.c deleted file mode 100644 index b48e83bf912b..000000000000 --- a/arch/mips/pnx833x/common/reset.c +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * reset.c: reset support for PNX833X. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - * - * Based on software written by: - * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. 
- */ -#include <linux/reboot.h> -#include <pnx833x.h> - -void pnx833x_machine_restart(char *command) -{ - PNX833X_RESET_CONTROL_2 = 0; - PNX833X_RESET_CONTROL = 0; -} - -void pnx833x_machine_halt(void) -{ - while (1) - __asm__ __volatile__ ("wait"); - -} - -void pnx833x_machine_power_off(void) -{ - pnx833x_machine_halt(); -} diff --git a/arch/mips/pnx833x/common/setup.c b/arch/mips/pnx833x/common/setup.c deleted file mode 100644 index abf68d92ce4a..000000000000 --- a/arch/mips/pnx833x/common/setup.c +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * setup.c: Setup PNX833X Soc. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - * - * Based on software written by: - * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. - */ -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/io.h> -#include <linux/pci.h> -#include <asm/reboot.h> -#include <pnx833x.h> -#include <gpio.h> - -extern void pnx833x_board_setup(void); -extern void pnx833x_machine_restart(char *); -extern void pnx833x_machine_halt(void); -extern void pnx833x_machine_power_off(void); - -int __init plat_mem_setup(void) -{ - /* set mips clock to 320MHz */ -#if defined(CONFIG_SOC_PNX8335) - PNX8335_WRITEFIELD(0x17, CLOCK_PLL_CPU_CTL, FREQ); -#endif - pnx833x_gpio_init(); /* so it will be ready in board_setup() */ - - pnx833x_board_setup(); - - _machine_restart = pnx833x_machine_restart; - _machine_halt = pnx833x_machine_halt; - pm_power_off = pnx833x_machine_power_off; - - /* IO/MEM resources. */ - set_io_port_base(KSEG1); - ioport_resource.start = 0; - ioport_resource.end = ~0; - iomem_resource.start = 0; - iomem_resource.end = ~0; - - return 0; -} diff --git a/arch/mips/pnx833x/stb22x/Makefile b/arch/mips/pnx833x/stb22x/Makefile deleted file mode 100644 index 7c5ddf36b735..000000000000 --- a/arch/mips/pnx833x/stb22x/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-y := board.o diff --git a/arch/mips/pnx833x/stb22x/board.c b/arch/mips/pnx833x/stb22x/board.c deleted file mode 100644 index 93d8e7b73427..000000000000 --- a/arch/mips/pnx833x/stb22x/board.c +++ /dev/null @@ -1,120 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * board.c: STB225 board support. - * - * Copyright 2008 NXP Semiconductors - * Chris Steel <chris.steel@nxp.com> - * Daniel Laird <daniel.j.laird@nxp.com> - * - * Based on software written by: - * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. - */ -#include <linux/init.h> -#include <asm/bootinfo.h> -#include <linux/mm.h> -#include <pnx833x.h> -#include <gpio.h> - -/* endianess twiddlers */ -#define PNX8335_DEBUG0 0x4400 -#define PNX8335_DEBUG1 0x4404 -#define PNX8335_DEBUG2 0x4408 -#define PNX8335_DEBUG3 0x440c -#define PNX8335_DEBUG4 0x4410 -#define PNX8335_DEBUG5 0x4414 -#define PNX8335_DEBUG6 0x4418 -#define PNX8335_DEBUG7 0x441c - -int prom_argc; -char **prom_argv, **prom_envp; - -extern void prom_init_cmdline(void); -extern char *prom_getenv(char *envname); - -const char *get_system_type(void) -{ - return "NXP STB22x"; -} - -static inline unsigned long env_or_default(char *env, unsigned long dfl) -{ - char *str = prom_getenv(env); - return str ? 
simple_strtol(str, 0, 0) : dfl; -} - -void __init prom_init(void) -{ - unsigned long memsize; - - prom_argc = fw_arg0; - prom_argv = (char **)fw_arg1; - prom_envp = (char **)fw_arg2; - - prom_init_cmdline(); - - memsize = env_or_default("memsize", 0x02000000); - add_memory_region(0, memsize, BOOT_MEM_RAM); -} - -void __init pnx833x_board_setup(void) -{ - pnx833x_gpio_select_function_alt(4); - pnx833x_gpio_select_output(4); - pnx833x_gpio_select_function_alt(5); - pnx833x_gpio_select_input(5); - pnx833x_gpio_select_function_alt(6); - pnx833x_gpio_select_input(6); - pnx833x_gpio_select_function_alt(7); - pnx833x_gpio_select_output(7); - - pnx833x_gpio_select_function_alt(25); - pnx833x_gpio_select_function_alt(26); - - pnx833x_gpio_select_function_alt(27); - pnx833x_gpio_select_function_alt(28); - pnx833x_gpio_select_function_alt(29); - pnx833x_gpio_select_function_alt(30); - pnx833x_gpio_select_function_alt(31); - pnx833x_gpio_select_function_alt(32); - pnx833x_gpio_select_function_alt(33); - -#if IS_ENABLED(CONFIG_MTD_NAND_PLATFORM) - /* Setup MIU for NAND access on CS0... - * - * (it seems that we must also configure CS1 for reliable operation, - * otherwise the first read ID command will fail if it's read as 4 bytes - * but pass if it's read as 1 word.) - */ - - /* Setup MIU CS0 & CS1 timing */ - PNX833X_MIU_SEL0 = 0; - PNX833X_MIU_SEL1 = 0; - PNX833X_MIU_SEL0_TIMING = 0x50003081; - PNX833X_MIU_SEL1_TIMING = 0x50003081; - - /* Setup GPIO 00 for use as MIU CS1 (CS0 is not multiplexed, so does not need this) */ - pnx833x_gpio_select_function_alt(0); - - /* Setup GPIO 04 to input NAND read/busy signal */ - pnx833x_gpio_select_function_io(4); - pnx833x_gpio_select_input(4); - - /* Setup GPIO 05 to disable NAND write protect */ - pnx833x_gpio_select_function_io(5); - pnx833x_gpio_select_output(5); - pnx833x_gpio_write(1, 5); - -#elif IS_ENABLED(CONFIG_MTD_CFI) - - /* Set up MIU for 16-bit NOR access on CS0 and CS1... */ - - /* Setup MIU CS0 & CS1 timing */ - PNX833X_MIU_SEL0 = 1; - PNX833X_MIU_SEL1 = 1; - PNX833X_MIU_SEL0_TIMING = 0x6A08D082; - PNX833X_MIU_SEL1_TIMING = 0x6A08D082; - - /* Setup GPIO 00 for use as MIU CS1 (CS0 is not multiplexed, so does not need this) */ - pnx833x_gpio_select_function_alt(0); -#endif -} diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 90c6d4a11c5d..cbae9d23ab7f 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -84,8 +84,7 @@ void __init plat_mem_setup(void) if (memory_dtb) of_scan_flat_dt(early_init_dt_scan_memory, NULL); else if (soc_info.mem_size) - add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, - BOOT_MEM_RAM); + memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M); else detect_memory_region(soc_info.mem_base, soc_info.mem_size_min * SZ_1M, diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c index 303cc3dc1749..a9d1f2019dc3 100644 --- a/arch/mips/rb532/prom.c +++ b/arch/mips/rb532/prom.c @@ -126,5 +126,5 @@ void __init prom_init(void) /* give all RAM to boot allocator, * except for the first 0x400 and the last 0x200 bytes */ - add_memory_region(ddrbase + 0x400, memsize - 0x600, BOOT_MEM_RAM); + memblock_add(ddrbase + 0x400, memsize - 0x600); } diff --git a/arch/mips/sgi-ip30/ip30-common.h b/arch/mips/sgi-ip30/ip30-common.h index d2bcaee712f3..7b5db24b6279 100644 --- a/arch/mips/sgi-ip30/ip30-common.h +++ b/arch/mips/sgi-ip30/ip30-common.h @@ -3,6 +3,20 @@ #ifndef __IP30_COMMON_H #define __IP30_COMMON_H +/* + * Power Switch is wired via BaseIO BRIDGE slot #6. 
+ * + * ACFail is wired via BaseIO BRIDGE slot #7. + */ +#define IP30_POWER_IRQ HEART_L2_INT_POWER_BTN + +#define IP30_HEART_L0_IRQ (MIPS_CPU_IRQ_BASE + 2) +#define IP30_HEART_L1_IRQ (MIPS_CPU_IRQ_BASE + 3) +#define IP30_HEART_L2_IRQ (MIPS_CPU_IRQ_BASE + 4) +#define IP30_HEART_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 5) +#define IP30_HEART_ERR_IRQ (MIPS_CPU_IRQ_BASE + 6) + +extern void __init ip30_install_ipi(void); extern struct plat_smp_ops ip30_smp_ops; extern void __init ip30_per_cpu_init(void); diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c index c2ffcb920250..e8374e4c705b 100644 --- a/arch/mips/sgi-ip30/ip30-irq.c +++ b/arch/mips/sgi-ip30/ip30-irq.c @@ -14,6 +14,8 @@ #include <asm/irq_cpu.h> #include <asm/sgi/heart.h> +#include "ip30-common.h" + struct heart_irq_data { u64 *irq_mask; int cpu; diff --git a/arch/mips/sgi-ip32/ip32-dma.c b/arch/mips/sgi-ip32/ip32-dma.c index fa7b17cb5385..20c6da9d76bc 100644 --- a/arch/mips/sgi-ip32/ip32-dma.c +++ b/arch/mips/sgi-ip32/ip32-dma.c @@ -18,7 +18,7 @@ #define RAM_OFFSET_MASK 0x3fffffffUL -dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { dma_addr_t dma_addr = paddr & RAM_OFFSET_MASK; @@ -27,7 +27,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) return dma_addr; } -phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) { phys_addr_t paddr = dma_addr & RAM_OFFSET_MASK; diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c index 62b956cc2d1d..0f53fed39da6 100644 --- a/arch/mips/sgi-ip32/ip32-memory.c +++ b/arch/mips/sgi-ip32/ip32-memory.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/memblock.h> #include <linux/mm.h> #include <asm/ip32/crime.h> @@ -36,7 +37,7 @@ void __init prom_meminit(void) printk("CRIME MC: bank %u base 0x%016Lx size %LuMiB\n", bank, base, size >> 20); - add_memory_region(base, size, BOOT_MEM_RAM); + memblock_add(base, size); } } diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c index 3abd1465ec02..8019dae1721a 100644 --- a/arch/mips/sgi-ip32/ip32-setup.c +++ b/arch/mips/sgi-ip32/ip32-setup.c @@ -12,12 +12,10 @@ #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/mc146818rtc.h> #include <linux/param.h> #include <linux/sched.h> #include <asm/bootinfo.h> -#include <asm/mc146818-time.h> #include <asm/mipsregs.h> #include <asm/mmu_context.h> #include <asm/sgialib.h> diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index cbf5939ed53a..89f7fca45152 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -114,16 +114,14 @@ static __init void prom_meminit(void) if (initrd_start) { if ((initrd_pstart > addr) && (initrd_pstart < (addr + size))) { - add_memory_region(addr, - initrd_pstart - addr, - BOOT_MEM_RAM); + memblock_add(addr, + initrd_pstart - addr); rd_flag = 1; } if ((initrd_pend > addr) && (initrd_pend < (addr + size))) { - add_memory_region(initrd_pend, - (addr + size) - initrd_pend, - BOOT_MEM_RAM); + memblock_add(initrd_pend, + (addr + size) - initrd_pend); rd_flag = 1; } } @@ -142,7 +140,7 @@ static __init void prom_meminit(void) */ if (size > 512) size -= 512; - add_memory_region(addr, size, BOOT_MEM_RAM); + memblock_add(addr, size); } board_mem_region_addrs[board_mem_region_count] = addr; 
board_mem_region_sizes[board_mem_region_count] = size; @@ -158,8 +156,8 @@ static __init void prom_meminit(void) } #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { - add_memory_region(initrd_pstart, initrd_pend - initrd_pstart, - BOOT_MEM_RESERVED); + memblock_add(initrd_pstart, initrd_pend - initrd_pstart); + memblock_reserve(initrd_pstart, initrd_pend - initrd_pstart); } #endif } diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index 360c388f4c82..bf8a3cdababf 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -22,7 +22,6 @@ #include <linux/mtd/physmap.h> #include <linux/platform_device.h> #include <linux/platform_data/txx9/ndfmc.h> -#include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/traps.h> #include <asm/txx9irq.h> @@ -94,22 +93,6 @@ static struct resource tx4939_sdram_resource[4]; static struct resource tx4939_sram_resource; #define TX4939_SRAM_SIZE 0x800 -void __init tx4939_add_memory_regions(void) -{ - int i; - unsigned long start, size; - u64 win; - - for (i = 0; i < 4; i++) { - if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i))) - continue; - win = ____raw_readq(&tx4939_ddrcptr->win[i]); - start = (unsigned long)(win >> 48); - size = (((unsigned long)(win >> 32) & 0xffff) + 1) - start; - add_memory_region(start << 20, size << 20, BOOT_MEM_RAM); - } -} - void __init tx4939_setup(void) { int i; diff --git a/arch/mips/txx9/jmr3927/prom.c b/arch/mips/txx9/jmr3927/prom.c index 68a96473c134..53c68de54d30 100644 --- a/arch/mips/txx9/jmr3927/prom.c +++ b/arch/mips/txx9/jmr3927/prom.c @@ -37,7 +37,7 @@ */ #include <linux/init.h> #include <linux/kernel.h> -#include <asm/bootinfo.h> +#include <linux/memblock.h> #include <asm/txx9/generic.h> #include <asm/txx9/jmr3927.h> @@ -47,6 +47,6 @@ void __init jmr3927_prom_init(void) if ((tx3927_ccfgptr->ccfg & TX3927_CCFG_TLBOFF) == 0) pr_err("TX3927 TLB off\n"); - add_memory_region(0, JMR3927_SDRAM_SIZE, BOOT_MEM_RAM); + memblock_add(0, JMR3927_SDRAM_SIZE); txx9_sio_putchar_init(TX3927_SIO_REG(1)); } diff --git a/arch/mips/txx9/rbtx4927/prom.c b/arch/mips/txx9/rbtx4927/prom.c index fe6d0b54763f..9b4acff826eb 100644 --- a/arch/mips/txx9/rbtx4927/prom.c +++ b/arch/mips/txx9/rbtx4927/prom.c @@ -29,13 +29,14 @@ * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ + #include <linux/init.h> -#include <asm/bootinfo.h> +#include <linux/memblock.h> #include <asm/txx9/generic.h> #include <asm/txx9/rbtx4927.h> void __init rbtx4927_prom_init(void) { - add_memory_region(0, tx4927_get_mem_size(), BOOT_MEM_RAM); + memblock_add(0, tx4927_get_mem_size()); txx9_sio_putchar_init(TX4927_SIO_REG(0) & 0xfffffffffULL); } diff --git a/arch/mips/txx9/rbtx4938/prom.c b/arch/mips/txx9/rbtx4938/prom.c index 2b36a2ee744c..0de84716a428 100644 --- a/arch/mips/txx9/rbtx4938/prom.c +++ b/arch/mips/txx9/rbtx4938/prom.c @@ -12,12 +12,11 @@ #include <linux/init.h> #include <linux/memblock.h> -#include <asm/bootinfo.h> #include <asm/txx9/generic.h> #include <asm/txx9/rbtx4938.h> void __init rbtx4938_prom_init(void) { - add_memory_region(0, tx4938_get_mem_size(), BOOT_MEM_RAM); + memblock_add(0, tx4938_get_mem_size()); txx9_sio_putchar_init(TX4938_SIO_REG(0) & 0xfffffffffULL); } diff --git a/arch/mips/txx9/rbtx4939/prom.c b/arch/mips/txx9/rbtx4939/prom.c index bd277ecb4ad6..ba25ba1bd2ec 100644 --- a/arch/mips/txx9/rbtx4939/prom.c +++ b/arch/mips/txx9/rbtx4939/prom.c @@ -7,11 +7,23 @@ */ #include <linux/init.h> +#include <linux/memblock.h> #include <asm/txx9/generic.h> #include <asm/txx9/rbtx4939.h> void __init rbtx4939_prom_init(void) { - tx4939_add_memory_regions(); + unsigned long start, size; + u64 win; + int i; + + for (i = 0; i < 4; i++) { + if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i))) + continue; + win = ____raw_readq(&tx4939_ddrcptr->win[i]); + start = (unsigned long)(win >> 48); + size = (((unsigned long)(win >> 32) & 0xffff) + 1) - start; + memblock_add(start << 20, size << 20); + } txx9_sio_putchar_init(TX4939_SIO_REG(0) & 0xfffffffffULL); } diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c index 69d762182d49..2ac8e6c82a61 100644 --- a/arch/nds32/kernel/dma.c +++ b/arch/nds32/kernel/dma.c @@ -3,7 +3,7 @@ #include <linux/types.h> #include <linux/mm.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/cache.h> #include <linux/highmem.h> #include <asm/cacheflush.h> diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index a066efbe53c0..c356e484dcab 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c @@ -249,12 +249,8 @@ static void __init setup_memory(void) memory_end = memory_start = 0; /* Find main memory where is the kernel */ - for_each_memblock(memory, region) { - memory_start = region->base; - memory_end = region->base + region->size; - pr_info("%s: Memory: 0x%x-0x%x\n", __func__, - memory_start, memory_end); - } + memory_start = memblock_start_of_DRAM(); + memory_end = memblock_end_of_DRAM(); if (!memory_end) { panic("No memory!"); diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index 7a6c1cefe3fe..6a91b965fb1e 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S @@ -64,6 +64,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h index b4316c361729..69004e07a1ba 100644 --- a/arch/nios2/include/asm/checksum.h +++ b/arch/nios2/include/asm/checksum.h @@ -12,10 +12,6 @@ /* Take these from lib/checksum.c */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); -__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, - __wsum sum); -#define csum_partial_copy_nocheck csum_partial_copy_nocheck - extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); extern __sum16 ip_compute_csum(const void 
*buff, int len); diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 88a4ec03edab..4ffe857e6ada 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -266,5 +266,5 @@ asmlinkage int nios2_clone(unsigned long clone_flags, unsigned long newsp, .tls = tls, }; - return _do_fork(&args); + return kernel_clone(&args); } diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S index c55a7cfa1075..126e114744cb 100644 --- a/arch/nios2/kernel/vmlinux.lds.S +++ b/arch/nios2/kernel/vmlinux.lds.S @@ -58,6 +58,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 345727638d52..1b16d97e7da7 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -13,7 +13,7 @@ * DMA mapping callbacks... */ -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/pagewalk.h> #include <asm/cpuinfo.h> diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index 13c87f1f872b..2416a9f91533 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -48,17 +48,12 @@ static void __init setup_memory(void) unsigned long ram_start_pfn; unsigned long ram_end_pfn; phys_addr_t memory_start, memory_end; - struct memblock_region *region; memory_end = memory_start = 0; /* Find main memory where is the kernel, we assume its the only one */ - for_each_memblock(memory, region) { - memory_start = region->base; - memory_end = region->base + region->size; - printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__, - memory_start, memory_end); - } + memory_start = memblock_start_of_DRAM(); + memory_end = memblock_end_of_DRAM(); if (!memory_end) { panic("No memory!"); diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 22fbc5fb24b3..d5c7bb0fae57 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -103,6 +103,7 @@ SECTIONS /* Throw in the debugging sections */ STABS_DEBUG DWARF_DEBUG + ELF_DETAILS /* Sections to be discarded -- must be last */ DISCARDS diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 3d7c79c7745d..8348feaaf46e 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -64,6 +64,7 @@ extern const char _s_kernel_ro[], _e_kernel_ro[]; */ static void __init map_ram(void) { + phys_addr_t start, end; unsigned long v, p, e; pgprot_t prot; pgd_t *pge; @@ -71,6 +72,7 @@ static void __init map_ram(void) pud_t *pue; pmd_t *pme; pte_t *pte; + u64 i; /* These mark extents of read-only kernel pages... 
* ...from vmlinux.lds.S */ @@ -78,9 +80,9 @@ static void __init map_ram(void) v = PAGE_OFFSET; - for_each_memblock(memory, region) { - p = (u32) region->base & PAGE_MASK; - e = p + (u32) region->size; + for_each_mem_range(i, &start, &end) { + p = (u32) start & PAGE_MASK; + e = (u32) end; v = (u32) __va(p); pge = pgd_offset_k(v); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 3b0f53dd70bc..a1167ada29b6 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -195,7 +195,6 @@ config PA11 depends on PA7000 || PA7100LC || PA7200 || PA7300LC select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_NONCOHERENT_CACHE_SYNC config PREFETCH def_bool y @@ -376,21 +375,6 @@ config KEXEC_FILE endmenu +source "drivers/firmware/Kconfig" source "drivers/parisc/Kconfig" - -config SECCOMP - def_bool y - prompt "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S index 2ac3a643f2eb..ab7b43990857 100644 --- a/arch/parisc/boot/compressed/vmlinux.lds.S +++ b/arch/parisc/boot/compressed/vmlinux.lds.S @@ -84,6 +84,7 @@ SECTIONS } STABS_DEBUG + ELF_DETAILS .note 0 : { *(.note) } /* Sections to be discarded */ diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 61bac8ff8f22..3cbcfad5f724 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -52,10 +52,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=6144 -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_NS87415=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -65,6 +61,8 @@ CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_ZALON=y CONFIG_SCSI_DH=y CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_NS87415=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m CONFIG_MD_LINEAR=m diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index 59561e04e659..7e2d7026285e 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -58,11 +58,6 @@ CONFIG_PCI_IOV=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_LOOP=y -CONFIG_IDE=y -CONFIG_IDE_GD=m -CONFIG_IDE_GD_ATAPI=y -CONFIG_BLK_DEV_IDECD=m -CONFIG_BLK_DEV_NS87415=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y @@ -76,6 +71,7 @@ CONFIG_SCSI_ZALON=y CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_DH=y CONFIG_ATA=y +CONFIG_PATA_NS87415=y CONFIG_PATA_SIL680=y CONFIG_ATA_GENERIC=y CONFIG_MD=y diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index 640d46edf32e..c705decf2bed 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -2,11 +2,15 @@ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H +#include <asm/alternative.h> + #ifndef __ASSEMBLY__ /* The 
synchronize caches instruction executes as a nop on systems in which all memory references are performed in order. */ -#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory") +#define synchronize_caches() asm volatile("sync" \ + ALTERNATIVE(ALT_COND_NO_SMP, INSN_NOP) \ + : : : "memory") #if defined(CONFIG_SMP) #define mb() do { synchronize_caches(); } while (0) diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h index fe8c63b2d2c3..3c43baca7b39 100644 --- a/arch/parisc/include/asm/checksum.h +++ b/arch/parisc/include/asm/checksum.h @@ -19,14 +19,6 @@ extern __wsum csum_partial(const void *, int, __wsum); /* - * The same as csum_partial, but copies from src while it checksums. - * - * Here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ -extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum); - -/* * Optimized for IP headers, which always checksum on 4 octet boundaries. * * Written by Randolph Chung <tausq@debian.org>, and then mucked with by @@ -181,25 +173,5 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold(sum); } -/* - * Copy and checksum to user - */ -#define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, - void __user *dst, - int len, __wsum sum, - int *err_ptr) -{ - /* code stolen from include/asm-mips64 */ - sum = csum_partial(src, len, sum); - - if (copy_to_user(dst, src, len)) { - *err_ptr = -EFAULT; - return (__force __wsum)-1; - } - - return sum; -} - #endif diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index 068958575871..cf5ee9b0b393 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -14,22 +14,22 @@ extern void __xchg_called_with_bad_pointer(void); /* __xchg32/64 defined in arch/parisc/lib/bitops.c */ -extern unsigned long __xchg8(char, char *); -extern unsigned long __xchg32(int, int *); +extern unsigned long __xchg8(char, volatile char *); +extern unsigned long __xchg32(int, volatile int *); #ifdef CONFIG_64BIT -extern unsigned long __xchg64(unsigned long, unsigned long *); +extern unsigned long __xchg64(unsigned long, volatile unsigned long *); #endif /* optimizer better get rid of switch since size is a constant */ static inline unsigned long -__xchg(unsigned long x, __volatile__ void *ptr, int size) +__xchg(unsigned long x, volatile void *ptr, int size) { switch (size) { #ifdef CONFIG_64BIT - case 8: return __xchg64(x, (unsigned long *) ptr); + case 8: return __xchg64(x, (volatile unsigned long *) ptr); #endif - case 4: return __xchg32((int) x, (int *) ptr); - case 1: return __xchg8((char) x, (char *) ptr); + case 4: return __xchg32((int) x, (volatile int *) ptr); + case 1: return __xchg8((char) x, (volatile char *) ptr); } __xchg_called_with_bad_pointer(); return x; diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 2f4f66a3bac0..8f33085ff1bd 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -22,8 +22,6 @@ typedef u32 compat_dev_t; typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; -typedef s64 compat_s64; -typedef u64 compat_u64; struct compat_stat { compat_dev_t st_dev; /* dev_t is 32 bits on parisc */ diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index c459f656c8c3..fceb9cf02fb3 100644 --- a/arch/parisc/include/asm/futex.h +++ 
b/arch/parisc/include/asm/futex.h @@ -16,7 +16,7 @@ static inline void _futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags) { extern u32 lws_lock_start[]; - long index = ((long)uaddr & 0xf0) >> 2; + long index = ((long)uaddr & 0x3f8) >> 1; arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; local_irq_save(*flags); arch_spin_lock(s); @@ -26,7 +26,7 @@ static inline void _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags) { extern u32 lws_lock_start[]; - long index = ((long)uaddr & 0xf0) >> 2; + long index = ((long)uaddr & 0x3f8) >> 1; arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; arch_spin_unlock(s); local_irq_restore(*flags); diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index 79feff1b0721..33500c9f6e5e 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h @@ -4,8 +4,8 @@ #include <uapi/asm/socket.h> -/* O_NONBLOCK clashes with the bits used for socket types. Therefore we - * have to define SOCK_NONBLOCK to a different value here. +/* O_NONBLOCK clashed with the bits used for socket types. Therefore we + * had to define SOCK_NONBLOCK to a different value here. */ #define SOCK_NONBLOCK 0x40000000 diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 51b6c47f802f..fa5ee8a45dbd 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -10,13 +10,21 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x) { volatile unsigned int *a = __ldcw_align(x); - return *a == 0; + return READ_ONCE(*a) == 0; } -#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) +static inline void arch_spin_lock(arch_spinlock_t *x) +{ + volatile unsigned int *a; + + a = __ldcw_align(x); + while (__ldcw(a) == 0) + while (*a == 0) + continue; +} static inline void arch_spin_lock_flags(arch_spinlock_t *x, - unsigned long flags) + unsigned long flags) { volatile unsigned int *a; @@ -25,10 +33,8 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x, while (*a == 0) if (flags & PSW_SM_I) { local_irq_enable(); - cpu_relax(); local_irq_disable(); - } else - cpu_relax(); + } } #define arch_spin_lock_flags arch_spin_lock_flags @@ -44,12 +50,9 @@ static inline void arch_spin_unlock(arch_spinlock_t *x) static inline int arch_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; - int ret; a = __ldcw_align(x); - ret = __ldcw(a) != 0; - - return ret; + return __ldcw(a) != 0; } /* diff --git a/arch/parisc/include/uapi/asm/fcntl.h b/arch/parisc/include/uapi/asm/fcntl.h index 03ce20e5ad7d..03dee816cb13 100644 --- a/arch/parisc/include/uapi/asm/fcntl.h +++ b/arch/parisc/include/uapi/asm/fcntl.h @@ -3,22 +3,19 @@ #define _PARISC_FCNTL_H #define O_APPEND 000000010 -#define O_BLKSEEK 000000100 /* HPUX only */ #define O_CREAT 000000400 /* not fcntl */ #define O_EXCL 000002000 /* not fcntl */ #define O_LARGEFILE 000004000 #define __O_SYNC 000100000 #define O_SYNC (__O_SYNC|O_DSYNC) -#define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */ +#define O_NONBLOCK 000200000 #define O_NOCTTY 000400000 /* not fcntl */ -#define O_DSYNC 001000000 /* HPUX only */ -#define O_RSYNC 002000000 /* HPUX only */ +#define O_DSYNC 001000000 #define O_NOATIME 004000000 #define O_CLOEXEC 010000000 /* set close_on_exec */ #define O_DIRECTORY 000010000 /* must be a directory */ #define O_NOFOLLOW 000000200 /* don't follow links */ -#define O_INVISIBLE 004000000 /* invisible I/O, for DMAPI/XDSM */ #define O_PATH 020000000 
#define __O_TMPFILE 040000000 diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index 6fd8871e4081..ab78cba446ed 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -25,6 +25,7 @@ #define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */ #define MAP_HUGETLB 0x80000 /* create a huge page mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_UNINITIALIZED 0 /* uninitialized anonymous mmap */ #define MS_SYNC 1 /* synchronous memory sync */ #define MS_ASYNC 2 /* sync memory asynchronously */ diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index d38563a394f2..e605197b462c 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -35,11 +35,11 @@ #define SIGURG 29 #define SIGXFSZ 30 #define SIGUNUSED 31 -#define SIGSYS 31 /* Linux doesn't use this */ +#define SIGSYS 31 /* These should not be considered constants from userland. */ #define SIGRTMIN 32 -#define SIGRTMAX _NSIG /* it's 44 under HP/UX */ +#define SIGRTMAX _NSIG /* * SA_FLAGS values: @@ -61,7 +61,6 @@ #define SA_NODEFER 0x00000020 #define SA_RESTART 0x00000040 #define SA_NOCLDWAIT 0x00000080 -#define _SA_SIGGFAULT 0x00000100 /* HPUX */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index 6f68784fea25..056d588befdd 100644 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -43,7 +43,7 @@ fi # Default install -if [ "$(basename $2)" = "zImage" ]; then +if [ "$(basename $2)" = "vmlinuz" ]; then # Compressed install echo "Installing compressed kernel" base=vmlinuz diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index b5e1d9f1b440..86a1a63563fd 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -383,12 +383,12 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm); static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD; #define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */ -static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD; +static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL; void __init parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; - unsigned long size, start; + unsigned long size; unsigned long threshold; alltime = mfctl(16); @@ -422,14 +422,9 @@ void __init parisc_setup_cache_timing(void) goto set_tlb_threshold; } - size = 0; - start = (unsigned long) _text; + size = (unsigned long)_end - (unsigned long)_text; rangetime = mfctl(16); - while (start < (unsigned long) _end) { - flush_tlb_kernel_range(start, start + PAGE_SIZE); - start += PAGE_SIZE; - size += PAGE_SIZE; - } + flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end); rangetime = mfctl(16) - rangetime; alltime = mfctl(16); @@ -444,8 +439,11 @@ void __init parisc_setup_cache_timing(void) threshold/1024); set_tlb_threshold: - if (threshold > parisc_tlb_flush_threshold) + if (threshold > FLUSH_TLB_THRESHOLD) parisc_tlb_flush_threshold = threshold; + else + parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; + printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", parisc_tlb_flush_threshold/1024); } diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index a5f3e50fe976..80fa0650736b 100644 --- a/arch/parisc/kernel/drivers.c +++ 
b/arch/parisc/kernel/drivers.c @@ -30,6 +30,7 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/export.h> +#include <linux/dma-map-ops.h> #include <asm/hardware.h> #include <asm/io.h> #include <asm/pdc.h> diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 519f9056fd00..f6f28e41bb5e 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -899,20 +899,20 @@ intr_check_sig: * Only do signals if we are returning to user space */ LDREG PT_IASQ0(%r16), %r20 - cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */ + cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ LDREG PT_IASQ1(%r16), %r20 - cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */ - - /* NOTE: We need to enable interrupts if we have to deliver - * signals. We used to do this earlier but it caused kernel - * stack overflows. */ - ssm PSW_SM_I, %r0 + cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ copy %r0, %r25 /* long in_syscall = 0 */ #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif + /* NOTE: We need to enable interrupts if we have to deliver + * signals. We used to do this earlier but it caused kernel + * stack overflows. */ + ssm PSW_SM_I, %r0 + BL do_notify_resume,%r2 copy %r16, %r26 /* struct pt_regs *regs */ diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index 9298f2285510..7ab2f2a54400 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/platform_device.h> #include <asm/hardware.h> #include <asm/io.h> #include <asm/mmzone.h> @@ -641,4 +642,33 @@ void __init do_device_inventory(void) if (pa_serialize_tlb_flushes) pr_info("Merced bus found: Enable PxTLB serialization.\n"); #endif + +#if defined(CONFIG_FW_CFG_SYSFS) + if (running_on_qemu) { + struct resource res[3] = {0,}; + unsigned int base; + + base = ((unsigned long long) PAGE0->pad0[2] << 32) + | PAGE0->pad0[3]; /* SeaBIOS stored it here */ + + res[0].name = "fw_cfg"; + res[0].start = base; + res[0].end = base + 8 - 1; + res[0].flags = IORESOURCE_MEM; + + res[1].name = "ctrl"; + res[1].start = 0; + res[1].flags = IORESOURCE_REG; + + res[2].name = "data"; + res[2].start = 4; + res[2].flags = IORESOURCE_REG; + + if (base) { + pr_info("Found qemu fw_cfg interface at %#08x\n", base); + platform_device_register_simple("fw_cfg", + PLATFORM_DEVID_NONE, res, 3); + } + } +#endif } diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c index 77ec51818916..6d21a515eea5 100644 --- a/arch/parisc/kernel/kprobes.c +++ b/arch/parisc/kernel/kprobes.c @@ -191,80 +191,11 @@ static struct kprobe trampoline_p = { static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)trampoline_p.addr; - kprobe_opcode_t *correct_ret_addr = NULL; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * a return probe installed on them, and/or more than one return - * probe was registered for a target function. 
- * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } + unsigned long orig_ret_address; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + orig_ret_address = __kretprobe_trampoline_handler(regs, trampoline_p.addr, NULL); instruction_pointer_set(regs, orig_ret_address); + return 1; } @@ -272,6 +203,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->gr[2]; + ri->fp = NULL; /* Replace the return addr with trampoline addr. 
*/ regs->gr[2] = (unsigned long)trampoline_p.addr; diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 38c68e131bbe..36610a5c029f 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -26,7 +26,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/dma-direct.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <asm/cacheflush.h> #include <asm/dma.h> /* for DMA_CHUNK_SIZE */ @@ -454,9 +454,3 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, { flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size); } - -void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - flush_kernel_dcache_range((unsigned long)vaddr, size); -} diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 6271139d2213..10227f667c8a 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -173,9 +173,12 @@ ipi_interrupt(int irq, void *dev_id) this_cpu, which); return IRQ_NONE; } /* Switch */ - /* let in any pending interrupts */ - local_irq_enable(); - local_irq_disable(); + + /* before doing more, let in any pending interrupts */ + if (ops) { + local_irq_enable(); + local_irq_disable(); + } } /* while (ops) */ } return IRQ_HANDLED; diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 3ad61a177f5b..322503780db6 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -571,8 +571,8 @@ lws_compare_and_swap: ldil L%lws_lock_start, %r20 ldo R%lws_lock_start(%r20), %r28 - /* Extract four bits from r26 and hash lock (Bits 4-7) */ - extru %r26, 27, 4, %r20 + /* Extract eight bits from r26 and hash lock (Bits 3-11) */ + extru %r26, 28, 8, %r20 /* Find lock to use, the hash is either one of 0 to 15, multiplied by 16 (keep it 16-byte aligned) @@ -761,8 +761,8 @@ cas2_lock_start: ldil L%lws_lock_start, %r20 ldo R%lws_lock_start(%r20), %r28 - /* Extract four bits from r26 and hash lock (Bits 4-7) */ - extru %r26, 27, 4, %r20 + /* Extract eight bits from r26 and hash lock (Bits 3-11) */ + extru %r26, 28, 8, %r20 /* Find lock to use, the hash is either one of 0 to 15, multiplied by 16 (keep it 16-byte aligned) @@ -950,7 +950,7 @@ END(sys_call_table64) .align L1_CACHE_BYTES ENTRY(lws_lock_start) /* lws locks */ - .rept 16 + .rept 256 /* Keep locks aligned at 16-bytes */ .word 1 .word 0 diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index def64d221cd4..38c63e5404bc 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -29,7 +29,7 @@ 18 common stat sys_newstat compat_sys_newstat 19 common lseek sys_lseek compat_sys_lseek 20 common getpid sys_getpid -21 common mount sys_mount compat_sys_mount +21 common mount sys_mount 22 common bind sys_bind 23 common setuid sys_setuid 24 common getuid sys_getuid @@ -159,8 +159,8 @@ 142 common _newselect sys_select compat_sys_select 143 common flock sys_flock 144 common msync sys_msync -145 common readv sys_readv compat_sys_readv -146 common writev sys_writev compat_sys_writev +145 common readv sys_readv +146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync 149 common _sysctl sys_ni_syscall @@ -330,7 +330,7 @@ 292 32 sync_file_range parisc_sync_file_range 292 64 sync_file_range sys_sync_file_range 293 common tee sys_tee -294 common vmsplice sys_vmsplice compat_sys_vmsplice +294 common vmsplice sys_vmsplice 295 common 
move_pages sys_move_pages compat_sys_move_pages 296 common getcpu sys_getcpu 297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait @@ -372,8 +372,8 @@ 327 common syncfs sys_syncfs 328 common setns sys_setns 329 common sendmmsg sys_sendmmsg compat_sys_sendmmsg -330 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -331 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +330 common process_vm_readv sys_process_vm_readv +331 common process_vm_writev sys_process_vm_writev 332 common kcmp sys_kcmp 333 common finit_module sys_finit_module 334 common sched_setattr sys_sched_setattr @@ -437,3 +437,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 53e29d88f99c..2769eb991f58 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -164,6 +164,7 @@ SECTIONS _end = . ; STABS_DEBUG + ELF_DETAILS .note 0 : { *(.note) } /* Sections to be discarded */ diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 2e4d1f05a926..9ac683bf6ae7 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -18,7 +18,7 @@ arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { #endif #ifdef CONFIG_64BIT -unsigned long __xchg64(unsigned long x, unsigned long *ptr) +unsigned long __xchg64(unsigned long x, volatile unsigned long *ptr) { unsigned long temp, flags; @@ -30,7 +30,7 @@ unsigned long __xchg64(unsigned long x, unsigned long *ptr) } #endif -unsigned long __xchg32(int x, int *ptr) +unsigned long __xchg32(int x, volatile int *ptr) { unsigned long flags; long temp; @@ -43,7 +43,7 @@ unsigned long __xchg32(int x, int *ptr) } -unsigned long __xchg8(char x, char *ptr) +unsigned long __xchg8(char x, volatile char *ptr) { unsigned long flags; long temp; diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c index c6f161583549..4818f3db84a5 100644 --- a/arch/parisc/lib/checksum.c +++ b/arch/parisc/lib/checksum.c @@ -106,20 +106,3 @@ __wsum csum_partial(const void *buff, int len, __wsum sum) } EXPORT_SYMBOL(csum_partial); - -/* - * copy while checksumming, otherwise like csum_partial - */ -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) -{ - /* - * It's 2:30 am and I don't feel like doing it real ... 
- * This is lots slower than the real thing (tm) - */ - sum = csum_partial(src, len, sum); - memcpy(dst, src, len); - - return sum; -} -EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index ce400417d54e..f03adb1999e7 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr) return *((u64 *)addr); } +u64 ioread64_hi_lo(const void __iomem *addr) +{ + u32 low, high; + + high = ioread32(addr + sizeof(u32)); + low = ioread32(addr); + + return low + ((u64)high << 32); +} + void iowrite8(u8 datum, void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) { @@ -409,6 +419,12 @@ void iowrite64be(u64 datum, void __iomem *addr) } } +void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + iowrite32(val >> 32, addr + sizeof(u32)); + iowrite32(val, addr); +} + /* Repeating interfaces */ void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) @@ -511,6 +527,7 @@ EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); EXPORT_SYMBOL(ioread64); EXPORT_SYMBOL(ioread64be); +EXPORT_SYMBOL(ioread64_hi_lo); EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); EXPORT_SYMBOL(iowrite16be); @@ -518,6 +535,7 @@ EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); EXPORT_SYMBOL(iowrite64); EXPORT_SYMBOL(iowrite64be); +EXPORT_SYMBOL(iowrite64_hi_lo); EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(ioread16_rep); EXPORT_SYMBOL(ioread32_rep); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 787e829b6f25..e9f13fe08492 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -59,7 +59,10 @@ config HAVE_SETUP_PER_CPU_AREA def_bool PPC64 config NEED_PER_CPU_EMBED_FIRST_CHUNK - def_bool PPC64 + def_bool y if PPC64 + +config NEED_PER_CPU_PAGE_FIRST_CHUNK + def_bool y if PPC64 config NR_IRQS int "Number of virtual interrupt numbers" @@ -135,7 +138,7 @@ config PPC select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE - select ARCH_HAS_UACCESS_MCSAFE if PPC64 + select ARCH_HAS_COPY_MC if PPC64 select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK @@ -148,6 +151,7 @@ config PPC select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_IRQS_OFF_ACTIVATE_MM select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF select BUILDTIME_TABLE_SORT @@ -245,6 +249,7 @@ config PPC select OLD_SIGACTION if PPC32 select OLD_SIGSUSPEND select PCI_DOMAINS if PCI + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select PCI_SYSCALL if PCI select PPC_DAWR if PPC64 select RTC_LIB @@ -945,23 +950,6 @@ config ARCH_WANTS_FREEZER_CONTROL source "kernel/power/Kconfig" -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on PROC_FS - default y - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via /proc/<pid>/seccomp, it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. 
- config PPC_MEM_KEYS prompt "PowerPC Memory Protection Keys" def_bool y @@ -980,7 +968,7 @@ config PPC_MEM_KEYS config PPC_SECURE_BOOT prompt "Enable secure boot support" bool - depends on PPC_POWERNV + depends on PPC_POWERNV || PPC_PSERIES depends on IMA_ARCH_POLICY imply IMA_SECURE_AND_OR_TRUSTED_BOOT help @@ -1000,6 +988,19 @@ config PPC_SECVAR_SYSFS read/write operations on these variables. Say Y if you have secure boot enabled and want to expose variables to userspace. +config PPC_RTAS_FILTER + bool "Enable filtering of RTAS syscalls" + default y + depends on PPC_RTAS + help + The RTAS syscall API has security issues that could be used to + compromise system integrity. This option enforces restrictions on the + RTAS calls and arguments passed by userspace programs to mitigate + these issues. + + Say Y unless you know what you are doing and the filter is causing + problems for you. + endmenu config ISA_DMA_API diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 3e8da9cf2eb9..c4f9dbd12577 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -264,7 +264,8 @@ KBUILD_CFLAGS += $(cpu-as-y) KBUILD_AFLAGS += $(aflags-y) KBUILD_CFLAGS += $(cflags-y) -head-y := arch/powerpc/kernel/head_$(BITS).o +head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o +head-$(CONFIG_PPC_BOOK3S_32) := arch/powerpc/kernel/head_book3s_32.o head-$(CONFIG_PPC_8xx) := arch/powerpc/kernel/head_8xx.o head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink index 2268396ff4bb..a6c77f4d32b2 100644 --- a/arch/powerpc/Makefile.postlink +++ b/arch/powerpc/Makefile.postlink @@ -18,7 +18,7 @@ quiet_cmd_relocs_check = CHKREL $@ ifdef CONFIG_PPC_BOOK3S_64 cmd_relocs_check = \ $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" ; \ - $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@" + $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$(NM)" "$@" else cmd_relocs_check = \ $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index b88fd27a45f0..f8ce6d2dde7b 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -7,7 +7,7 @@ # Based on coffboot by Paul Mackerras # Simplified for ppc64 by Todd Inglett # -# NOTE: this code is built for 32 bit in ELF32 format even though +# NOTE: this code may be built for 32 bit in ELF32 format even though # it packages a 64 bit kernel. We do this to simplify the # bootloader and increase compatibility with OpenFirmware. 
# diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts index 73a645324bc1..dbcd31cc35dc 100644 --- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts @@ -161,7 +161,6 @@ rtc@68 { compatible = "dallas,ds1339"; reg = <0x68>; - interrupts = <0x1 0x1 0 0>; }; }; diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts index 65ff34c49025..af0c8a6f5613 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts @@ -64,6 +64,40 @@ phy_sgmii_2: ethernet-phy@3 { reg = <0x03>; }; + + /* VSC8514 QSGMII PHY */ + phy_qsgmii_0: ethernet-phy@4 { + reg = <0x4>; + }; + + phy_qsgmii_1: ethernet-phy@5 { + reg = <0x5>; + }; + + phy_qsgmii_2: ethernet-phy@6 { + reg = <0x6>; + }; + + phy_qsgmii_3: ethernet-phy@7 { + reg = <0x7>; + }; + + /* VSC8514 QSGMII PHY */ + phy_qsgmii_4: ethernet-phy@8 { + reg = <0x8>; + }; + + phy_qsgmii_5: ethernet-phy@9 { + reg = <0x9>; + }; + + phy_qsgmii_6: ethernet-phy@a { + reg = <0xa>; + }; + + phy_qsgmii_7: ethernet-phy@b { + reg = <0xb>; + }; }; }; }; @@ -76,3 +110,76 @@ }; #include "t1040si-post.dtsi" + +&seville_switch { + status = "okay"; +}; + +&seville_port0 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_0>; + phy-mode = "qsgmii"; + label = "ETH5"; + status = "okay"; +}; + +&seville_port1 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_1>; + phy-mode = "qsgmii"; + label = "ETH4"; + status = "okay"; +}; + +&seville_port2 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_2>; + phy-mode = "qsgmii"; + label = "ETH7"; + status = "okay"; +}; + +&seville_port3 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_3>; + phy-mode = "qsgmii"; + label = "ETH6"; + status = "okay"; +}; + +&seville_port4 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_4>; + phy-mode = "qsgmii"; + label = "ETH9"; + status = "okay"; +}; + +&seville_port5 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_5>; + phy-mode = "qsgmii"; + label = "ETH8"; + status = "okay"; +}; + +&seville_port6 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_6>; + phy-mode = "qsgmii"; + label = "ETH11"; + status = "okay"; +}; + +&seville_port7 { + managed = "in-band-status"; + phy-handle = <&phy_qsgmii_7>; + phy-mode = "qsgmii"; + label = "ETH10"; + status = "okay"; +}; + +&seville_port8 { + ethernet = <&enet0>; + status = "okay"; +}; diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index 315d0557eefc..f58eb820eb5e 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -628,6 +628,84 @@ status = "disabled"; }; }; + + seville_switch: ethernet-switch@800000 { + compatible = "mscc,vsc9953-switch"; + reg = <0x800000 0x290000>; + interrupts = <26 2 0 0>; + interrupt-names = "xtr"; + little-endian; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + seville_port0: port@0 { + reg = <0>; + status = "disabled"; + }; + + seville_port1: port@1 { + reg = <1>; + status = "disabled"; + }; + + seville_port2: port@2 { + reg = <2>; + status = "disabled"; + }; + + seville_port3: port@3 { + reg = <3>; + status = "disabled"; + }; + + seville_port4: port@4 { + reg = <4>; + status = "disabled"; + }; + + seville_port5: port@5 { + reg = <5>; + status = "disabled"; + }; + + seville_port6: port@6 { + reg = <6>; + status = "disabled"; + }; + + 
seville_port7: port@7 { + reg = <7>; + status = "disabled"; + }; + + seville_port8: port@8 { + reg = <8>; + phy-mode = "internal"; + status = "disabled"; + + fixed-link { + speed = <2500>; + full-duplex; + }; + }; + + seville_port9: port@9 { + reg = <9>; + phy-mode = "internal"; + status = "disabled"; + + fixed-link { + speed = <2500>; + full-duplex; + }; + }; + }; + }; }; &qe { diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts index a56a705d41f7..145896f2eef6 100644 --- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts @@ -144,7 +144,6 @@ rtc@68 { compatible = "dallas,ds1374"; reg = <0x68>; - interrupts = <0x1 0x1 0 0>; }; }; diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S index f11f0589a669..d03cdb7606dc 100644 --- a/arch/powerpc/boot/util.S +++ b/arch/powerpc/boot/util.S @@ -18,7 +18,7 @@ .text -/* udelay (on non-601 processors) needs to know the period of the +/* udelay needs to know the period of the * timebase in nanoseconds. This used to be hardcoded to be 60ns * (period of 66MHz/4). Now a variable is used that is initialized to * 60 for backward compatibility, but it can be overridden as necessary @@ -37,19 +37,6 @@ timebase_period_ns: */ .globl udelay udelay: - mfspr r4,SPRN_PVR - srwi r4,r4,16 - cmpwi 0,r4,1 /* 601 ? */ - bne .Ludelay_not_601 -00: li r0,86 /* Instructions / microsecond? */ - mtctr r0 -10: addi r0,r0,0 /* NOP */ - bdnz 10b - subic. r3,r3,1 - bne 00b - blr - -.Ludelay_not_601: mulli r4,r3,1000 /* nanoseconds */ /* Change r4 to be the number of ticks using: * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig index 0683d8c292a8..cea72e85ed26 100644 --- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig +++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig @@ -29,9 +29,9 @@ CONFIG_SYN_COOKIES=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_IDE=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_VIA82CXXX=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E1000=y diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig index 98982a0e82d8..bbf040aa1f9a 100644 --- a/arch/powerpc/configs/85xx/tqm8540_defconfig +++ b/arch/powerpc/configs/85xx/tqm8540_defconfig @@ -30,9 +30,9 @@ CONFIG_MTD_CFI_AMDSTD=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_IDE=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_VIA82CXXX=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig index a6e21db1dafe..523ad8dcfd9d 100644 --- a/arch/powerpc/configs/85xx/tqm8541_defconfig +++ b/arch/powerpc/configs/85xx/tqm8541_defconfig @@ -30,9 +30,9 @@ CONFIG_MTD_CFI_AMDSTD=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_IDE=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_VIA82CXXX=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig index ca1de3979474..0032ce1e8c9c 100644 --- a/arch/powerpc/configs/85xx/tqm8555_defconfig +++ b/arch/powerpc/configs/85xx/tqm8555_defconfig @@ -30,9 +30,9 @@ 
CONFIG_MTD_CFI_AMDSTD=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_IDE=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_VIA82CXXX=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig index ca3b8c8ef30f..a80b971f7d6e 100644 --- a/arch/powerpc/configs/85xx/tqm8560_defconfig +++ b/arch/powerpc/configs/85xx/tqm8560_defconfig @@ -30,9 +30,9 @@ CONFIG_MTD_CFI_AMDSTD=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_IDE=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_VIA82CXXX=y +CONFIG_ATA=y +CONFIG_ATA_GENERIC=y +CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y CONFIG_GIANFAR=y CONFIG_E100=y diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c index dce86e75f1a8..c1c1ef9457fb 100644 --- a/arch/powerpc/crypto/crc-vpmsum_test.c +++ b/arch/powerpc/crypto/crc-vpmsum_test.c @@ -9,6 +9,7 @@ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/random.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/cpufeature.h> @@ -22,10 +23,11 @@ static unsigned long iterations = 10000; static int __init crc_test_init(void) { u16 crc16 = 0, verify16 = 0; - u32 crc32 = 0, verify32 = 0; __le32 verify32le = 0; unsigned char *data; + u32 verify32 = 0; unsigned long i; + __le32 crc32; int ret; struct crypto_shash *crct10dif_tfm; @@ -98,7 +100,7 @@ static int __init crc_test_init(void) crypto_shash_final(crc32c_shash, (u8 *)(&crc32)); verify32 = le32_to_cpu(verify32le); verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len)); - if (crc32 != (u32)verify32le) { + if (crc32 != verify32le) { pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n", crc32, verify32, len); break; diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index de14b1a34d56..d0b832cbbec8 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -67,6 +67,7 @@ void single_step_exception(struct pt_regs *regs); void program_check_exception(struct pt_regs *regs); void alignment_exception(struct pt_regs *regs); void StackOverflow(struct pt_regs *regs); +void stack_overflow_exception(struct pt_regs *regs); void kernel_fp_unavailable_exception(struct pt_regs *regs); void altivec_unavailable_exception(struct pt_regs *regs); void vsx_unavailable_exception(struct pt_regs *regs); @@ -144,7 +145,9 @@ void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); /* Patch sites */ -extern s32 patch__call_flush_branch_caches; +extern s32 patch__call_flush_branch_caches1; +extern s32 patch__call_flush_branch_caches2; +extern s32 patch__call_flush_branch_caches3; extern s32 patch__flush_count_cache_return; extern s32 patch__flush_link_stack_return; extern s32 patch__call_kvm_flush_link_stack; diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 082b98808701..b6ac4f86c87b 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -13,20 +13,24 @@ */ #define MAX_EA_BITS_PER_CONTEXT 46 -#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2) /* - * Our page table limit us to 64TB. Hence for the kernel mapping, - * each MAP area is limited to 16 TB. 
- * The four map areas are: linear mapping, vmap, IO and vmemmap + * Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB + * of vmemmap space. To better support sparse memory layout, we use 61TB + * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap. */ +#define REGION_SHIFT (40) #define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT) /* - * Define the address range of the kernel non-linear virtual area - * 16TB + * Limits the linear mapping range */ -#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000) +#define H_MAX_PHYSMEM_BITS 46 + +/* + * Define the address range of the kernel non-linear virtual area (61TB) + */ +#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000) #ifndef __ASSEMBLY__ #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index f20de1149ebe..338e62fbea0b 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -7,6 +7,19 @@ #define H_PUD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16GB = 16TB #define H_PGD_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 16TB = 4PB +/* + * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS + * if we increase SECTIONS_WIDTH we will not store node details in page->flags and + * page_to_nid does a page->section->node lookup + * Hence only increase for VMEMMAP. Further depending on SPARSEMEM_EXTREME reduce + * memory requirements with large number of sections. + * 51 bits is the max physical real address on POWER9 + */ +#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) +#define H_MAX_PHYSMEM_BITS 51 +#else +#define H_MAX_PHYSMEM_BITS 46 +#endif /* * Each context is 512TB size. SLB miss for first context/default context diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 93d18da5e7ec..683a9c7d1b03 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -577,8 +577,8 @@ extern void slb_set_size(u16 size); * For vmalloc and memmap, we use just one context with 512TB. With 64 byte * struct page size, we need ony 32 TB in memmap for 2PB (51 bits (MAX_PHYSMEM_BITS)). */ -#if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT) -#define MAX_KERNEL_CTX_CNT (1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT)) +#if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT) +#define MAX_KERNEL_CTX_CNT (1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT)) #else #define MAX_KERNEL_CTX_CNT 1 #endif diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index b392384a3b15..e0b52940e43c 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -27,21 +27,6 @@ struct mmu_psize_def { extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; #endif /* __ASSEMBLY__ */ -/* - * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS - * if we increase SECTIONS_WIDTH we will not store node details in page->flags and - * page_to_nid does a page->section->node lookup - * Hence only increase for VMEMMAP. Further depending on SPARSEMEM_EXTREME reduce - * memory requirements with large number of sections. 
- * 51 bits is the max physical real address on POWER9 - */ -#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \ - defined(CONFIG_PPC_64K_PAGES) -#define MAX_PHYSMEM_BITS 51 -#else -#define MAX_PHYSMEM_BITS 46 -#endif - /* 64-bit classic hash table MMU */ #include <asm/book3s/64/mmu-hash.h> @@ -85,7 +70,7 @@ extern unsigned int mmu_base_pid; /* * memory block size used with radix translation. */ -extern unsigned int __ro_after_init radix_mem_block_size; +extern unsigned long __ro_after_init radix_mem_block_size; #define PRTB_SIZE_SHIFT (mmu_pid_bits + 4) #define PRTB_ENTRIES (1ul << mmu_pid_bits) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 495fc0ccb453..cd3feeac6e87 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -294,6 +294,13 @@ extern unsigned long pci_io_base; #include <asm/book3s/64/hash.h> #include <asm/book3s/64/radix.h> +#if H_MAX_PHYSMEM_BITS > R_MAX_PHYSMEM_BITS +#define MAX_PHYSMEM_BITS H_MAX_PHYSMEM_BITS +#else +#define MAX_PHYSMEM_BITS R_MAX_PHYSMEM_BITS +#endif + + #ifdef CONFIG_PPC_64K_PAGES #include <asm/book3s/64/pgtable-64k.h> #else @@ -615,7 +622,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) VM_BUG_ON(pfn >> (64 - PAGE_SHIFT)); VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK); - return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot)); + return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE); } static inline unsigned long pte_pfn(pte_t pte) @@ -651,11 +658,6 @@ static inline pte_t pte_mkexec(pte_t pte) return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC)); } -static inline pte_t pte_mkpte(pte_t pte) -{ - return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE)); -} - static inline pte_t pte_mkwrite(pte_t pte) { /* @@ -819,6 +821,14 @@ static inline int pte_none(pte_t pte) static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, int percpu) { + + VM_WARN_ON(!(pte_raw(pte) & cpu_to_be64(_PAGE_PTE))); + /* + * Keep the _PAGE_PTE added till we are sure we handle _PAGE_PTE + * in all the callers. + */ + pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE)); + if (radix_enabled()) return radix__set_pte_at(mm, addr, ptep, pte, percpu); return hash__set_pte_at(mm, addr, ptep, pte, percpu); @@ -866,6 +876,13 @@ static inline bool pte_ci(pte_t pte) static inline void pmd_clear(pmd_t *pmdp) { + if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) { + /* + * Don't use this if we can possibly have a hash page table + * entry mapping this. + */ + WARN_ON((pmd_val(*pmdp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE)); + } *pmdp = __pmd(0); } @@ -914,6 +931,13 @@ static inline int pmd_bad(pmd_t pmd) static inline void pud_clear(pud_t *pudp) { + if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) { + /* + * Don't use this if we can possibly have a hash page table + * entry mapping this. + */ + WARN_ON((pud_val(*pudp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE)); + } *pudp = __pud(0); } diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 0cba794c4fb8..c7813dc628fc 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -91,6 +91,22 @@ * +------------------------------+ Kernel linear (0xc.....) 
*/ + +/* + * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS + * if we increase SECTIONS_WIDTH we will not store node details in page->flags and + * page_to_nid does a page->section->node lookup + * Hence only increase for VMEMMAP. Further depending on SPARSEMEM_EXTREME reduce + * memory requirements with large number of sections. + * 51 bits is the max physical real address on POWER9 + */ + +#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) +#define R_MAX_PHYSMEM_BITS 51 +#else +#define R_MAX_PHYSMEM_BITS 46 +#endif + #define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000) /* * 49 = MAX_EA_BITS_PER_CONTEXT (hash specific). To make sure we pick diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 54764c6e922d..138e46d8c04e 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -98,6 +98,16 @@ static inline void invalidate_dcache_range(unsigned long start, mb(); /* sync */ } +#ifdef CONFIG_4xx +static inline void flush_instruction_cache(void) +{ + iccci((void *)KERNELBASE); + isync(); +} +#else +void flush_instruction_cache(void); +#endif + #include <asm-generic/cacheflush.h> #endif /* _ASM_POWERPC_CACHEFLUSH_H */ diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 9cce06194dcc..82f099ba2411 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -18,19 +18,18 @@ * Like csum_partial, this must be called with even lengths, * except for the last fragment. */ -extern __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err, int *dst_err); +extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr); + int len); #define HAVE_CSUM_COPY_USER extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum sum, int *err_ptr); + int len); -#define csum_partial_copy_nocheck(src, dst, len, sum) \ - csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL) +#define _HAVE_ARCH_CSUM_AND_COPY +#define csum_partial_copy_nocheck(src, dst, len) \ + csum_partial_copy_generic((src), (dst), (len)) /* diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 3e3cdfaa76c6..9191fc29e6ed 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -27,8 +27,6 @@ typedef s16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s64 compat_s64; -typedef u64 compat_u64; struct compat_stat { compat_dev_t st_dev; diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 32a15dc49e8c..93bc70d4c9a1 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -9,11 +9,6 @@ #ifndef __ASSEMBLY__ -/* - * Added to include __machine_check_early_realmode_* functions - */ -#include <asm/mce.h> - /* This structure can grow, it's real size is used by head.S code * via the mkdefs mechanism. 
*/ @@ -170,6 +165,7 @@ static inline void cpu_feature_keys_init(void) { } #else /* CONFIG_PPC32 */ /* Define these to 0 for the sake of tests in common code */ #define CPU_FTR_PPC_LE (0) +#define CPU_FTR_SPE (0) #endif /* @@ -299,8 +295,6 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_MAYBE_CAN_NAP 0 #endif -#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | \ - CPU_FTR_COHERENT_ICACHE) #define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE) @@ -516,10 +510,8 @@ static inline void cpu_feature_keys_init(void) { } #else enum { CPU_FTRS_POSSIBLE = -#ifdef CONFIG_PPC_BOOK3S_601 - CPU_FTRS_PPC601 | -#elif defined(CONFIG_PPC_BOOK3S_32) - CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU | +#ifdef CONFIG_PPC_BOOK3S_32 + CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU | CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 | CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX | CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 | @@ -594,9 +586,7 @@ enum { #else enum { CPU_FTRS_ALWAYS = -#ifdef CONFIG_PPC_BOOK3S_601 - CPU_FTRS_PPC601 & -#elif defined(CONFIG_PPC_BOOK3S_32) +#ifdef CONFIG_PPC_BOOK3S_32 CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU & CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 & CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX & diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index deb99fd6e060..98c8bd155bf9 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h @@ -23,7 +23,6 @@ extern int threads_per_core; extern int threads_per_subcore; extern int threads_shift; -extern bool has_big_cores; extern cpumask_t threads_core_mask; #else #define threads_per_core 1 diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h index 66963f7d3e64..51bb8c1476c7 100644 --- a/arch/powerpc/include/asm/delay.h +++ b/arch/powerpc/include/asm/delay.h @@ -54,7 +54,7 @@ extern void udelay(unsigned long usecs); ({ \ typeof(condition) __ret; \ unsigned long __loops = tb_ticks_per_usec * timeout; \ - unsigned long __start = get_tbl(); \ + unsigned long __start = mftb(); \ \ if (delay) { \ while (!(__ret = (condition)) && \ diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h index abc154d784b0..128304cbee1d 100644 --- a/arch/powerpc/include/asm/dma-direct.h +++ b/arch/powerpc/include/asm/dma-direct.h @@ -2,12 +2,12 @@ #ifndef ASM_POWERPC_DMA_DIRECT_H #define ASM_POWERPC_DMA_DIRECT_H 1 -static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { return paddr + dev->archdata.dma_offset; } -static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr) +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { return daddr - dev->archdata.dma_offset; } diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index 17ccc6474ab6..bf2402fed3e0 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -8,26 +8,39 @@ #ifndef _ASM_POWERPC_LMB_H #define _ASM_POWERPC_LMB_H +#include <linux/sched.h> + struct drmem_lmb { u64 base_addr; u32 drc_index; u32 aa_index; u32 flags; -#ifdef CONFIG_MEMORY_HOTPLUG - int nid; -#endif }; struct drmem_lmb_info { struct drmem_lmb *lmbs; int n_lmbs; - u32 lmb_size; + u64 lmb_size; }; extern struct 
drmem_lmb_info *drmem_info; +static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb, + const struct drmem_lmb *start) +{ + /* + * DLPAR code paths can take several milliseconds per element + * when interacting with firmware. Ensure that we don't + * unfairly monopolize the CPU. + */ + if (((++lmb - start) % 16) == 0) + cond_resched(); + + return lmb; +} + #define for_each_drmem_lmb_in_range(lmb, start, end) \ - for ((lmb) = (start); (lmb) < (end); (lmb)++) + for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start)) #define for_each_drmem_lmb(lmb) \ for_each_drmem_lmb_in_range((lmb), \ @@ -67,7 +80,7 @@ struct of_drconf_cell_v2 { #define DRCONF_MEM_RESERVED 0x00000080 #define DRCONF_MEM_HOTREMOVABLE 0x00000100 -static inline u32 drmem_lmb_size(void) +static inline u64 drmem_lmb_size(void) { return drmem_info->lmb_size; } @@ -105,22 +118,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb) lmb->aa_index = 0xffffffff; } -#ifdef CONFIG_MEMORY_HOTPLUG -static inline void lmb_set_nid(struct drmem_lmb *lmb) -{ - lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr); -} -static inline void lmb_clear_nid(struct drmem_lmb *lmb) -{ - lmb->nid = -1; -} -#else -static inline void lmb_set_nid(struct drmem_lmb *lmb) -{ -} -static inline void lmb_clear_nid(struct drmem_lmb *lmb) -{ -} -#endif - #endif /* _ASM_POWERPC_LMB_H */ diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index d5f369bcd130..b1a5bba2e0b9 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -27,7 +27,6 @@ struct pci_dn; #define EEH_FORCE_DISABLED 0x02 /* EEH disabled */ #define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */ #define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */ -#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */ #define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */ #define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */ @@ -74,7 +73,6 @@ struct pci_dn; struct eeh_pe { int type; /* PE type: PHB/Bus/Device */ int state; /* PE EEH dependent mode */ - int config_addr; /* Traditional PCI address */ int addr; /* PE configuration address */ struct pci_controller *phb; /* Associated PHB */ struct pci_bus *bus; /* Top PCI bus for bus PE */ @@ -216,7 +214,6 @@ enum { struct eeh_ops { char *name; - int (*init)(void); struct eeh_dev *(*probe)(struct pci_dev *pdev); int (*set_option)(struct eeh_pe *pe, int option); int (*get_state)(struct eeh_pe *pe, int *delay); @@ -281,8 +278,7 @@ int eeh_phb_pe_create(struct pci_controller *phb); int eeh_wait_state(struct eeh_pe *pe, int max_wait); struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root); -struct eeh_pe *eeh_pe_get(struct pci_controller *phb, - int pe_no, int config_addr); +struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no); int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent); int eeh_pe_tree_remove(struct eeh_dev *edev); void eeh_pe_update_time_stamp(struct eeh_pe *pe); @@ -295,8 +291,7 @@ const char *eeh_pe_loc_get(struct eeh_pe *pe); struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); void eeh_show_enabled(void); -int __init eeh_ops_register(struct eeh_ops *ops); -int __exit eeh_ops_unregister(const char *name); +int __init eeh_init(struct eeh_ops *ops); int eeh_check_failure(const volatile void __iomem *token); int eeh_dev_check_failure(struct eeh_dev *edev); void eeh_addr_cache_init(void); diff --git a/arch/powerpc/include/asm/hvcall.h 
b/arch/powerpc/include/asm/hvcall.h index fbb377055471..c1fbccb04390 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -375,11 +375,13 @@ #define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6 #define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 #define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9 +#define H_CPU_CHAR_BCCTR_LINK_FLUSH_ASSIST (1ull << 52) // IBM bit 11 #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 #define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5 +#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6 /* Flag values used in H_REGISTER_PROC_TBL hcall */ #define PROC_TABLE_OP_MASK 0x18 @@ -560,6 +562,42 @@ struct hv_guest_state { /* Latest version of hv_guest_state structure */ #define HV_GUEST_STATE_VERSION 1 +/* + * From the document "H_GetPerformanceCounterInfo Interface" v1.07 + * + * H_GET_PERF_COUNTER_INFO argument + */ +struct hv_get_perf_counter_info_params { + __be32 counter_request; /* I */ + __be32 starting_index; /* IO */ + __be16 secondary_index; /* IO */ + __be16 returned_values; /* O */ + __be32 detail_rc; /* O, only needed when called via *_norets() */ + + /* + * O, size each of counter_value element in bytes, only set for version + * >= 0x3 + */ + __be16 cv_element_size; + + /* I, 0 (zero) for versions < 0x3 */ + __u8 counter_info_version_in; + + /* O, 0 (zero) if version < 0x3. Must be set to 0 when making hcall */ + __u8 counter_info_version_out; + __u8 reserved[0xC]; + __u8 counter_value[]; +} __packed; + +#define HGPCI_REQ_BUFFER_SIZE 4096 +#define HGPCI_MAX_DATA_BYTES \ + (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params)) + +struct hv_gpci_request_buffer { + struct hv_get_perf_counter_info_params params; + uint8_t bytes[HGPCI_MAX_DATA_BYTES]; +} __packed; + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index db206a7f38e2..abebfbee5b1c 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -10,6 +10,7 @@ #define _PPC_BOOK3S_64_HW_BREAKPOINT_H #include <asm/cpu_has_feature.h> +#include <asm/inst.h> #ifdef __KERNEL__ struct arch_hw_breakpoint { @@ -17,6 +18,7 @@ struct arch_hw_breakpoint { u16 type; u16 len; /* length of the target data symbol */ u16 hw_len; /* length programmed in hw */ + u8 flags; }; /* Note: Don't change the first 6 bits below as they are in the same order @@ -36,12 +38,15 @@ struct arch_hw_breakpoint { #define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \ HW_BRK_TYPE_HYP) +#define HW_BRK_FLAG_DISABLED 0x1 + /* Minimum granularity */ #ifdef CONFIG_PPC_8xx #define HW_BREAKPOINT_SIZE 0x4 #else #define HW_BREAKPOINT_SIZE 0x8 #endif +#define HW_BREAKPOINT_SIZE_QUADWORD 0x10 #define DABR_MAX_LEN 8 #define DAWR_MAX_LEN 512 @@ -51,6 +56,13 @@ static inline int nr_wp_slots(void) return cpu_has_feature(CPU_FTR_DAWR1) ? 
2 : 1; } +bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr, + unsigned long ea, int type, int size, + struct arch_hw_breakpoint *info); + +void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, + int *type, int *size, unsigned long *ea); + #ifdef CONFIG_HAVE_HW_BREAKPOINT #include <linux/kdebug.h> #include <asm/reg.h> diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 35060be09073..0363734ff56e 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -25,9 +25,8 @@ #define PACA_IRQ_DBELL 0x02 #define PACA_IRQ_EE 0x04 #define PACA_IRQ_DEC 0x08 /* Or FIT */ -#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */ -#define PACA_IRQ_HMI 0x20 -#define PACA_IRQ_PMI 0x40 +#define PACA_IRQ_HMI 0x10 +#define PACA_IRQ_PMI 0x20 /* * Some soft-masked interrupts must be hard masked until they are replayed @@ -369,12 +368,6 @@ static inline void may_hard_irq_enable(void) { } #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST -/* - * interrupt-retrigger: should we handle this via lost interrupts and IPIs - * or should we not care like we do now ? --BenH. - */ -struct irq_chip; - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h index b0c70a35fd0e..f6599ccb3012 100644 --- a/arch/powerpc/include/asm/icswx.h +++ b/arch/powerpc/include/asm/icswx.h @@ -156,8 +156,7 @@ struct coprocessor_request_block { u8 reserved[32]; struct coprocessor_status_block csb; -} __packed; - +} __aligned(128); /* RFC02167 Initiate Coprocessor Instructions document * Chapter 8.2.1.1.1 RS @@ -188,6 +187,9 @@ static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb) __be64 ccw_reg = ccw; u32 cr; + /* NB: the same structures are used by VAS-NX */ + BUILD_BUG_ON(sizeof(*crb) != 128); + __asm__ __volatile__( PPC_ICSWX(%1,0,%2) "\n" "mfcr %0\n" diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 5032f1593299..deef7c94d7b6 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -12,7 +12,7 @@ #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/device.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/bitops.h> #include <asm/machdep.h> #include <asm/types.h> diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 814dfab7e392..4f983ca4030a 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -35,7 +35,6 @@ static __inline__ int irq_canonicalize(int irq) extern int distribute_irqs; -struct irqaction; struct pt_regs; #define __ARCH_HAS_DO_SOFTIRQ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index a90b892f0bfe..95081078aa8a 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -65,7 +65,6 @@ struct machdep_calls { void __noreturn (*restart)(char *cmd); void __noreturn (*halt)(void); void (*panic)(char *str); - void (*cpu_die)(void); long (*time_init)(void); /* Optional, may be NULL */ @@ -222,8 +221,6 @@ struct machdep_calls { extern void e500_idle(void); extern void power4_idle(void); -extern void power7_idle(void); -extern void power9_idle(void); extern void ppc6xx_idle(void); extern void book3e_idle(void); diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 7f3658a97384..e02aa793420b 100644 --- 
a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -244,7 +244,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - switch_mm(prev, next, current); + switch_mm_irqs_off(prev, next, current); } /* We don't currently use enter_lazy_tlb() for anything */ diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h index e752a5807a59..39be9aea86db 100644 --- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -65,4 +65,18 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, pte_update(mm, addr, ptep, clr, set, 1); } +#ifdef CONFIG_PPC_4K_PAGES +static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writable) +{ + size_t size = huge_page_size(hstate_vma(vma)); + + if (size == SZ_16K) + return __pte(pte_val(entry) & ~_PAGE_HUGE); + else + return entry; +} +#define arch_make_huge_pte arch_make_huge_pte +#endif + #endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index b9e134d0f03a..ee2243ba96cf 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -227,6 +227,19 @@ static inline void pmd_clear(pmd_t *pmdp) */ #ifdef CONFIG_PPC_8xx static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr); +static int hugepd_ok(hugepd_t hpd); + +static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge) +{ + if (!huge) + return PAGE_SIZE / SZ_4K; + else if (hugepd_ok(*((hugepd_t *)pmd))) + return 1; + else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE)) + return SZ_16K / SZ_4K; + else + return SZ_512K / SZ_4K; +} static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, unsigned long clr, unsigned long set, int huge) @@ -237,12 +250,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p int num, i; pmd_t *pmd = pmd_off(mm, addr); - if (!huge) - num = PAGE_SIZE / SZ_4K; - else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) != _PMD_PAGE_8M) - num = SZ_512K / SZ_4K; - else - num = 1; + num = number_of_cells_per_pte(pmd, new, huge); for (i = 0; i < num; i++, entry++, new += SZ_4K) *entry = new; diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 4b7c3472eab1..6277e7596ae5 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -140,11 +140,6 @@ static inline pte_t pte_mkold(pte_t pte) return __pte(pte_val(pte) & ~_PAGE_ACCESSED); } -static inline pte_t pte_mkpte(pte_t pte) -{ - return pte; -} - static inline pte_t pte_mkspecial(pte_t pte) { return __pte(pte_val(pte) | _PAGE_SPECIAL); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 63ed7e3b0ba3..6436f0b41539 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -9,7 +9,7 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/scatterlist.h> #include <asm/machdep.h> diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h index ee79d2cd9fb6..d37ededca3ee 100644 --- a/arch/powerpc/include/asm/pnv-ocxl.h +++ 
b/arch/powerpc/include/asm/pnv-ocxl.h @@ -28,7 +28,4 @@ int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **p void pnv_ocxl_spa_release(void *platform_data); int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle); -int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr); -void pnv_ocxl_free_xive_irq(u32 irq); - #endif /* _ASM_PNV_OCXL_H */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index b4cc6608131c..511786f0e40d 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -382,16 +382,6 @@ n: #endif /* various errata or part fixups */ -#ifdef CONFIG_PPC601_SYNC_FIX -#define SYNC sync; isync -#define SYNC_601 sync -#define ISYNC_601 isync -#else -#define SYNC -#define SYNC_601 -#define ISYNC_601 -#endif - #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) #define MFTB(dest) \ 90: mfspr dest, SPRN_TBRL; \ @@ -411,8 +401,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) #define MFTBU(dest) mfspr dest, SPRN_TBRU #endif -/* tlbsync is not implemented on 601 */ -#if !defined(CONFIG_SMP) || defined(CONFIG_PPC_BOOK3S_601) +#ifndef CONFIG_SMP #define TLBSYNC #else #define TLBSYNC tlbsync; sync diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index ed0d633ab5aa..365290b9a24b 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -220,6 +220,7 @@ struct thread_struct { unsigned long tm_tar; unsigned long tm_ppr; unsigned long tm_dscr; + unsigned long tm_amr; /* * Checkpointed FP and VSX 0-31 register set. @@ -432,16 +433,10 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; extern int powersave_nap; /* set if nap mode can be used in idle loop */ extern void power7_idle_type(unsigned long type); -extern void power9_idle_type(unsigned long stop_psscr_val, +extern void arch300_idle_type(unsigned long stop_psscr_val, unsigned long stop_psscr_mask); -extern void flush_instruction_cache(void); -extern void hard_reset_now(void); -extern void poweroff_now(void); extern int fix_alignment(struct pt_regs *); -extern void cvt_fd(float *from, double *to); -extern void cvt_df(double *from, float *to); -extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); #ifdef CONFIG_PPC64 /* diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 155a197c0aa1..e2c778c176a3 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -243,11 +243,7 @@ static inline void set_trap_norestart(struct pt_regs *regs) } #define arch_has_single_step() (1) -#ifndef CONFIG_PPC_BOOK3S_601 #define arch_has_block_step() (true) -#else -#define arch_has_block_step() (false) -#endif #define ARCH_HAS_USER_SINGLE_STEP_REPORT /* diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 88fb88491fe9..f877a576b338 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -521,6 +521,8 @@ #define SPRN_TSCR 0x399 /* Thread Switch Control Register */ #define SPRN_DEC 0x016 /* Decrement Register */ +#define SPRN_PIT 0x3DB /* Programmable Interval Timer (40x/BOOKE) */ + #define SPRN_DER 0x095 /* Debug Enable Register */ #define DER_RSTE 0x40000000 /* Reset Interrupt */ #define DER_CHSTPE 0x20000000 /* Check Stop */ @@ -817,7 +819,7 @@ #define THRM1_TIN (1 << 31) #define THRM1_TIV (1 << 30) #define THRM1_THRES(x) ((x&0x7f)<<23) -#define THRM3_SITV(x) ((x&0x3fff)<<1) 
+#define THRM3_SITV(x) ((x & 0x1fff) << 1) #define THRM1_TID (1<<2) #define THRM1_TIE (1<<1) #define THRM1_V (1<<0) @@ -1353,6 +1355,7 @@ #define PVR_POWER8NVL 0x004C #define PVR_POWER8 0x004D #define PVR_POWER9 0x004E +#define PVR_POWER10 0x0080 #define PVR_BE 0x0070 #define PVR_PA6T 0x0090 @@ -1416,8 +1419,7 @@ static inline void msr_check_and_clear(unsigned long bits) __msr_check_and_clear(bits); } -#ifdef __powerpc64__ -#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_CELL) || defined(CONFIG_E500) #define mftb() ({unsigned long rval; \ asm volatile( \ "90: mfspr %0, %2;\n" \ @@ -1427,29 +1429,23 @@ static inline void msr_check_and_clear(unsigned long bits) : "=r" (rval) \ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ rval;}) +#elif defined(CONFIG_PPC_8xx) +#define mftb() ({unsigned long rval; \ + asm volatile("mftbl %0" : "=r" (rval)); rval;}) #else #define mftb() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : \ "=r" (rval) : "i" (SPRN_TBRL)); rval;}) #endif /* !CONFIG_PPC_CELL */ -#else /* __powerpc64__ */ - #if defined(CONFIG_PPC_8xx) -#define mftbl() ({unsigned long rval; \ - asm volatile("mftbl %0" : "=r" (rval)); rval;}) #define mftbu() ({unsigned long rval; \ asm volatile("mftbu %0" : "=r" (rval)); rval;}) #else -#define mftbl() ({unsigned long rval; \ - asm volatile("mfspr %0, %1" : "=r" (rval) : \ - "i" (SPRN_TBRL)); rval;}) #define mftbu() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : "=r" (rval) : \ "i" (SPRN_TBRU)); rval;}) #endif -#define mftb() mftbl() -#endif /* !__powerpc64__ */ #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) #define mttbu(v) asm volatile("mttbu %0":: "r"(v)) diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index ff30f1076162..29a948e0c0f2 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -174,7 +174,6 @@ #define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */ #define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */ #define SPRN_MMUCFG 0x3F7 /* MMU Configuration Register */ -#define SPRN_PIT 0x3DB /* Programmable Interval Timer */ #define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */ #define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */ #define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 49a25e2400f2..b2035b2f57ce 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -28,8 +28,8 @@ extern int boot_cpuid; extern int spinning_secondaries; extern u32 *cpu_to_phys_id; +extern bool coregroup_enabled; -extern void cpu_die(void); extern int cpu_to_chip_id(int cpu); #ifdef CONFIG_SMP @@ -50,6 +50,9 @@ struct smp_ops_t { int (*cpu_disable)(void); void (*cpu_die)(unsigned int nr); int (*cpu_bootable)(unsigned int nr); +#ifdef CONFIG_HOTPLUG_CPU + void (*cpu_offline_self)(void); +#endif }; extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us); @@ -118,11 +121,6 @@ static inline struct cpumask *cpu_sibling_mask(int cpu) return per_cpu(cpu_sibling_map, cpu); } -static inline struct cpumask *cpu_core_mask(int cpu) -{ - return per_cpu(cpu_core_map, cpu); -} - static inline struct cpumask *cpu_l2_cache_mask(int cpu) { return per_cpu(cpu_l2_cache_map, cpu); @@ -135,6 +133,19 @@ static inline struct cpumask *cpu_smallcore_mask(int cpu) extern int cpu_to_core_id(int cpu); +extern bool has_big_cores; + 
+#define cpu_smt_mask cpu_smt_mask +#ifdef CONFIG_SCHED_SMT +static inline const struct cpumask *cpu_smt_mask(int cpu) +{ + if (has_big_cores) + return per_cpu(cpu_smallcore_map, cpu); + + return per_cpu(cpu_sibling_map, cpu); +} +#endif /* CONFIG_SCHED_SMT */ + /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. * * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up @@ -243,7 +254,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); * 64-bit but defining them all here doesn't harm */ extern void generic_secondary_smp_init(void); -extern void generic_secondary_thread_init(void); extern unsigned long __secondary_hold_spinloop; extern unsigned long __secondary_hold_acknowledge; extern char __secondary_hold; diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index 283552cd0e58..2aa0e31e6884 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -53,9 +53,7 @@ void *__memmove(void *to, const void *from, __kernel_size_t n); #ifndef CONFIG_KASAN #define __HAVE_ARCH_MEMSET32 #define __HAVE_ARCH_MEMSET64 -#define __HAVE_ARCH_MEMCPY_MCSAFE -extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz); extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t); extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t); extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t); diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h index 85580b30aba4..7546402d796a 100644 --- a/arch/powerpc/include/asm/svm.h +++ b/arch/powerpc/include/asm/svm.h @@ -15,6 +15,8 @@ static inline bool is_secure_guest(void) return mfmsr() & MSR_S; } +void __init svm_swiotlb_init(void); + void dtl_cache_ctor(void *addr); #define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL) @@ -25,6 +27,8 @@ static inline bool is_secure_guest(void) return false; } +static inline void svm_swiotlb_init(void) {} + #define get_dtl_cache_ctor() NULL #endif /* CONFIG_PPC_SVM */ diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index aca70fb43147..1d67bc8d7bc6 100644 --- a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -3,8 +3,9 @@ #define _ASM_POWERPC_SYNCH_H #ifdef __KERNEL__ +#include <asm/cputable.h> #include <asm/feature-fixups.h> -#include <asm/asm-const.h> +#include <asm/ppc-opcode.h> #ifndef __ASSEMBLY__ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; @@ -20,6 +21,22 @@ static inline void isync(void) { __asm__ __volatile__ ("isync" : : : "memory"); } + +static inline void ppc_after_tlbiel_barrier(void) +{ + asm volatile("ptesync": : :"memory"); + /* + * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is + * invalidated correctly. If this is not done, the paste can take data + * from the physical address that was translated at copy time. + * + * POWER9 in practice does not need this, because address spaces with + * accelerators mapped will use tlbie (which does invalidate the copy) + * to invalidate translations. It's not possible to limit POWER10 this + * way due to local copy-paste. 
+ */ + asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory"); +} #endif /* __ASSEMBLY__ */ #if defined(__powerpc64__) diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index cb326720a8a1..2f566c1a754c 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -38,44 +38,10 @@ struct div_result { u64 result_low; }; -/* Accessor functions for the timebase (RTC on 601) registers. */ -#define __USE_RTC() (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) - -#ifdef CONFIG_PPC64 - /* For compatibility, get_tbl() is defined as get_tb() on ppc64 */ -#define get_tbl get_tb - -#else - static inline unsigned long get_tbl(void) { - return mftbl(); -} - -static inline unsigned int get_tbu(void) -{ - return mftbu(); -} -#endif /* !CONFIG_PPC64 */ - -static inline unsigned int get_rtcl(void) -{ - unsigned int rtcl; - - asm volatile("mfrtcl %0" : "=r" (rtcl)); - return rtcl; -} - -static inline u64 get_rtc(void) -{ - unsigned int hi, lo, hi2; - - do { - asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2" - : "=r" (hi), "=r" (lo), "=r" (hi2)); - } while (hi2 != hi); - return (u64)hi * 1000000000 + lo; + return mftb(); } static inline u64 get_vtb(void) @@ -87,30 +53,21 @@ static inline u64 get_vtb(void) return 0; } -#ifdef CONFIG_PPC64 -static inline u64 get_tb(void) -{ - return mftb(); -} -#else /* CONFIG_PPC64 */ static inline u64 get_tb(void) { unsigned int tbhi, tblo, tbhi2; + if (IS_ENABLED(CONFIG_PPC64)) + return mftb(); + do { - tbhi = get_tbu(); - tblo = get_tbl(); - tbhi2 = get_tbu(); + tbhi = mftbu(); + tblo = mftb(); + tbhi2 = mftbu(); } while (tbhi != tbhi2); return ((u64)tbhi << 32) | tblo; } -#endif /* !CONFIG_PPC64 */ - -static inline u64 get_tb_or_rtc(void) -{ - return __USE_RTC() ? get_rtc() : get_tb(); -} static inline void set_tb(unsigned int upper, unsigned int lower) { @@ -127,11 +84,10 @@ static inline void set_tb(unsigned int upper, unsigned int lower) */ static inline u64 get_dec(void) { -#if defined(CONFIG_40x) - return (mfspr(SPRN_PIT)); -#else - return (mfspr(SPRN_DEC)); -#endif + if (IS_ENABLED(CONFIG_40x)) + return mfspr(SPRN_PIT); + + return mfspr(SPRN_DEC); } /* @@ -141,23 +97,17 @@ static inline u64 get_dec(void) */ static inline void set_dec(u64 val) { -#if defined(CONFIG_40x) - mtspr(SPRN_PIT, (u32) val); -#else -#ifndef CONFIG_BOOKE - --val; -#endif - mtspr(SPRN_DEC, val); -#endif /* not 40x */ + if (IS_ENABLED(CONFIG_40x)) + mtspr(SPRN_PIT, (u32)val); + else if (IS_ENABLED(CONFIG_BOOKE)) + mtspr(SPRN_DEC, val); + else + mtspr(SPRN_DEC, val - 1); } static inline unsigned long tb_ticks_since(unsigned long tstamp) { - if (__USE_RTC()) { - int delta = get_rtcl() - (unsigned int) tstamp; - return delta < 0 ? 
delta + 1000000000 : delta; - } - return get_tbl() - tstamp; + return mftb() - tstamp; } #define mulhwu(x,y) \ diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 6047402b0a4d..95988870a57b 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -17,9 +17,6 @@ typedef unsigned long cycles_t; static inline cycles_t get_cycles(void) { - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) - return 0; - return mftb(); } diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index fbc6f3002f23..d97f061fecac 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -66,19 +66,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm) return false; return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)); } -static inline void mm_reset_thread_local(struct mm_struct *mm) -{ - WARN_ON(atomic_read(&mm->context.copros) > 0); - /* - * It's possible for mm_access to take a reference on mm_users to - * access the remote mm from another thread, but it's not allowed - * to set mm_cpumask, so mm_users may be > 1 here. - */ - WARN_ON(current->mm != mm); - atomic_set(&mm->context.active_cpus, 1); - cpumask_clear(mm_cpumask(mm)); - cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); -} #else /* CONFIG_PPC_BOOK3S_64 */ static inline int mm_is_thread_local(struct mm_struct *mm) { diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index f0b6300e7dd3..8728590f514a 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -86,14 +86,27 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) #endif /* CONFIG_NUMA */ +struct drmem_lmb; +int of_drconf_to_nid_single(struct drmem_lmb *lmb); + #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) extern int find_and_online_cpu_nid(int cpu); +extern int cpu_to_coregroup_id(int cpu); #else static inline int find_and_online_cpu_nid(int cpu) { return 0; } +static inline int cpu_to_coregroup_id(int cpu) +{ +#ifdef CONFIG_SMP + return cpu_to_core_id(cpu); +#else + return 0; +#endif +} + #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */ #include <asm-generic/topology.h> @@ -104,15 +117,10 @@ static inline int find_and_online_cpu_nid(int cpu) #ifdef CONFIG_PPC64 #include <asm/smp.h> -#ifdef CONFIG_PPC_SPLPAR -int get_physical_package_id(int cpu); -#define topology_physical_package_id(cpu) (get_physical_package_id(cpu)) -#else #define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu)) -#endif #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) -#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) +#define topology_core_cpumask(cpu) (cpu_cpu_mask(cpu)) #define topology_core_id(cpu) (cpu_to_core_id(cpu)) #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 00699903f1ef..604d705f1bb8 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -151,52 +151,16 @@ static inline int __access_ok(unsigned long addr, unsigned long size, extern long __put_user_bad(void); -/* - * We don't tell gcc that we are accessing memory, but this is OK - * because we do not write to any memory gcc knows about, so there - * are no aliasing issues. 
- */ -#define __put_user_asm(x, addr, err, op) \ - __asm__ __volatile__( \ - "1: " op " %1,0(%2) # put_user\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: li %0,%3\n" \ - " b 2b\n" \ - ".previous\n" \ - EX_TABLE(1b, 3b) \ - : "=r" (err) \ - : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) - -#ifdef __powerpc64__ -#define __put_user_asm2(x, ptr, retval) \ - __put_user_asm(x, ptr, retval, "std") -#else /* __powerpc64__ */ -#define __put_user_asm2(x, addr, err) \ - __asm__ __volatile__( \ - "1: stw %1,0(%2)\n" \ - "2: stw %1+1,4(%2)\n" \ - "3:\n" \ - ".section .fixup,\"ax\"\n" \ - "4: li %0,%3\n" \ - " b 3b\n" \ - ".previous\n" \ - EX_TABLE(1b, 4b) \ - EX_TABLE(2b, 4b) \ - : "=r" (err) \ - : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) -#endif /* __powerpc64__ */ - #define __put_user_size_allowed(x, ptr, size, retval) \ do { \ + __label__ __pu_failed; \ + \ retval = 0; \ - switch (size) { \ - case 1: __put_user_asm(x, ptr, retval, "stb"); break; \ - case 2: __put_user_asm(x, ptr, retval, "sth"); break; \ - case 4: __put_user_asm(x, ptr, retval, "stw"); break; \ - case 8: __put_user_asm2(x, ptr, retval); break; \ - default: __put_user_bad(); \ - } \ + __put_user_size_goto(x, ptr, size, __pu_failed); \ + break; \ + \ +__pu_failed: \ + retval = -EFAULT; \ } while (0) #define __put_user_size(x, ptr, size, retval) \ @@ -249,12 +213,17 @@ do { \ }) +/* + * We don't tell gcc that we are accessing memory, but this is OK + * because we do not write to any memory gcc knows about, so there + * are no aliasing issues. + */ #define __put_user_asm_goto(x, addr, label, op) \ asm volatile goto( \ "1: " op "%U1%X1 %0,%1 # put_user\n" \ EX_TABLE(1b, %l2) \ : \ - : "r" (x), "m" (*addr) \ + : "r" (x), "m<>" (*addr) \ : \ : label) @@ -316,7 +285,7 @@ extern long __get_user_bad(void); #define __get_user_asm(x, addr, err, op) \ __asm__ __volatile__( \ - "1: "op" %1,0(%2) # get_user\n" \ + "1: "op"%U2%X2 %1, %2 # get_user\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ @@ -325,7 +294,7 @@ extern long __get_user_bad(void); ".previous\n" \ EX_TABLE(1b, 3b) \ : "=r" (err), "=r" (x) \ - : "b" (addr), "i" (-EFAULT), "0" (err)) + : "m<>" (*addr), "i" (-EFAULT), "0" (err)) #ifdef __powerpc64__ #define __get_user_asm2(x, addr, err) \ @@ -333,8 +302,8 @@ extern long __get_user_bad(void); #else /* __powerpc64__ */ #define __get_user_asm2(x, addr, err) \ __asm__ __volatile__( \ - "1: lwz %1,0(%2)\n" \ - "2: lwz %1+1,4(%2)\n" \ + "1: lwz%X2 %1, %2\n" \ + "2: lwz%X2 %L1, %L2\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: li %0,%3\n" \ @@ -345,7 +314,7 @@ extern long __get_user_bad(void); EX_TABLE(1b, 4b) \ EX_TABLE(2b, 4b) \ : "=r" (err), "=&r" (x) \ - : "b" (addr), "i" (-EFAULT), "0" (err)) + : "m" (*addr), "i" (-EFAULT), "0" (err)) #endif /* __powerpc64__ */ #define __get_user_size_allowed(x, ptr, size, retval) \ @@ -355,10 +324,10 @@ do { \ if (size > sizeof(x)) \ (x) = __get_user_bad(); \ switch (size) { \ - case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \ - case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \ - case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \ - case 8: __get_user_asm2(x, ptr, retval); break; \ + case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \ + case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \ + case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \ + case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \ default: (x) = __get_user_bad(); \ } \ } while (0) @@ -435,6 +404,32 @@ do { \ extern unsigned long 
__copy_tofrom_user(void __user *to, const void __user *from, unsigned long size); +#ifdef CONFIG_ARCH_HAS_COPY_MC +unsigned long __must_check +copy_mc_generic(void *to, const void *from, unsigned long size); + +static inline unsigned long __must_check +copy_mc_to_kernel(void *to, const void *from, unsigned long size) +{ + return copy_mc_generic(to, from, size); +} +#define copy_mc_to_kernel copy_mc_to_kernel + +static inline unsigned long __must_check +copy_mc_to_user(void __user *to, const void *from, unsigned long n) +{ + if (likely(check_copy_size(from, n, true))) { + if (access_ok(to, n)) { + allow_write_to_user(to, n); + n = copy_mc_generic((void *)to, from, n); + prevent_write_to_user(to, n); + } + } + + return n; +} +#endif + #ifdef __powerpc64__ static inline unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) @@ -523,20 +518,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) return ret; } -static __always_inline unsigned long __must_check -copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) -{ - if (likely(check_copy_size(from, n, true))) { - if (access_ok(to, n)) { - allow_write_to_user(to, n); - n = memcpy_mcsafe((void *)to, from, n); - prevent_write_to_user(to, n); - } - } - - return n; -} - unsigned long __arch_clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h index f5f1ccc740fc..7004cfea3f5f 100644 --- a/arch/powerpc/include/uapi/asm/ptrace.h +++ b/arch/powerpc/include/uapi/asm/ptrace.h @@ -222,6 +222,7 @@ struct ppc_debug_info { #define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004 #define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008 #define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x0000000000000010 +#define PPC_DEBUG_FEATURE_DATA_BP_ARCH_31 0x0000000000000020 #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index cbf41fb4ee89..bf0bf1b900d2 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -45,7 +45,8 @@ obj-y := cputable.o syscalls.o \ signal.o sysfs.o cacheinfo.o time.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ - of_platform.o prom_parse.o firmware.o + of_platform.o prom_parse.o firmware.o \ + hw_breakpoint_constraints.o obj-y += ptrace/ obj-$(CONFIG_PPC64) += setup_64.o \ paca.o nvram_64.o note.o syscall_64.o @@ -94,7 +95,8 @@ obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o -extra-y := head_$(BITS).o +extra-$(CONFIG_PPC64) := head_64.o +extra-$(CONFIG_PPC_BOOK3S_32) := head_book3s_32.o extra-$(CONFIG_40x) := head_40x.o extra-$(CONFIG_44x) := head_44x.o extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8711c2164b45..c2722ff36e98 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -176,6 +176,7 @@ int main(void) OFFSET(THREAD_TM_TAR, thread_struct, tm_tar); OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr); OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr); + OFFSET(THREAD_TM_AMR, thread_struct, tm_amr); OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs); OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr); OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave); diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c 
index 02300edc6989..c22a8e0dbc93 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -95,19 +95,10 @@ void __init btext_prepare_BAT(void) boot_text_mapped = 0; return; } - if (PVR_VER(mfspr(SPRN_PVR)) != 1) { - /* 603, 604, G3, G4, ... */ - lowbits = addr & ~0xFF000000UL; - addr &= 0xFF000000UL; - disp_BAT[0] = vaddr | (BL_16M<<2) | 2; - disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW); - } else { - /* 601 */ - lowbits = addr & ~0xFF800000UL; - addr &= 0xFF800000UL; - disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4; - disp_BAT[1] = addr | BL_8M | 0x40; - } + lowbits = addr & ~0xFF000000UL; + addr &= 0xFF000000UL; + disp_BAT[0] = vaddr | (BL_16M<<2) | 2; + disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW); logicalDisplayBase = (void *) (vaddr + lowbits); } #endif diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 2aa89c6b2896..492c0b36aff6 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -16,6 +16,7 @@ #include <asm/oprofile_impl.h> #include <asm/cputable.h> #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ +#include <asm/mce.h> #include <asm/mmu.h> #include <asm/setup.h> @@ -608,21 +609,6 @@ static struct cpu_spec __initdata cpu_specs[] = { #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_PPC32 -#ifdef CONFIG_PPC_BOOK3S_601 - { /* 601 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00010000, - .cpu_name = "601", - .cpu_features = CPU_FTRS_PPC601, - .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | - PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_generic, - .platform = "ppc601", - }, -#endif /* CONFIG_PPC_BOOK3S_601 */ #ifdef CONFIG_PPC_BOOK3S_6xx { /* 603 */ .pvr_mask = 0xffff0000, diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 9053fc9d20c7..a1c744194018 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -138,4 +138,6 @@ const struct dma_map_ops dma_iommu_ops = { .get_required_mask = dma_iommu_get_required_mask, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, }; diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index f204ad79b6b5..1098863e17ee 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -17,6 +17,7 @@ #include <asm/cputable.h> #include <asm/dt_cpu_ftrs.h> +#include <asm/mce.h> #include <asm/mmu.h> #include <asm/oprofile_impl.h> #include <asm/prom.h> diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 94682382fc8c..0e160dffcb86 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -466,7 +466,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) return 0; } - if (!pe->addr && !pe->config_addr) { + if (!pe->addr) { eeh_stats.no_cfg_addr++; return 0; } @@ -929,56 +929,6 @@ void eeh_save_bars(struct eeh_dev *edev) edev->config_space[1] |= PCI_COMMAND_MASTER; } -/** - * eeh_ops_register - Register platform dependent EEH operations - * @ops: platform dependent EEH operations - * - * Register the platform dependent EEH operation callback - * functions. The platform should call this function before - * any other EEH operations. 
- */ -int __init eeh_ops_register(struct eeh_ops *ops) -{ - if (!ops->name) { - pr_warn("%s: Invalid EEH ops name for %p\n", - __func__, ops); - return -EINVAL; - } - - if (eeh_ops && eeh_ops != ops) { - pr_warn("%s: EEH ops of platform %s already existing (%s)\n", - __func__, eeh_ops->name, ops->name); - return -EEXIST; - } - - eeh_ops = ops; - - return 0; -} - -/** - * eeh_ops_unregister - Unreigster platform dependent EEH operations - * @name: name of EEH platform operations - * - * Unregister the platform dependent EEH operation callback - * functions. - */ -int __exit eeh_ops_unregister(const char *name) -{ - if (!name || !strlen(name)) { - pr_warn("%s: Invalid EEH ops name\n", - __func__); - return -EINVAL; - } - - if (eeh_ops && !strcmp(eeh_ops->name, name)) { - eeh_ops = NULL; - return 0; - } - - return -EEXIST; -} - static int eeh_reboot_notifier(struct notifier_block *nb, unsigned long action, void *unused) { @@ -990,54 +940,6 @@ static struct notifier_block eeh_reboot_nb = { .notifier_call = eeh_reboot_notifier, }; -/** - * eeh_init - EEH initialization - * - * Initialize EEH by trying to enable it for all of the adapters in the system. - * As a side effect we can determine here if eeh is supported at all. - * Note that we leave EEH on so failed config cycles won't cause a machine - * check. If a user turns off EEH for a particular adapter they are really - * telling Linux to ignore errors. Some hardware (e.g. POWER5) won't - * grant access to a slot if EEH isn't enabled, and so we always enable - * EEH for all slots/all devices. - * - * The eeh-force-off option disables EEH checking globally, for all slots. - * Even if force-off is set, the EEH hardware is still enabled, so that - * newer systems can boot. - */ -static int eeh_init(void) -{ - struct pci_controller *hose, *tmp; - int ret = 0; - - /* Register reboot notifier */ - ret = register_reboot_notifier(&eeh_reboot_nb); - if (ret) { - pr_warn("%s: Failed to register notifier (%d)\n", - __func__, ret); - return ret; - } - - /* call platform initialization function */ - if (!eeh_ops) { - pr_warn("%s: Platform EEH operation not found\n", - __func__); - return -EEXIST; - } else if ((ret = eeh_ops->init())) - return ret; - - /* Initialize PHB PEs */ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) - eeh_phb_pe_create(hose); - - eeh_addr_cache_init(); - - /* Initialize EEH event */ - return eeh_event_init(); -} - -core_initcall_sync(eeh_init); - static int eeh_device_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -1062,12 +964,47 @@ static struct notifier_block eeh_device_nb = { .notifier_call = eeh_device_notifier, }; -static __init int eeh_set_bus_notifier(void) +/** + * eeh_init - System wide EEH initialization + * + * It's the platform's job to call this from an arch_initcall(). 
+ */ +int eeh_init(struct eeh_ops *ops) { - bus_register_notifier(&pci_bus_type, &eeh_device_nb); - return 0; + struct pci_controller *hose, *tmp; + int ret = 0; + + /* the platform should only initialise EEH once */ + if (WARN_ON(eeh_ops)) + return -EEXIST; + if (WARN_ON(!ops)) + return -ENOENT; + eeh_ops = ops; + + /* Register reboot notifier */ + ret = register_reboot_notifier(&eeh_reboot_nb); + if (ret) { + pr_warn("%s: Failed to register reboot notifier (%d)\n", + __func__, ret); + return ret; + } + + ret = bus_register_notifier(&pci_bus_type, &eeh_device_nb); + if (ret) { + pr_warn("%s: Failed to register bus notifier (%d)\n", + __func__, ret); + return ret; + } + + /* Initialize PHB PEs */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + eeh_phb_pe_create(hose); + + eeh_addr_cache_init(); + + /* Initialize EEH event */ + return eeh_event_init(); } -arch_initcall(eeh_set_bus_notifier); /** * eeh_probe_device() - Perform EEH initialization for the indicated pci device @@ -1720,7 +1657,7 @@ static ssize_t eeh_force_recover_write(struct file *filp, return -ENODEV; /* Retrieve PE */ - pe = eeh_pe_get(hose, pe_no, 0); + pe = eeh_pe_get(hose, pe_no); if (!pe) return -ENODEV; diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index d2aaaa73fdd5..845e024321d4 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -251,43 +251,21 @@ void eeh_pe_dev_traverse(struct eeh_pe *root, /** * __eeh_pe_get - Check the PE address - * @data: EEH PE - * @flag: EEH device * * For one particular PE, it can be identified by PE address * or tranditional BDF address. BDF address is composed of * Bus/Device/Function number. The extra data referred by flag * indicates which type of address should be used. */ -struct eeh_pe_get_flag { - int pe_no; - int config_addr; -}; - static void *__eeh_pe_get(struct eeh_pe *pe, void *flag) { - struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag; + int *target_pe = flag; - /* Unexpected PHB PE */ + /* PHB PEs are special and should be ignored */ if (pe->type & EEH_PE_PHB) return NULL; - /* - * We prefer PE address. For most cases, we should - * have non-zero PE address - */ - if (eeh_has_flag(EEH_VALID_PE_ZERO)) { - if (tmp->pe_no == pe->addr) - return pe; - } else { - if (tmp->pe_no && - (tmp->pe_no == pe->addr)) - return pe; - } - - /* Try BDF address */ - if (tmp->config_addr && - (tmp->config_addr == pe->config_addr)) + if (*target_pe == pe->addr) return pe; return NULL; @@ -297,7 +275,6 @@ static void *__eeh_pe_get(struct eeh_pe *pe, void *flag) * eeh_pe_get - Search PE based on the given address * @phb: PCI controller * @pe_no: PE number - * @config_addr: Config address * * Search the corresponding PE based on the specified address which * is included in the eeh device. The function is used to check if @@ -306,16 +283,11 @@ static void *__eeh_pe_get(struct eeh_pe *pe, void *flag) * which is composed of PCI bus/device/function number, or unified * PE address. 
*/ -struct eeh_pe *eeh_pe_get(struct pci_controller *phb, - int pe_no, int config_addr) +struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no) { struct eeh_pe *root = eeh_phb_pe_get(phb); - struct eeh_pe_get_flag tmp = { pe_no, config_addr }; - struct eeh_pe *pe; - pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp); - - return pe; + return eeh_pe_traverse(root, __eeh_pe_get, &pe_no); } /** @@ -336,19 +308,13 @@ int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent) struct pci_controller *hose = edev->controller; struct eeh_pe *pe, *parent; - /* Check if the PE number is valid */ - if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { - eeh_edev_err(edev, "PE#0 is invalid for this PHB!\n"); - return -EINVAL; - } - /* * Search the PE has been existing or not according * to the PE address. If that has been existing, the * PE should be composed of PCI bus and its subordinate * components. */ - pe = eeh_pe_get(hose, edev->pe_config_addr, edev->bdfn); + pe = eeh_pe_get(hose, edev->pe_config_addr); if (pe) { if (pe->type & EEH_PE_INVALID) { list_add_tail(&edev->entry, &pe->edevs); @@ -388,8 +354,8 @@ int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent) pr_err("%s: out of memory!\n", __func__); return -ENOMEM; } - pe->addr = edev->pe_config_addr; - pe->config_addr = edev->bdfn; + + pe->addr = edev->pe_config_addr; /* * Put the new EEH PE into hierarchy tree. If the parent diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index f4d0af8e1136..8cdc8bcde703 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -234,7 +234,6 @@ transfer_to_handler_cont: mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r10 mtlr r9 - SYNC RFI /* jump to handler, enable MMU */ #if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) @@ -264,7 +263,6 @@ _ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont) LOAD_REG_IMMEDIATE(r0, MSR_KERNEL) mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r0 - SYNC RFI reenable_mmu: @@ -323,7 +321,6 @@ stack_ovf: #endif mtspr SPRN_SRR0,r9 mtspr SPRN_SRR1,r10 - SYNC RFI _ASM_NOKPROBE_SYMBOL(stack_ovf) #endif @@ -411,7 +408,6 @@ ret_from_syscall: /* disable interrupts so current_thread_info()->flags can't change */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */ /* Note: We don't bother telling lockdep about it */ - SYNC mtmsr r10 lwz r9,TI_FLAGS(r2) li r8,-MAX_ERRNO @@ -474,7 +470,6 @@ syscall_exit_finish: #endif mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 - SYNC RFI _ASM_NOKPROBE_SYMBOL(syscall_exit_finish) #ifdef CONFIG_44x @@ -567,7 +562,6 @@ syscall_exit_work: * lockdep as we are supposed to have IRQs on at this point */ ori r10,r10,MSR_EE - SYNC mtmsr r10 /* Save NVGPRS if they're not saved already */ @@ -606,7 +600,6 @@ ret_from_kernel_syscall: #endif mtspr SPRN_SRR0, r9 mtspr SPRN_SRR1, r10 - SYNC RFI _ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall) @@ -810,7 +803,6 @@ fast_exception_return: REST_GPR(9, r11) REST_GPR(12, r11) lwz r11,GPR11(r11) - SYNC RFI _ASM_NOKPROBE_SYMBOL(fast_exception_return) @@ -819,19 +811,11 @@ _ASM_NOKPROBE_SYMBOL(fast_exception_return) 1: lis r3,exc_exit_restart_end@ha addi r3,r3,exc_exit_restart_end@l cmplw r12,r3 -#ifdef CONFIG_PPC_BOOK3S_601 - bge 2b -#else bge 3f -#endif lis r4,exc_exit_restart@ha addi r4,r4,exc_exit_restart@l cmplw r12,r4 -#ifdef CONFIG_PPC_BOOK3S_601 - blt 2b -#else blt 3f -#endif lis r3,fee_restarts@ha tophys(r3,r3) lwz r5,fee_restarts@l(r3) @@ -848,7 +832,6 @@ fee_restarts: /* aargh, a nonrecoverable interrupt, panic */ /* aargh, we don't 
know which trap this is */ -/* but the 601 doesn't implement the RI bit, so assume it's OK */ 3: li r10,-1 stw r10,_TRAP(r11) @@ -872,7 +855,6 @@ ret_from_except: * from the interrupt. */ /* Note: We don't bother telling lockdep about it */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) - SYNC /* Some chip revs have problems here... */ mtmsr r10 /* disable interrupts */ lwz r3,_MSR(r1) /* Returning to user mode? */ @@ -1035,7 +1017,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) * exc_exit_restart below. -- paulus */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI) - SYNC mtmsr r10 /* clear the RI bit */ .globl exc_exit_restart exc_exit_restart: @@ -1046,7 +1027,6 @@ exc_exit_restart: lwz r1,GPR1(r1) .globl exc_exit_restart_end exc_exit_restart_end: - SYNC RFI _ASM_NOKPROBE_SYMBOL(exc_exit_restart) _ASM_NOKPROBE_SYMBOL(exc_exit_restart_end) @@ -1274,7 +1254,6 @@ do_resched: /* r10 contains MSR_KERNEL here */ mfmsr r10 #endif ori r10,r10,MSR_EE - SYNC mtmsr r10 /* hard-enable interrupts */ bl schedule recheck: @@ -1283,7 +1262,6 @@ recheck: * TI_FLAGS aren't advertised. */ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) - SYNC mtmsr r10 /* disable interrupts */ lwz r9,TI_FLAGS(r2) andi. r0,r9,_TIF_NEED_RESCHED @@ -1292,7 +1270,6 @@ recheck: beq restore_user do_user_signal: /* r10 contains MSR_KERNEL here */ ori r10,r10,MSR_EE - SYNC mtmsr r10 /* hard-enable interrupts */ /* save r13-r31 in the exception frame, if not already done */ lwz r3,_TRAP(r1) @@ -1316,19 +1293,11 @@ nonrecoverable: lis r10,exc_exit_restart_end@ha addi r10,r10,exc_exit_restart_end@l cmplw r12,r10 -#ifdef CONFIG_PPC_BOOK3S_601 - bgelr -#else bge 3f -#endif lis r11,exc_exit_restart@ha addi r11,r11,exc_exit_restart@l cmplw r12,r11 -#ifdef CONFIG_PPC_BOOK3S_601 - bltlr -#else blt 3f -#endif lis r10,ee_restarts@ha lwz r12,ee_restarts@l(r10) addi r12,r12,1 @@ -1336,7 +1305,6 @@ nonrecoverable: mr r12,r11 /* restart at exc_exit_restart */ blr 3: /* OK, we can't recover, kill this process */ - /* but the 601 doesn't implement the RI bit, so assume it's OK */ lwz r3,_TRAP(r1) andi. 
r0,r3,1 beq 5f @@ -1382,8 +1350,7 @@ _GLOBAL(enter_rtas) mfmsr r9 stw r9,8(r1) LOAD_REG_IMMEDIATE(r0,MSR_KERNEL) - SYNC /* disable interrupts so SRR0/1 */ - mtmsr r0 /* don't get trashed */ + mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) mtlr r6 stw r7, THREAD + RTAS_SP(r2) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 733e40eba4eb..2f3846192ec7 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -430,7 +430,11 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs); #define FLUSH_COUNT_CACHE \ 1: nop; \ - patch_site 1b, patch__call_flush_branch_caches + patch_site 1b, patch__call_flush_branch_caches1; \ +1: nop; \ + patch_site 1b, patch__call_flush_branch_caches2; \ +1: nop; \ + patch_site 1b, patch__call_flush_branch_caches3 .macro nops number .rept \number @@ -512,7 +516,7 @@ _GLOBAL(_switch) kuap_check_amr r9, r10 - FLUSH_COUNT_CACHE + FLUSH_COUNT_CACHE /* Clobbers r9, ctr */ /* * On SMP kernels, care must be taken because a task may be diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index d9ed79415100..f579ce46eef2 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -988,7 +988,6 @@ kernel_dbg_exc: .endm masked_interrupt_book3e_0x500: - // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE masked_interrupt_book3e PACA_IRQ_EE 1 masked_interrupt_book3e_0x900: @@ -1303,16 +1302,6 @@ fast_exception_return: addi r3,r1,STACK_FRAME_OVERHEAD; bl do_IRQ b ret_from_except -1: cmpwi cr0,r3,0xf00 - bne 1f - addi r3,r1,STACK_FRAME_OVERHEAD; - bl performance_monitor_exception - b ret_from_except -1: cmpwi cr0,r3,0xe60 - bne 1f - addi r3,r1,STACK_FRAME_OVERHEAD; - bl handle_hmi_exception - b ret_from_except 1: cmpwi cr0,r3,0x900 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 10ebb4bf71ad..8482739d42f3 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -191,13 +191,13 @@ int is_fadump_active(void) */ static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end) { - struct memblock_region *reg; + phys_addr_t reg_start, reg_end; bool ret = false; - u64 start, end; + u64 i, start, end; - for_each_memblock(memory, reg) { - start = max_t(u64, d_start, reg->base); - end = min_t(u64, d_end, (reg->base + reg->size)); + for_each_mem_range(i, ®_start, ®_end) { + start = max_t(u64, d_start, reg_start); + end = min_t(u64, d_end, reg_end); if (d_start < end) { /* Memory hole from d_start to start */ if (start > d_start) @@ -422,34 +422,34 @@ static int __init add_boot_mem_regions(unsigned long mstart, static int __init fadump_get_boot_mem_regions(void) { - unsigned long base, size, cur_size, hole_size, last_end; + unsigned long size, cur_size, hole_size, last_end; unsigned long mem_size = fw_dump.boot_memory_size; - struct memblock_region *reg; + phys_addr_t reg_start, reg_end; int ret = 1; + u64 i; fw_dump.boot_mem_regs_cnt = 0; last_end = 0; hole_size = 0; cur_size = 0; - for_each_memblock(memory, reg) { - base = reg->base; - size = reg->size; - hole_size += (base - last_end); + for_each_mem_range(i, ®_start, ®_end) { + size = reg_end - reg_start; + hole_size += (reg_start - last_end); if ((cur_size + size) >= mem_size) { size = (mem_size - cur_size); - ret = add_boot_mem_regions(base, size); + ret = add_boot_mem_regions(reg_start, size); break; } mem_size -= size; cur_size += size; - ret = add_boot_mem_regions(base, 
size); + ret = add_boot_mem_regions(reg_start, size); if (!ret) break; - last_end = base + size; + last_end = reg_end; } fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size); @@ -754,10 +754,8 @@ u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) void fadump_update_elfcore_header(char *bufp) { - struct elfhdr *elf; struct elf_phdr *phdr; - elf = (struct elfhdr *)bufp; bufp += sizeof(struct elfhdr); /* First note is a place holder for cpu notes info. */ @@ -985,9 +983,8 @@ static int fadump_init_elfcore_header(char *bufp) */ static int fadump_setup_crash_memory_ranges(void) { - struct memblock_region *reg; - u64 start, end; - int i, ret; + u64 i, start, end; + int ret; pr_debug("Setup crash memory ranges.\n"); crash_mrange_info.mem_range_cnt = 0; @@ -1005,10 +1002,7 @@ static int fadump_setup_crash_memory_ranges(void) return ret; } - for_each_memblock(memory, reg) { - start = (u64)reg->base; - end = start + (u64)reg->size; - + for_each_mem_range(i, &start, &end) { /* * skip the memory chunk that is already added * (0 through boot_memory_top). @@ -1242,14 +1236,17 @@ static void fadump_free_reserved_memory(unsigned long start_pfn, */ static void fadump_release_reserved_area(u64 start, u64 end) { + unsigned long reg_spfn, reg_epfn; u64 tstart, tend, spfn, epfn; - struct memblock_region *reg; + int i; spfn = PHYS_PFN(start); epfn = PHYS_PFN(end); - for_each_memblock(memory, reg) { - tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg)); - tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg)); + + for_each_mem_pfn_range(i, MAX_NUMNODES, ®_spfn, ®_epfn, NULL) { + tstart = max_t(u64, spfn, reg_spfn); + tend = min_t(u64, epfn, reg_epfn); + if (tstart < tend) { fadump_free_reserved_memory(tstart, tend); @@ -1684,12 +1681,10 @@ int __init fadump_reserve_mem(void) /* Preserve everything above the base address */ static void __init fadump_reserve_crash_area(u64 base) { - struct memblock_region *reg; - u64 mstart, msize; + u64 i, mstart, mend, msize; - for_each_memblock(memory, reg) { - mstart = reg->base; - msize = reg->size; + for_each_mem_range(i, &mstart, &mend) { + msize = mend - mstart; if ((mstart + msize) < base) continue; diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 4ae39db70044..3ff9a8fafa46 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -87,7 +87,6 @@ BEGIN_FTR_SECTION oris r5,r5,MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - SYNC MTMSRD(r5) /* enable use of fpu now */ isync /* enable use of FP after return */ @@ -134,18 +133,3 @@ _GLOBAL(save_fpu) mffs fr0 stfd fr0,FPSTATE_FPSCR(r6) blr - -/* - * These are used in the alignment trap handler when emulating - * single-precision loads and stores. 
- */ - -_GLOBAL(cvt_fd) - lfs 0,0(r3) - stfd 0,0(r4) - blr - -_GLOBAL(cvt_df) - lfd 0,0(r3) - stfs 0,0(r4) - blr diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index 9abec6cd099c..7c767765071d 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -40,48 +40,52 @@ .macro EXCEPTION_PROLOG_1 for_rtas=0 #ifdef CONFIG_VMAP_STACK - .ifeq \for_rtas - li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ - mtmsr r11 - isync - .endif - subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */ + mr r11, r1 + subi r1, r1, INT_FRAME_SIZE /* use r1 if kernel */ + beq 1f + mfspr r1,SPRN_SPRG_THREAD + lwz r1,TASK_STACK-THREAD(r1) + addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE #else - tophys(r11,r1) /* use tophys(r1) if kernel */ - subi r11, r11, INT_FRAME_SIZE /* alloc exc. frame */ -#endif + subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */ beq 1f mfspr r11,SPRN_SPRG_THREAD - tovirt_vmstack r11, r11 lwz r11,TASK_STACK-THREAD(r11) addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE - tophys_novmstack r11, r11 +#endif 1: + tophys_novmstack r11, r11 #ifdef CONFIG_VMAP_STACK - mtcrf 0x7f, r11 + mtcrf 0x7f, r1 bt 32 - THREAD_ALIGN_SHIFT, stack_overflow #endif .endm .macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0 -#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) -BEGIN_MMU_FTR_SECTION +#ifdef CONFIG_VMAP_STACK mtcr r10 -FTR_SECTION_ELSE - stw r10, _CCR(r11) -ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE) + li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ + mtmsr r10 + isync #else stw r10,_CCR(r11) /* save registers */ #endif mfspr r10, SPRN_SPRG_SCRATCH0 +#ifdef CONFIG_VMAP_STACK + stw r11,GPR1(r1) + stw r11,0(r1) + mr r11, r1 +#else + stw r1,GPR1(r11) + stw r1,0(r11) + tovirt(r1, r11) /* set new kernel sp */ +#endif stw r12,GPR12(r11) stw r9,GPR9(r11) stw r10,GPR10(r11) -#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) -BEGIN_MMU_FTR_SECTION +#ifdef CONFIG_VMAP_STACK mfcr r10 stw r10, _CCR(r11) -END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) #endif mfspr r12,SPRN_SPRG_SCRATCH1 stw r12,GPR11(r11) @@ -97,19 +101,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) stw r10, _DSISR(r11) .endif lwz r9, SRR1(r12) -#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S) -BEGIN_MMU_FTR_SECTION andi. r10, r9, MSR_PR -END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) -#endif lwz r12, SRR0(r12) #else mfspr r12,SPRN_SRR0 mfspr r9,SPRN_SRR1 #endif - stw r1,GPR1(r11) - stw r1,0(r11) - tovirt_novmstack r1, r11 /* set new kernel sp */ #ifdef CONFIG_40x rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) 
*/ #else @@ -225,7 +222,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) #endif mtspr SPRN_SRR1,r10 mtspr SPRN_SRR0,r11 - SYNC RFI /* jump to handler, enable MMU */ 99: b ret_from_kernel_syscall .endm @@ -327,20 +323,19 @@ label: .macro vmap_stack_overflow_exception #ifdef CONFIG_VMAP_STACK #ifdef CONFIG_SMP - mfspr r11, SPRN_SPRG_THREAD - tovirt(r11, r11) - lwz r11, TASK_CPU - THREAD(r11) - slwi r11, r11, 3 - addis r11, r11, emergency_ctx@ha + mfspr r1, SPRN_SPRG_THREAD + lwz r1, TASK_CPU - THREAD(r1) + slwi r1, r1, 3 + addis r1, r1, emergency_ctx@ha #else - lis r11, emergency_ctx@ha + lis r1, emergency_ctx@ha #endif - lwz r11, emergency_ctx@l(r11) - cmpwi cr1, r11, 0 + lwz r1, emergency_ctx@l(r1) + cmpwi cr1, r1, 0 bne cr1, 1f - lis r11, init_thread_union@ha - addi r11, r11, init_thread_union@l -1: addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE + lis r1, init_thread_union@ha + addi r1, r1, init_thread_union@l +1: addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE EXCEPTION_PROLOG_2 SAVE_NVGPRS(r11) addi r3, r1, STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 5b282d9965a5..44c9018aed1b 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -72,7 +72,6 @@ turn_on_mmu: lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 - SYNC rfi /* enables MMU */ b . /* prevent prefetch past rfi */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 0e05a9a47a4b..1510b2a56669 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -300,9 +300,6 @@ _GLOBAL(fsl_secondary_thread_init) rlwimi r3, r3, 30, 2, 30 mtspr SPRN_PIR, r3 1: -#endif - -_GLOBAL(generic_secondary_thread_init) mr r24,r3 /* turn on 64-bit mode */ @@ -312,13 +309,13 @@ _GLOBAL(generic_secondary_thread_init) bl relative_toc tovirt(r2,r2) -#ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 bl book3e_secondary_thread_init -#endif b generic_secondary_common_init +#endif /* CONFIG_PPC_BOOK3E */ + /* * On pSeries and most other platforms, secondary processors spin * in the following code. 
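
The EEH rework shown earlier in this patch (arch/powerpc/kernel/eeh.c) drops eeh_ops_register()/eeh_ops_unregister() and the core_initcall in favour of a single eeh_init(struct eeh_ops *ops) that each platform calls from its own arch_initcall. Below is a minimal sketch of a platform's bring-up under that interface; the "example" identifiers and the include list are illustrative assumptions, not part of the patch:

#include <linux/init.h>
#include <asm/eeh.h>

/* Hypothetical platform ops; only .name is shown, the remaining EEH
 * callbacks a real platform would provide are omitted here. */
static struct eeh_ops example_eeh_ops = {
	.name = "example",
};

static int __init example_eeh_setup(void)
{
	/*
	 * eeh_init() now registers the reboot and PCI bus notifiers,
	 * creates the PHB PEs and initialises the address cache and
	 * event queue itself; the platform only hands over its ops.
	 */
	return eeh_init(&example_eeh_ops);
}
arch_initcall(example_eeh_setup);

The NULL and duplicate-registration checks that eeh_ops_register() used to perform are now handled by the WARN_ON()s at the top of the new eeh_init().
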
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_book3s_32.S index f3ab94d73936..5eb9eedac920 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -34,16 +34,6 @@ #include "head_32.h" -/* 601 only have IBAT */ -#ifdef CONFIG_PPC_BOOK3S_601 -#define LOAD_BAT(n, reg, RA, RB) \ - li RA,0; \ - mtspr SPRN_IBAT##n##U,RA; \ - lwz RA,(n*16)+0(reg); \ - lwz RB,(n*16)+4(reg); \ - mtspr SPRN_IBAT##n##U,RA; \ - mtspr SPRN_IBAT##n##L,RB -#else #define LOAD_BAT(n, reg, RA, RB) \ /* see the comment for clear_bats() -- Cort */ \ li RA,0; \ @@ -57,11 +47,10 @@ lwz RB,(n*16)+12(reg); \ mtspr SPRN_DBAT##n##U,RA; \ mtspr SPRN_DBAT##n##L,RB -#endif __HEAD .stabs "arch/powerpc/kernel/",N_SO,0,0,0f - .stabs "head_32.S",N_SO,0,0,0f + .stabs "head_book3s_32.S",N_SO,0,0,0f 0: _ENTRY(_stext); @@ -166,9 +155,9 @@ __after_mmu_off: bl initial_bats bl load_segment_registers -#ifdef CONFIG_KASAN +BEGIN_MMU_FTR_SECTION bl early_hash_table -#endif +END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) #if defined(CONFIG_BOOTX_TEXT) bl setup_disp_bat #endif @@ -185,10 +174,8 @@ __after_mmu_off: bl reloc_offset li r24,0 /* cpu# */ bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_PPC_BOOK3S_32 bl reloc_offset bl init_idle_6xx -#endif /* CONFIG_PPC_BOOK3S_32 */ /* @@ -219,7 +206,6 @@ turn_on_mmu: lis r0,start_here@h ori r0,r0,start_here@l mtspr SPRN_SRR0,r0 - SYNC RFI /* enables MMU */ /* @@ -274,14 +260,8 @@ __secondary_hold_acknowledge: DO_KVM 0x200 MachineCheck: EXCEPTION_PROLOG_0 -#ifdef CONFIG_VMAP_STACK - li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */ - mtmsr r11 - isync -#endif #ifdef CONFIG_PPC_CHRP mfspr r11, SPRN_SPRG_THREAD - tovirt_vmstack r11, r11 lwz r11, RTAS_SP(r11) cmpwi cr1, r11, 0 bne cr1, 7f @@ -439,7 +419,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) SystemCall: SYSCALL_ENTRY 0xc00 -/* Single step - not used on 601 */ EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD) @@ -790,14 +769,12 @@ fast_hash_page_return: mtcr r11 lwz r11, THR11(r10) mfspr r10, SPRN_SPRG_SCRATCH0 - SYNC RFI 1: /* ISI */ mtcr r11 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r10, SPRN_SPRG_SCRATCH0 - SYNC RFI stack_overflow: @@ -888,7 +865,6 @@ __secondary_start_pmac_0: set to map the 0xf0000000 - 0xffffffff region */ mfmsr r0 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ - SYNC mtmsr r0 isync @@ -900,10 +876,8 @@ __secondary_start: lis r3,-KERNELBASE@h mr r4,r24 bl call_setup_cpu /* Call setup_cpu for this CPU */ -#ifdef CONFIG_PPC_BOOK3S_32 lis r3,-KERNELBASE@h bl init_idle_6xx -#endif /* CONFIG_PPC_BOOK3S_32 */ /* get current's stack and current */ lis r2,secondary_current@ha @@ -936,7 +910,6 @@ __secondary_start: ori r3,r3,start_secondary@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - SYNC RFI #endif /* CONFIG_SMP */ @@ -945,21 +918,9 @@ __secondary_start: #endif /* - * Those generic dummy functions are kept for CPUs not - * included in CONFIG_PPC_BOOK3S_32 - */ -#if !defined(CONFIG_PPC_BOOK3S_32) -_ENTRY(__save_cpu_setup) - blr -_ENTRY(__restore_cpu_setup) - blr -#endif /* !defined(CONFIG_PPC_BOOK3S_32) */ - -/* * Load stuff into the MMU. Intended to be called with * IR=0 and DR=0. 
*/ -#ifdef CONFIG_KASAN early_hash_table: sync /* Force all PTE updates to finish */ isync @@ -970,8 +931,10 @@ early_hash_table: lis r6, early_hash - PAGE_OFFSET@h ori r6, r6, 3 /* 256kB table */ mtspr SPRN_SDR1, r6 + lis r6, early_hash@h + lis r3, Hash@ha + stw r6, Hash@l(r3) blr -#endif load_up_mmu: sync /* Force all PTE updates to finish */ @@ -985,8 +948,7 @@ load_up_mmu: lwz r6,_SDR1@l(r6) mtspr SPRN_SDR1,r6 -/* Load the BAT registers with the values set up by MMU_init. - MMU_init takes care of whether we're on a 601 or not. */ +/* Load the BAT registers with the values set up by MMU_init. */ lis r3,BATS@ha addi r3,r3,BATS@l tophys(r3,r3) @@ -1002,7 +964,7 @@ BEGIN_MMU_FTR_SECTION END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr -load_segment_registers: +_GLOBAL(load_segment_registers) li r0, NUM_USER_SEGMENTS /* load up user segment register values */ mtctr r0 /* for context 0 */ li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */ @@ -1061,11 +1023,7 @@ start_here: bl machine_init bl __save_cpu_setup bl MMU_init -#ifdef CONFIG_KASAN -BEGIN_MMU_FTR_SECTION bl MMU_init_hw_patch -END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) -#endif /* * Go back to running unmapped so we can load up new values @@ -1080,7 +1038,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) .align 4 mtspr SPRN_SRR0,r4 mtspr SPRN_SRR1,r3 - SYNC RFI /* Load up the kernel context */ 2: bl load_up_mmu @@ -1092,7 +1049,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) */ lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* This much match your Abatron config */ + stw r5, 0xf0(0) /* This much match your Abatron config */ lis r6, swapper_pg_dir@h ori r6, r6, swapper_pg_dir@l tophys(r5, r5) @@ -1105,7 +1062,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE) ori r3,r3,start_kernel@l mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - SYNC RFI /* @@ -1165,7 +1121,6 @@ EXPORT_SYMBOL(switch_mmu_context) clear_bats: li r10,0 -#ifndef CONFIG_PPC_BOOK3S_601 mtspr SPRN_DBAT0U,r10 mtspr SPRN_DBAT0L,r10 mtspr SPRN_DBAT1U,r10 @@ -1174,7 +1129,6 @@ clear_bats: mtspr SPRN_DBAT2L,r10 mtspr SPRN_DBAT3U,r10 mtspr SPRN_DBAT3L,r10 -#endif mtspr SPRN_IBAT0U,r10 mtspr SPRN_IBAT0L,r10 mtspr SPRN_IBAT1U,r10 @@ -1223,7 +1177,6 @@ _ENTRY(update_bats) .align 4 mtspr SPRN_SRR0, r4 mtspr SPRN_SRR1, r3 - SYNC RFI 1: bl clear_bats lis r3, BATS@ha @@ -1243,7 +1196,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) mtmsr r3 mtspr SPRN_SRR0, r7 mtspr SPRN_SRR1, r6 - SYNC RFI flush_tlbs: @@ -1267,26 +1219,9 @@ mmu_off: sync RFI -/* - * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET - * (we keep one for debugging) and on others, we use one 256M BAT. - */ +/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */ initial_bats: lis r11,PAGE_OFFSET@h -#ifdef CONFIG_PPC_BOOK3S_601 - ori r11,r11,4 /* set up BAT registers for 601 */ - li r8,0x7f /* valid, block length = 8MB */ - mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ - mtspr SPRN_IBAT0L,r8 /* lower BAT register */ - addis r11,r11,0x800000@h - addis r8,r8,0x800000@h - mtspr SPRN_IBAT1U,r11 - mtspr SPRN_IBAT1L,r8 - addis r11,r11,0x800000@h - addis r8,r8,0x800000@h - mtspr SPRN_IBAT2U,r11 - mtspr SPRN_IBAT2L,r8 -#else tophys(r8,r11) #ifdef CONFIG_SMP ori r8,r8,0x12 /* R/W access, M=1 */ @@ -1295,11 +1230,10 @@ initial_bats: #endif /* CONFIG_SMP */ ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ - mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ + mtspr SPRN_DBAT0L,r8 /* N.B. 
6xx have valid */ mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ mtspr SPRN_IBAT0L,r8 mtspr SPRN_IBAT0U,r11 -#endif isync blr @@ -1317,13 +1251,8 @@ setup_disp_bat: beqlr lwz r11,0(r8) lwz r8,4(r8) -#ifndef CONFIG_PPC_BOOK3S_601 mtspr SPRN_DBAT3L,r8 mtspr SPRN_DBAT3U,r11 -#else - mtspr SPRN_IBAT3L,r8 - mtspr SPRN_IBAT3U,r11 -#endif blr #endif /* CONFIG_BOOTX_TEXT */ diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 18f87bf9e32b..71c359d438b5 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -176,7 +176,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) #endif mtspr SPRN_SRR1,r10 mtspr SPRN_SRR0,r11 - SYNC RFI /* jump to handler, enable MMU */ 99: b ret_from_kernel_syscall .endm diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 1f4a1efa0074..f4e8f21046f5 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -494,151 +494,6 @@ reset: } } -static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info) -{ - return ((info->address <= dar) && (dar - info->address < info->len)); -} - -static bool ea_user_range_overlaps(unsigned long ea, int size, - struct arch_hw_breakpoint *info) -{ - return ((ea < info->address + info->len) && - (ea + size > info->address)); -} - -static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info) -{ - unsigned long hw_start_addr, hw_end_addr; - - hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); - hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); - - return ((hw_start_addr <= dar) && (hw_end_addr > dar)); -} - -static bool ea_hw_range_overlaps(unsigned long ea, int size, - struct arch_hw_breakpoint *info) -{ - unsigned long hw_start_addr, hw_end_addr; - - hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); - hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); - - return ((ea < hw_end_addr) && (ea + size > hw_start_addr)); -} - -/* - * If hw has multiple DAWR registers, we also need to check all - * dawrx constraint bits to confirm this is _really_ a valid event. - * If type is UNKNOWN, but privilege level matches, consider it as - * a positive match. - */ -static bool check_dawrx_constraints(struct pt_regs *regs, int type, - struct arch_hw_breakpoint *info) -{ - if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ)) - return false; - - /* - * The Cache Management instructions other than dcbz never - * cause a match. i.e. if type is CACHEOP, the instruction - * is dcbz, and dcbz is treated as Store. - */ - if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE)) - return false; - - if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL)) - return false; - - if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER)) - return false; - - return true; -} - -/* - * Return true if the event is valid wrt dawr configuration, - * including extraneous exception. Otherwise return false. - */ -static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr, - unsigned long ea, int type, int size, - struct arch_hw_breakpoint *info) -{ - bool in_user_range = dar_in_user_range(regs->dar, info); - bool dawrx_constraints; - - /* - * 8xx supports only one breakpoint and thus we can - * unconditionally return true. 
- */ - if (IS_ENABLED(CONFIG_PPC_8xx)) { - if (!in_user_range) - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; - return true; - } - - if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) { - if (cpu_has_feature(CPU_FTR_ARCH_31) && - !dar_in_hw_range(regs->dar, info)) - return false; - - return true; - } - - dawrx_constraints = check_dawrx_constraints(regs, type, info); - - if (type == UNKNOWN) { - if (cpu_has_feature(CPU_FTR_ARCH_31) && - !dar_in_hw_range(regs->dar, info)) - return false; - - return dawrx_constraints; - } - - if (ea_user_range_overlaps(ea, size, info)) - return dawrx_constraints; - - if (ea_hw_range_overlaps(ea, size, info)) { - if (dawrx_constraints) { - info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; - return true; - } - } - return false; -} - -static int cache_op_size(void) -{ -#ifdef __powerpc64__ - return ppc64_caches.l1d.block_size; -#else - return L1_CACHE_BYTES; -#endif -} - -static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, - int *type, int *size, unsigned long *ea) -{ - struct instruction_op op; - - if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip)) - return; - - analyse_instr(&op, regs, *instr); - *type = GETTYPE(op.type); - *ea = op.ea; -#ifdef __powerpc64__ - if (!(regs->msr & MSR_64BIT)) - *ea &= 0xffffffffUL; -#endif - - *size = GETSIZE(op.type); - if (*type == CACHEOP) { - *size = cache_op_size(); - *ea &= ~(*size - 1); - } -} - static bool is_larx_stcx_instr(int type) { return type == LARX || type == STCX; @@ -722,7 +577,7 @@ int hw_breakpoint_handler(struct die_args *args) rcu_read_lock(); if (!IS_ENABLED(CONFIG_PPC_8xx)) - get_instr_detail(regs, &instr, &type, &size, &ea); + wp_get_instr_detail(regs, &instr, &type, &size, &ea); for (i = 0; i < nr_wp_slots(); i++) { bp[i] = __this_cpu_read(bp_per_reg[i]); @@ -732,7 +587,7 @@ int hw_breakpoint_handler(struct die_args *args) info[i] = counter_arch_bp(bp[i]); info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ; - if (check_constraints(regs, instr, ea, type, size, info[i])) { + if (wp_check_constraints(regs, instr, ea, type, size, info[i])) { if (!IS_ENABLED(CONFIG_PPC_8xx) && ppc_inst_equal(instr, ppc_inst(0))) { handler_error(bp[i], info[i]); diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c new file mode 100644 index 000000000000..867ee4aa026a --- /dev/null +++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0+ +#include <linux/kernel.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <asm/hw_breakpoint.h> +#include <asm/sstep.h> +#include <asm/cache.h> + +static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info) +{ + return ((info->address <= dar) && (dar - info->address < info->len)); +} + +static bool ea_user_range_overlaps(unsigned long ea, int size, + struct arch_hw_breakpoint *info) +{ + return ((ea < info->address + info->len) && + (ea + size > info->address)); +} + +static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info) +{ + unsigned long hw_start_addr, hw_end_addr; + + hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE); + hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE); + + return ((hw_start_addr <= dar) && (hw_end_addr > dar)); +} + +static bool ea_hw_range_overlaps(unsigned long ea, int size, + struct arch_hw_breakpoint *info) +{ + unsigned long hw_start_addr, hw_end_addr; + unsigned long align_size = HW_BREAKPOINT_SIZE; + + /* + * On p10 predecessors, quadword is 
handle differently then + * other instructions. + */ + if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16) + align_size = HW_BREAKPOINT_SIZE_QUADWORD; + + hw_start_addr = ALIGN_DOWN(info->address, align_size); + hw_end_addr = ALIGN(info->address + info->len, align_size); + + return ((ea < hw_end_addr) && (ea + size > hw_start_addr)); +} + +/* + * If hw has multiple DAWR registers, we also need to check all + * dawrx constraint bits to confirm this is _really_ a valid event. + * If type is UNKNOWN, but privilege level matches, consider it as + * a positive match. + */ +static bool check_dawrx_constraints(struct pt_regs *regs, int type, + struct arch_hw_breakpoint *info) +{ + if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ)) + return false; + + /* + * The Cache Management instructions other than dcbz never + * cause a match. i.e. if type is CACHEOP, the instruction + * is dcbz, and dcbz is treated as Store. + */ + if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE)) + return false; + + if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL)) + return false; + + if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER)) + return false; + + return true; +} + +/* + * Return true if the event is valid wrt dawr configuration, + * including extraneous exception. Otherwise return false. + */ +bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr, + unsigned long ea, int type, int size, + struct arch_hw_breakpoint *info) +{ + bool in_user_range = dar_in_user_range(regs->dar, info); + bool dawrx_constraints; + + /* + * 8xx supports only one breakpoint and thus we can + * unconditionally return true. + */ + if (IS_ENABLED(CONFIG_PPC_8xx)) { + if (!in_user_range) + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + return true; + } + + if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) { + if (cpu_has_feature(CPU_FTR_ARCH_31) && + !dar_in_hw_range(regs->dar, info)) + return false; + + return true; + } + + dawrx_constraints = check_dawrx_constraints(regs, type, info); + + if (type == UNKNOWN) { + if (cpu_has_feature(CPU_FTR_ARCH_31) && + !dar_in_hw_range(regs->dar, info)) + return false; + + return dawrx_constraints; + } + + if (ea_user_range_overlaps(ea, size, info)) + return dawrx_constraints; + + if (ea_hw_range_overlaps(ea, size, info)) { + if (dawrx_constraints) { + info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; + return true; + } + } + return false; +} + +static int cache_op_size(void) +{ +#ifdef __powerpc64__ + return ppc64_caches.l1d.block_size; +#else + return L1_CACHE_BYTES; +#endif +} + +void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, + int *type, int *size, unsigned long *ea) +{ + struct instruction_op op; + + if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip)) + return; + + analyse_instr(&op, regs, *instr); + *type = GETTYPE(op.type); + *ea = op.ea; +#ifdef __powerpc64__ + if (!(regs->msr & MSR_64BIT)) + *ea &= 0xffffffffUL; +#endif + + *size = GETSIZE(op.type); + if (*type == CACHEOP) { + *size = cache_op_size(); + *ea &= ~(*size - 1); + } else if (*type == LOAD_VMX || *type == STORE_VMX) { + *ea &= ~(*size - 1); + } +} diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 422e31d2f5a2..ae0e2632393d 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -41,14 +41,6 @@ static int __init powersave_off(char *arg) } __setup("powersave=off", powersave_off); -#ifdef CONFIG_HOTPLUG_CPU -void arch_cpu_idle_dead(void) -{ - sched_preempt_enable_no_resched(); - cpu_die(); 
-} -#endif - void arch_cpu_idle(void) { ppc64_runlatch_off(); diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 9704f3f76e63..5b69a6a72a0e 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -172,7 +172,6 @@ static unsigned long iommu_range_alloc(struct device *dev, int largealloc = npages > 15; int pass = 0; unsigned long align_mask; - unsigned long boundary_size; unsigned long flags; unsigned int pool_nr; struct iommu_pool *pool; @@ -236,15 +235,9 @@ again: } } - if (dev) - boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - 1 << tbl->it_page_shift); - else - boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); - /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */ - n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, - boundary_size >> tbl->it_page_shift, align_mask); + dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift), + align_mask); if (n == -1) { if (likely(pass == 0)) { /* First try the pool from the start */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index bf21ebd36190..7d0f7682d01d 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -104,7 +104,7 @@ static inline notrace unsigned long get_irq_happened(void) static inline notrace int decrementer_check_overflow(void) { - u64 now = get_tb_or_rtc(); + u64 now = get_tb(); u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); return now >= *next_tb; @@ -113,7 +113,7 @@ static inline notrace int decrementer_check_overflow(void) #ifdef CONFIG_PPC_BOOK3E /* This is called whenever we are re-enabling interrupts - * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if + * and returns either 0 (nothing to do) or 500/900/280 if * there's an EE, DEC or DBELL to generate. * * This is called in two contexts: From arch_local_irq_restore() @@ -181,16 +181,6 @@ notrace unsigned int __check_irq_replay(void) return 0x500; } - /* - * Check if an EPR external interrupt happened this bit is typically - * set if we need to handle another "edge" interrupt from within the - * MPIC "EPR" handler. - */ - if (happened & PACA_IRQ_EE_EDGE) { - local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; - return 0x500; - } - if (happened & PACA_IRQ_DBELL) { local_paca->irq_happened &= ~PACA_IRQ_DBELL; return 0x280; @@ -201,6 +191,25 @@ notrace unsigned int __check_irq_replay(void) return 0; } + +/* + * This is specifically called by assembly code to re-enable interrupts + * if they are currently disabled. This is typically called before + * schedule() or do_signal() when returning to userspace. We do it + * in C to avoid the burden of dealing with lockdep etc... + * + * NOTE: This is called with interrupts hard disabled but not marked + * as such in paca->irq_happened, so we need to resync this. + */ +void notrace restore_interrupts(void) +{ + if (irqs_disabled()) { + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + local_irq_enable(); + } else + __hard_irq_enable(); +} + #endif /* CONFIG_PPC_BOOK3E */ void replay_soft_interrupts(void) @@ -214,7 +223,7 @@ void replay_soft_interrupts(void) struct pt_regs regs; ppc_save_regs(®s); - regs.softe = IRQS_ALL_DISABLED; + regs.softe = IRQS_ENABLED; again: if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) @@ -270,19 +279,6 @@ again: hard_irq_disable(); } - /* - * Check if an EPR external interrupt happened this bit is typically - * set if we need to handle another "edge" interrupt from within the - * MPIC "EPR" handler. 
- */ - if (IS_ENABLED(CONFIG_PPC_BOOK3E) && (happened & PACA_IRQ_EE_EDGE)) { - local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; - regs.trap = 0x500; - do_IRQ(®s); - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); - } - if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) { local_paca->irq_happened &= ~PACA_IRQ_DBELL; if (IS_ENABLED(CONFIG_PPC_BOOK3E)) @@ -368,6 +364,12 @@ notrace void arch_local_irq_restore(unsigned long mask) } } + /* + * Disable preempt here, so that the below preempt_enable will + * perform resched if required (a replayed interrupt may set + * need_resched). + */ + preempt_disable(); irq_soft_mask_set(IRQS_ALL_DISABLED); trace_hardirqs_off(); @@ -377,28 +379,11 @@ notrace void arch_local_irq_restore(unsigned long mask) trace_hardirqs_on(); irq_soft_mask_set(IRQS_ENABLED); __hard_irq_enable(); + preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_restore); /* - * This is specifically called by assembly code to re-enable interrupts - * if they are currently disabled. This is typically called before - * schedule() or do_signal() when returning to userspace. We do it - * in C to avoid the burden of dealing with lockdep etc... - * - * NOTE: This is called with interrupts hard disabled but not marked - * as such in paca->irq_happened, so we need to resync this. - */ -void notrace restore_interrupts(void) -{ - if (irqs_disabled()) { - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; - local_irq_enable(); - } else - __hard_irq_enable(); -} - -/* * This is a helper to use when about to go into idle low-power * when the latter has the side effect of re-enabling interrupts * (such as calling H_CEDE under pHyp). diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 6ab9b4d037c3..01ab2163659e 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -218,6 +218,7 @@ bool arch_kprobe_on_func_entry(unsigned long offset) void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->link; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->link = (unsigned long)kretprobe_trampoline; @@ -396,50 +397,9 @@ asm(".global kretprobe_trampoline\n" */ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. 
- * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); + unsigned long orig_ret_address; + orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); /* * We get here through one of two paths: * 1. by taking a trap -> kprobe_handler() -> here @@ -458,13 +418,6 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) regs->nip = orig_ret_address - 4; regs->link = orig_ret_address; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return 0; } NOKPROBE_SYMBOL(trampoline_probe_handler); diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 5f07aa5e9851..225511d73bef 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -256,7 +256,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) sync /* Restore MSR (restores EE and DR bits to original state) */ - SYNC mtmsr r7 isync @@ -377,7 +376,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) 1: bdnz 1b /* Restore MSR (restores EE and DR bits to original state) */ -4: SYNC +4: mtmsr r7 isync blr diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index b24f866fef81..717e658b90fd 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -215,19 +215,6 @@ _GLOBAL(low_choose_7447a_dfs) #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */ -/* - * complement mask on the msr then "or" some values on. - * _nmask_and_or_msr(nmask, value_to_or) - */ -_GLOBAL(_nmask_and_or_msr) - mfmsr r0 /* Get current msr */ - andc r0,r0,r3 /* And off the bits set in r3 (first parm) */ - or r0,r0,r4 /* Or on the bits in r4 (second parm) */ - SYNC /* Some chip revs have problems here... */ - mtmsr r0 /* Update machine state */ - isync - blr /* Done */ - #ifdef CONFIG_40x /* @@ -268,41 +255,6 @@ _ASM_NOKPROBE_SYMBOL(real_writeb) #endif /* CONFIG_40x */ - -/* - * Flush instruction cache. - * This is a no-op on the 601. - */ -#ifndef CONFIG_PPC_8xx -_GLOBAL(flush_instruction_cache) -#if defined(CONFIG_4xx) - lis r3, KERNELBASE@h - iccci 0,r3 -#elif defined(CONFIG_FSL_BOOKE) -#ifdef CONFIG_E200 - mfspr r3,SPRN_L1CSR0 - ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC - /* msync; isync recommended here */ - mtspr SPRN_L1CSR0,r3 - isync - blr -#endif - mfspr r3,SPRN_L1CSR1 - ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR - mtspr SPRN_L1CSR1,r3 -#elif defined(CONFIG_PPC_BOOK3S_601) - blr /* for 601, do nothing */ -#else - /* 603/604 processor - use invalidate-all bit in HID0 */ - mfspr r3,SPRN_HID0 - ori r3,r3,HID0_ICFI - mtspr SPRN_HID0,r3 -#endif /* CONFIG_4xx */ - isync - blr -EXPORT_SYMBOL(flush_instruction_cache) -#endif /* CONFIG_PPC_8xx */ - /* * Copy a whole page. 
We use the dcbz instruction on the destination * to reduce memory traffic (it eliminates the unnecessary reads of diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 7bb46ad98207..070465825c21 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -365,7 +365,6 @@ _GLOBAL(kexec_smp_wait) li r4,KEXEC_STATE_REAL_MODE stb r4,PACAKEXECSTATE(r13) - SYNC b kexec_wait diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 73a57043ee66..d421a2c7f822 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -124,10 +124,8 @@ unsigned long notrace msr_check_and_set(unsigned long bits) newmsr = oldmsr | bits; -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) newmsr |= MSR_VSX; -#endif if (oldmsr != newmsr) mtmsr_isync(newmsr); @@ -144,10 +142,8 @@ void notrace __msr_check_and_clear(unsigned long bits) newmsr = oldmsr & ~bits; -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) newmsr &= ~MSR_VSX; -#endif if (oldmsr != newmsr) mtmsr_isync(newmsr); @@ -162,10 +158,8 @@ static void __giveup_fpu(struct task_struct *tsk) save_fpu(tsk); msr = tsk->thread.regs->msr; msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr &= ~MSR_VSX; -#endif tsk->thread.regs->msr = msr; } @@ -235,6 +229,8 @@ void enable_kernel_fp(void) } } EXPORT_SYMBOL(enable_kernel_fp); +#else +static inline void __giveup_fpu(struct task_struct *tsk) { } #endif /* CONFIG_PPC_FPU */ #ifdef CONFIG_ALTIVEC @@ -245,10 +241,8 @@ static void __giveup_altivec(struct task_struct *tsk) save_altivec(tsk); msr = tsk->thread.regs->msr; msr &= ~MSR_VEC; -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr &= ~MSR_VSX; -#endif tsk->thread.regs->msr = msr; } @@ -414,21 +408,14 @@ static unsigned long msr_all_available; static int __init init_msr_all_available(void) { -#ifdef CONFIG_PPC_FPU - msr_all_available |= MSR_FP; -#endif -#ifdef CONFIG_ALTIVEC + if (IS_ENABLED(CONFIG_PPC_FPU)) + msr_all_available |= MSR_FP; if (cpu_has_feature(CPU_FTR_ALTIVEC)) msr_all_available |= MSR_VEC; -#endif -#ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr_all_available |= MSR_VSX; -#endif -#ifdef CONFIG_SPE if (cpu_has_feature(CPU_FTR_SPE)) msr_all_available |= MSR_SPE; -#endif return 0; } @@ -452,18 +439,12 @@ void giveup_all(struct task_struct *tsk) WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); -#ifdef CONFIG_PPC_FPU if (usermsr & MSR_FP) __giveup_fpu(tsk); -#endif -#ifdef CONFIG_ALTIVEC if (usermsr & MSR_VEC) __giveup_altivec(tsk); -#endif -#ifdef CONFIG_SPE if (usermsr & MSR_SPE) __giveup_spe(tsk); -#endif msr_check_and_clear(msr_all_available); } @@ -509,19 +490,18 @@ static bool should_restore_altivec(void) { return false; } static void do_restore_altivec(void) { } #endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_VSX static bool should_restore_vsx(void) { if (cpu_has_feature(CPU_FTR_VSX)) return true; return false; } +#ifdef CONFIG_VSX static void do_restore_vsx(void) { current->thread.used_vsr = 1; } #else -static bool should_restore_vsx(void) { return false; } static void do_restore_vsx(void) { } #endif /* CONFIG_VSX */ @@ -581,7 +561,7 @@ void notrace restore_math(struct pt_regs *regs) regs->msr |= new_msr | fpexc_mode; } } -#endif +#endif /* CONFIG_PPC_BOOK3S_64 */ static void save_all(struct task_struct *tsk) { @@ -642,6 +622,44 @@ void do_send_trap(struct pt_regs *regs, unsigned long address, (void __user *)address); } #else /* 
!CONFIG_PPC_ADV_DEBUG_REGS */ + +static void do_break_handler(struct pt_regs *regs) +{ + struct arch_hw_breakpoint null_brk = {0}; + struct arch_hw_breakpoint *info; + struct ppc_inst instr = ppc_inst(0); + int type = 0; + int size = 0; + unsigned long ea; + int i; + + /* + * If underneath hw supports only one watchpoint, we know it + * caused exception. 8xx also falls into this category. + */ + if (nr_wp_slots() == 1) { + __set_breakpoint(0, &null_brk); + current->thread.hw_brk[0] = null_brk; + current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED; + return; + } + + /* Otherwise findout which DAWR caused exception and disable it. */ + wp_get_instr_detail(regs, &instr, &type, &size, &ea); + + for (i = 0; i < nr_wp_slots(); i++) { + info = ¤t->thread.hw_brk[i]; + if (!info->address) + continue; + + if (wp_check_constraints(regs, instr, ea, type, size, info)) { + __set_breakpoint(i, &null_brk); + current->thread.hw_brk[i] = null_brk; + current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED; + } + } +} + void do_break (struct pt_regs *regs, unsigned long address, unsigned long error_code) { @@ -653,6 +671,16 @@ void do_break (struct pt_regs *regs, unsigned long address, if (debugger_break_match(regs)) return; + /* + * We reach here only when watchpoint exception is generated by ptrace + * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set, + * watchpoint is already handled by hw_breakpoint_handler() so we don't + * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set, + * we need to manually handle the watchpoint here. + */ + if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT)) + do_break_handler(regs); + /* Deliver the signal to userspace */ force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address); } @@ -783,9 +811,8 @@ static void switch_hw_breakpoint(struct task_struct *new) static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) { mtspr(SPRN_DAC1, dabr); -#ifdef CONFIG_PPC_47x - isync(); -#endif + if (IS_ENABLED(CONFIG_PPC_47x)) + isync(); return 0; } #elif defined(CONFIG_PPC_BOOK3S) @@ -1256,15 +1283,17 @@ struct task_struct *__switch_to(struct task_struct *prev, restore_math(current->thread.regs); /* - * The copy-paste buffer can only store into foreign real - * addresses, so unprivileged processes can not see the - * data or use it in any way unless they have foreign real - * mappings. If the new process has the foreign real address - * mappings, we must issue a cp_abort to clear any state and - * prevent snooping, corruption or a covert channel. + * On POWER9 the copy-paste buffer can only paste into + * foreign real addresses, so unprivileged processes can not + * see the data or use it in any way unless they have + * foreign real mappings. If the new process has the foreign + * real address mappings, we must issue a cp_abort to clear + * any state and prevent snooping, corruption or a covert + * channel. ISA v3.1 supports paste into local memory. 
*/ if (current->mm && - atomic_read(¤t->mm->context.vas_windows)) + (cpu_has_feature(CPU_FTR_ARCH_31) || + atomic_read(¤t->mm->context.vas_windows))) asm volatile(PPC_CP_ABORT); } #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -1453,12 +1482,13 @@ void show_regs(struct pt_regs * regs) trap = TRAP(regs); if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR)) pr_cont("CFAR: "REG" ", regs->orig_gpr3); - if (trap == 0x200 || trap == 0x300 || trap == 0x600) -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); -#else - pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); -#endif + if (trap == 0x200 || trap == 0x300 || trap == 0x600) { + if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE)) + pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); + else + pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); + } + #ifdef CONFIG_PPC64 pr_cont("IRQMASK: %lx ", regs->softe); #endif @@ -1475,14 +1505,14 @@ void show_regs(struct pt_regs * regs) break; } pr_cont("\n"); -#ifdef CONFIG_KALLSYMS /* * Lookup NIP late so we have the best change of getting the * above info out without failing */ - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); -#endif + if (IS_ENABLED(CONFIG_KALLSYMS)) { + printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); + printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); + } show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT); if (!user_mode(regs)) show_instructions(regs); @@ -1731,11 +1761,9 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) #ifdef CONFIG_PPC64 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ -#ifdef CONFIG_PPC_BOOK3S_64 - if (!radix_enabled()) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled()) preload_new_slb_context(start, sp); #endif -#endif /* * If we exec out of a kernel thread then thread.regs will not be @@ -1866,7 +1894,6 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val) * fpexc_mode. fpexc_mode is also used for setting FP exception * mode (asyn, precise, disabled) for 'Classic' FP. */ if (val & PR_FP_EXC_SW_ENABLE) { -#ifdef CONFIG_SPE if (cpu_has_feature(CPU_FTR_SPE)) { /* * When the sticky exception bits are set @@ -1880,16 +1907,15 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val) * anyway to restore the prctl settings from * the saved environment. */ +#ifdef CONFIG_SPE tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); tsk->thread.fpexc_mode = val & (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); +#endif return 0; } else { return -EINVAL; } -#else - return -EINVAL; -#endif } /* on a CONFIG_SPE this does not hurt us. The bits that @@ -1908,10 +1934,9 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val) int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) { - unsigned int val; + unsigned int val = 0; - if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) -#ifdef CONFIG_SPE + if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) { if (cpu_has_feature(CPU_FTR_SPE)) { /* * When the sticky exception bits are set @@ -1925,15 +1950,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) * anyway to restore the prctl settings from * the saved environment. 
*/ +#ifdef CONFIG_SPE tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); val = tsk->thread.fpexc_mode; +#endif } else return -EINVAL; -#else - return -EINVAL; -#endif - else + } else { val = __unpack_fe01(tsk->thread.fpexc_mode); + } return put_user(val, (unsigned int __user *) adr); } @@ -2102,10 +2127,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack, unsigned long sp, ip, lr, newsp; int count = 0; int firstframe = 1; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER unsigned long ret_addr; int ftrace_idx = 0; -#endif if (tsk == NULL) tsk = current; @@ -2133,12 +2156,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack, if (!firstframe || ip != lr) { printk("%s["REG"] ["REG"] %pS", loglvl, sp, ip, (void *)ip); -#ifdef CONFIG_FUNCTION_GRAPH_TRACER ret_addr = ftrace_graph_ret_addr(current, &ftrace_idx, ip, stack); if (ret_addr != ip) pr_cont(" (%pS)", (void *)ret_addr); -#endif if (firstframe) pr_cont(" (unreliable)"); pr_cont("\n"); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d8a2fb87ba0c..c1545f22c077 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -776,6 +776,11 @@ void __init early_init_devtree(void *params) limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); memblock_enforce_memory_limit(limit); +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES) + if (!early_radix_enabled()) + memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS)); +#endif + memblock_allow_resize(); memblock_dump_all(); diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ae7ec9903191..5090a5ab54e5 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2422,10 +2422,19 @@ static void __init prom_check_displays(void) u32 width, height, pitch, addr; prom_printf("Setting btext !\n"); - prom_getprop(node, "width", &width, 4); - prom_getprop(node, "height", &height, 4); - prom_getprop(node, "linebytes", &pitch, 4); - prom_getprop(node, "address", &addr, 4); + + if (prom_getprop(node, "width", &width, 4) == PROM_ERROR) + return; + + if (prom_getprop(node, "height", &height, 4) == PROM_ERROR) + return; + + if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR) + return; + + if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR) + return; + prom_printf("W=%d H=%d LB=%d addr=0x%x\n", width, height, pitch, addr); btext_setup_display(width, height, 8, pitch, addr); diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c index 697c7e4b5877..aa36fcad36cd 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c +++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c @@ -57,6 +57,8 @@ void ppc_gethwdinfo(struct ppc_debug_info *dbginfo) } else { dbginfo->features = 0; } + if (cpu_has_feature(CPU_FTR_ARCH_31)) + dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_ARCH_31; } int ptrace_get_debugreg(struct task_struct *child, unsigned long addr, @@ -217,8 +219,9 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf return -EIO; brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE); - brk.type = HW_BRK_TYPE_TRANSLATE; + brk.type = HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_PRIV_ALL; brk.len = DABR_MAX_LEN; + brk.hw_len = DABR_MAX_LEN; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) brk.type |= HW_BRK_TYPE_READ; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) @@ -286,11 +289,13 @@ long ppc_del_hwdebug(struct task_struct *child, long data) } return ret; #else /* 
CONFIG_HAVE_HW_BREAKPOINT */ - if (child->thread.hw_brk[data - 1].address == 0) + if (!(child->thread.hw_brk[data - 1].flags & HW_BRK_FLAG_DISABLED) && + child->thread.hw_brk[data - 1].address == 0) return -ENOENT; child->thread.hw_brk[data - 1].address = 0; child->thread.hw_brk[data - 1].type = 0; + child->thread.hw_brk[data - 1].flags = 0; #endif /* CONFIG_HAVE_HW_BREAKPOINT */ return 0; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 806d554ce357..954f41676f69 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -992,6 +992,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, return NULL; } +#ifdef CONFIG_PPC_RTAS_FILTER + +/* + * The sys_rtas syscall, as originally designed, allows root to pass + * arbitrary physical addresses to RTAS calls. A number of RTAS calls + * can be abused to write to arbitrary memory and do other things that + * are potentially harmful to system integrity, and thus should only + * be used inside the kernel and not exposed to userspace. + * + * All known legitimate users of the sys_rtas syscall will only ever + * pass addresses that fall within the RMO buffer, and use a known + * subset of RTAS calls. + * + * Accordingly, we filter RTAS requests to check that the call is + * permitted, and that provided pointers fall within the RMO buffer. + * The rtas_filters list contains an entry for each permitted call, + * with the indexes of the parameters which are expected to contain + * addresses and sizes of buffers allocated inside the RMO buffer. + */ +struct rtas_filter { + const char *name; + int token; + /* Indexes into the args buffer, -1 if not used */ + int buf_idx1; + int size_idx1; + int buf_idx2; + int size_idx2; + + int fixed_size; +}; + +static struct rtas_filter rtas_filters[] __ro_after_init = { + { "ibm,activate-firmware", -1, -1, -1, -1, -1 }, + { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */ + { "display-character", -1, -1, -1, -1, -1 }, + { "ibm,display-message", -1, 0, -1, -1, -1 }, + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 }, + { "ibm,close-errinjct", -1, -1, -1, -1, -1 }, + { "ibm,open-errinct", -1, -1, -1, -1, -1 }, + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 }, + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 }, + { "ibm,get-indices", -1, 2, 3, -1, -1 }, + { "get-power-level", -1, -1, -1, -1, -1 }, + { "get-sensor-state", -1, -1, -1, -1, -1 }, + { "ibm,get-system-parameter", -1, 1, 2, -1, -1 }, + { "get-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,get-vpd", -1, 0, -1, 1, 2 }, + { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, + { "ibm,platform-dump", -1, 4, 5, -1, -1 }, + { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, + { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, + { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, + { "ibm,set-eeh-option", -1, -1, -1, -1, -1 }, + { "set-indicator", -1, -1, -1, -1, -1 }, + { "set-power-level", -1, -1, -1, -1, -1 }, + { "set-time-for-power-on", -1, -1, -1, -1, -1 }, + { "ibm,set-system-parameter", -1, 1, -1, -1, -1 }, + { "set-time-of-day", -1, -1, -1, -1, -1 }, + { "ibm,suspend-me", -1, -1, -1, -1, -1 }, + { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 }, + { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 }, + { "ibm,physical-attestation", -1, 0, 1, -1, -1 }, +}; + +static bool in_rmo_buf(u32 base, u32 end) +{ + return base >= rtas_rmo_buf && + base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) && + base <= end && + end >= rtas_rmo_buf && + end < (rtas_rmo_buf + RTAS_RMOBUF_MAX); +} + +static bool 
block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + struct rtas_filter *f = &rtas_filters[i]; + u32 base, size, end; + + if (token != f->token) + continue; + + if (f->buf_idx1 != -1) { + base = be32_to_cpu(args->args[f->buf_idx1]); + if (f->size_idx1 != -1) + size = be32_to_cpu(args->args[f->size_idx1]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + + end = base + size - 1; + if (!in_rmo_buf(base, end)) + goto err; + } + + if (f->buf_idx2 != -1) { + base = be32_to_cpu(args->args[f->buf_idx2]); + if (f->size_idx2 != -1) + size = be32_to_cpu(args->args[f->size_idx2]); + else if (f->fixed_size) + size = f->fixed_size; + else + size = 1; + end = base + size - 1; + + /* + * Special case for ibm,configure-connector where the + * address can be 0 + */ + if (!strcmp(f->name, "ibm,configure-connector") && + base == 0) + return false; + + if (!in_rmo_buf(base, end)) + goto err; + } + + return false; + } + +err: + pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n"); + pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n", + token, nargs, current->comm); + return true; +} + +#else + +static bool block_rtas_call(int token, int nargs, + struct rtas_args *args) +{ + return false; +} + +#endif /* CONFIG_PPC_RTAS_FILTER */ + /* We assume to be passed big endian arguments */ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { @@ -1029,6 +1170,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) args.rets = &args.args[nargs]; memset(args.rets, 0, nret * sizeof(rtas_arg_t)); + if (block_rtas_call(token, nargs, &args)) + return -EINVAL; + /* Need to handle ibm,suspend_me call specially */ if (token == ibm_suspend_me_token) { @@ -1090,6 +1234,9 @@ void __init rtas_initialize(void) unsigned long rtas_region = RTAS_INSTANTIATE_MAX; u32 base, size, entry; int no_base, no_size, no_entry; +#ifdef CONFIG_PPC_RTAS_FILTER + int i; +#endif /* Get RTAS dev node and fill up our "rtas" structure with infos * about it. 
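As an aside, the bounds test that in_rmo_buf() and block_rtas_call() implement above can be sketched in isolation; the constants below are made-up stand-ins for rtas_rmo_buf and RTAS_RMOBUF_MAX, since the real RMO buffer placement is only known to the kernel at boot:

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical values standing in for rtas_rmo_buf / RTAS_RMOBUF_MAX. */
	static const uint32_t rmo_base = 0x0f000000;
	static const uint32_t rmo_size = 64 * 1024;

	/* Same shape as in_rmo_buf(): both the first and the last byte of the
	 * caller-supplied buffer must fall inside the RMO window, and the
	 * (base, size) pair must not wrap around. */
	static bool arg_in_rmo(uint32_t base, uint32_t size)
	{
		uint32_t end = base + size - 1;

		return base >= rmo_base && end >= base &&
		       end < rmo_base + rmo_size;
	}

Any filtered call whose buffer argument fails this test is rejected with -EINVAL before RTAS is entered, which is the behaviour the sys_rtas() hook above adds.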
@@ -1129,6 +1276,12 @@ void __init rtas_initialize(void) #ifdef CONFIG_RTAS_ERROR_LOGGING rtas_last_error_token = rtas_token("rtas-last-error"); #endif + +#ifdef CONFIG_PPC_RTAS_FILTER + for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) { + rtas_filters[i].token = rtas_token(rtas_filters[i].name); + } +#endif } int __init early_init_dt_scan_rtas(unsigned long node, diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index c9876aab3142..e4e1a94ccf6a 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -430,30 +430,44 @@ device_initcall(stf_barrier_debugfs_init); static void update_branch_cache_flush(void) { + u32 *site; + #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + site = &patch__call_kvm_flush_link_stack; // This controls the branch from guest_exit_cont to kvm_flush_link_stack if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { - patch_instruction_site(&patch__call_kvm_flush_link_stack, - ppc_inst(PPC_INST_NOP)); + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); } else { // Could use HW flush, but that could also flush count cache - patch_branch_site(&patch__call_kvm_flush_link_stack, - (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); + patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); } #endif + // Patch out the bcctr first, then nop the rest + site = &patch__call_flush_branch_caches3; + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); + site = &patch__call_flush_branch_caches2; + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); + site = &patch__call_flush_branch_caches1; + patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); + // This controls the branch from _switch to flush_branch_caches if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE && link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { - patch_instruction_site(&patch__call_flush_branch_caches, - ppc_inst(PPC_INST_NOP)); + // Nothing to be done + } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW && link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) { - patch_instruction_site(&patch__call_flush_branch_caches, - ppc_inst(PPC_INST_BCCTR_FLUSH)); + // Patch in the bcctr last + site = &patch__call_flush_branch_caches1; + patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff + site = &patch__call_flush_branch_caches2; + patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9 + site = &patch__call_flush_branch_caches3; + patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH)); + } else { - patch_branch_site(&patch__call_flush_branch_caches, - (u64)&flush_branch_caches, BRANCH_SET_LINK); + patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK); // If we just need to flush the link stack, early return if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) { diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 1823706ae076..057d6b8e9bb0 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -223,6 +223,6 @@ __init void initialize_cache_info(void) dcache_bsize = cur_cpu_spec->dcache_bsize; icache_bsize = cur_cpu_spec->icache_bsize; ucache_bsize = 0; - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601) || IS_ENABLED(CONFIG_E200)) + if (IS_ENABLED(CONFIG_E200)) ucache_bsize = icache_bsize = dcache_bsize; } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6be430107c6f..bb9cab3641d7 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -66,6 +66,7 @@ #include <asm/feature-fixups.h> #include <asm/kup.h> 
#include <asm/early_ioremap.h> +#include <asm/pgalloc.h> #include "setup.h" @@ -756,17 +757,46 @@ void __init emergency_stack_init(void) } #ifdef CONFIG_SMP -#define PCPU_DYN_SIZE () - -static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) +/** + * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu + * @cpu: cpu to allocate for + * @size: size allocation in bytes + * @align: alignment + * + * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper + * does the right thing for NUMA regardless of the current + * configuration. + * + * RETURNS: + * Pointer to the allocated area on success, NULL on failure. + */ +static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, + size_t align) { - return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_ACCESSIBLE, - early_cpu_to_node(cpu)); + const unsigned long goal = __pa(MAX_DMA_ADDRESS); +#ifdef CONFIG_NEED_MULTIPLE_NODES + int node = early_cpu_to_node(cpu); + void *ptr; + if (!node_online(node) || !NODE_DATA(node)) { + ptr = memblock_alloc_from(size, align, goal); + pr_info("cpu %d has no node %d or node-local memory\n", + cpu, node); + pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", + cpu, size, __pa(ptr)); + } else { + ptr = memblock_alloc_try_nid(size, align, goal, + MEMBLOCK_ALLOC_ACCESSIBLE, node); + pr_debug("per cpu data for cpu%d %lu bytes on node%d at " + "%016lx\n", cpu, size, node, __pa(ptr)); + } + return ptr; +#else + return memblock_alloc_from(size, align, goal); +#endif } -static void __init pcpu_fc_free(void *ptr, size_t size) +static void __init pcpu_free_bootmem(void *ptr, size_t size) { memblock_free(__pa(ptr), size); } @@ -782,13 +812,58 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to) unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); +static void __init pcpu_populate_pte(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + pud_t *new; + + new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); + if (!new) + goto err_alloc; + p4d_populate(&init_mm, p4d, new); + } + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) { + pmd_t *new; + + new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); + if (!new) + goto err_alloc; + pud_populate(&init_mm, pud, new); + } + + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) { + pte_t *new; + + new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); + if (!new) + goto err_alloc; + pmd_populate_kernel(&init_mm, pmd, new); + } + + return; + +err_alloc: + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); +} + + void __init setup_per_cpu_areas(void) { const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; size_t atom_size; unsigned long delta; unsigned int cpu; - int rc; + int rc = -EINVAL; /* * Linear mapping is one of 4K, 1M and 16M. 
For 4K, no need @@ -800,8 +875,18 @@ void __init setup_per_cpu_areas(void) else atom_size = 1 << 20; - rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance, - pcpu_fc_alloc, pcpu_fc_free); + if (pcpu_chosen_fc != PCPU_FC_PAGE) { + rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance, + pcpu_alloc_bootmem, pcpu_free_bootmem); + if (rc) + pr_warn("PERCPU: %s allocator failed (%d), " + "falling back to page size\n", + pcpu_fc_names[pcpu_chosen_fc], rc); + } + + if (rc < 0) + rc = pcpu_page_first_chunk(0, pcpu_alloc_bootmem, pcpu_free_bootmem, + pcpu_populate_pte); if (rc < 0) panic("cannot initialize percpu area (err=%d)", rc); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 8261999c7d52..0dc1b8591cc8 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -75,17 +75,28 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 }; struct task_struct *secondary_current; bool has_big_cores; +bool coregroup_enabled; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map); DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); +DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); EXPORT_SYMBOL_GPL(has_big_cores); +enum { +#ifdef CONFIG_SCHED_SMT + smt_idx, +#endif + cache_idx, + mc_idx, + die_idx, +}; + #define MAX_THREAD_LIST_SIZE 8 #define THREAD_GROUP_SHARE_L1 1 struct thread_groups { @@ -660,6 +671,28 @@ static void set_cpus_unrelated(int i, int j, #endif /* + * Extends set_cpus_related. Instead of setting one CPU at a time in + * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask. + */ +static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int), + struct cpumask *(*dstmask)(int)) +{ + struct cpumask *mask; + int k; + + mask = srcmask(j); + for_each_cpu(k, srcmask(i)) + cpumask_or(dstmask(k), dstmask(k), mask); + + if (i == j) + return; + + mask = srcmask(i); + for_each_cpu(k, srcmask(j)) + cpumask_or(dstmask(k), dstmask(k), mask); +} + +/* * parse_thread_groups: Parses the "ibm,thread-groups" device tree * property for the CPU device node @dn and stores * the parsed output in the thread_groups @@ -789,10 +822,6 @@ static int init_cpu_l1_cache_map(int cpu) if (err) goto out; - zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu), - GFP_KERNEL, - cpu_to_node(cpu)); - cpu_group_start = get_cpu_thread_group_start(cpu, &tg); if (unlikely(cpu_group_start == -1)) { @@ -801,6 +830,9 @@ static int init_cpu_l1_cache_map(int cpu) goto out; } + zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + for (i = first_thread; i < first_thread + threads_per_core; i++) { int i_group_start = get_cpu_thread_group_start(i, &tg); @@ -819,6 +851,74 @@ out: return err; } +static bool shared_caches; + +#ifdef CONFIG_SCHED_SMT +/* cpumask of CPUs with asymmetric SMT dependency */ +static int powerpc_smt_flags(void) +{ + int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; + + if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { + printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); + flags |= SD_ASYM_PACKING; + } + return flags; +} +#endif + +/* + * P9 has a slightly odd architecture where pairs of cores share an L2 cache. + * This topology makes it *much* cheaper to migrate tasks between adjacent cores + * since the migrated task remains cache hot. 
We want to take advantage of this + * at the scheduler level so an extra topology level is required. + */ +static int powerpc_shared_cache_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} + +/* + * We can't just pass cpu_l2_cache_mask() directly because it + * returns a non-const pointer and the compiler barfs on that. + */ +static const struct cpumask *shared_cache_mask(int cpu) +{ + return per_cpu(cpu_l2_cache_map, cpu); +} + +#ifdef CONFIG_SCHED_SMT +static const struct cpumask *smallcore_smt_mask(int cpu) +{ + return cpu_smallcore_mask(cpu); +} +#endif + +static struct cpumask *cpu_coregroup_mask(int cpu) +{ + return per_cpu(cpu_coregroup_map, cpu); +} + +static bool has_coregroup_support(void) +{ + return coregroup_enabled; +} + +static const struct cpumask *cpu_mc_mask(int cpu) +{ + return cpu_coregroup_mask(cpu); +} + +static struct sched_domain_topology_level powerpc_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, +#endif + { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) }, + { cpu_mc_mask, SD_INIT_NAME(MC) }, + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + static int init_big_cores(void) { int cpu; @@ -861,6 +961,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus) GFP_KERNEL, cpu_to_node(cpu)); zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, cpu_to_node(cpu)); + if (has_coregroup_support()) + zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + +#ifdef CONFIG_NEED_MULTIPLE_NODES /* * numa_node_id() works after this. */ @@ -869,12 +974,21 @@ void __init smp_prepare_cpus(unsigned int max_cpus) set_cpu_numa_mem(cpu, local_memory_node(numa_cpu_lookup_table[cpu])); } +#endif + /* + * cpu_core_map exists only because it has long been exported. + * It now just holds a snapshot of cpu_cpu_mask. + */ + cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu)); } /* Init the cpumasks so the boot CPU is related to itself */ cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid)); - cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); + + if (has_coregroup_support()) + cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid)); init_big_cores(); if (has_big_cores) { @@ -1126,30 +1240,61 @@ static struct device_node *cpu_to_l2cache(int cpu) return cache; } -static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int)) +static bool update_mask_by_l2(int cpu) { + struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; struct device_node *l2_cache, *np; + cpumask_var_t mask; int i; l2_cache = cpu_to_l2cache(cpu); - if (!l2_cache) + if (!l2_cache) { + struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; + + /* + * If no l2cache for this CPU, assume all siblings share + * the cache with this CPU. 
+ */ + if (has_big_cores) + sibling_mask = cpu_smallcore_mask; + + for_each_cpu(i, sibling_mask(cpu)) + set_cpus_related(cpu, i, cpu_l2_cache_mask); + return false; + } + + alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu)); + cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu)); - for_each_cpu(i, cpu_online_mask) { + if (has_big_cores) + submask_fn = cpu_smallcore_mask; + + /* Update l2-cache mask with all the CPUs that are part of submask */ + or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); + + /* Skip all CPUs already part of current CPU l2-cache mask */ + cpumask_andnot(mask, mask, cpu_l2_cache_mask(cpu)); + + for_each_cpu(i, mask) { /* * when updating the marks the current CPU has not been marked * online, but we need to update the cache masks */ np = cpu_to_l2cache(i); - if (!np) - continue; - if (np == l2_cache) - set_cpus_related(cpu, i, mask_fn); + /* Skip all CPUs already part of current CPU l2-cache */ + if (np == l2_cache) { + or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask); + cpumask_andnot(mask, mask, submask_fn(i)); + } else { + cpumask_andnot(mask, mask, cpu_l2_cache_mask(i)); + } of_node_put(np); } of_node_put(l2_cache); + free_cpumask_var(mask); return true; } @@ -1157,59 +1302,75 @@ static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int)) #ifdef CONFIG_HOTPLUG_CPU static void remove_cpu_from_masks(int cpu) { + struct cpumask *(*mask_fn)(int) = cpu_sibling_mask; int i; - /* NB: cpu_core_mask is a superset of the others */ - for_each_cpu(i, cpu_core_mask(cpu)) { - set_cpus_unrelated(cpu, i, cpu_core_mask); + if (shared_caches) + mask_fn = cpu_l2_cache_mask; + + for_each_cpu(i, mask_fn(cpu)) { set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); set_cpus_unrelated(cpu, i, cpu_sibling_mask); if (has_big_cores) set_cpus_unrelated(cpu, i, cpu_smallcore_mask); } + + if (has_coregroup_support()) { + for_each_cpu(i, cpu_coregroup_mask(cpu)) + set_cpus_unrelated(cpu, i, cpu_coregroup_mask); + } } #endif static inline void add_cpu_to_smallcore_masks(int cpu) { - struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu); - int i, first_thread = cpu_first_thread_sibling(cpu); + int i; if (!has_big_cores) return; cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); - for (i = first_thread; i < first_thread + threads_per_core; i++) { - if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map)) + for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) { + if (cpu_online(i)) set_cpus_related(i, cpu, cpu_smallcore_mask); } } -int get_physical_package_id(int cpu) +static void update_coregroup_mask(int cpu) { - int pkg_id = cpu_to_chip_id(cpu); + struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; + cpumask_var_t mask; + int coregroup_id = cpu_to_coregroup_id(cpu); + int i; - /* - * If the platform is PowerNV or Guest on KVM, ibm,chip-id is - * defined. Hence we would return the chip-id as the result of - * get_physical_package_id. 
- */ - if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) && - IS_ENABLED(CONFIG_PPC_SPLPAR)) { - struct device_node *np = of_get_cpu_node(cpu, NULL); - pkg_id = of_node_to_nid(np); - of_node_put(np); - } + alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu)); + cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu)); + + if (shared_caches) + submask_fn = cpu_l2_cache_mask; - return pkg_id; + /* Update coregroup mask with all the CPUs that are part of submask */ + or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask); + + /* Skip all CPUs already part of coregroup mask */ + cpumask_andnot(mask, mask, cpu_coregroup_mask(cpu)); + + for_each_cpu(i, mask) { + /* Skip all CPUs not part of this coregroup */ + if (coregroup_id == cpu_to_coregroup_id(i)) { + or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask); + cpumask_andnot(mask, mask, submask_fn(i)); + } else { + cpumask_andnot(mask, mask, cpu_coregroup_mask(i)); + } + } + free_cpumask_var(mask); } -EXPORT_SYMBOL_GPL(get_physical_package_id); static void add_cpu_to_masks(int cpu) { int first_thread = cpu_first_thread_sibling(cpu); - int pkg_id = get_physical_package_id(cpu); int i; /* @@ -1223,36 +1384,16 @@ static void add_cpu_to_masks(int cpu) set_cpus_related(i, cpu, cpu_sibling_mask); add_cpu_to_smallcore_masks(cpu); - /* - * Copy the thread sibling mask into the cache sibling mask - * and mark any CPUs that share an L2 with this CPU. - */ - for_each_cpu(i, cpu_sibling_mask(cpu)) - set_cpus_related(cpu, i, cpu_l2_cache_mask); - update_mask_by_l2(cpu, cpu_l2_cache_mask); + update_mask_by_l2(cpu); - /* - * Copy the cache sibling mask into core sibling mask and mark - * any CPUs on the same chip as this CPU. - */ - for_each_cpu(i, cpu_l2_cache_mask(cpu)) - set_cpus_related(cpu, i, cpu_core_mask); - - if (pkg_id == -1) - return; - - for_each_cpu(i, cpu_online_mask) - if (get_physical_package_id(i) == pkg_id) - set_cpus_related(cpu, i, cpu_core_mask); + if (has_coregroup_support()) + update_coregroup_mask(cpu); } -static bool shared_caches; - /* Activate a secondary processor. */ void start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); - struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; mmgrab(&init_mm); current->active_mm = &init_mm; @@ -1278,14 +1419,20 @@ void start_secondary(void *unused) /* Update topology CPU masks */ add_cpu_to_masks(cpu); - if (has_big_cores) - sibling_mask = cpu_smallcore_mask; /* * Check for any shared caches. Note that this must be done on a * per-core basis because one core in the pair might be disabled. 
*/ - if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu))) - shared_caches = true; + if (!shared_caches) { + struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; + struct cpumask *mask = cpu_l2_cache_mask(cpu); + + if (has_big_cores) + sibling_mask = cpu_smallcore_mask; + + if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu))) + shared_caches = true; + } set_numa_node(numa_cpu_lookup_table[cpu]); set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); @@ -1311,63 +1458,44 @@ int setup_profiling_timer(unsigned int multiplier) return 0; } -#ifdef CONFIG_SCHED_SMT -/* cpumask of CPUs with asymetric SMT dependancy */ -static int powerpc_smt_flags(void) +static void fixup_topology(void) { - int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; + int i; - if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { - printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); - flags |= SD_ASYM_PACKING; +#ifdef CONFIG_SCHED_SMT + if (has_big_cores) { + pr_info("Big cores detected but using small core scheduling\n"); + powerpc_topology[smt_idx].mask = smallcore_smt_mask; } - return flags; -} #endif -static struct sched_domain_topology_level powerpc_topology[] = { -#ifdef CONFIG_SCHED_SMT - { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, -#endif - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; + if (!has_coregroup_support()) + powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask; -/* - * P9 has a slightly odd architecture where pairs of cores share an L2 cache. - * This topology makes it *much* cheaper to migrate tasks between adjacent cores - * since the migrated task remains cache hot. We want to take advantage of this - * at the scheduler level so an extra topology level is required. - */ -static int powerpc_shared_cache_flags(void) -{ - return SD_SHARE_PKG_RESOURCES; -} + /* + * Try to consolidate topology levels here instead of + * allowing scheduler to degenerate. + * - Dont consolidate if masks are different. + * - Dont consolidate if sd_flags exists and are different. + */ + for (i = 1; i <= die_idx; i++) { + if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask) + continue; -/* - * We can't just pass cpu_l2_cache_mask() directly because - * returns a non-const pointer and the compiler barfs on that. 
- */ -static const struct cpumask *shared_cache_mask(int cpu) -{ - return cpu_l2_cache_mask(cpu); -} + if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags && + powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags) + continue; -#ifdef CONFIG_SCHED_SMT -static const struct cpumask *smallcore_smt_mask(int cpu) -{ - return cpu_smallcore_mask(cpu); -} -#endif + if (!powerpc_topology[i - 1].sd_flags) + powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags; -static struct sched_domain_topology_level power9_topology[] = { -#ifdef CONFIG_SCHED_SMT - { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, + powerpc_topology[i].mask = powerpc_topology[i + 1].mask; + powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags; +#ifdef CONFIG_SCHED_DEBUG + powerpc_topology[i].name = powerpc_topology[i + 1].name; #endif - { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) }, - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; + } +} void __init smp_cpus_done(unsigned int max_cpus) { @@ -1382,24 +1510,8 @@ void __init smp_cpus_done(unsigned int max_cpus) dump_numa_cpu_topology(); -#ifdef CONFIG_SCHED_SMT - if (has_big_cores) { - pr_info("Big cores detected but using small core scheduling\n"); - power9_topology[0].mask = smallcore_smt_mask; - powerpc_topology[0].mask = smallcore_smt_mask; - } -#endif - /* - * If any CPU detects that it's sharing a cache with another CPU then - * use the deeper topology that is aware of this sharing. - */ - if (shared_caches) { - pr_info("Using shared cache scheduler topology\n"); - set_sched_topology(power9_topology); - } else { - pr_info("Using standard scheduler topology\n"); - set_sched_topology(powerpc_topology); - } + fixup_topology(); + set_sched_topology(powerpc_topology); } #ifdef CONFIG_HOTPLUG_CPU @@ -1429,16 +1541,18 @@ void __cpu_die(unsigned int cpu) smp_ops->cpu_die(cpu); } -void cpu_die(void) +void arch_cpu_idle_dead(void) { + sched_preempt_enable_no_resched(); + /* * Disable on the down path. 
This will be re-enabled by * start_secondary() via start_secondary_resume() below */ this_cpu_disable_ftrace(); - if (ppc_md.cpu_die) - ppc_md.cpu_die(); + if (smp_ops->cpu_offline_self) + smp_ops->cpu_offline_self(); /* If we return, we re-enter start_secondary */ start_secondary_resume(); diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index c2d737ff2e7b..1275daec7fec 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -34,7 +34,7 @@ 18 spu oldstat sys_ni_syscall 19 common lseek sys_lseek compat_sys_lseek 20 common getpid sys_getpid -21 nospu mount sys_mount compat_sys_mount +21 nospu mount sys_mount 22 32 umount sys_oldumount 22 64 umount sys_ni_syscall 22 spu umount sys_ni_syscall @@ -193,8 +193,8 @@ 142 common _newselect sys_select compat_sys_select 143 common flock sys_flock 144 common msync sys_msync -145 common readv sys_readv compat_sys_readv -146 common writev sys_writev compat_sys_writev +145 common readv sys_readv +146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync 149 nospu _sysctl sys_ni_syscall @@ -369,7 +369,7 @@ 282 common unshare sys_unshare 283 common splice sys_splice 284 common tee sys_tee -285 common vmsplice sys_vmsplice compat_sys_vmsplice +285 common vmsplice sys_vmsplice 286 common openat sys_openat compat_sys_openat 287 common mkdirat sys_mkdirat 288 common mknodat sys_mknodat @@ -449,8 +449,8 @@ 348 common syncfs sys_syncfs 349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 350 common setns sys_setns -351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +351 nospu process_vm_readv sys_process_vm_readv +352 nospu process_vm_writev sys_process_vm_writev 353 nospu finit_module sys_finit_module 354 nospu kcmp sys_kcmp 355 common sched_setattr sys_sched_setattr @@ -529,3 +529,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 46b4ebc33db7..2e08640bb3b4 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -32,29 +32,27 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); -/* - * SMT snooze delay stuff, 64-bit only for now - */ - #ifdef CONFIG_PPC64 -/* Time in microseconds we delay before sleeping in the idle loop */ -static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; +/* + * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle: + * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in + * 2014: + * + * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean + * up the kernel code." + * + * powerpc-utils stopped using it as of 1.3.8. At some point in the future this + * code should be removed. 
+ */ static ssize_t store_smt_snooze_delay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct cpu *cpu = container_of(dev, struct cpu, dev); - ssize_t ret; - long snooze; - - ret = sscanf(buf, "%ld", &snooze); - if (ret != 1) - return -EINVAL; - - per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; + pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n", + current->comm, current->pid); return count; } @@ -62,9 +60,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev, struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, dev); - - return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); + pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n", + current->comm, current->pid); + return sprintf(buf, "100\n"); } static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, @@ -72,16 +70,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, static int __init setup_smt_snooze_delay(char *str) { - unsigned int cpu; - long snooze; - if (!cpu_has_feature(CPU_FTR_SMT)) return 1; - snooze = simple_strtol(str, NULL, 10); - for_each_possible_cpu(cpu) - per_cpu(smt_snooze_delay, cpu) = snooze; - + pr_warn("smt-snooze-delay command line option has no effect\n"); return 1; } __setup("smt-snooze-delay=", setup_smt_snooze_delay); @@ -225,14 +217,13 @@ static DEVICE_ATTR(dscr_default, 0600, static void sysfs_create_dscr_default(void) { if (cpu_has_feature(CPU_FTR_DSCR)) { - int err = 0; int cpu; dscr_default = spr_default_dscr; for_each_possible_cpu(cpu) paca_ptrs[cpu]->dscr_default = dscr_default; - err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); + device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); } } #endif /* CONFIG_PPC64 */ @@ -1168,6 +1159,7 @@ static int __init topology_init(void) for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); +#ifdef CONFIG_HOTPLUG_CPU /* * For now, we just see if the system supports making * the RTAS calls for CPU hotplug. But, there may be a @@ -1175,8 +1167,9 @@ static int __init topology_init(void) * CPU. For instance, the boot cpu might never be valid * for hotplugging. 
*/ - if (ppc_md.cpu_die) + if (smp_ops->cpu_offline_self) c->hotpluggable = 1; +#endif if (cpu_online(cpu) || c->hotpluggable) { register_cpu(c, cpu); diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index e2ab8a111b69..0b4694b8d248 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -13,13 +13,14 @@ */ #include <linux/errno.h> -#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/init.h> +#include <linux/delay.h> +#include <linux/workqueue.h> #include <asm/io.h> #include <asm/reg.h> @@ -39,9 +40,7 @@ static struct tau_temp unsigned char grew; } tau[NR_CPUS]; -struct timer_list tau_timer; - -#undef DEBUG +static bool tau_int_enable; /* TODO: put these in a /proc interface, with some sanity checks, and maybe * dynamic adjustment to minimize # of interrupts */ @@ -50,72 +49,49 @@ struct timer_list tau_timer; #define step_size 2 /* step size when temp goes out of range */ #define window_expand 1 /* expand the window by this much */ /* configurable values for shrinking the window */ -#define shrink_timer 2*HZ /* period between shrinking the window */ +#define shrink_timer 2000 /* period between shrinking the window */ #define min_window 2 /* minimum window size, degrees C */ static void set_thresholds(unsigned long cpu) { -#ifdef CONFIG_TAU_INT - /* - * setup THRM1, - * threshold, valid bit, enable interrupts, interrupt when below threshold - */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); + u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0; - /* setup THRM2, - * threshold, valid bit, enable interrupts, interrupt when above threshold - */ - mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); -#else - /* same thing but don't enable interrupts */ - mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID); - mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V); -#endif + /* setup THRM1, threshold, valid bit, interrupt when below threshold */ + mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID); + + /* setup THRM2, threshold, valid bit, interrupt when above threshold */ + mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie); } static void TAUupdate(int cpu) { - unsigned thrm; - -#ifdef DEBUG - printk("TAUupdate "); -#endif + u32 thrm; + u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V; /* if both thresholds are crossed, the step_sizes cancel out * and the window winds up getting expanded twice. */ - if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */ - if(thrm & THRM1_TIN){ /* crossed low threshold */ - if (tau[cpu].low >= step_size){ - tau[cpu].low -= step_size; - tau[cpu].high -= (step_size - window_expand); - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("low threshold crossed "); -#endif + thrm = mfspr(SPRN_THRM1); + if ((thrm & bits) == bits) { + mtspr(SPRN_THRM1, 0); + + if (tau[cpu].low >= step_size) { + tau[cpu].low -= step_size; + tau[cpu].high -= (step_size - window_expand); } + tau[cpu].grew = 1; + pr_debug("%s: low threshold crossed\n", __func__); } - if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? 
*/ - if(thrm & THRM1_TIN){ /* crossed high threshold */ - if (tau[cpu].high <= 127-step_size){ - tau[cpu].low += (step_size - window_expand); - tau[cpu].high += step_size; - } - tau[cpu].grew = 1; -#ifdef DEBUG - printk("high threshold crossed "); -#endif + thrm = mfspr(SPRN_THRM2); + if ((thrm & bits) == bits) { + mtspr(SPRN_THRM2, 0); + + if (tau[cpu].high <= 127 - step_size) { + tau[cpu].low += (step_size - window_expand); + tau[cpu].high += step_size; } + tau[cpu].grew = 1; + pr_debug("%s: high threshold crossed\n", __func__); } - -#ifdef DEBUG - printk("grew = %d\n", tau[cpu].grew); -#endif - -#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ - set_thresholds(cpu); -#endif - } #ifdef CONFIG_TAU_INT @@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs) static void tau_timeout(void * info) { int cpu; - unsigned long flags; int size; int shrink; - /* disabling interrupts *should* be okay */ - local_irq_save(flags); cpu = smp_processor_id(); -#ifndef CONFIG_TAU_INT - TAUupdate(cpu); -#endif + if (!tau_int_enable) + TAUupdate(cpu); + + /* Stop thermal sensor comparisons and interrupts */ + mtspr(SPRN_THRM3, 0); size = tau[cpu].high - tau[cpu].low; if (size > min_window && ! tau[cpu].grew) { @@ -173,32 +148,26 @@ static void tau_timeout(void * info) set_thresholds(cpu); - /* - * Do the enable every time, since otherwise a bunch of (relatively) - * complex sleep code needs to be added. One mtspr every time - * tau_timeout is called is probably not a big deal. - * - * Enable thermal sensor and set up sample interval timer - * need 20 us to do the compare.. until a nice 'cpu_speed' function - * call is implemented, just assume a 500 mhz clock. It doesn't really - * matter if we take too long for a compare since it's all interrupt - * driven anyway. - * - * use a extra long time.. (60 us @ 500 mhz) + /* Restart thermal sensor comparisons and interrupts. + * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet" + * recommends that "the maximum value be set in THRM3 under all + * conditions." */ - mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); - - local_irq_restore(flags); + mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E); } -static void tau_timeout_smp(struct timer_list *unused) -{ +static struct workqueue_struct *tau_workq; - /* schedule ourselves to be run again */ - mod_timer(&tau_timer, jiffies + shrink_timer) ; +static void tau_work_func(struct work_struct *work) +{ + msleep(shrink_timer); on_each_cpu(tau_timeout, NULL, 0); + /* schedule ourselves to be run again */ + queue_work(tau_workq, work); } +DECLARE_WORK(tau_work, tau_work_func); + /* * setup the TAU * @@ -231,21 +200,19 @@ static int __init TAU_init(void) return 1; } + tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) && + !strcmp(cur_cpu_spec->platform, "ppc750"); - /* first, set up the window shrinking timer */ - timer_setup(&tau_timer, tau_timeout_smp, 0); - tau_timer.expires = jiffies + shrink_timer; - add_timer(&tau_timer); + tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0); + if (!tau_workq) + return -ENOMEM; on_each_cpu(TAU_init_smp, NULL, 0); - printk("Thermal assist unit "); -#ifdef CONFIG_TAU_INT - printk("using interrupts, "); -#else - printk("using timers, "); -#endif - printk("shrink_timer: %d jiffies\n", shrink_timer); + queue_work(tau_workq, &tau_work); + + pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n", + tau_int_enable ? 
"interrupts" : "workqueue", shrink_timer); tau_initialized = 1; return 0; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index f85539ebb513..74efe46f5532 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -75,15 +75,6 @@ #include <linux/clockchips.h> #include <linux/timekeeper_internal.h> -static u64 rtc_read(struct clocksource *); -static struct clocksource clocksource_rtc = { - .name = "rtc", - .rating = 400, - .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .mask = CLOCKSOURCE_MASK(64), - .read = rtc_read, -}; - static u64 timebase_read(struct clocksource *); static struct clocksource clocksource_timebase = { .name = "timebase", @@ -447,19 +438,9 @@ void vtime_flush(struct task_struct *tsk) void __delay(unsigned long loops) { unsigned long start; - int diff; spin_begin(); - if (__USE_RTC()) { - start = get_rtcl(); - do { - /* the RTCL register wraps at 1000000000 */ - diff = get_rtcl() - start; - if (diff < 0) - diff += 1000000000; - spin_cpu_relax(); - } while (diff < loops); - } else if (tb_invalid) { + if (tb_invalid) { /* * TB is in error state and isn't ticking anymore. * HMI handler was unable to recover from TB error. @@ -467,8 +448,8 @@ void __delay(unsigned long loops) */ spin_cpu_relax(); } else { - start = get_tbl(); - while (get_tbl() - start < loops) + start = mftb(); + while (mftb() - start < loops) spin_cpu_relax(); } spin_end(); @@ -614,7 +595,7 @@ void timer_interrupt(struct pt_regs *regs) irq_work_run(); } - now = get_tb_or_rtc(); + now = get_tb(); if (now >= *next_tb) { *next_tb = ~(u64)0; if (evt->event_handler) @@ -696,8 +677,6 @@ EXPORT_SYMBOL_GPL(tb_to_ns); */ notrace unsigned long long sched_clock(void) { - if (__USE_RTC()) - return get_rtc(); return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; } @@ -847,11 +826,6 @@ void read_persistent_clock64(struct timespec64 *ts) } /* clocksource code */ -static notrace u64 rtc_read(struct clocksource *cs) -{ - return (u64)get_rtc(); -} - static notrace u64 timebase_read(struct clocksource *cs) { return (u64)get_tb(); @@ -948,12 +922,7 @@ void update_vsyscall_tz(void) static void __init clocksource_init(void) { - struct clocksource *clock; - - if (__USE_RTC()) - clock = &clocksource_rtc; - else - clock = &clocksource_timebase; + struct clocksource *clock = &clocksource_timebase; if (clocksource_register_hz(clock, tb_ticks_per_sec)) { printk(KERN_ERR "clocksource: %s is already registered\n", @@ -968,7 +937,7 @@ static void __init clocksource_init(void) static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { - __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt); + __this_cpu_write(decrementers_next_tb, get_tb() + evt); set_dec(evt); /* We may have raced with new irq work */ @@ -1071,17 +1040,12 @@ void __init time_init(void) u64 scale; unsigned shift; - if (__USE_RTC()) { - /* 601 processor: dec counts down by 128 every 128ns */ - ppc_tb_freq = 1000000000; - } else { - /* Normal PowerPC with timebase register */ - ppc_md.calibrate_decr(); - printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", - ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); - printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", - ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); - } + /* Normal PowerPC with timebase register */ + ppc_md.calibrate_decr(); + printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", + ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); + printk(KERN_DEBUG "time_init: processor 
frequency = %lu.%.6lu MHz\n", + ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); tb_ticks_per_jiffy = ppc_tb_freq / HZ; tb_ticks_per_sec = ppc_tb_freq; @@ -1107,7 +1071,7 @@ void __init time_init(void) tb_to_ns_scale = scale; tb_to_ns_shift = shift; /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ - boot_tb = get_tb_or_rtc(); + boot_tb = get_tb(); /* If platform provided a timezone (pmac), we correct the time */ if (timezone_offset) { diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 6ba0fdd1e7f8..2b91f233b05d 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -122,6 +122,13 @@ _GLOBAL(tm_reclaim) std r3, STK_PARAM(R3)(r1) SAVE_NVGPRS(r1) + /* + * Save kernel live AMR since it will be clobbered by treclaim + * but can be used elsewhere later in kernel space. + */ + mfspr r3, SPRN_AMR + std r3, TM_FRAME_L1(r1) + /* We need to setup MSR for VSX register save instructions. */ mfmsr r14 mr r15, r14 @@ -245,7 +252,7 @@ _GLOBAL(tm_reclaim) * but is used in signal return to 'wind back' to the abort handler. */ - /* ******************** CR,LR,CCR,MSR ********** */ + /* ***************** CTR, LR, CR, XER ********** */ mfctr r3 mflr r4 mfcr r5 @@ -256,7 +263,6 @@ _GLOBAL(tm_reclaim) std r5, _CCR(r7) std r6, _XER(r7) - /* ******************** TAR, DSCR ********** */ mfspr r3, SPRN_TAR mfspr r4, SPRN_DSCR @@ -264,6 +270,10 @@ _GLOBAL(tm_reclaim) std r3, THREAD_TM_TAR(r12) std r4, THREAD_TM_DSCR(r12) + /* ******************** AMR **************** */ + mfspr r3, SPRN_AMR + std r3, THREAD_TM_AMR(r12) + /* * MSR and flags: We don't change CRs, and we don't need to alter MSR. */ @@ -308,7 +318,9 @@ _GLOBAL(tm_reclaim) std r3, THREAD_TM_TFHAR(r12) std r4, THREAD_TM_TFIAR(r12) - /* AMR is checkpointed too, but is unsupported by Linux. */ + /* Restore kernel live AMR */ + ld r8, TM_FRAME_L1(r1) + mtspr SPRN_AMR, r8 /* Restore original MSR/IRQ state & clear TM mode */ ld r14, TM_FRAME_L0(r1) /* Orig MSR */ @@ -355,6 +367,13 @@ _GLOBAL(__tm_recheckpoint) */ SAVE_NVGPRS(r1) + /* + * Save kernel live AMR since it will be clobbered for trechkpt + * but can be used elsewhere later in kernel space. + */ + mfspr r8, SPRN_AMR + std r8, TM_FRAME_L0(r1) + /* Load complete register state from ts_ckpt* registers */ addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */ @@ -404,7 +423,7 @@ _GLOBAL(__tm_recheckpoint) restore_gprs: - /* ******************** CR,LR,CCR,MSR ********** */ + /* ****************** CTR, LR, XER ************* */ ld r4, _CTR(r7) ld r5, _LINK(r7) ld r8, _XER(r7) @@ -417,6 +436,10 @@ restore_gprs: ld r4, THREAD_TM_TAR(r3) mtspr SPRN_TAR, r4 + /* ******************** AMR ******************** */ + ld r4, THREAD_TM_AMR(r3) + mtspr SPRN_AMR, r4 + /* Load up the PPR and DSCR in GPRs only at this stage */ ld r5, THREAD_TM_DSCR(r3) ld r6, THREAD_TM_PPR(r3) @@ -509,6 +532,10 @@ restore_gprs: li r4, MSR_RI mtmsrd r4, 1 + /* Restore kernel live AMR */ + ld r8, TM_FRAME_L0(r1) + mtspr SPRN_AMR, r8 + REST_NVGPRS(r1) addi r1, r1, TM_FRAME_SIZE diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d1ebe152f210..c5f39f13e96e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -529,9 +529,6 @@ out: * Check if the NIP corresponds to the address of a sync * instruction for which there is an entry in the exception * table. - * Note that the 601 only takes a machine check on TEA - * (transfer error ack) signal assertion, and does not - * set any of the top 16 bits of SRR1. * -- paulus. 
*/ static inline int check_io_access(struct pt_regs *regs) @@ -796,7 +793,6 @@ int machine_check_generic(struct pt_regs *regs) case 0x80000: pr_cont("Machine check signal\n"); break; - case 0: /* for 601 */ case 0x40000: case 0x140000: /* 7450 MSS error and TEA */ pr_cont("Transfer error ack signal\n"); diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 217bb630f8f9..1d23e2771dba 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -47,7 +47,6 @@ V_FUNCTION_END(__kernel_get_syscall_map) * * returns the timebase frequency in HZ */ -#ifndef CONFIG_PPC_BOOK3S_601 V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 @@ -60,4 +59,3 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) blr .cfi_endproc V_FUNCTION_END(__kernel_get_tbfreq) -#endif diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 5206c2eb2a1d..7eadac74c7f9 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -144,13 +144,11 @@ VERSION __kernel_datapage_offset; __kernel_get_syscall_map; -#ifndef CONFIG_PPC_BOOK3S_601 __kernel_gettimeofday; __kernel_clock_gettime; __kernel_clock_getres; __kernel_time; __kernel_get_tbfreq; -#endif __kernel_sync_dicache; __kernel_sync_dicache_p5; __kernel_sigtramp32; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 326e113d2e45..e0548b4950de 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -360,8 +360,8 @@ SECTIONS PROVIDE32 (end = .); STABS_DEBUG - DWARF_DEBUG + ELF_DETAILS DISCARDS /DISCARD/ : { diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 53bb71e3a2e1..c69bcf9b547a 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -138,15 +138,13 @@ out: */ static int get_crash_memory_ranges(struct crash_mem **mem_ranges) { - struct memblock_region *reg; + phys_addr_t base, end; struct crash_mem *tmem; + u64 i; int ret; - for_each_memblock(memory, reg) { - u64 base, size; - - base = (u64)reg->base; - size = (u64)reg->size; + for_each_mem_range(i, &base, &end) { + u64 size = end - base; /* Skip backup memory region, which needs a separate entry */ if (base == BACKUP_SRC_START) { @@ -250,8 +248,7 @@ static int __locate_mem_hole_top_down(struct kexec_buf *kbuf, phys_addr_t start, end; u64 i; - for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, - MEMBLOCK_NONE, &start, &end, NULL) { + for_each_mem_range_rev(i, &start, &end) { /* * memblock uses [start, end) convention while it is * [start, end] here. Fix the off-by-one to have the @@ -350,8 +347,7 @@ static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf, phys_addr_t start, end; u64 i; - for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, - MEMBLOCK_NONE, &start, &end, NULL) { + for_each_mem_range(i, &start, &end) { /* * memblock uses [start, end) convention while it is * [start, end] here. Fix the off-by-one to have the diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 4ba06a2a306c..3bd3118c7633 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3530,6 +3530,13 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, */ asm volatile("eieio; tlbsync; ptesync"); + /* + * cp_abort is required if the processor supports local copy-paste + * to clear the copy buffer that was under control of the guest. 
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + asm volatile(PPC_CP_ABORT); + mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */ isync(); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 073617ce83e0..8f58dd20b362 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -95,23 +95,15 @@ EXPORT_SYMBOL_GPL(kvm_free_hpt_cma); void __init kvm_cma_reserve(void) { unsigned long align_size; - struct memblock_region *reg; - phys_addr_t selected_size = 0; + phys_addr_t selected_size; /* * We need CMA reservation only when we are in HV mode */ if (!cpu_has_feature(CPU_FTR_HVMODE)) return; - /* - * We cannot use memblock_phys_mem_size() here, because - * memblock_analyze() has not been called yet. - */ - for_each_memblock(memory, reg) - selected_size += memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT; + selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100); if (selected_size) { pr_info("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)selected_size / SZ_1M); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 799d6d0f4ead..cd9995ee8441 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1831,6 +1831,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG) #endif /* CONFIG_PPC_RADIX_MMU */ /* + * cp_abort is required if the processor supports local copy-paste + * to clear the copy buffer that was under control of the guest. + */ +BEGIN_FTR_SECTION + PPC_CP_ABORT +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) + + /* * POWER7/POWER8 guest -> host partition switch code. * We don't have to lock against tlbies but we do * have to coordinate the hardware threads. 
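The PPC_CP_ABORT added to the real-mode exit path above sits between BEGIN_FTR_SECTION and END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31), so it is only kept on CPUs that advertise ISA v3.1; on older CPUs the boot-time feature-fixup pass overwrites the section. A rough, self-contained model of that patching idea (names and types here are illustrative, not the kernel's own):

	#include <stdint.h>

	#define PPC_INST_NOP 0x60000000u	/* "ori r0,r0,0", the canonical nop */

	/*
	 * Sketch of an END_FTR_SECTION_IFSET-style fixup: if the required
	 * feature bits are present the alternative instructions are left in
	 * place, otherwise the whole section is overwritten with nops.
	 */
	static void apply_ftr_section(uint32_t *start, uint32_t *end,
				      uint64_t cpu_features, uint64_t required)
	{
		if ((cpu_features & required) == required)
			return;

		for (uint32_t *p = start; p < end; p++)
			*p = PPC_INST_NOP;
	}

The C guest-exit path patched earlier in this series (book3s_hv.c) makes the same decision at run time with cpu_has_feature(CPU_FTR_ARCH_31) instead of boot-time patching.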
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 7705d5557239..84e5a2dc8be5 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -687,9 +687,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) struct kvmppc_uvmem_page_pvt *pvt; unsigned long pfn_last, pfn_first; - pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT; + pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT; pfn_last = pfn_first + - (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT); + (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT); spin_lock(&kvmppc_uvmem_bitmap_lock); bit = find_first_zero_bit(kvmppc_uvmem_bitmap, @@ -1007,7 +1007,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf) static void kvmppc_uvmem_page_free(struct page *page) { unsigned long pfn = page_to_pfn(page) - - (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT); + (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT); struct kvmppc_uvmem_page_pvt *pvt; spin_lock(&kvmppc_uvmem_bitmap_lock); @@ -1170,7 +1170,9 @@ int kvmppc_uvmem_init(void) } kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE; - kvmppc_uvmem_pgmap.res = *res; + kvmppc_uvmem_pgmap.range.start = res->start; + kvmppc_uvmem_pgmap.range.end = res->end; + kvmppc_uvmem_pgmap.nr_range = 1; kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops; /* just one global instance: */ kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap; @@ -1205,7 +1207,7 @@ void kvmppc_uvmem_free(void) return; memunmap_pages(&kvmppc_uvmem_pgmap); - release_mem_region(kvmppc_uvmem_pgmap.res.start, - resource_size(&kvmppc_uvmem_pgmap.res)); + release_mem_region(kvmppc_uvmem_pgmap.range.start, + range_len(&kvmppc_uvmem_pgmap.range)); kfree(kvmppc_uvmem_bitmap); } diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index d66a645503eb..69a91b571845 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -39,7 +39,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ memcpy_power7.o obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ - memcpy_64.o memcpy_mcsafe_64.o + memcpy_64.o copy_mc_64.o ifndef CONFIG_PPC_QUEUED_SPINLOCKS obj64-$(CONFIG_SMP) += locks.o diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S index ecd150dc3ed9..27d9070617df 100644 --- a/arch/powerpc/lib/checksum_32.S +++ b/arch/powerpc/lib/checksum_32.S @@ -78,12 +78,10 @@ EXPORT_SYMBOL(__csum_partial) /* * Computes the checksum of a memory block at src, length len, - * and adds in "sum" (32-bit), while copying the block to dst. - * If an access exception occurs on src or dst, it stores -EFAULT - * to *src_err or *dst_err respectively, and (for an error on - * src) zeroes the rest of dst. + * and adds in 0xffffffff, while copying the block to dst. + * If an access exception occurs it returns zero. 
* - * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err) + * csum_partial_copy_generic(src, dst, len) */ #define CSUM_COPY_16_BYTES_WITHEX(n) \ 8 ## n ## 0: \ @@ -108,14 +106,14 @@ EXPORT_SYMBOL(__csum_partial) adde r12,r12,r10 #define CSUM_COPY_16_BYTES_EXCODE(n) \ - EX_TABLE(8 ## n ## 0b, src_error); \ - EX_TABLE(8 ## n ## 1b, src_error); \ - EX_TABLE(8 ## n ## 2b, src_error); \ - EX_TABLE(8 ## n ## 3b, src_error); \ - EX_TABLE(8 ## n ## 4b, dst_error); \ - EX_TABLE(8 ## n ## 5b, dst_error); \ - EX_TABLE(8 ## n ## 6b, dst_error); \ - EX_TABLE(8 ## n ## 7b, dst_error); + EX_TABLE(8 ## n ## 0b, fault); \ + EX_TABLE(8 ## n ## 1b, fault); \ + EX_TABLE(8 ## n ## 2b, fault); \ + EX_TABLE(8 ## n ## 3b, fault); \ + EX_TABLE(8 ## n ## 4b, fault); \ + EX_TABLE(8 ## n ## 5b, fault); \ + EX_TABLE(8 ## n ## 6b, fault); \ + EX_TABLE(8 ## n ## 7b, fault); .text .stabs "arch/powerpc/lib/",N_SO,0,0,0f @@ -127,11 +125,8 @@ LG_CACHELINE_BYTES = L1_CACHE_SHIFT CACHELINE_MASK = (L1_CACHE_BYTES-1) _GLOBAL(csum_partial_copy_generic) - stwu r1,-16(r1) - stw r7,12(r1) - stw r8,8(r1) - - addic r12,r6,0 + li r12,-1 + addic r0,r0,0 /* clear carry */ addi r6,r4,-4 neg r0,r4 addi r4,r3,-4 @@ -241,39 +236,23 @@ _GLOBAL(csum_partial_copy_generic) slwi r0,r0,8 adde r12,r12,r0 66: addze r3,r12 - addi r1,r1,16 beqlr+ cr7 rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */ blr -/* read fault */ -src_error: - lwz r7,12(r1) - addi r1,r1,16 - cmpwi cr0,r7,0 - beqlr - li r0,-EFAULT - stw r0,0(r7) - blr -/* write fault */ -dst_error: - lwz r8,8(r1) - addi r1,r1,16 - cmpwi cr0,r8,0 - beqlr - li r0,-EFAULT - stw r0,0(r8) +fault: + li r3,0 blr - EX_TABLE(70b, src_error); - EX_TABLE(71b, dst_error); - EX_TABLE(72b, src_error); - EX_TABLE(73b, dst_error); - EX_TABLE(54b, dst_error); + EX_TABLE(70b, fault); + EX_TABLE(71b, fault); + EX_TABLE(72b, fault); + EX_TABLE(73b, fault); + EX_TABLE(54b, fault); /* * this stuff handles faults in the cacheline loop and branches to either - * src_error (if in read part) or dst_error (if in write part) + * fault (if in read part) or fault (if in write part) */ CSUM_COPY_16_BYTES_EXCODE(0) #if L1_CACHE_BYTES >= 32 @@ -290,12 +269,12 @@ dst_error: #endif #endif - EX_TABLE(30b, src_error); - EX_TABLE(31b, dst_error); - EX_TABLE(40b, src_error); - EX_TABLE(41b, dst_error); - EX_TABLE(50b, src_error); - EX_TABLE(51b, dst_error); + EX_TABLE(30b, fault); + EX_TABLE(31b, fault); + EX_TABLE(40b, fault); + EX_TABLE(41b, fault); + EX_TABLE(50b, fault); + EX_TABLE(51b, fault); EXPORT_SYMBOL(csum_partial_copy_generic) diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index 514978f908d4..98ff51bd2f7d 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -182,34 +182,33 @@ EXPORT_SYMBOL(__csum_partial) .macro srcnr 100: - EX_TABLE(100b,.Lsrc_error_nr) + EX_TABLE(100b,.Lerror_nr) .endm .macro source 150: - EX_TABLE(150b,.Lsrc_error) + EX_TABLE(150b,.Lerror) .endm .macro dstnr 200: - EX_TABLE(200b,.Ldest_error_nr) + EX_TABLE(200b,.Lerror_nr) .endm .macro dest 250: - EX_TABLE(250b,.Ldest_error) + EX_TABLE(250b,.Lerror) .endm /* * Computes the checksum of a memory block at src, length len, - * and adds in "sum" (32-bit), while copying the block to dst. - * If an access exception occurs on src or dst, it stores -EFAULT - * to *src_err or *dst_err respectively. The caller must take any action - * required in this case (zeroing memory, recalculating partial checksum etc). 
+ * and adds in 0xffffffff (32-bit), while copying the block to dst. + * If an access exception occurs, it returns 0. * - * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err) + * csum_partial_copy_generic(r3=src, r4=dst, r5=len) */ _GLOBAL(csum_partial_copy_generic) + li r6,-1 addic r0,r6,0 /* clear carry */ srdi. r6,r5,3 /* less than 8 bytes? */ @@ -401,29 +400,15 @@ dstnr; stb r6,0(r4) srdi r3,r3,32 blr -.Lsrc_error: +.Lerror: ld r14,STK_REG(R14)(r1) ld r15,STK_REG(R15)(r1) ld r16,STK_REG(R16)(r1) addi r1,r1,STACKFRAMESIZE -.Lsrc_error_nr: - cmpdi 0,r7,0 - beqlr - li r6,-EFAULT - stw r6,0(r7) +.Lerror_nr: + li r3,0 blr -.Ldest_error: - ld r14,STK_REG(R14)(r1) - ld r15,STK_REG(R15)(r1) - ld r16,STK_REG(R16)(r1) - addi r1,r1,STACKFRAMESIZE -.Ldest_error_nr: - cmpdi 0,r8,0 - beqlr - li r6,-EFAULT - stw r6,0(r8) - blr EXPORT_SYMBOL(csum_partial_copy_generic) /* diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c index fabe4db28726..b895166afc82 100644 --- a/arch/powerpc/lib/checksum_wrappers.c +++ b/arch/powerpc/lib/checksum_wrappers.c @@ -12,83 +12,37 @@ #include <linux/uaccess.h> __wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) + int len) { - unsigned int csum; + __wsum csum; might_sleep(); - allow_read_from_user(src, len); - - *err_ptr = 0; - - if (!len) { - csum = 0; - goto out; - } - if (unlikely((len < 0) || !access_ok(src, len))) { - *err_ptr = -EFAULT; - csum = (__force unsigned int)sum; - goto out; - } + if (unlikely(!access_ok(src, len))) + return 0; - csum = csum_partial_copy_generic((void __force *)src, dst, - len, sum, err_ptr, NULL); - - if (unlikely(*err_ptr)) { - int missing = __copy_from_user(dst, src, len); - - if (missing) { - memset(dst + len - missing, 0, missing); - *err_ptr = -EFAULT; - } else { - *err_ptr = 0; - } + allow_read_from_user(src, len); - csum = csum_partial(dst, len, sum); - } + csum = csum_partial_copy_generic((void __force *)src, dst, len); -out: prevent_read_from_user(src, len); - return (__force __wsum)csum; + return csum; } EXPORT_SYMBOL(csum_and_copy_from_user); -__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, - __wsum sum, int *err_ptr) +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len) { - unsigned int csum; + __wsum csum; might_sleep(); - allow_write_to_user(dst, len); - - *err_ptr = 0; - - if (!len) { - csum = 0; - goto out; - } + if (unlikely(!access_ok(dst, len))) + return 0; - if (unlikely((len < 0) || !access_ok(dst, len))) { - *err_ptr = -EFAULT; - csum = -1; /* invalid checksum */ - goto out; - } - - csum = csum_partial_copy_generic(src, (void __force *)dst, - len, sum, NULL, err_ptr); - - if (unlikely(*err_ptr)) { - csum = csum_partial(src, len, sum); + allow_write_to_user(dst, len); - if (copy_to_user(dst, src, len)) { - *err_ptr = -EFAULT; - csum = -1; /* invalid checksum */ - } - } + csum = csum_partial_copy_generic(src, (void __force *)dst, len); -out: prevent_write_to_user(dst, len); - return (__force __wsum)csum; + return csum; } EXPORT_SYMBOL(csum_and_copy_to_user); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 8c3934ea6220..2333625b5e31 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -21,21 +21,18 @@ static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr, struct ppc_inst *patch_addr) { - int err = 0; - - if (!ppc_inst_prefixed(instr)) { - 
__put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); - } else { - __put_user_asm(ppc_inst_as_u64(instr), patch_addr, err, "std"); - } - - if (err) - return err; + if (!ppc_inst_prefixed(instr)) + __put_user_asm_goto(ppc_inst_val(instr), patch_addr, failed, "stw"); + else + __put_user_asm_goto(ppc_inst_as_u64(instr), patch_addr, failed, "std"); asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr), "r" (exec_addr)); return 0; + +failed: + return -EFAULT; } int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) diff --git a/arch/powerpc/lib/memcpy_mcsafe_64.S b/arch/powerpc/lib/copy_mc_64.S index cb882d9a6d8a..88d46c471493 100644 --- a/arch/powerpc/lib/memcpy_mcsafe_64.S +++ b/arch/powerpc/lib/copy_mc_64.S @@ -50,7 +50,7 @@ err3; stb r0,0(r3) blr -_GLOBAL(memcpy_mcsafe) +_GLOBAL(copy_mc_generic) mr r7,r5 cmpldi r5,16 blt .Lshort_copy @@ -239,4 +239,4 @@ err1; stb r0,0(r3) 15: li r3,0 blr -EXPORT_SYMBOL_GPL(memcpy_mcsafe); +EXPORT_SYMBOL_GPL(copy_mc_generic); diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index caee8cc77e19..e9dcaba9a4f8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -219,10 +219,13 @@ static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, ea += regs->gpr[ra]; else if (!prefix_r && !ra) ; /* Leave ea as is */ - else if (prefix_r && !ra) + else if (prefix_r) ea += regs->nip; - else if (prefix_r && ra) - ; /* Invalid form. Should already be checked for by caller! */ + + /* + * (prefix_r && ra) is an invalid form. Should already be + * checked for by caller! + */ return ea; } diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S index 1690d369688b..b2c912e517b9 100644 --- a/arch/powerpc/mm/book3s32/hash_low.S +++ b/arch/powerpc/mm/book3s32/hash_low.S @@ -15,6 +15,7 @@ */ #include <linux/pgtable.h> +#include <linux/init.h> #include <asm/reg.h> #include <asm/page.h> #include <asm/cputable.h> @@ -199,11 +200,9 @@ _GLOBAL(add_hash_page) * covered by a BAT). -- paulus */ mfmsr r9 - SYNC rlwinm r0,r9,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear MSR_DR */ mtmsr r0 - SYNC_601 isync #ifdef CONFIG_SMP @@ -262,7 +261,6 @@ _GLOBAL(add_hash_page) /* reenable interrupts and DR */ mtmsr r9 - SYNC_601 isync lwz r0,4(r1) @@ -287,9 +285,9 @@ _ASM_NOKPROBE_SYMBOL(add_hash_page) * * For speed, 4 of the instructions get patched once the size and * physical address of the hash table are known. These definitions - * of Hash_base and Hash_bits below are just an example. + * of Hash_base and Hash_bits below are for the early hash table. */ -Hash_base = 0xc0180000 +Hash_base = early_hash Hash_bits = 12 /* e.g. 256kB hash table */ Hash_msk = (((1 << Hash_bits) - 1) * 64) @@ -310,6 +308,7 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64) #define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1) #define HASH_RIGHT 31-LG_PTEG_SIZE +__REF _GLOBAL(create_hpte) /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */ rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */ @@ -476,6 +475,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) sync /* make sure pte updates get to memory */ blr + .previous _ASM_NOKPROBE_SYMBOL(create_hpte) .section .bss @@ -496,6 +496,7 @@ htab_hash_searches: * * We assume that there is a hash table in use (Hash != 0). */ +__REF _GLOBAL(flush_hash_pages) /* * We disable interrupts here, even on UP, because we want @@ -506,11 +507,9 @@ _GLOBAL(flush_hash_pages) * covered by a BAT). 
-- paulus */ mfmsr r10 - SYNC rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear MSR_DR */ mtmsr r0 - SYNC_601 isync /* First find a PTE in the range that has _PAGE_HASHPTE set */ @@ -629,9 +628,9 @@ _GLOBAL(flush_hash_pages) #endif 19: mtmsr r10 - SYNC_601 isync blr + .previous EXPORT_SYMBOL(flush_hash_pages) _ASM_NOKPROBE_SYMBOL(flush_hash_pages) @@ -643,11 +642,9 @@ _GLOBAL(_tlbie) lwz r8,TASK_CPU(r2) oris r8,r8,11 mfmsr r10 - SYNC rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear DR */ mtmsr r0 - SYNC_601 isync lis r9,mmu_hash_lock@h ori r9,r9,mmu_hash_lock@l @@ -664,7 +661,6 @@ _GLOBAL(_tlbie) li r0,0 stw r0,0(r9) /* clear mmu_hash_lock */ mtmsr r10 - SYNC_601 isync #else /* CONFIG_SMP */ tlbie r3 @@ -681,11 +677,9 @@ _GLOBAL(_tlbia) lwz r8,TASK_CPU(r2) oris r8,r8,10 mfmsr r10 - SYNC rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r0,0,28,26 /* clear DR */ mtmsr r0 - SYNC_601 isync lis r9,mmu_hash_lock@h ori r9,r9,mmu_hash_lock@l @@ -709,7 +703,6 @@ _GLOBAL(_tlbia) li r0,0 stw r0,0(r9) /* clear mmu_hash_lock */ mtmsr r10 - SYNC_601 isync #endif /* CONFIG_SMP */ blr diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index d426eaf76bb0..a59e7ec98180 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -31,6 +31,8 @@ #include <mm/mmu_decl.h> +u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0}; + struct hash_pte *Hash; static unsigned long Hash_size, Hash_mask; unsigned long _SDR1; @@ -73,23 +75,13 @@ unsigned long p_block_mapped(phys_addr_t pa) static int find_free_bat(void) { int b; + int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) { - for (b = 0; b < 4; b++) { - struct ppc_bat *bat = BATS[b]; - - if (!(bat[0].batl & 0x40)) - return b; - } - } else { - int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; + for (b = 0; b < n; b++) { + struct ppc_bat *bat = BATS[b]; - for (b = 0; b < n; b++) { - struct ppc_bat *bat = BATS[b]; - - if (!(bat[1].batu & 3)) - return b; - } + if (!(bat[1].batu & 3)) + return b; } return -1; } @@ -97,7 +89,7 @@ static int find_free_bat(void) /* * This function calculates the size of the larger block usable to map the * beginning of an area based on the start address and size of that area: - * - max block size is 8M on 601 and 256 on other 6xx. + * - max block size is 256 on 6xx. * - base address must be aligned to the block size. So the maximum block size * is identified by the lowest bit set to 1 in the base address (for instance * if base is 0x16000000, max size is 0x02000000). @@ -106,7 +98,7 @@ static int find_free_bat(void) */ static unsigned int block_size(unsigned long base, unsigned long top) { - unsigned int max_size = IS_ENABLED(CONFIG_PPC_BOOK3S_601) ? SZ_8M : SZ_256M; + unsigned int max_size = SZ_256M; unsigned int base_shift = (ffs(base) - 1) & 31; unsigned int block_shift = (fls(top - base) - 1) & 31; @@ -117,7 +109,6 @@ static unsigned int block_size(unsigned long base, unsigned long top) * Set up one of the IBAT (block address translation) register pairs. * The parameters are not checked; in particular size must be a power * of 2 between 128k and 256M. - * Only for 603+ ... 
*/ static void setibat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot) @@ -214,9 +205,6 @@ void mmu_mark_initmem_nx(void) unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; unsigned long size; - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) - return; - for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) { size = block_size(base, top); setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); @@ -253,9 +241,6 @@ void mmu_mark_rodata_ro(void) int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) - return; - for (i = 0; i < nb; i++) { struct ppc_bat *bat = BATS[i]; @@ -294,35 +279,22 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys, flags &= ~_PAGE_COHERENT; bl = (size >> 17) - 1; - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_601)) { - /* 603, 604, etc. */ - /* Do DBAT first */ - wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE - | _PAGE_COHERENT | _PAGE_GUARDED); - wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX; - bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */ - bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp; - if (flags & _PAGE_USER) - bat[1].batu |= 1; /* Vp = 1 */ - if (flags & _PAGE_GUARDED) { - /* G bit must be zero in IBATs */ - flags &= ~_PAGE_EXEC; - } - if (flags & _PAGE_EXEC) - bat[0] = bat[1]; - else - bat[0].batu = bat[0].batl = 0; - } else { - /* 601 cpu */ - if (bl > BL_8M) - bl = BL_8M; - wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE - | _PAGE_COHERENT); - wimgxpp |= (flags & _PAGE_RW)? - ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX; - bat->batu = virt | wimgxpp | 4; /* Ks=0, Ku=1 */ - bat->batl = phys | bl | 0x40; /* V=1 */ + /* Do DBAT first */ + wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE + | _PAGE_COHERENT | _PAGE_GUARDED); + wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX; + bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */ + bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp; + if (flags & _PAGE_USER) + bat[1].batu |= 1; /* Vp = 1 */ + if (flags & _PAGE_GUARDED) { + /* G bit must be zero in IBATs */ + flags &= ~_PAGE_EXEC; } + if (flags & _PAGE_EXEC) + bat[0] = bat[1]; + else + bat[0].batu = bat[0].batl = 0; bat_addrs[index].start = virt; bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1; @@ -425,15 +397,6 @@ void __init MMU_init_hw(void) hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg; if (lg_n_hpteg > 16) hash_mb2 = 16 - LG_HPTEG_SIZE; - - /* - * When KASAN is selected, there is already an early temporary hash - * table and the switch to the final hash table is done later. 
- */ - if (IS_ENABLED(CONFIG_KASAN)) - return; - - MMU_init_hw_patch(); } void __init MMU_init_hw_patch(void) { @@ -441,6 +404,9 @@ void __init MMU_init_hw_patch(void) unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE); unsigned int hash = (unsigned int)Hash - PAGE_OFFSET; + if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) + return; + if (ppc_md.progress) ppc_md.progress("hash:patch", 0x345); if (ppc_md.progress) @@ -474,11 +440,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, */ BUG_ON(first_memblock_base != 0); - /* 601 can only access 16MB at the moment */ - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) - memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); - else /* Anything else has 256M mapped */ - memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000)); + memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M)); } void __init print_system_hash_info(void) diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c index cf20e5229ce1..0203cdf48c54 100644 --- a/arch/powerpc/mm/book3s64/hash_native.c +++ b/arch/powerpc/mm/book3s64/hash_native.c @@ -82,7 +82,7 @@ static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is) for (set = 0; set < num_sets; set++) tlbiel_hash_set_isa206(set, is); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); } static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) @@ -110,7 +110,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) */ tlbiel_hash_set_isa300(0, is, 0, 2, 1); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory"); } @@ -303,7 +303,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize, asm volatile("ptesync": : :"memory"); if (use_local) { __tlbiel(vpn, psize, apsize, ssize); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); } else { __tlbie(vpn, psize, apsize, ssize); fixup_tlbie_vpn(vpn, psize, apsize, ssize); @@ -879,7 +879,7 @@ static void native_flush_hash_range(unsigned long number, int local) __tlbiel(vpn, psize, psize, ssize); } pte_iterate_hashed_end(); } - asm volatile("ptesync":::"memory"); + ppc_after_tlbiel_barrier(); } else { int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index c663e7ba801f..24702c0a92e0 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -7,7 +7,7 @@ * * SMP scalability work: * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM - + * Module name: htab.c * * Description: @@ -260,8 +260,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n", vstart, vend, pstart, prot, psize, ssize); - for (vaddr = vstart, paddr = pstart; vaddr < vend; - vaddr += step, paddr += step) { + /* Carefully map only the possible range */ + vaddr = ALIGN(vstart, step); + paddr = ALIGN(pstart, step); + vend = ALIGN_DOWN(vend, step); + + for (; vaddr < vend; vaddr += step, paddr += step) { unsigned long hash, hpteg; unsigned long vsid = get_kernel_vsid(vaddr, ssize); unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); @@ -343,7 +347,9 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend, if (!mmu_hash_ops.hpte_removebolted) return -ENODEV; - for (vaddr = vstart; vaddr < vend; vaddr += step) { + /* Unmap the full range specified */
+ vaddr = ALIGN_DOWN(vstart, step); + for (;vaddr < vend; vaddr += step) { rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize); if (rc == -ENOENT) { ret = -ENOENT; @@ -867,8 +873,8 @@ static void __init htab_initialize(void) unsigned long table; unsigned long pteg_count; unsigned long prot; - unsigned long base = 0, size = 0; - struct memblock_region *reg; + phys_addr_t base = 0, size = 0, end; + u64 i; DBG(" -> htab_initialize()\n"); @@ -884,7 +890,7 @@ static void __init htab_initialize(void) /* * Calculate the required size of the htab. We want the number of * PTEGs to equal one half the number of real pages. - */ + */ htab_size_bytes = htab_get_table_size(); pteg_count = htab_size_bytes >> 7; @@ -894,7 +900,7 @@ static void __init htab_initialize(void) firmware_has_feature(FW_FEATURE_PS3_LV1)) { /* Using a hypervisor which owns the htab */ htab_address = NULL; - _SDR1 = 0; + _SDR1 = 0; #ifdef CONFIG_FA_DUMP /* * If firmware assisted dump is active firmware preserves @@ -960,9 +966,9 @@ static void __init htab_initialize(void) #endif /* CONFIG_DEBUG_PAGEALLOC */ /* create bolted the linear mapping in the hash table */ - for_each_memblock(memory, reg) { - base = (unsigned long)__va(reg->base); - size = reg->size; + for_each_mem_range(i, &base, &end) { + size = end - base; + base = (unsigned long)__va(base); DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", base, size, prot); diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h index 7eda0d30d765..c12d78ee42f5 100644 --- a/arch/powerpc/mm/book3s64/internal.h +++ b/arch/powerpc/mm/book3s64/internal.h @@ -13,4 +13,6 @@ static inline bool stress_slb(void) return static_branch_unlikely(&stress_slb_key); } +void slb_setup_new_exec(void); + #endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */ diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index 0ba30b8b935b..1c54821de7bf 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -21,6 +21,8 @@ #include <asm/mmu_context.h> #include <asm/pgalloc.h> +#include "internal.h" + static DEFINE_IDA(mmu_context_ida); static int alloc_context_id(int min_id, int max_id) @@ -48,8 +50,6 @@ int hash__alloc_context_id(void) } EXPORT_SYMBOL_GPL(hash__alloc_context_id); -void slb_setup_new_exec(void); - static int realloc_context_ids(mm_context_t *ctx) { int i, id; diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index d5f0c10d752a..3adcf730f478 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -34,7 +34,7 @@ unsigned int mmu_pid_bits; unsigned int mmu_base_pid; -unsigned int radix_mem_block_size __ro_after_init; +unsigned long radix_mem_block_size __ro_after_init; static __ref void *early_alloc_pgtable(unsigned long size, int nid, unsigned long region_start, unsigned long region_end) @@ -276,6 +276,7 @@ static int __meminit create_physical_mapping(unsigned long start, int psize; start = ALIGN(start, PAGE_SIZE); + end = ALIGN_DOWN(end, PAGE_SIZE); for (addr = start; addr < end; addr += mapping_size) { unsigned long gap, previous_size; int rc; @@ -329,7 +330,8 @@ static int __meminit create_physical_mapping(unsigned long start, static void __init radix_init_pgtable(void) { unsigned long rts_field; - struct memblock_region *reg; + phys_addr_t start, end; + u64 i; /* We don't support slb for radix */ mmu_slb_size = 0; @@ -337,20 +339,19 @@ static void __init radix_init_pgtable(void) /* * 
Create the linear mapping */ - for_each_memblock(memory, reg) { + for_each_mem_range(i, &start, &end) { /* * The memblock allocator is up at this point, so the * page tables will be allocated within the range. No * need or a node (which we don't have yet). */ - if ((reg->base + reg->size) >= RADIX_VMALLOC_START) { + if (end >= RADIX_VMALLOC_START) { pr_warn("Outside the supported range\n"); continue; } - WARN_ON(create_physical_mapping(reg->base, - reg->base + reg->size, + WARN_ON(create_physical_mapping(start, end, radix_mem_block_size, -1, PAGE_KERNEL)); } @@ -497,7 +498,7 @@ static int __init probe_memory_block_size(unsigned long node, const char *uname, depth, void *data) { unsigned long *mem_block_size = (unsigned long *)data; - const __be64 *prop; + const __be32 *prop; int len; if (depth != 1) @@ -507,13 +508,14 @@ static int __init probe_memory_block_size(unsigned long node, const char *uname, return 0; prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len); - if (!prop || len < sizeof(__be64)) + + if (!prop || len < dt_root_size_cells * sizeof(__be32)) /* * Nothing in the device tree */ *mem_block_size = MIN_MEMORY_BLOCK_SIZE; else - *mem_block_size = be64_to_cpup(prop); + *mem_block_size = of_read_number(prop, dt_root_size_cells); return 1; } diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index 0d233763441f..b487b489d4b6 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -65,7 +65,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) for (set = 1; set < num_sets; set++) tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); } void radix__tlbiel_all(unsigned int action) @@ -296,7 +296,7 @@ static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric) /* For PWC, only one flush is needed */ if (ric == RIC_FLUSH_PWC) { - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); return; } @@ -304,7 +304,7 @@ static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric) for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++) __tlbiel_pid(pid, set, RIC_FLUSH_TLB); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory"); } @@ -431,7 +431,7 @@ static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid, asm volatile("ptesync": : :"memory"); __tlbiel_va(va, pid, ap, ric); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); } static inline void _tlbiel_va_range(unsigned long start, unsigned long end, @@ -442,7 +442,7 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end, if (also_pwc) __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); __tlbiel_va_range(start, end, pid, page_size, psize); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); } static inline void __tlbie_va_range(unsigned long start, unsigned long end, @@ -645,19 +645,29 @@ static void do_exit_flush_lazy_tlb(void *arg) struct mm_struct *mm = arg; unsigned long pid = mm->context.id; + /* + * A kthread could have done a mmget_not_zero() after the flushing CPU + * checked mm_is_singlethreaded, and be in the process of + * kthread_use_mm when interrupted here. In that case, current->mm will + * be set to mm, because kthread_use_mm() setting ->mm and switching to + * the mm is done with interrupts off. 
+ */ if (current->mm == mm) - return; /* Local CPU */ + goto out_flush; if (current->active_mm == mm) { - /* - * Must be a kernel thread because sender is single-threaded. - */ - BUG_ON(current->mm); + WARN_ON_ONCE(current->mm != NULL); + /* Is a kernel thread and is using mm as the lazy tlb */ mmgrab(&init_mm); - switch_mm(mm, &init_mm, current); current->active_mm = &init_mm; + switch_mm_irqs_off(mm, &init_mm, current); mmdrop(mm); } + + atomic_dec(&mm->context.active_cpus); + cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm)); + +out_flush: _tlbiel_pid(pid, RIC_FLUSH_ALL); } @@ -672,7 +682,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm) */ smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb, (void *)mm, 1); - mm_reset_thread_local(mm); } void radix__flush_tlb_mm(struct mm_struct *mm) @@ -940,7 +949,7 @@ is_local: if (hflush) __tlbiel_va_range(hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M); - asm volatile("ptesync": : :"memory"); + ppc_after_tlbiel_barrier(); } else if (cputlb_use_tlbie()) { asm volatile("ptesync": : :"memory"); __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c index 156c38f89511..c30fcbfa0e32 100644 --- a/arch/powerpc/mm/book3s64/slb.c +++ b/arch/powerpc/mm/book3s64/slb.c @@ -765,8 +765,8 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id) if (id == LINEAR_MAP_REGION_ID) { - /* We only support upto MAX_PHYSMEM_BITS */ - if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS)) + /* We only support upto H_MAX_PHYSMEM_BITS */ + if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS)) return -EFAULT; flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp; diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index 5ab4f868e919..30260b5d146d 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -11,7 +11,7 @@ #include <linux/types.h> #include <linux/highmem.h> #include <linux/dma-direct.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <asm/tlbflush.h> #include <asm/dma.h> diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c index b2eeea39684c..9af3832c9d8d 100644 --- a/arch/powerpc/mm/drmem.c +++ b/arch/powerpc/mm/drmem.c @@ -389,10 +389,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop) if (!drmem_info->lmbs) return; - for_each_drmem_lmb(lmb) { + for_each_drmem_lmb(lmb) read_drconf_v1_cell(lmb, &prop); - lmb_set_nid(lmb); - } } static void __init init_drmem_v2_lmbs(const __be32 *prop) @@ -437,8 +435,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop) lmb->aa_index = dr_cell.aa_index; lmb->flags = dr_cell.flags; - - lmb_set_nid(lmb); } } } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 26292544630f..36c3800769fb 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -180,7 +180,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz if (!hpdp) return NULL; - if (IS_ENABLED(CONFIG_PPC_8xx) && sz == SZ_512K) + if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT) return pte_alloc_map(mm, (pmd_t *)hpdp, addr); BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); @@ -330,10 +330,24 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif get_hugepd_cache_index(pdshift - shift)); } -static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) +static void hugetlb_free_pte_range(struct 
mmu_gather *tlb, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) { + unsigned long start = addr; pgtable_t token = pmd_pgtable(*pmd); + start &= PMD_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PMD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + pmd_clear(pmd); pte_free_tlb(tlb, token, addr); mm_dec_nr_ptes(tlb->mm); @@ -363,7 +377,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, */ WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx)); - hugetlb_free_pte_range(tlb, pmd, addr); + hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling); continue; } diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 8459056cce67..386be136026e 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -162,16 +162,16 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node) return next++; } -static __meminit void vmemmap_list_populate(unsigned long phys, - unsigned long start, - int node) +static __meminit int vmemmap_list_populate(unsigned long phys, + unsigned long start, + int node) { struct vmemmap_backing *vmem_back; vmem_back = vmemmap_list_alloc(node); if (unlikely(!vmem_back)) { - WARN_ON(1); - return; + pr_debug("vmemmap list allocation failed\n"); + return -ENOMEM; } vmem_back->phys = phys; @@ -179,6 +179,7 @@ static __meminit void vmemmap_list_populate(unsigned long phys, vmem_back->list = vmemmap_list; vmemmap_list = vmem_back; + return 0; } static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start, @@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { + bool altmap_alloc; unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; /* Align to the page size of the linear mapping. */ @@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, p = vmemmap_alloc_block_buf(page_size, node, altmap); if (!p) pr_debug("altmap block allocation failed, falling back to system memory"); + else + altmap_alloc = true; } - if (!p) + if (!p) { p = vmemmap_alloc_block_buf(page_size, node, NULL); + altmap_alloc = false; + } if (!p) return -ENOMEM; - vmemmap_list_populate(__pa(p), start, node); + if (vmemmap_list_populate(__pa(p), start, node)) { + /* + * If we don't populate vmemmap list, we don't have + * the ability to free the allocated vmemmap + * pages in section_deactivate. Hence free them + * here.
+ */ + int nr_pfns = page_size >> PAGE_SHIFT; + unsigned long page_order = get_order(page_size); + + if (altmap_alloc) + vmem_altmap_free(altmap, nr_pfns); + else + free_pages((unsigned long)p, page_order); + return -ENOMEM; + } pr_debug(" * %016lx..%016lx allocated at %p\n", start, start + page_size, p); @@ -264,10 +285,8 @@ static unsigned long vmemmap_list_free(unsigned long start) vmem_back_prev = vmem_back; } - if (unlikely(!vmem_back)) { - WARN_ON(1); + if (unlikely(!vmem_back)) return 0; - } /* remove it from vmemmap_list */ if (vmem_back == vmemmap_list) /* remove head */ diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index fb294046e00e..cf8770b1a692 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -127,8 +127,7 @@ void __init kasan_mmu_init(void) { int ret; - if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) || - IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) { ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); if (ret) @@ -138,12 +137,12 @@ void __init kasan_mmu_init(void) void __init kasan_init(void) { - struct memblock_region *reg; + phys_addr_t base, end; + u64 i; + int ret; - for_each_memblock(memory, reg) { - phys_addr_t base = reg->base; - phys_addr_t top = min(base + reg->size, total_lowmem); - int ret; + for_each_mem_range(i, &base, &end) { + phys_addr_t top = min(end, total_lowmem); if (base >= top) continue; @@ -153,6 +152,13 @@ void __init kasan_init(void) panic("kasan: kasan_init_region() failed"); } + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); + + if (ret) + panic("kasan: kasan_init_shadow_page_tables() failed"); + } + kasan_remap_early_shadow_ro(); clear_page(kasan_early_shadow_page); @@ -168,22 +174,6 @@ void __init kasan_late_init(void) kasan_unmap_early_shadow_vmalloc(); } -#ifdef CONFIG_PPC_BOOK3S_32 -u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0}; - -static void __init kasan_early_hash_table(void) -{ - unsigned int hash = __pa(early_hash); - - modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16); - modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16); - - Hash = (struct hash_pte *)early_hash; -} -#else -static void __init kasan_early_hash_table(void) {} -#endif - void __init kasan_early_init(void) { unsigned long addr = KASAN_SHADOW_START; @@ -199,7 +189,4 @@ void __init kasan_early_init(void) next = pgd_addr_end(addr, end); pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte); } while (pmd++, addr = next, addr != end); - - if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) - kasan_early_hash_table(); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 42e25874f5a8..01ec2a252f09 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -49,6 +49,7 @@ #include <asm/swiotlb.h> #include <asm/rtas.h> #include <asm/kasan.h> +#include <asm/svm.h> #include <mm/mmu_decl.h> @@ -184,15 +185,16 @@ void __init initmem_init(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - struct memblock_region *reg, *prev = NULL; - - for_each_memblock(memory, reg) { - if (prev && - memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) - register_nosave_region(memblock_region_memory_end_pfn(prev), - memblock_region_memory_base_pfn(reg)); - prev = reg; + unsigned long spfn, epfn, prev = 0; + int i; + + for_each_mem_pfn_range(i, 
MAX_NUMNODES, &spfn, &epfn, NULL) { + if (prev && prev < spfn) + register_nosave_region(prev, spfn); + + prev = epfn; } + return 0; } #else /* CONFIG_NEED_MULTIPLE_NODES */ @@ -282,7 +284,10 @@ void __init mem_init(void) * back to to-down. */ memblock_set_bottom_up(true); - swiotlb_init(0); + if (is_secure_guest()) + svm_swiotlb_init(); + else + swiotlb_init(0); #endif high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); @@ -584,20 +589,24 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, */ static int __init add_system_ram_resources(void) { - struct memblock_region *reg; + phys_addr_t start, end; + u64 i; - for_each_memblock(memory, reg) { + for_each_mem_range(i, &start, &end) { struct resource *res; - unsigned long base = reg->base; - unsigned long size = reg->size; res = kzalloc(sizeof(struct resource), GFP_KERNEL); WARN_ON(!res); if (res) { res->name = "System RAM"; - res->start = base; - res->end = base + size - 1; + res->start = start; + /* + * In memblock, end points to the first byte after + * the range while in resources, end points to the + * last byte in the range. + */ + res->end = end - 1; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; WARN_ON(request_resource(&iomem_resource, res) < 0); } diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index d2b37146ae6c..231ca95f9ffb 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -244,13 +244,6 @@ void set_context(unsigned long id, pgd_t *pgd) mb(); } -void flush_instruction_cache(void) -{ - isync(); - mtspr(SPRN_IC_CST, IDC_INVALL); - isync(); -} - #ifdef CONFIG_PPC_KUEP void __init setup_kuep(bool disabled) { diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_booke.c index 0c294827d6e5..36bda962d3b3 100644 --- a/arch/powerpc/mm/nohash/fsl_booke.c +++ b/arch/powerpc/mm/nohash/fsl_booke.c @@ -219,6 +219,22 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; } +void flush_instruction_cache(void) +{ + unsigned long tmp; + + if (IS_ENABLED(CONFIG_E200)) { + tmp = mfspr(SPRN_L1CSR0); + tmp |= L1CSR0_CFI | L1CSR0_CLFC; + mtspr(SPRN_L1CSR0, tmp); + } else { + tmp = mfspr(SPRN_L1CSR1); + tmp |= L1CSR1_ICFI | L1CSR1_ICLFR; + mtspr(SPRN_L1CSR1, tmp); + } + isync(); +} + /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. */ diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index 14514585db98..5872f69141d5 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -83,16 +83,12 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { }; #elif defined(CONFIG_PPC_8xx) struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { - /* we only manage 4k and 16k pages as normal pages */ -#ifdef CONFIG_PPC_4K_PAGES [MMU_PAGE_4K] = { .shift = 12, }, -#else [MMU_PAGE_16K] = { .shift = 14, }, -#endif [MMU_PAGE_512K] = { .shift = 19, }, diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 1f61fa2148b5..63f61d8b55e5 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -430,7 +430,7 @@ static int of_get_assoc_arrays(struct assoc_arrays *aa) * This is like of_node_to_nid_single() for memory represented in the * ibm,dynamic-reconfiguration-memory node.
*/ -static int of_drconf_to_nid_single(struct drmem_lmb *lmb) +int of_drconf_to_nid_single(struct drmem_lmb *lmb) { struct assoc_arrays aa = { .arrays = NULL }; int default_nid = NUMA_NO_NODE; @@ -507,6 +507,11 @@ static int numa_setup_cpu(unsigned long lcpu) int fcpu = cpu_first_thread_sibling(lcpu); int nid = NUMA_NO_NODE; + if (!cpu_present(lcpu)) { + set_cpu_numa_node(lcpu, first_online_node); + return first_online_node; + } + /* * If a valid cpu-to-node mapping is already available, use it * directly instead of querying the firmware, since it represents @@ -723,21 +728,22 @@ static int __init parse_numa_properties(void) */ for_each_present_cpu(i) { struct device_node *cpu; - int nid; - - cpu = of_get_cpu_node(i, NULL); - BUG_ON(!cpu); - nid = of_node_to_nid_single(cpu); - of_node_put(cpu); + int nid = vphn_get_nid(i); /* * Don't fall back to default_nid yet -- we will plug * cpus into nodes once the memory scan has discovered * the topology. */ - if (nid < 0) - continue; - node_set_online(nid); + if (nid == NUMA_NO_NODE) { + cpu = of_get_cpu_node(i, NULL); + BUG_ON(!cpu); + nid = of_node_to_nid_single(cpu); + of_node_put(cpu); + } + + if (likely(nid > 0)) + node_set_online(nid); } get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); @@ -804,17 +810,14 @@ static void __init setup_nonnuma(void) unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int nid = 0; - struct memblock_region *reg; + int i; printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for_each_memblock(memory, reg) { - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); - + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), @@ -891,7 +894,9 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) static void __init find_possible_nodes(void) { struct device_node *rtas; - u32 numnodes, i; + const __be32 *domains; + int prop_length, max_nodes; + u32 i; if (!numa_enabled) return; @@ -900,16 +905,31 @@ static void __init find_possible_nodes(void) if (!rtas) return; - if (of_property_read_u32_index(rtas, - "ibm,max-associativity-domains", - min_common_depth, &numnodes)) - goto out; + /* + * ibm,current-associativity-domains is a fairly recent property. If + * it doesn't exist, then fallback on ibm,max-associativity-domains. + * Current denotes what the platform can support compared to max + * which denotes what the Hypervisor can support. + */ + domains = of_get_property(rtas, "ibm,current-associativity-domains", + &prop_length); + if (!domains) { + domains = of_get_property(rtas, "ibm,max-associativity-domains", + &prop_length); + if (!domains) + goto out; + } - for (i = 0; i < numnodes; i++) { + max_nodes = of_read_number(&domains[min_common_depth], 1); + for (i = 0; i < max_nodes; i++) { if (!node_possible(i)) node_set(i, node_possible_map); } + prop_length /= sizeof(int); + if (prop_length > min_common_depth + 2) + coregroup_enabled = 1; + out: of_node_put(rtas); } @@ -918,6 +938,16 @@ void __init mem_topology_setup(void) { int cpu; + /* + * Linux/mm assumes node 0 to be online at boot. However this is not + * true on PowerPC, where node 0 is similar to any other node, it + * could be cpuless, memoryless node. So force node 0 to be offline + * for now. 
This will prevent cpuless, memoryless node 0 showing up + * unnecessarily as online. If a node has cpus or memory that need + * to be online, then node will anyway be marked online. + */ + node_set_offline(0); + if (parse_numa_properties()) setup_nonnuma(); @@ -935,8 +965,17 @@ void __init mem_topology_setup(void) reset_numa_cpu_lookup_table(); - for_each_present_cpu(cpu) + for_each_possible_cpu(cpu) { + /* + * Powerpc with CONFIG_NUMA always used to have a node 0, + * even if it was memoryless or cpuless. For all cpus that + * are possible but not present, cpu_to_node() would point + * to node 0. To remove a cpuless, memoryless dummy node, + * powerpc need to make sure all possible but not present + * cpu_to_node are set to a proper node. + */ numa_setup_cpu(cpu); + } } void __init initmem_init(void) @@ -1203,6 +1242,31 @@ int find_and_online_cpu_nid(int cpu) return new_nid; } +int cpu_to_coregroup_id(int cpu) +{ + __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; + int index; + + if (cpu < 0 || cpu > nr_cpu_ids) + return -1; + + if (!coregroup_enabled) + goto out; + + if (!firmware_has_feature(FW_FEATURE_VPHN)) + goto out; + + if (vphn_get_associativity(cpu, associativity)) + goto out; + + index = of_read_number(associativity, 1); + if (index > min_common_depth + 1) + return of_read_number(&associativity[index - 1], 1); + +out: + return cpu_to_core_id(cpu); +} + static int topology_update_init(void) { topology_inited = 1; diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 9c0547d77af3..15555c95cebc 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -184,9 +184,6 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, */ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); - /* Add the pte bit when trying to set a pte */ - pte = pte_mkpte(pte); - /* Note: mm->context.id might not yet have been assigned as * this context might not have been activated yet when this * is called. @@ -266,8 +263,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_ pmd_t *pmd = pmd_off(mm, addr); pte_basic_t val; pte_basic_t *entry = &ptep->pte; - int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K; - int i; + int num, i; /* * Make sure hardware valid bit is not set. 
We don't do @@ -275,11 +271,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_ */ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); - pte = pte_mkpte(pte); - pte = set_pte_filter(pte); val = pte_val(pte); + + num = number_of_cells_per_pte(pmd, val, 1); + for (i = 0; i < num; i++, entry++, val += SZ_4K) *entry = val; } diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 6eb4eab79385..079159e97bca 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -123,11 +123,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) void __init mapin_ram(void) { - struct memblock_region *reg; + phys_addr_t base, end; + u64 i; - for_each_memblock(memory, reg) { - phys_addr_t base = reg->base; - phys_addr_t top = min(base + reg->size, total_lowmem); + for_each_mem_range(i, &base, &end) { + phys_addr_t top = min(end, total_lowmem); if (base >= top) continue; diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c index 8a797dcbf475..86da2a669680 100644 --- a/arch/powerpc/mm/ptdump/8xx.c +++ b/arch/powerpc/mm/ptdump/8xx.c @@ -11,8 +11,13 @@ static const struct flag_info flag_array[] = { { +#ifdef CONFIG_PPC_16K_PAGES .mask = _PAGE_HUGE, .val = _PAGE_HUGE, +#else + .mask = _PAGE_SPS, + .val = _PAGE_SPS, +#endif .set = "huge", .clear = " ", }, { diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c index e29b338d499f..c4c628b03cf8 100644 --- a/arch/powerpc/mm/ptdump/bats.c +++ b/arch/powerpc/mm/ptdump/bats.c @@ -12,62 +12,6 @@ #include "ptdump.h" -static char *pp_601(int k, int pp) -{ - if (pp == 0) - return k ? " " : "rwx"; - if (pp == 1) - return k ? "r x" : "rwx"; - if (pp == 2) - return "rwx"; - return "r x"; -} - -static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper) -{ - u32 blpi = upper & 0xfffe0000; - u32 k = (upper >> 2) & 3; - u32 pp = upper & 3; - phys_addr_t pbn = PHYS_BAT_ADDR(lower); - u32 bsm = lower & 0x3ff; - u32 size = (bsm + 1) << 17; - - seq_printf(m, "%d: ", idx); - if (!(lower & 0x40)) { - seq_puts(m, " -\n"); - return; - } - - seq_printf(m, "0x%08x-0x%08x ", blpi, blpi + size - 1); -#ifdef CONFIG_PHYS_64BIT - seq_printf(m, "0x%016llx ", pbn); -#else - seq_printf(m, "0x%08x ", pbn); -#endif - pt_dump_size(m, size); - - seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp)); - - seq_puts(m, lower & _PAGE_WRITETHRU ? "w " : " "); - seq_puts(m, lower & _PAGE_NO_CACHE ? "i " : " "); - seq_puts(m, lower & _PAGE_COHERENT ? 
"m " : " "); - seq_puts(m, "\n"); -} - -#define BAT_SHOW_601(_m, _n, _l, _u) bat_show_601(_m, _n, mfspr(_l), mfspr(_u)) - -static int bats_show_601(struct seq_file *m, void *v) -{ - seq_puts(m, "---[ Block Address Translation ]---\n"); - - BAT_SHOW_601(m, 0, SPRN_IBAT0L, SPRN_IBAT0U); - BAT_SHOW_601(m, 1, SPRN_IBAT1L, SPRN_IBAT1U); - BAT_SHOW_601(m, 2, SPRN_IBAT2L, SPRN_IBAT2U); - BAT_SHOW_601(m, 3, SPRN_IBAT3L, SPRN_IBAT3U); - - return 0; -} - static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool is_d) { u32 bepi = upper & 0xfffe0000; @@ -146,9 +90,6 @@ static int bats_show_603(struct seq_file *m, void *v) static int bats_open(struct inode *inode, struct file *file) { - if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) - return single_open(file, bats_show_601, NULL); - return single_open(file, bats_show_603, NULL); } diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c index df59d0bb121f..489f993100d5 100644 --- a/arch/powerpc/oprofile/cell/spu_task_sync.c +++ b/arch/powerpc/oprofile/cell/spu_task_sync.c @@ -572,7 +572,7 @@ void spu_sync_buffer(int spu_num, unsigned int *samples, * samples are recorded. * No big deal -- so we just drop a few samples. */ - pr_debug("SPU_PROF: No cached SPU contex " + pr_debug("SPU_PROF: No cached SPU context " "for SPU #%d. Dropping samples.\n", spu_num); goto out; } diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h index e608f9db12dd..8965b4463d43 100644 --- a/arch/powerpc/perf/hv-gpci-requests.h +++ b/arch/powerpc/perf/hv-gpci-requests.h @@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id) #define REQUEST_NAME system_performance_capabilities #define REQUEST_NUM 0x40 -#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" +#define REQUEST_IDX_KIND "starting_index=0xffffffff" #include I(REQUEST_BEGIN) REQUEST(__field(0, 1, perf_collect_privileged) __field(0x1, 1, capability_mask) @@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id) #define REQUEST_NAME system_hypervisor_times #define REQUEST_NUM 0xF0 -#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" +#define REQUEST_IDX_KIND "starting_index=0xffffffff" #include I(REQUEST_BEGIN) REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors) __count(0x8, 8, time_spent_processing_virtual_processor_timers) @@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors) #define REQUEST_NAME system_tlbie_count_and_time #define REQUEST_NUM 0xF4 -#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" +#define REQUEST_IDX_KIND "starting_index=0xffffffff" #include I(REQUEST_BEGIN) REQUEST(__count(0, 8, tlbie_instructions_issued) /* diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 6884d16ec19b..d48413e28c39 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -48,6 +48,8 @@ EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31); /* u32, byte offset */ EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63); +static cpumask_t hv_gpci_cpumask; + static struct attribute *format_attrs[] = { &format_attr_request.attr, &format_attr_starting_index.attr, @@ -94,7 +96,15 @@ static ssize_t kernel_version_show(struct device *dev, return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT); } +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask); +} + static DEVICE_ATTR_RO(kernel_version); +static DEVICE_ATTR_RO(cpumask); + 
HV_CAPS_ATTR(version, "0x%x\n"); HV_CAPS_ATTR(ga, "%d\n"); HV_CAPS_ATTR(expanded, "%d\n"); @@ -111,6 +121,15 @@ static struct attribute *interface_attrs[] = { NULL, }; +static struct attribute *cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group cpumask_attr_group = { + .attrs = cpumask_attrs, +}; + static struct attribute_group interface_group = { .name = "interface", .attrs = interface_attrs, @@ -120,20 +139,12 @@ static const struct attribute_group *attr_groups[] = { &format_group, &event_group, &interface_group, + &cpumask_attr_group, NULL, }; -#define HGPCI_REQ_BUFFER_SIZE 4096 -#define HGPCI_MAX_DATA_BYTES \ - (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params)) - static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t)); -struct hv_gpci_request_buffer { - struct hv_get_perf_counter_info_params params; - uint8_t bytes[HGPCI_MAX_DATA_BYTES]; -} __packed; - static unsigned long single_gpci_request(u32 req, u32 starting_index, u16 secondary_index, u8 version_in, u32 offset, u8 length, u64 *value) @@ -275,6 +286,45 @@ static struct pmu h_gpci_pmu = { .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; +static int ppc_hv_gpci_cpu_online(unsigned int cpu) +{ + if (cpumask_empty(&hv_gpci_cpumask)) + cpumask_set_cpu(cpu, &hv_gpci_cpumask); + + return 0; +} + +static int ppc_hv_gpci_cpu_offline(unsigned int cpu) +{ + int target; + + /* Check if exiting cpu is used for collecting gpci events */ + if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask)) + return 0; + + /* Find a new cpu to collect gpci events */ + target = cpumask_last(cpu_active_mask); + + if (target < 0 || target >= nr_cpu_ids) { + pr_err("hv_gpci: CPU hotplug init failed\n"); + return -1; + } + + /* Migrate gpci events to the new target */ + cpumask_set_cpu(target, &hv_gpci_cpumask); + perf_pmu_migrate_context(&h_gpci_pmu, cpu, target); + + return 0; +} + +static int hv_gpci_cpu_hotplug_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE, + "perf/powerpc/hv_gcpi:online", + ppc_hv_gpci_cpu_online, + ppc_hv_gpci_cpu_offline); +} + static int hv_gpci_init(void) { int r; @@ -295,6 +345,11 @@ static int hv_gpci_init(void) return -ENODEV; } + /* init cpuhotplug */ + r = hv_gpci_cpu_hotplug_init(); + if (r) + return r; + /* sampling not supported */ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h index a3053eda5dcc..4d108262bed7 100644 --- a/arch/powerpc/perf/hv-gpci.h +++ b/arch/powerpc/perf/hv-gpci.h @@ -2,33 +2,6 @@ #ifndef LINUX_POWERPC_PERF_HV_GPCI_H_ #define LINUX_POWERPC_PERF_HV_GPCI_H_ -#include <linux/types.h> - -/* From the document "H_GetPerformanceCounterInfo Interface" v1.07 */ - -/* H_GET_PERF_COUNTER_INFO argument */ -struct hv_get_perf_counter_info_params { - __be32 counter_request; /* I */ - __be32 starting_index; /* IO */ - __be16 secondary_index; /* IO */ - __be16 returned_values; /* O */ - __be32 detail_rc; /* O, only needed when called via *_norets() */ - - /* - * O, size each of counter_value element in bytes, only set for version - * >= 0x3 - */ - __be16 cv_element_size; - - /* I, 0 (zero) for versions < 0x3 */ - __u8 counter_info_version_in; - - /* O, 0 (zero) if version < 0x3. 
Must be set to 0 when making hcall */ - __u8 counter_info_version_out; - __u8 reserved[0xC]; - __u8 counter_value[]; -} __packed; - /* * counter info version => fw version/reference (spec version) * diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 62d0b54086f8..9ed4fcccf8a9 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1426,8 +1426,6 @@ static void trace_imc_event_del(struct perf_event *event, int flags) static int trace_imc_event_init(struct perf_event *event) { - struct task_struct *target; - if (event->attr.type != event->pmu->type) return -ENOENT; @@ -1458,7 +1456,6 @@ static int trace_imc_event_init(struct perf_event *event) mutex_unlock(&imc_global_refc.lock); event->hw.idx = -1; - target = event->hw.target; event->pmu->task_ctx_nr = perf_hw_context; event->destroy = reset_global_refc; diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 964437adec18..2848904df638 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -288,6 +288,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) mask |= CNST_PMC_MASK(pmc); value |= CNST_PMC_VAL(pmc); + + /* + * PMC5 and PMC6 are used to count cycles and instructions and + * they do not support most of the constraint bits. Add a check + * to exclude PMC5/6 from most of the constraints except for + * EBB/BHRB. + */ + if (pmc >= 5) + goto ebb_bhrb; } if (pmc <= 4) { @@ -357,6 +366,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) } } +ebb_bhrb: if (!pmc && ebb) /* EBB events must specify the PMC */ return -1; diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index 044de65e96b9..7025de5e60e7 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -13,6 +13,8 @@ #include <asm/firmware.h> #include <asm/cputable.h> +#include "internal.h" + #define EVENT_EBB_MASK 1ull #define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT #define EVENT_BHRB_MASK 1ull diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c index 83148656b524..9dbe8f9b89b4 100644 --- a/arch/powerpc/perf/power10-pmu.c +++ b/arch/powerpc/perf/power10-pmu.c @@ -9,7 +9,6 @@ #define pr_fmt(fmt) "power10-pmu: " fmt #include "isa207-common.h" -#include "internal.h" /* * Raw event encoding for Power10: diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c index a62b2cd7914f..3e64b4a1511f 100644 --- a/arch/powerpc/perf/power5+-pmu.c +++ b/arch/powerpc/perf/power5+-pmu.c @@ -10,6 +10,8 @@ #include <asm/reg.h> #include <asm/cputable.h> +#include "internal.h" + /* * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3) */ diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c index 8732b587cf71..017bb19b73fb 100644 --- a/arch/powerpc/perf/power5-pmu.c +++ b/arch/powerpc/perf/power5-pmu.c @@ -10,6 +10,8 @@ #include <asm/reg.h> #include <asm/cputable.h> +#include "internal.h" + /* * Bits in event code for POWER5 (not POWER5++) */ diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c index 0e318cf87129..189974478e9f 100644 --- a/arch/powerpc/perf/power6-pmu.c +++ b/arch/powerpc/perf/power6-pmu.c @@ -10,6 +10,8 @@ #include <asm/reg.h> #include <asm/cputable.h> +#include "internal.h" + /* * Bits in event code for POWER6 */ diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 5e0bf09cf077..bacfab104a1a 100644 
--- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -10,6 +10,8 @@ #include <asm/reg.h> #include <asm/cputable.h> +#include "internal.h" + /* * Bits in event code for POWER7 */ diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c index d35223fb112c..7d78df97f272 100644 --- a/arch/powerpc/perf/ppc970-pmu.c +++ b/arch/powerpc/perf/ppc970-pmu.c @@ -9,6 +9,8 @@ #include <asm/reg.h> #include <asm/cputable.h> +#include "internal.h" + /* * Bits in event code for PPC970 */ diff --git a/arch/powerpc/platforms/44x/machine_check.c b/arch/powerpc/platforms/44x/machine_check.c index 90ad6ac529d2..a5c898bb9bab 100644 --- a/arch/powerpc/platforms/44x/machine_check.c +++ b/arch/powerpc/platforms/44x/machine_check.c @@ -7,6 +7,7 @@ #include <linux/ptrace.h> #include <asm/reg.h> +#include <asm/cacheflush.h> int machine_check_440A(struct pt_regs *regs) { diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index cba83eee685c..07f7e3ce67b5 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -86,8 +86,7 @@ static void __noreturn avr_reset_system(char *cmd) avr_halt_system(AVR_PWRCTL_RESET); } -static int avr_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int avr_probe(struct i2c_client *client) { avr_i2c_client = client; ppc_md.restart = avr_reset_system; @@ -104,7 +103,7 @@ static struct i2c_driver avr_driver = { .driver = { .name = "akebono-avr", }, - .probe = avr_probe, + .probe_new = avr_probe, .id_table = avr_id, }; diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 0967bdfb1691..409481016928 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -142,7 +142,7 @@ static int mcu_gpiochip_remove(struct mcu *mcu) return 0; } -static int mcu_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int mcu_probe(struct i2c_client *client) { struct mcu *mcu; int ret; @@ -221,7 +221,7 @@ static struct i2c_driver mcu_driver = { .name = "mcu-mpc8349emitx", .of_match_table = mcu_of_match_table, }, - .probe = mcu_probe, + .probe_new = mcu_probe, .remove = mcu_remove, .id_table = mcu_ids, }; diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index fda108bae95f..c6df294054fe 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -112,7 +112,7 @@ static void mpc85xx_take_timebase(void) local_irq_restore(flags); } -static void smp_85xx_mach_cpu_die(void) +static void smp_85xx_cpu_offline_self(void) { unsigned int cpu = smp_processor_id(); @@ -506,7 +506,7 @@ void __init mpc85xx_smp_init(void) if (qoriq_pm_ops) { smp_85xx_ops.give_timebase = mpc85xx_give_timebase; smp_85xx_ops.take_timebase = mpc85xx_take_timebase; - ppc_md.cpu_die = smp_85xx_mach_cpu_die; + smp_85xx_ops.cpu_offline_self = smp_85xx_cpu_offline_self; smp_85xx_ops.cpu_die = qoriq_cpu_kill; } #endif diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index fb7515b4fa9c..7a5e8f4541e3 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -199,21 +199,6 @@ source "drivers/cpuidle/Kconfig" endmenu -config PPC601_SYNC_FIX - bool "Workarounds for PPC601 bugs" - depends on PPC_BOOK3S_601 && PPC_PMAC - default y - help - Some versions of the PPC601 (the first PowerPC chip) have bugs which - mean that extra synchronization instructions are required near - 
certain instructions, typically those that make major changes to the - CPU state. These extra instructions reduce performance slightly. - If you say N here, these extra instructions will not be included, - resulting in a kernel which will run faster but may not run at all - on some systems with the PPC601 chip. - - If in doubt, say Y here. - config TAU bool "On-chip CPU temperature sensor support" depends on PPC_BOOK3S_32 @@ -223,12 +208,11 @@ config TAU temperature within 2-4 degrees Celsius. This option shows the current on-die temperature in /proc/cpuinfo if the cpu supports it. - Unfortunately, on some chip revisions, this sensor is very inaccurate - and in many cases, does not work at all, so don't assume the cpu - temp is actually what /proc/cpuinfo says it is. + Unfortunately, this sensor is very inaccurate when uncalibrated, so + don't assume the cpu temp is actually what /proc/cpuinfo says it is. config TAU_INT - bool "Interrupt driven TAU driver (DANGEROUS)" + bool "Interrupt driven TAU driver (EXPERIMENTAL)" depends on TAU help The TAU supports an interrupt driven mode which causes an interrupt @@ -236,12 +220,7 @@ config TAU_INT to get notified the temp has exceeded a range. With this option off, a timer is used to re-check the temperature periodically. - However, on some cpus it appears that the TAU interrupt hardware - is buggy and can cause a situation which would lead unexplained hard - lockups. - - Unless you are extending the TAU driver, or enjoy kernel/hardware - debugging, leave this option off. + If in doubt, say N here. config TAU_AVERAGE bool "Average high and low temp" diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 1dc9d3c81872..c194c4ae8bc7 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -20,7 +20,7 @@ choice depends on PPC32 help There are five families of 32 bit PowerPC chips supported. - The most common ones are the desktop and server CPUs (601, 603, + The most common ones are the desktop and server CPUs (603, 604, 740, 750, 74xx) CPUs from Freescale and IBM, with their embedded 512x/52xx/82xx/83xx/86xx counterparts. The other embedded parts, namely 4xx, 8xx, e200 (55xx) and e500 @@ -30,7 +30,7 @@ choice If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx. config PPC_BOOK3S_6xx - bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx except 601" + bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx" select PPC_BOOK3S_32 select PPC_FPU select PPC_HAVE_PMU_SUPPORT @@ -38,13 +38,6 @@ config PPC_BOOK3S_6xx select PPC_HAVE_KUAP select HAVE_ARCH_VMAP_STACK if !ADB_PMU -config PPC_BOOK3S_601 - bool "PowerPC 601" - select PPC_BOOK3S_32 - select PPC_FPU - select PPC_HAVE_KUAP - select HAVE_ARCH_VMAP_STACK - config PPC_85xx bool "Freescale 85xx" select E500 @@ -490,13 +483,12 @@ endmenu config VDSO32 def_bool y - depends on PPC32 || CPU_BIG_ENDIAN + depends on PPC32 || COMPAT help This symbol controls whether we build the 32-bit VDSO. We obviously want to do that if we're building a 32-bit kernel. If we're building - a 64-bit kernel then we only want a 32-bit VDSO if we're building for - big endian. That is because the only little endian configuration we - support is ppc64le which is 64-bit only. + a 64-bit kernel then we only want a 32-bit VDSO if we're also enabling + COMPAT. 
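For illustration, a minimal sketch of what the new VDSO32 dependency above means for code that consumes the option: a 64-bit kernel only carries the 32-bit VDSO when COMPAT is enabled, so callers can simply key off CONFIG_VDSO32. Only the Kconfig symbol comes from this patch; vdso32_setup() below is a hypothetical placeholder for the real initialisation path.

#include <linux/init.h>
#include <linux/kconfig.h>

extern int vdso32_setup(void);	/* hypothetical helper, illustration only */

/*
 * Sketch only: with the dependency above, CONFIG_VDSO32 is set on PPC32 and
 * on PPC64 with COMPAT, and unset on ppc64le kernels built without COMPAT.
 */
static int __init maybe_init_vdso32(void)
{
	if (!IS_ENABLED(CONFIG_VDSO32))
		return 0;		/* no 32-bit userspace expected */

	return vdso32_setup();
}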
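Stepping back to the two i2c drivers converted earlier in this series (akebono-avr and mcu-mpc8349emitx): both follow the same mechanical pattern of switching from .probe to .probe_new and dropping the unused i2c_device_id argument. A condensed, self-contained sketch of that pattern, with foo_* as placeholder names not taken from the patch:

#include <linux/i2c.h>
#include <linux/module.h>

/* Placeholder driver showing the .probe -> .probe_new conversion. */
static int foo_probe(struct i2c_client *client)
{
	/* same device setup as before, minus the unused 'id' argument */
	return 0;
}

static const struct i2c_device_id foo_ids[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_ids);

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe_new	= foo_probe,	/* was: .probe = foo_probe */
	.id_table	= foo_ids,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");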
choice prompt "Endianness selection" diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 15437abe1f6d..b95c3380d2b5 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -147,7 +147,8 @@ static void __noreturn mpc7448_hpc2_restart(char *cmd) local_irq_disable(); /* Set exception prefix high - to the firmware */ - _nmask_and_or_msr(0, MSR_IP); + mtmsr(mfmsr() | MSR_IP); + isync(); for (;;) ; /* Spin until reset happens */ } diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index ed1914dd34bb..e346ddcef45e 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -101,7 +101,8 @@ static void __noreturn storcenter_restart(char *cmd) local_irq_disable(); /* Set exception prefix high - to the firmware */ - _nmask_and_or_msr(0, MSR_IP); + mtmsr(mfmsr() | MSR_IP); + isync(); /* Wait for reset to happen */ for (;;) ; diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index 16a52afdb76e..0d715db434dc 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -34,7 +34,7 @@ extern void pmac_check_ht_link(void); extern void pmac_setup_smp(void); extern int psurge_secondary_virq; -extern void low_cpu_die(void) __attribute__((noreturn)); +extern void low_cpu_offline_self(void) __attribute__((noreturn)); extern int pmac_nvram_init(void); extern void pmac_pic_init(void); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index f002b0fa69b8..2e2cc0c75d87 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -284,7 +284,7 @@ static void __init pmac_setup_arch(void) /* 604, G3, G4 etc. */ loops_per_jiffy = *fp / HZ; else - /* 601, 603, etc. */ + /* 603, etc. */ loops_per_jiffy = *fp / (2 * HZ); of_node_put(cpu); break; diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index f9a680fdd9c4..7e0f8ba6e54a 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -201,8 +201,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) addi r3,r3,sleep_storage@l stw r5,0(r3) - .globl low_cpu_die -low_cpu_die: + .globl low_cpu_offline_self +low_cpu_offline_self: /* Flush & disable all caches */ bl flush_disable_caches @@ -244,7 +244,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) mtmsr r2 isync b 1b -_ASM_NOKPROBE_SYMBOL(low_cpu_die) +_ASM_NOKPROBE_SYMBOL(low_cpu_offline_self) /* * Here is the resume code. 
*/ @@ -294,14 +294,7 @@ grackle_wake_up: * we do any r1 memory access as we are not sure they * are in a sane state above the first 256Mb region */ - li r0,16 /* load up segment register values */ - mtctr r0 /* for context 0 */ - lis r3,0x2000 /* Ku = 1, VSID = 0 */ - li r4,0 -3: mtsrin r3,r4 - addi r3,r3,0x111 /* increment VSID */ - addis r4,r4,0x1000 /* address of next segment */ - bdnz 3b + bl load_segment_registers sync isync diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index eb23264910e1..74ebe664b016 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -270,10 +270,6 @@ static void __init smp_psurge_probe(void) int i, ncpus; struct device_node *dn; - /* We don't do SMP on the PPC601 -- paulus */ - if (PVR_VER(mfspr(SPRN_PVR)) == 1) - return; - /* * The powersurge cpu board can be used in the generation * of powermacs that have a socket for an upgradeable cpu card, @@ -920,7 +916,7 @@ static int smp_core99_cpu_disable(void) #ifdef CONFIG_PPC32 -static void pmac_cpu_die(void) +static void pmac_cpu_offline_self(void) { int cpu = smp_processor_id(); @@ -930,12 +926,12 @@ static void pmac_cpu_die(void) generic_set_cpu_dead(cpu); smp_wmb(); mb(); - low_cpu_die(); + low_cpu_offline_self(); } #else /* CONFIG_PPC32 */ -static void pmac_cpu_die(void) +static void pmac_cpu_offline_self(void) { int cpu = smp_processor_id(); @@ -1020,7 +1016,7 @@ void __init pmac_setup_smp(void) #endif /* CONFIG_PPC_PMAC32_PSURGE */ #ifdef CONFIG_HOTPLUG_CPU - ppc_md.cpu_die = pmac_cpu_die; + smp_ops->cpu_offline_self = pmac_cpu_offline_self; #endif } diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 9af8c3b98853..89e22c460ebf 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -38,60 +38,12 @@ static int eeh_event_irq = -EINVAL; -void pnv_pcibios_bus_add_device(struct pci_dev *pdev) +static void pnv_pcibios_bus_add_device(struct pci_dev *pdev) { dev_dbg(&pdev->dev, "EEH: Setting up device\n"); eeh_probe_device(pdev); } -static int pnv_eeh_init(void) -{ - struct pci_controller *hose; - struct pnv_phb *phb; - int max_diag_size = PNV_PCI_DIAG_BUF_SIZE; - - if (!firmware_has_feature(FW_FEATURE_OPAL)) { - pr_warn("%s: OPAL is required !\n", - __func__); - return -EINVAL; - } - - /* Set probe mode */ - eeh_add_flag(EEH_PROBE_MODE_DEV); - - /* - * P7IOC blocks PCI config access to frozen PE, but PHB3 - * doesn't do that. So we have to selectively enable I/O - * prior to collecting error log. - */ - list_for_each_entry(hose, &hose_list, list_node) { - phb = hose->private_data; - - if (phb->model == PNV_PHB_MODEL_P7IOC) - eeh_add_flag(EEH_ENABLE_IO_FOR_LOG); - - if (phb->diag_data_size > max_diag_size) - max_diag_size = phb->diag_data_size; - - /* - * PE#0 should be regarded as valid by EEH core - * if it's not the reserved one. Currently, we - * have the reserved PE#255 and PE#127 for PHB3 - * and P7IOC separately. So we should regard - * PE#0 as valid for PHB3 and P7IOC. 
- */ - if (phb->ioda.reserved_pe_idx != 0) - eeh_add_flag(EEH_VALID_PE_ZERO); - - break; - } - - eeh_set_pe_aux_size(max_diag_size); - ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device; - - return 0; -} - static irqreturn_t pnv_eeh_event(int irq, void *data) { /* @@ -135,7 +87,7 @@ static ssize_t pnv_eeh_ei_write(struct file *filp, return -EINVAL; /* Retrieve PE */ - pe = eeh_pe_get(hose, pe_no, 0); + pe = eeh_pe_get(hose, pe_no); if (!pe) return -ENODEV; @@ -190,7 +142,7 @@ PNV_EEH_DBGFS_ENTRY(inbB, 0xE10); #endif /* CONFIG_DEBUG_FS */ -void pnv_eeh_enable_phbs(void) +static void pnv_eeh_enable_phbs(void) { struct pci_controller *hose; struct pnv_phb *phb; @@ -354,7 +306,7 @@ static struct eeh_pe *pnv_eeh_get_upstream_pe(struct pci_dev *pdev) if (parent) { struct pnv_ioda_pe *ioda_pe = pnv_ioda_get_pe(parent); - return eeh_pe_get(phb->hose, ioda_pe->pe_number, 0); + return eeh_pe_get(phb->hose, ioda_pe->pe_number); } return NULL; @@ -1406,7 +1358,7 @@ static int pnv_eeh_get_pe(struct pci_controller *hose, } /* Find the PE according to PE# */ - dev_pe = eeh_pe_get(hose, pe_no, 0); + dev_pe = eeh_pe_get(hose, pe_no); if (!dev_pe) return -EEXIST; @@ -1674,7 +1626,6 @@ static int pnv_eeh_restore_config(struct eeh_dev *edev) static struct eeh_ops pnv_eeh_ops = { .name = "powernv", - .init = pnv_eeh_init, .probe = pnv_eeh_probe, .set_option = pnv_eeh_set_option, .get_state = pnv_eeh_get_state, @@ -1715,9 +1666,44 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps); */ static int __init eeh_powernv_init(void) { + int max_diag_size = PNV_PCI_DIAG_BUF_SIZE; + struct pci_controller *hose; + struct pnv_phb *phb; int ret = -EINVAL; - ret = eeh_ops_register(&pnv_eeh_ops); + if (!firmware_has_feature(FW_FEATURE_OPAL)) { + pr_warn("%s: OPAL is required !\n", __func__); + return -EINVAL; + } + + /* Set probe mode */ + eeh_add_flag(EEH_PROBE_MODE_DEV); + + /* + * P7IOC blocks PCI config access to frozen PE, but PHB3 + * doesn't do that. So we have to selectively enable I/O + * prior to collecting error log. + */ + list_for_each_entry(hose, &hose_list, list_node) { + phb = hose->private_data; + + if (phb->model == PNV_PHB_MODEL_P7IOC) + eeh_add_flag(EEH_ENABLE_IO_FOR_LOG); + + if (phb->diag_data_size > max_diag_size) + max_diag_size = phb->diag_data_size; + + break; + } + + /* + * eeh_init() allocates the eeh_pe and its aux data buf so the + * size needs to be set before calling eeh_init(). + */ + eeh_set_pe_aux_size(max_diag_size); + ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device; + + ret = eeh_init(&pnv_eeh_ops); if (!ret) pr_info("EEH: PowerNV platform initialized\n"); else @@ -1725,4 +1711,4 @@ static int __init eeh_powernv_init(void) return ret; } -machine_early_initcall(powernv, eeh_powernv_init); +machine_arch_initcall(powernv, eeh_powernv_init); diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 345ab062b21a..1ed7c5286487 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -565,7 +565,7 @@ void power7_idle_type(unsigned long type) irq_set_pending_from_srr1(srr1); } -void power7_idle(void) +static void power7_idle(void) { if (!powersave_nap) return; @@ -659,20 +659,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) mmcr0 = mfspr(SPRN_MMCR0); } - if (cpu_has_feature(CPU_FTR_ARCH_31)) { - /* - * POWER10 uses MMCRA (BHRBRD) as BHRB disable bit. - * If the user hasn't asked for the BHRB to be - * written, the value of MMCRA[BHRBRD] is 1. 
- * On wakeup from stop, MMCRA[BHRBD] will be 0, - * since it is previleged resource and will be lost. - * Thus, if we do not save and restore the MMCRA[BHRBD], - * hardware will be needlessly writing to the BHRB - * in problem mode. - */ - mmcra = mfspr(SPRN_MMCRA); - } - if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) { sprs.lpcr = mfspr(SPRN_LPCR); sprs.hfscr = mfspr(SPRN_HFSCR); @@ -735,10 +721,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on) mtspr(SPRN_MMCR0, mmcr0); } - /* Reload MMCRA to restore BHRB disable bit for POWER10 */ - if (cpu_has_feature(CPU_FTR_ARCH_31)) - mtspr(SPRN_MMCRA, mmcra); - /* * DD2.2 and earlier need to set then clear bit 60 in MMCRA * to ensure the PMU starts running. @@ -823,73 +805,6 @@ out: return srr1; } -#ifdef CONFIG_HOTPLUG_CPU -static unsigned long power9_offline_stop(unsigned long psscr) -{ - unsigned long srr1; - -#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE - __ppc64_runlatch_off(); - srr1 = power9_idle_stop(psscr, true); - __ppc64_runlatch_on(); -#else - /* - * Tell KVM we're entering idle. - * This does not have to be done in real mode because the P9 MMU - * is independent per-thread. Some steppings share radix/hash mode - * between threads, but in that case KVM has a barrier sync in real - * mode before and after switching between radix and hash. - * - * kvm_start_guest must still be called in real mode though, hence - * the false argument. - */ - local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE; - - __ppc64_runlatch_off(); - srr1 = power9_idle_stop(psscr, false); - __ppc64_runlatch_on(); - - local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL; - /* Order setting hwthread_state vs. testing hwthread_req */ - smp_mb(); - if (local_paca->kvm_hstate.hwthread_req) - srr1 = idle_kvm_start_guest(srr1); - mtmsr(MSR_KERNEL); -#endif - - return srr1; -} -#endif - -void power9_idle_type(unsigned long stop_psscr_val, - unsigned long stop_psscr_mask) -{ - unsigned long psscr; - unsigned long srr1; - - if (!prep_irq_for_idle_irqsoff()) - return; - - psscr = mfspr(SPRN_PSSCR); - psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val; - - __ppc64_runlatch_off(); - srr1 = power9_idle_stop(psscr, true); - __ppc64_runlatch_on(); - - fini_irq_for_idle_irqsoff(); - - irq_set_pending_from_srr1(srr1); -} - -/* - * Used for ppc_md.power_save which needs a function with no parameters - */ -void power9_idle(void) -{ - power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask); -} - #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* * This is used in working around bugs in thread reconfiguration @@ -962,6 +877,198 @@ void pnv_power9_force_smt4_release(void) EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ +struct p10_sprs { + /* + * SPRs that get lost in shallow states: + * + * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1 + * isa300 idle routines restore CR, LR. + * CTR is volatile + * idle thread doesn't use FP or VEC + * kernel doesn't use TAR + * HSPRG1 is only live in HV interrupt entry + * SPRG2 is only live in KVM guests, KVM handles it. 
+ */ +}; + +static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on) +{ + int cpu = raw_smp_processor_id(); + int first = cpu_first_thread_sibling(cpu); + unsigned long *state = &paca_ptrs[first]->idle_state; + unsigned long core_thread_mask = (1UL << threads_per_core) - 1; + unsigned long srr1; + unsigned long pls; +// struct p10_sprs sprs = {}; /* avoid false used-uninitialised */ + bool sprs_saved = false; + + if (!(psscr & (PSSCR_EC|PSSCR_ESL))) { + /* EC=ESL=0 case */ + + BUG_ON(!mmu_on); + + /* + * Wake synchronously. SRESET via xscom may still cause + * a 0x100 powersave wakeup with SRR1 reason! + */ + srr1 = isa300_idle_stop_noloss(psscr); /* go idle */ + if (likely(!srr1)) + return 0; + + /* + * Registers not saved, can't recover! + * This would be a hardware bug + */ + BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS); + + goto out; + } + + /* EC=ESL=1 case */ + if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) { + /* XXX: save SPRs for deep state loss here. */ + + sprs_saved = true; + + atomic_start_thread_idle(); + } + + srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */ + + psscr = mfspr(SPRN_PSSCR); + + WARN_ON_ONCE(!srr1); + WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR)); + + if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI)) + hmi_exception_realmode(NULL); + + /* + * On POWER10, SRR1 bits do not match exactly as expected. + * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so + * just always test PSSCR for SPR/TB state loss. + */ + pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT; + if (likely(pls < deep_spr_loss_state)) { + if (sprs_saved) + atomic_stop_thread_idle(); + goto out; + } + + /* HV state loss */ + BUG_ON(!sprs_saved); + + atomic_lock_thread_idle(); + + if ((*state & core_thread_mask) != 0) + goto core_woken; + + /* XXX: restore per-core SPRs here */ + + if (pls >= pnv_first_tb_loss_level) { + /* TB loss */ + if (opal_resync_timebase() != OPAL_SUCCESS) + BUG(); + } + + /* + * isync after restoring shared SPRs and before unlocking. Unlock + * only contains hwsync which does not necessarily do the right + * thing for SPRs. + */ + isync(); + +core_woken: + atomic_unlock_and_stop_thread_idle(); + + /* XXX: restore per-thread SPRs here */ + + if (!radix_enabled()) + __slb_restore_bolted_realmode(); + +out: + if (mmu_on) + mtmsr(MSR_KERNEL); + + return srr1; +} + +#ifdef CONFIG_HOTPLUG_CPU +static unsigned long arch300_offline_stop(unsigned long psscr) +{ + unsigned long srr1; + +#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE + __ppc64_runlatch_off(); + if (cpu_has_feature(CPU_FTR_ARCH_31)) + srr1 = power10_idle_stop(psscr, true); + else + srr1 = power9_idle_stop(psscr, true); + __ppc64_runlatch_on(); +#else + /* + * Tell KVM we're entering idle. + * This does not have to be done in real mode because the P9 MMU + * is independent per-thread. Some steppings share radix/hash mode + * between threads, but in that case KVM has a barrier sync in real + * mode before and after switching between radix and hash. + * + * kvm_start_guest must still be called in real mode though, hence + * the false argument. + */ + local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE; + + __ppc64_runlatch_off(); + if (cpu_has_feature(CPU_FTR_ARCH_31)) + srr1 = power10_idle_stop(psscr, false); + else + srr1 = power9_idle_stop(psscr, false); + __ppc64_runlatch_on(); + + local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL; + /* Order setting hwthread_state vs. 
testing hwthread_req */ + smp_mb(); + if (local_paca->kvm_hstate.hwthread_req) + srr1 = idle_kvm_start_guest(srr1); + mtmsr(MSR_KERNEL); +#endif + + return srr1; +} +#endif + +void arch300_idle_type(unsigned long stop_psscr_val, + unsigned long stop_psscr_mask) +{ + unsigned long psscr; + unsigned long srr1; + + if (!prep_irq_for_idle_irqsoff()) + return; + + psscr = mfspr(SPRN_PSSCR); + psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val; + + __ppc64_runlatch_off(); + if (cpu_has_feature(CPU_FTR_ARCH_31)) + srr1 = power10_idle_stop(psscr, true); + else + srr1 = power9_idle_stop(psscr, true); + __ppc64_runlatch_on(); + + fini_irq_for_idle_irqsoff(); + + irq_set_pending_from_srr1(srr1); +} + +/* + * Used for ppc_md.power_save which needs a function with no parameters + */ +static void arch300_idle(void) +{ + arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask); +} + #ifdef CONFIG_HOTPLUG_CPU void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) @@ -995,7 +1102,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu) psscr = mfspr(SPRN_PSSCR); psscr = (psscr & ~pnv_deepest_stop_psscr_mask) | pnv_deepest_stop_psscr_val; - srr1 = power9_offline_stop(psscr); + srr1 = arch300_offline_stop(psscr); } else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) { srr1 = power7_offline(); } else { @@ -1093,11 +1200,15 @@ int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags) * @dt_idle_states: Number of idle state entries * Returns 0 on success */ -static void __init pnv_power9_idle_init(void) +static void __init pnv_arch300_idle_init(void) { u64 max_residency_ns = 0; int i; + /* stop is not really architected, we only have p9,p10 drivers */ + if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9)) + return; + /* * pnv_deepest_stop_{val,mask} should be set to values corresponding to * the deepest stop state. @@ -1112,6 +1223,11 @@ static void __init pnv_power9_idle_init(void) struct pnv_idle_states_t *state = &pnv_idle_states[i]; u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK; + /* No deep loss driver implemented for POWER10 yet */ + if (pvr_version_is(PVR_POWER10) && + state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT)) + continue; + if ((state->flags & OPAL_PM_TIMEBASE_STOP) && (pnv_first_tb_loss_level > psscr_rl)) pnv_first_tb_loss_level = psscr_rl; @@ -1162,7 +1278,7 @@ static void __init pnv_power9_idle_init(void) if (unlikely(!default_stop_found)) { pr_warn("cpuidle-powernv: No suitable default stop state found. 
Disabling platform idle.\n"); } else { - ppc_md.power_save = power9_idle; + ppc_md.power_save = arch300_idle; pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n", pnv_default_stop_val, pnv_default_stop_mask); } @@ -1224,7 +1340,7 @@ static void __init pnv_probe_idle_states(void) } if (cpu_has_feature(CPU_FTR_ARCH_300)) - pnv_power9_idle_init(); + pnv_arch300_idle_init(); for (i = 0; i < nr_pnv_idle_states; i++) supported_cpuidle_states |= pnv_idle_states[i].flags; @@ -1295,7 +1411,7 @@ static int pnv_parse_cpuidle_dt(void) for (i = 0; i < nr_idle_states; i++) pnv_idle_states[i].residency_ns = temp_u32[i]; - /* For power9 */ + /* For power9 and later */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { /* Read pm_crtl_val */ if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr", @@ -1358,8 +1474,8 @@ static int __init pnv_init_idle_states(void) if (!cpu_has_feature(CPU_FTR_ARCH_300)) { /* P7/P8 nap */ p->thread_idle_state = PNV_THREAD_RUNNING; - } else { - /* P9 stop */ + } else if (pvr_version_is(PVR_POWER9)) { + /* P9 stop workarounds */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE p->requested_psscr = 0; atomic_set(&p->dont_stop, 0); diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 13b369d2cc45..6828108486f8 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -224,7 +224,7 @@ static int memtrace_online(void) ent->mem = 0; } - if (add_memory(ent->nid, ent->start, ent->size)) { + if (add_memory(ent->nid, ent->start, ent->size, MHP_NONE)) { pr_err("Failed to add trace memory to node %d\n", ent->nid); ret += 1; diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c index 8c65aacda9c8..ecdad219d704 100644 --- a/arch/powerpc/platforms/powernv/ocxl.c +++ b/arch/powerpc/platforms/powernv/ocxl.c @@ -2,7 +2,6 @@ // Copyright 2017 IBM Corp. 
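The one-line memtrace change above reflects an API change: add_memory() now takes a memory-hotplug flags argument, and callers with no special requirements pass MHP_NONE. A hedged sketch of the updated call pattern; example_hotplug_range() and its error handling are illustrative only, not part of the patch.

#include <linux/memory_hotplug.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Sketch: 'nid', 'start' and 'size' stand in for the caller's values. */
static int example_hotplug_range(int nid, u64 start, u64 size)
{
	int rc;

	/* New fourth argument; MHP_NONE requests the default behaviour. */
	rc = add_memory(nid, start, size, MHP_NONE);
	if (rc)
		pr_err("add_memory failed for 0x%llx (%d)\n", start, rc);

	return rc;
}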
#include <asm/pnv-ocxl.h> #include <asm/opal.h> -#include <asm/xive.h> #include <misc/ocxl-config.h> #include "pci.h" @@ -484,32 +483,3 @@ int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle) return rc; } EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache); - -int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr) -{ - __be64 flags, trigger_page; - s64 rc; - u32 hwirq; - - hwirq = xive_native_alloc_irq(); - if (!hwirq) - return -ENOENT; - - rc = opal_xive_get_irq_info(hwirq, &flags, NULL, &trigger_page, NULL, - NULL); - if (rc || !trigger_page) { - xive_native_free_irq(hwirq); - return -ENOENT; - } - *irq = hwirq; - *trigger_addr = be64_to_cpu(trigger_page); - return 0; - -} -EXPORT_SYMBOL_GPL(pnv_ocxl_alloc_xive_irq); - -void pnv_ocxl_free_xive_irq(u32 irq) -{ - xive_native_free_irq(irq); -} -EXPORT_SYMBOL_GPL(pnv_ocxl_free_xive_irq); diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index 6dba3b62269f..23571f0b555a 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -510,7 +510,7 @@ static void __init opalcore_config_init(void) idx = be32_to_cpu(opalc_metadata->region_cnt); if (idx > MAX_PT_LOAD_CNT) { pr_warn("WARNING: OPAL regions count (%d) adjusted to limit (%d)", - MAX_PT_LOAD_CNT, idx); + idx, MAX_PT_LOAD_CNT); idx = MAX_PT_LOAD_CNT; } for (i = 0; i < idx; i++) { diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 62ef7ad995da..5e33b1fc67c2 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -179,14 +179,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, return count; } -static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) +static void create_elog_obj(uint64_t id, size_t size, uint64_t type) { struct elog_obj *elog; int rc; elog = kzalloc(sizeof(*elog), GFP_KERNEL); if (!elog) - return NULL; + return; elog->kobj.kset = elog_kset; @@ -219,18 +219,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) rc = kobject_add(&elog->kobj, NULL, "0x%llx", id); if (rc) { kobject_put(&elog->kobj); - return NULL; + return; } + /* + * As soon as the sysfs file for this elog is created/activated there is + * a chance the opal_errd daemon (or any userspace) might read and + * acknowledge the elog before kobject_uevent() is called. If that + * happens then there is a potential race between + * elog_ack_store->kobject_put() and kobject_uevent() which leads to a + * use-after-free of a kernfs object resulting in a kernel crash. + * + * To avoid that, we need to take a reference on behalf of the bin file, + * so that our reference remains valid while we call kobject_uevent(). + * We then drop our reference before exiting the function, leaving the + * bin file to drop the last reference (if it hasn't already). 
+ */ + + /* Take a reference for the bin file */ + kobject_get(&elog->kobj); rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr); - if (rc) { + if (rc == 0) { + kobject_uevent(&elog->kobj, KOBJ_ADD); + } else { + /* Drop the reference taken for the bin file */ kobject_put(&elog->kobj); - return NULL; } - kobject_uevent(&elog->kobj, KOBJ_ADD); + /* Drop our reference */ + kobject_put(&elog->kobj); - return elog; + return; } static irqreturn_t elog_event(int irq, void *data) diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index d26da19a611f..d3b6e135c18b 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -12,6 +12,8 @@ #include <linux/types.h> #include <asm/barrier.h> +#include "powernv.h" + /* OPAL in-memory console. Defined in OPAL source at core/console.c */ struct memcons { __be64 magic; diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 45f4223a790f..deddaebf8c14 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -24,7 +24,7 @@ #include <linux/uaccess.h> -/** +/* * The msg member must be at the end of the struct, as it's followed by the * message data. */ diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 023a4f987bb2..2b4ceb5e6ce4 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -894,7 +894,6 @@ int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { - struct pci_dev *parent; uint8_t bcomp, dcomp, fcomp; long rc, rid_end, rid; @@ -904,7 +903,6 @@ int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; - parent = pe->pbus->self; if (pe->flags & PNV_IODA_PE_BUS_ALL) count = resource_size(&pe->pbus->busn_res); else @@ -925,12 +923,6 @@ int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) } rid_end = pe->rid + (count << 8); } else { -#ifdef CONFIG_PCI_IOV - if (pe->flags & PNV_IODA_PE_VF) - parent = pe->parent_dev; - else -#endif /* CONFIG_PCI_IOV */ - parent = pe->pdev->bus->self; bcomp = OpalPciBusAll; dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index 1aa51c4fa904..11df4e16a1cc 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -2,6 +2,13 @@ #ifndef _POWERNV_H #define _POWERNV_H +/* + * There's various hacks scattered throughout the generic powerpc arch code + * that needs to call into powernv platform stuff. 
The prototypes for those + * functions are in asm/powernv.h + */ +#include <asm/powernv.h> + #ifdef CONFIG_SMP extern void pnv_smp_init(void); #else diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c index 8035caf6e297..72c25295c1c2 100644 --- a/arch/powerpc/platforms/powernv/rng.c +++ b/arch/powerpc/platforms/powernv/rng.c @@ -65,7 +65,7 @@ int powernv_get_random_real_mode(unsigned long *v) return 1; } -int powernv_get_random_darn(unsigned long *v) +static int powernv_get_random_darn(unsigned long *v) { unsigned long val; diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 7fcb88623081..9acaa0f131b9 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -130,6 +130,28 @@ static void pnv_setup_rfi_flush(void) setup_count_cache_flush(); } +static void __init pnv_check_guarded_cores(void) +{ + struct device_node *dn; + int bad_count = 0; + + for_each_node_by_type(dn, "cpu") { + if (of_property_match_string(dn, "status", "bad") >= 0) + bad_count++; + }; + + if (bad_count) { + printk(" _ _______________\n"); + pr_cont(" | | / \\\n"); + pr_cont(" | | | WARNING! |\n"); + pr_cont(" | | | |\n"); + pr_cont(" | | | It looks like |\n"); + pr_cont(" |_| | you have %*d |\n", 3, bad_count); + pr_cont(" _ | guarded cores |\n"); + pr_cont(" (_) \\_______________/\n"); + } +} + static void __init pnv_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); @@ -150,6 +172,8 @@ static void __init pnv_setup_arch(void) /* Enable NAP mode */ powersave_nap = 1; + pnv_check_guarded_cores(); + /* XXX PMCS */ } diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index b2ba3e95bda7..54c4ba45c7ce 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -43,7 +43,7 @@ #include <asm/udbg.h> #define DBG(fmt...) udbg_printf(fmt) #else -#define DBG(fmt...) +#define DBG(fmt...) do { } while (0) #endif static void pnv_smp_setup_cpu(int cpu) @@ -158,7 +158,7 @@ static void pnv_flush_interrupts(void) } } -static void pnv_smp_cpu_kill_self(void) +static void pnv_cpu_offline_self(void) { unsigned long srr1, unexpected_mask, wmask; unsigned int cpu; @@ -417,6 +417,7 @@ static struct smp_ops_t pnv_smp_ops = { #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = pnv_smp_cpu_disable, .cpu_die = generic_cpu_die, + .cpu_offline_self = pnv_cpu_offline_self, #endif /* CONFIG_HOTPLUG_CPU */ }; @@ -430,7 +431,6 @@ void __init pnv_smp_init(void) smp_ops = &pnv_smp_ops; #ifdef CONFIG_HOTPLUG_CPU - ppc_md.cpu_die = pnv_smp_cpu_kill_self; #ifdef CONFIG_KEXEC_CORE crash_wake_offline = 1; #endif diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 6434f9cb5aed..5f5fe63a3d1c 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -186,7 +186,7 @@ static void unmap_winctx_mmio_bars(struct vas_window *window) * OS/User Window Context (UWC) MMIO Base Address Region for the given window. * Map these bus addresses and save the mapped kernel addresses in @window. */ -int map_winctx_mmio_bars(struct vas_window *window) +static int map_winctx_mmio_bars(struct vas_window *window) { int len; u64 start; @@ -214,7 +214,7 @@ int map_winctx_mmio_bars(struct vas_window *window) * registers are not sequential. And, we can only write to offsets * with valid registers. 
*/ -void reset_window_regs(struct vas_window *window) +static void reset_window_regs(struct vas_window *window) { write_hvwc_reg(window, VREG(LPID), 0ULL); write_hvwc_reg(window, VREG(PID), 0ULL); @@ -357,7 +357,8 @@ static void init_rsvd_tx_buf_count(struct vas_window *txwin, * as a one-time task? That could work for NX but what about other * receivers? Let the receivers tell us the rx-fifo buffers for now. */ -int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx) +static void init_winctx_regs(struct vas_window *window, + struct vas_winctx *winctx) { u64 val; int fifo_size; @@ -499,8 +500,6 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx) val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win); val = SET_FIELD(VAS_WINCTL_OPEN, val, 1); write_hvwc_reg(window, VREG(WINCTL), val); - - return 0; } static void vas_release_window_id(struct ida *ida, int winid) diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 1193c294b8d0..0c252478e556 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -448,7 +448,7 @@ static void ps3_disable_spu(struct spu_context *ctx) ctx->ops->runcntl_stop(ctx); } -const struct spu_management_ops spu_management_ps3_ops = { +static const struct spu_management_ops spu_management_ps3_ops = { .enumerate_spus = ps3_enumerate_spus, .create_spu = ps3_create_spu, .destroy_spu = ps3_destroy_spu, @@ -589,7 +589,7 @@ static u64 resource_allocation_enable_get(struct spu *spu) return 0; /* No support. */ } -const struct spu_priv1_ops spu_priv1_ps3_ops = { +static const struct spu_priv1_ops spu_priv1_ps3_ops = { .int_mask_and = int_mask_and, .int_mask_or = int_mask_or, .int_mask_set = int_mask_set, diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 3542b7bd6a46..c62aaa29a9d5 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -9,7 +9,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/export.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/err.h> #include <linux/slab.h> @@ -696,6 +696,8 @@ static const struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, }; static const struct dma_map_ops ps3_ioc0_dma_ops = { @@ -708,6 +710,8 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, }; /** diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index cb2d9a970b7b..cf024fa37bda 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -33,8 +33,6 @@ #include <asm/ppc-pci.h> #include <asm/rtas.h> -static int pseries_eeh_get_pe_addr(struct pci_dn *pdn); - /* RTAS tokens */ static int ibm_set_eeh_option; static int ibm_set_slot_reset; @@ -86,42 +84,43 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev) /** - * pseries_eeh_get_config_addr - Retrieve config address + * pseries_eeh_get_pe_config_addr - Find the pe_config_addr for a device + * @pdn: pci_dn of the input device + * + * The EEH RTAS calls use a tuple consisting of: (buid_hi, buid_lo, + * pe_config_addr) as a handle to a given 
PE. This function finds the + * pe_config_addr based on the device's config addr. * - * Retrieve the assocated config address. Actually, there're 2 RTAS - * function calls dedicated for the purpose. We need implement - * it through the new function and then the old one. Besides, - * you should make sure the config address is figured out from - * FDT node before calling the function. + * Keep in mind that the pe_config_addr *might* be numerically identical to the + * device's config addr, but the two are conceptually distinct. * - * It's notable that zero'ed return value means invalid PE config - * address. + * Returns the pe_config_addr, or a negative error code. */ -static int pseries_eeh_get_config_addr(struct pci_controller *phb, int config_addr) +static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn) { - int ret = 0; - int rets[3]; + int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0); + struct pci_controller *phb = pdn->phb; + int ret, rets[3]; if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { /* - * First of all, we need to make sure there has one PE - * associated with the device. Otherwise, PE address is - * meaningless. + * First of all, use function 1 to determine if this device is + * part of a PE or not. ret[0] being zero indicates it's not. */ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, config_addr, BUID_HI(phb->buid), BUID_LO(phb->buid), 1); if (ret || (rets[0] == 0)) - return 0; + return -ENOENT; - /* Retrieve the associated PE config address */ + /* Retrieve the associated PE config address with function 0 */ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, config_addr, BUID_HI(phb->buid), BUID_LO(phb->buid), 0); if (ret) { pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", __func__, phb->global_number, config_addr); - return 0; + return -ENXIO; } return rets[0]; @@ -134,13 +133,20 @@ static int pseries_eeh_get_config_addr(struct pci_controller *phb, int config_ad if (ret) { pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", __func__, phb->global_number, config_addr); - return 0; + return -ENXIO; } return rets[0]; } - return ret; + /* + * PAPR does describe a process for finding the pe_config_addr that was + * used before the ibm,get-config-addr-info calls were added. However, + * I haven't found *any* systems that don't have that RTAS call + * implemented. If you happen to find one that needs the old DT based + * process, patches are welcome! + */ + return -ENOENT; } /** @@ -161,8 +167,7 @@ static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, in BUID_LO(phb->buid), option); /* If fundamental-reset not supported, try hot-reset */ - if (option == EEH_RESET_FUNDAMENTAL && - ret == -8) { + if (option == EEH_RESET_FUNDAMENTAL && ret == -8) { option = EEH_RESET_HOT; ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, config_addr, BUID_HI(phb->buid), @@ -170,8 +175,7 @@ static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, in } /* We need reset hold or settlement delay */ - if (option == EEH_RESET_FUNDAMENTAL || - option == EEH_RESET_HOT) + if (option == EEH_RESET_FUNDAMENTAL || option == EEH_RESET_HOT) msleep(EEH_PE_RST_HOLD_TIME); else msleep(EEH_PE_RST_SETTLE_TIME); @@ -239,88 +243,6 @@ static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX]; static DEFINE_SPINLOCK(slot_errbuf_lock); static int eeh_error_buf_size; -/** - * pseries_eeh_init - EEH platform dependent initialization - * - * EEH platform dependent initialization on pseries. 
- */ -static int pseries_eeh_init(void) -{ - struct pci_controller *phb; - struct pci_dn *pdn; - int addr, config_addr; - - /* figure out EEH RTAS function call tokens */ - ibm_set_eeh_option = rtas_token("ibm,set-eeh-option"); - ibm_set_slot_reset = rtas_token("ibm,set-slot-reset"); - ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2"); - ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state"); - ibm_slot_error_detail = rtas_token("ibm,slot-error-detail"); - ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); - ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); - ibm_configure_pe = rtas_token("ibm,configure-pe"); - - /* - * ibm,configure-pe and ibm,configure-bridge have the same semantics, - * however ibm,configure-pe can be faster. If we can't find - * ibm,configure-pe then fall back to using ibm,configure-bridge. - */ - if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE) - ibm_configure_pe = rtas_token("ibm,configure-bridge"); - - /* - * Necessary sanity check. We needn't check "get-config-addr-info" - * and its variant since the old firmware probably support address - * of domain/bus/slot/function for EEH RTAS operations. - */ - if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE || - ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE || - (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && - ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || - ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || - ibm_configure_pe == RTAS_UNKNOWN_SERVICE) { - pr_info("EEH functionality not supported\n"); - return -EINVAL; - } - - /* Initialize error log lock and size */ - spin_lock_init(&slot_errbuf_lock); - eeh_error_buf_size = rtas_token("rtas-error-log-max"); - if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { - pr_info("%s: unknown EEH error log size\n", - __func__); - eeh_error_buf_size = 1024; - } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { - pr_info("%s: EEH error log size %d exceeds the maximal %d\n", - __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); - eeh_error_buf_size = RTAS_ERROR_LOG_MAX; - } - - /* Set EEH probe mode */ - eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); - - /* Set EEH machine dependent code */ - ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device; - - if (is_kdump_kernel() || reset_devices) { - pr_info("Issue PHB reset ...\n"); - list_for_each_entry(phb, &hose_list, list_node) { - pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list); - addr = (pdn->busno << 16) | (pdn->devfn << 8); - config_addr = pseries_eeh_get_config_addr(phb, addr); - /* invalid PE config addr */ - if (config_addr == 0) - continue; - - pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL); - pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE); - pseries_eeh_phb_configure_bridge(phb, config_addr); - } - } - - return 0; -} - static int pseries_eeh_cap_start(struct pci_dn *pdn) { u32 status; @@ -439,10 +361,9 @@ static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev) */ void pseries_eeh_init_edev(struct pci_dn *pdn) { + struct eeh_pe pe, *parent; struct eeh_dev *edev; - struct eeh_pe pe; u32 pcie_flags; - int enable = 0; int ret; if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))) @@ -499,51 +420,38 @@ void pseries_eeh_init_edev(struct pci_dn *pdn) } } - /* Initialize the fake PE */ + /* first up, find the pe_config_addr for the PE containing the device */ + ret = pseries_eeh_get_pe_config_addr(pdn); + if (ret < 0) { + eeh_edev_dbg(edev, "Unable to find 
pe_config_addr\n"); + goto err; + } + + /* Try enable EEH on the fake PE */ memset(&pe, 0, sizeof(struct eeh_pe)); pe.phb = pdn->phb; - pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8); + pe.addr = ret; - /* Enable EEH on the device */ eeh_edev_dbg(edev, "Enabling EEH on device\n"); ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); if (ret) { eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret); - } else { - struct eeh_pe *parent; + goto err; + } - /* Retrieve PE address */ - edev->pe_config_addr = pseries_eeh_get_pe_addr(pdn); - pe.addr = edev->pe_config_addr; + edev->pe_config_addr = pe.addr; - /* Some older systems (Power4) allow the ibm,set-eeh-option - * call to succeed even on nodes where EEH is not supported. - * Verify support explicitly. - */ - ret = eeh_ops->get_state(&pe, NULL); - if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT) - enable = 1; + eeh_add_flag(EEH_ENABLED); - /* - * This device doesn't support EEH, but it may have an - * EEH parent. In this case any error on the device will - * freeze the PE of it's upstream bridge, so added it to - * the upstream PE. - */ - parent = pseries_eeh_pe_get_parent(edev); - if (parent && !enable) - edev->pe_config_addr = parent->addr; + parent = pseries_eeh_pe_get_parent(edev); + eeh_pe_tree_insert(edev, parent); + eeh_save_bars(edev); + eeh_edev_dbg(edev, "EEH enabled for device"); - if (enable || parent) { - eeh_add_flag(EEH_ENABLED); - eeh_pe_tree_insert(edev, parent); - } - eeh_edev_dbg(edev, "EEH is %s on device (code %d)\n", - (enable ? "enabled" : "unsupported"), ret); - } + return; - /* Save memory bars */ - eeh_save_bars(edev); +err: + eeh_edev_dbg(edev, "EEH is unsupported on device (code = %d)\n", ret); } static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev) @@ -600,7 +508,6 @@ EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive); static int pseries_eeh_set_option(struct eeh_pe *pe, int option) { int ret = 0; - int config_addr; /* * When we're enabling or disabling EEH functioality on @@ -613,85 +520,23 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option) case EEH_OPT_ENABLE: case EEH_OPT_THAW_MMIO: case EEH_OPT_THAW_DMA: - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; break; case EEH_OPT_FREEZE_PE: /* Not support */ return 0; default: - pr_err("%s: Invalid option %d\n", - __func__, option); + pr_err("%s: Invalid option %d\n", __func__, option); return -EINVAL; } ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), + pe->addr, BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid), option); return ret; } /** - * pseries_eeh_get_pe_addr - Retrieve PE address - * @pe: EEH PE - * - * Retrieve the assocated PE address. Actually, there're 2 RTAS - * function calls dedicated for the purpose. We need implement - * it through the new function and then the old one. Besides, - * you should make sure the config address is figured out from - * FDT node before calling the function. - * - * It's notable that zero'ed return value means invalid PE config - * address. - */ -static int pseries_eeh_get_pe_addr(struct pci_dn *pdn) -{ - int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0); - unsigned long buid = pdn->phb->buid; - int ret = 0; - int rets[3]; - - if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { - /* - * First of all, we need to make sure there has one PE - * associated with the device. Otherwise, PE address is - * meaningless. 
- */ - ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, - config_addr, BUID_HI(buid), BUID_LO(buid), 1); - if (ret || (rets[0] == 0)) - return 0; - - /* Retrieve the associated PE config address */ - ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, - config_addr, BUID_HI(buid), BUID_LO(buid), 0); - if (ret) { - pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", - __func__, pdn->phb->global_number, config_addr); - return 0; - } - - return rets[0]; - } - - if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, - config_addr, BUID_HI(buid), BUID_LO(buid), 0); - if (ret) { - pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n", - __func__, pdn->phb->global_number, config_addr); - return 0; - } - - return rets[0]; - } - - return ret; -} - -/** * pseries_eeh_get_state - Retrieve PE state * @pe: EEH PE * @delay: suggested time to wait if state is unavailable @@ -706,25 +551,19 @@ static int pseries_eeh_get_pe_addr(struct pci_dn *pdn) */ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) { - int config_addr; int ret; int rets[4]; int result; - /* Figure out PE config address if possible */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; - if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets, - config_addr, BUID_HI(pe->phb->buid), + pe->addr, BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid)); } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) { /* Fake PE unavailable info */ rets[2] = 0; ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets, - config_addr, BUID_HI(pe->phb->buid), + pe->addr, BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid)); } else { return EEH_STATE_NOT_SUPPORT; @@ -778,14 +617,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) */ static int pseries_eeh_reset(struct eeh_pe *pe, int option) { - int config_addr; - - /* Figure out PE address */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; - - return pseries_eeh_phb_reset(pe->phb, config_addr, option); + return pseries_eeh_phb_reset(pe->phb, pe->addr, option); } /** @@ -801,19 +633,13 @@ static int pseries_eeh_reset(struct eeh_pe *pe, int option) */ static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len) { - int config_addr; unsigned long flags; int ret; spin_lock_irqsave(&slot_errbuf_lock, flags); memset(slot_errbuf, 0, eeh_error_buf_size); - /* Figure out the PE address */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; - - ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, + ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, pe->addr, BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid), virt_to_phys(drv_log), len, virt_to_phys(slot_errbuf), eeh_error_buf_size, @@ -832,14 +658,7 @@ static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, u */ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) { - int config_addr; - - /* Figure out the PE address */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; - - return pseries_eeh_phb_configure_bridge(pe->phb, config_addr); + return pseries_eeh_phb_configure_bridge(pe->phb, pe->addr); } /** @@ -954,8 +773,7 @@ static int pseries_notify_resume(struct eeh_dev *edev) if (!edev) return -EEXIST; - if (rtas_token("ibm,open-sriov-allow-unfreeze") - == RTAS_UNKNOWN_SERVICE) + if (rtas_token("ibm,open-sriov-allow-unfreeze") == 
RTAS_UNKNOWN_SERVICE) return -EINVAL; if (edev->pdev->is_physfn || edev->pdev->is_virtfn) @@ -967,7 +785,6 @@ static int pseries_notify_resume(struct eeh_dev *edev) static struct eeh_ops pseries_eeh_ops = { .name = "pseries", - .init = pseries_eeh_init, .probe = pseries_eeh_probe, .set_option = pseries_eeh_set_option, .get_state = pseries_eeh_get_state, @@ -992,15 +809,84 @@ static struct eeh_ops pseries_eeh_ops = { */ static int __init eeh_pseries_init(void) { - int ret; + struct pci_controller *phb; + struct pci_dn *pdn; + int ret, config_addr; - ret = eeh_ops_register(&pseries_eeh_ops); + /* figure out EEH RTAS function call tokens */ + ibm_set_eeh_option = rtas_token("ibm,set-eeh-option"); + ibm_set_slot_reset = rtas_token("ibm,set-slot-reset"); + ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2"); + ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state"); + ibm_slot_error_detail = rtas_token("ibm,slot-error-detail"); + ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); + ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); + ibm_configure_pe = rtas_token("ibm,configure-pe"); + + /* + * ibm,configure-pe and ibm,configure-bridge have the same semantics, + * however ibm,configure-pe can be faster. If we can't find + * ibm,configure-pe then fall back to using ibm,configure-bridge. + */ + if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE) + ibm_configure_pe = rtas_token("ibm,configure-bridge"); + + /* + * Necessary sanity check. We needn't check "get-config-addr-info" + * and its variant since the old firmware probably support address + * of domain/bus/slot/function for EEH RTAS operations. + */ + if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE || + ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE || + (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && + ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || + ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || + ibm_configure_pe == RTAS_UNKNOWN_SERVICE) { + pr_info("EEH functionality not supported\n"); + return -EINVAL; + } + + /* Initialize error log lock and size */ + spin_lock_init(&slot_errbuf_lock); + eeh_error_buf_size = rtas_token("rtas-error-log-max"); + if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { + pr_info("%s: unknown EEH error log size\n", + __func__); + eeh_error_buf_size = 1024; + } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { + pr_info("%s: EEH error log size %d exceeds the maximal %d\n", + __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); + eeh_error_buf_size = RTAS_ERROR_LOG_MAX; + } + + /* Set EEH probe mode */ + eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); + + /* Set EEH machine dependent code */ + ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device; + + if (is_kdump_kernel() || reset_devices) { + pr_info("Issue PHB reset ...\n"); + list_for_each_entry(phb, &hose_list, list_node) { + pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list); + config_addr = pseries_eeh_get_pe_config_addr(pdn); + + /* invalid PE config addr */ + if (config_addr < 0) + continue; + + pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL); + pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE); + pseries_eeh_phb_configure_bridge(phb, config_addr); + } + } + + ret = eeh_init(&pseries_eeh_ops); if (!ret) pr_info("EEH: pSeries platform initialized\n"); else pr_info("EEH: pSeries platform initialization failure (%d)\n", ret); - return ret; } -machine_early_initcall(pseries, eeh_pseries_init); 
+machine_arch_initcall(pseries, eeh_pseries_init); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 7a974ed6b240..f2837e33bf5d 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -55,7 +55,7 @@ static void rtas_stop_self(void) panic("Alas, I survived.\n"); } -static void pseries_mach_cpu_die(void) +static void pseries_cpu_offline_self(void) { unsigned int hwcpu = hard_smp_processor_id(); @@ -102,7 +102,7 @@ static int pseries_cpu_disable(void) * to self-destroy so that the cpu-offline thread can send the CPU_DEAD * notifications. * - * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to + * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to * self-destruct. */ static void pseries_cpu_die(unsigned int cpu) @@ -901,7 +901,7 @@ static int __init pseries_cpu_hotplug_init(void) return 0; } - ppc_md.cpu_die = pseries_mach_cpu_die; + smp_ops->cpu_offline_self = pseries_cpu_offline_self; smp_ops->cpu_disable = pseries_cpu_disable; smp_ops->cpu_die = pseries_cpu_die; diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 5d545b78111f..7efe6ec5d14a 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -30,12 +30,17 @@ unsigned long pseries_memory_block_size(void) np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (np) { - const __be64 *size; + int len; + int size_cells; + const __be32 *prop; - size = of_get_property(np, "ibm,lmb-size", NULL); - if (size) - memblock_size = be64_to_cpup(size); + size_cells = of_n_size_cells(np); + + prop = of_get_property(np, "ibm,lmb-size", &len); + if (prop && len >= size_cells * sizeof(__be32)) + memblock_size = of_read_number(prop, size_cells); of_node_put(np); + } else if (machine_is(pseries)) { /* This fallback really only applies to pseries */ unsigned int memzero_size = 0; @@ -277,7 +282,7 @@ static int dlpar_offline_lmb(struct drmem_lmb *lmb) return dlpar_change_lmb_state(lmb, false); } -static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) +static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size) { unsigned long block_sz, start_pfn; int sections_per_block; @@ -308,10 +313,11 @@ out: static int pseries_remove_mem_node(struct device_node *np) { - const __be32 *regs; + const __be32 *prop; unsigned long base; - unsigned int lmb_size; + unsigned long lmb_size; int ret = -EINVAL; + int addr_cells, size_cells; /* * Check to see if we are actually removing memory @@ -322,12 +328,19 @@ static int pseries_remove_mem_node(struct device_node *np) /* * Find the base address and size of the memblock */ - regs = of_get_property(np, "reg", NULL); - if (!regs) + prop = of_get_property(np, "reg", NULL); + if (!prop) return ret; - base = be64_to_cpu(*(unsigned long *)regs); - lmb_size = be32_to_cpu(regs[3]); + addr_cells = of_n_addr_cells(np); + size_cells = of_n_size_cells(np); + + /* + * "reg" property represents (addr,size) tuple. 
+ */ + base = of_read_number(prop, addr_cells); + prop += addr_cells; + lmb_size = of_read_number(prop, size_cells); pseries_remove_memblock(base, lmb_size); return 0; @@ -354,25 +367,32 @@ static int dlpar_add_lmb(struct drmem_lmb *); static int dlpar_remove_lmb(struct drmem_lmb *lmb) { + struct memory_block *mem_block; unsigned long block_sz; int rc; if (!lmb_is_removable(lmb)) return -EINVAL; + mem_block = lmb_to_memblock(lmb); + if (mem_block == NULL) + return -EINVAL; + rc = dlpar_offline_lmb(lmb); - if (rc) + if (rc) { + put_device(&mem_block->dev); return rc; + } block_sz = pseries_memory_block_size(); - __remove_memory(lmb->nid, lmb->base_addr, block_sz); + __remove_memory(mem_block->nid, lmb->base_addr, block_sz); + put_device(&mem_block->dev); /* Update memory regions for memory remove */ memblock_remove(lmb->base_addr, block_sz); invalidate_lmb_associativity_index(lmb); - lmb_clear_nid(lmb); lmb->flags &= ~DRCONF_MEM_ASSIGNED; return 0; @@ -557,7 +577,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) #else static inline int pseries_remove_memblock(unsigned long base, - unsigned int memblock_size) + unsigned long memblock_size) { return -EOPNOTSUPP; } @@ -591,7 +611,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index) static int dlpar_add_lmb(struct drmem_lmb *lmb) { unsigned long block_sz; - int rc; + int nid, rc; if (lmb->flags & DRCONF_MEM_ASSIGNED) return -EINVAL; @@ -602,11 +622,15 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) return rc; } - lmb_set_nid(lmb); block_sz = memory_block_size_bytes(); + /* Find the node id for this LMB. Fake one if necessary. */ + nid = of_drconf_to_nid_single(lmb); + if (nid < 0 || !node_possible(nid)) + nid = first_online_node; + /* Add the memory */ - rc = __add_memory(lmb->nid, lmb->base_addr, block_sz); + rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE); if (rc) { invalidate_lmb_associativity_index(lmb); return rc; @@ -614,9 +638,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) rc = dlpar_online_lmb(lmb); if (rc) { - __remove_memory(lmb->nid, lmb->base_addr, block_sz); + __remove_memory(nid, lmb->base_addr, block_sz); invalidate_lmb_associativity_index(lmb); - lmb_clear_nid(lmb); } else { lmb->flags |= DRCONF_MEM_ASSIGNED; } @@ -878,10 +901,11 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) static int pseries_add_mem_node(struct device_node *np) { - const __be32 *regs; + const __be32 *prop; unsigned long base; - unsigned int lmb_size; + unsigned long lmb_size; int ret = -EINVAL; + int addr_cells, size_cells; /* * Check to see if we are actually adding memory @@ -892,12 +916,18 @@ static int pseries_add_mem_node(struct device_node *np) /* * Find the base and size of the memblock */ - regs = of_get_property(np, "reg", NULL); - if (!regs) + prop = of_get_property(np, "reg", NULL); + if (!prop) return ret; - base = be64_to_cpu(*(unsigned long *)regs); - lmb_size = be32_to_cpu(regs[3]); + addr_cells = of_n_addr_cells(np); + size_cells = of_n_size_cells(np); + /* + * "reg" property represents (addr,size) tuple. 
+ */ + base = of_read_number(prop, addr_cells); + prop += addr_cells; + lmb_size = of_read_number(prop, size_cells); /* * Update memory region to represent the memory add diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index c40c62ec432e..2c59b4986ea5 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -70,31 +70,14 @@ static int hc_show(struct seq_file *m, void *p) return 0; } -static const struct seq_operations hcall_inst_seq_ops = { +static const struct seq_operations hcall_inst_sops = { .start = hc_start, .next = hc_next, .stop = hc_stop, .show = hc_show }; -static int hcall_inst_seq_open(struct inode *inode, struct file *file) -{ - int rc; - struct seq_file *seq; - - rc = seq_open(file, &hcall_inst_seq_ops); - seq = file->private_data; - seq->private = file_inode(file)->i_private; - - return rc; -} - -static const struct file_operations hcall_inst_seq_fops = { - .open = hcall_inst_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; +DEFINE_SEQ_ATTRIBUTE(hcall_inst); #define HCALL_ROOT_DIR "hcall_inst" #define CPU_NAME_BUF_SIZE 32 @@ -149,7 +132,7 @@ static int __init hcall_inst_init(void) snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu); debugfs_create_file(cpu_name_buf, 0444, hcall_root, per_cpu(hcall_stats, cpu), - &hcall_inst_seq_fops); + &hcall_inst_fops); } return 0; diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index a6f101c958e8..8c6e509f6967 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -40,7 +40,7 @@ #include <linux/export.h> #include <linux/console.h> #include <linux/kobject.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/slab.h> diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 6d47b4a3ce39..e4198700ed1a 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -39,6 +39,20 @@ #include "pseries.h" +enum { + DDW_QUERY_PE_DMA_WIN = 0, + DDW_CREATE_PE_DMA_WIN = 1, + DDW_REMOVE_PE_DMA_WIN = 2, + + DDW_APPLICABLE_SIZE +}; + +enum { + DDW_EXT_SIZE = 0, + DDW_EXT_RESET_DMA_WIN = 1, + DDW_EXT_QUERY_OUT_SIZE = 2 +}; + static struct iommu_table_group *iommu_pseries_alloc_group(int node) { struct iommu_table_group *table_group; @@ -334,7 +348,7 @@ struct direct_window { /* Dynamic DMA Window support */ struct ddw_query_response { u32 windows_available; - u32 largest_available_block; + u64 largest_available_block; u32 page_size; u32 migration_capable; }; @@ -767,25 +781,14 @@ static int __init disable_ddw_setup(char *str) early_param("disable_ddw", disable_ddw_setup); -static void remove_ddw(struct device_node *np, bool remove_prop) +static void remove_dma_window(struct device_node *np, u32 *ddw_avail, + struct property *win) { struct dynamic_dma_window_prop *dwp; - struct property *win64; - u32 ddw_avail[3]; u64 liobn; - int ret = 0; - - ret = of_property_read_u32_array(np, "ibm,ddw-applicable", - &ddw_avail[0], 3); - - win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); - if (!win64) - return; - - if (ret || win64->length < sizeof(*dwp)) - goto delprop; + int ret; - dwp = win64->value; + dwp = win->value; liobn = (u64)be32_to_cpu(dwp->liobn); /* clear the whole window, note the arg is in kernel pages */ @@ -798,19 +801,39 @@ static void 
remove_ddw(struct device_node *np, bool remove_prop) pr_debug("%pOF successfully cleared tces in window.\n", np); - ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); + ret = rtas_call(ddw_avail[DDW_REMOVE_PE_DMA_WIN], 1, 1, NULL, liobn); if (ret) pr_warn("%pOF: failed to remove direct window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", - np, ret, ddw_avail[2], liobn); + np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn); else pr_debug("%pOF: successfully removed direct window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", - np, ret, ddw_avail[2], liobn); + np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn); +} + +static void remove_ddw(struct device_node *np, bool remove_prop) +{ + struct property *win; + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + int ret = 0; -delprop: - if (remove_prop) - ret = of_remove_property(np, win64); + ret = of_property_read_u32_array(np, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) + return; + + win = of_find_property(np, DIRECT64_PROPNAME, NULL); + if (!win) + return; + + if (win->length >= sizeof(struct dynamic_dma_window_prop)) + remove_dma_window(np, ddw_avail, win); + + if (!remove_prop) + return; + + ret = of_remove_property(np, win); if (ret) pr_warn("%pOF: failed to remove direct window property: %d\n", np, ret); @@ -869,14 +892,62 @@ static int find_existing_ddw_windows(void) } machine_arch_initcall(pseries, find_existing_ddw_windows); +/** + * ddw_read_ext - Get the value of an DDW extension + * @np: device node from which the extension value is to be read. + * @extnum: index number of the extension. + * @value: pointer to return value, modified when extension is available. + * + * Checks if "ibm,ddw-extensions" exists for this node, and get the value + * on index 'extnum'. + * It can be used only to check if a property exists, passing value == NULL. + * + * Returns: + * 0 if extension successfully read + * -EINVAL if the "ibm,ddw-extensions" does not exist, + * -ENODATA if "ibm,ddw-extensions" does not have a value, and + * -EOVERFLOW if "ibm,ddw-extensions" does not contain this extension. + */ +static inline int ddw_read_ext(const struct device_node *np, int extnum, + u32 *value) +{ + static const char propname[] = "ibm,ddw-extensions"; + u32 count; + int ret; + + ret = of_property_read_u32_index(np, propname, DDW_EXT_SIZE, &count); + if (ret) + return ret; + + if (count < extnum) + return -EOVERFLOW; + + if (!value) + value = &count; + + return of_property_read_u32_index(np, propname, extnum, value); +} + static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, - struct ddw_query_response *query) + struct ddw_query_response *query, + struct device_node *parent) { struct device_node *dn; struct pci_dn *pdn; - u32 cfg_addr; + u32 cfg_addr, ext_query, query_out[5]; u64 buid; - int ret; + int ret, out_sz; + + /* + * From LoPAR level 2.8, "ibm,ddw-extensions" index 3 can rule how many + * output parameters ibm,query-pe-dma-windows will have, ranging from + * 5 to 6. + */ + ret = ddw_read_ext(parent, DDW_EXT_QUERY_OUT_SIZE, &ext_query); + if (!ret && ext_query == 1) + out_sz = 6; + else + out_sz = 5; /* * Get the config address and phb buid of the PE window. 
@@ -889,11 +960,28 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, buid = pdn->phb->buid; cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8)); - ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, - cfg_addr, BUID_HI(buid), BUID_LO(buid)); - dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" - " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid), - BUID_LO(buid), ret); + ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out, + cfg_addr, BUID_HI(buid), BUID_LO(buid)); + dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d\n", + ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid), + BUID_LO(buid), ret); + + switch (out_sz) { + case 5: + query->windows_available = query_out[0]; + query->largest_available_block = query_out[1]; + query->page_size = query_out[2]; + query->migration_capable = query_out[3]; + break; + case 6: + query->windows_available = query_out[0]; + query->largest_available_block = ((u64)query_out[1] << 32) | + query_out[2]; + query->page_size = query_out[3]; + query->migration_capable = query_out[4]; + break; + } + return ret; } @@ -920,15 +1008,16 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, do { /* extra outputs are LIOBN and dma-addr (hi, lo) */ - ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, - cfg_addr, BUID_HI(buid), BUID_LO(buid), - page_shift, window_shift); + ret = rtas_call(ddw_avail[DDW_CREATE_PE_DMA_WIN], 5, 4, + (u32 *)create, cfg_addr, BUID_HI(buid), + BUID_LO(buid), page_shift, window_shift); } while (rtas_busy_delay(ret)); dev_info(&dev->dev, "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " - "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1], - cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, - window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); + "(liobn = 0x%x starting addr = %x %x)\n", + ddw_avail[DDW_CREATE_PE_DMA_WIN], cfg_addr, BUID_HI(buid), + BUID_LO(buid), page_shift, window_shift, ret, create->liobn, + create->addr_hi, create->addr_lo); return ret; } @@ -978,6 +1067,38 @@ static phys_addr_t ddw_memory_hotplug_max(void) } /* + * Platforms supporting the DDW option starting with LoPAR level 2.7 implement + * ibm,ddw-extensions, which carries the rtas token for + * ibm,reset-pe-dma-windows. + * That rtas-call can be used to restore the default DMA window for the device. + */ +static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn) +{ + int ret; + u32 cfg_addr, reset_dma_win; + u64 buid; + struct device_node *dn; + struct pci_dn *pdn; + + ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win); + if (ret) + return; + + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8); + + ret = rtas_call(reset_dma_win, 3, 1, NULL, cfg_addr, BUID_HI(buid), + BUID_LO(buid)); + if (ret) + dev_info(&dev->dev, + "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d ", + reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid), + ret); +} + +/* * If the PE supports dynamic dma windows, and there is space for a table * that can map all pages in a linear offset, then setup such a table, * and record the dma-offset in the struct device. 
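As context for the DDW changes above: "ibm,ddw-extensions" is a flat array of 32-bit cells in which cell 0 (DDW_EXT_SIZE) holds the number of extension cells that follow, cell 1 (DDW_EXT_RESET_DMA_WIN) carries the RTAS token for ibm,reset-pe-dma-windows, and cell 2 (DDW_EXT_QUERY_OUT_SIZE) is a flag selecting the 6-output form of ibm,query-pe-dma-windows. A standalone sketch of the lookup logic follows; the cell values are invented for illustration, not read from firmware:

        #include <errno.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Illustrative "ibm,ddw-extensions" contents (values invented):
         * cell 0: two extension cells follow
         * cell 1: RTAS token for ibm,reset-pe-dma-windows
         * cell 2: 1 => ibm,query-pe-dma-windows uses the wider 6-output format
         */
        static const uint32_t ddw_extensions[] = { 2, 0x2a, 1 };

        /* User-space analogue of ddw_read_ext(): extension 'extnum' exists
         * only if it does not exceed the count stored in cell 0. */
        static int read_ext(unsigned int extnum, uint32_t *value)
        {
                if (extnum > ddw_extensions[0])
                        return -EOVERFLOW;
                *value = ddw_extensions[extnum];
                return 0;
        }

        int main(void)
        {
                uint32_t reset_token, wide_query;

                if (!read_ext(1, &reset_token))
                        printf("reset-pe-dma-windows token: 0x%x\n",
                               (unsigned)reset_token);
                if (!read_ext(2, &wide_query) && wide_query == 1)
                        printf("query returns 64-bit largest_available_block\n");
                return 0;
        }

With the 6-output format, the 64-bit largest_available_block arrives as two consecutive 32-bit cells, high word first, which is why query_ddw() above reassembles it as ((u64)query_out[1] << 32) | query_out[2].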
@@ -996,11 +1117,12 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) int page_shift; u64 dma_addr, max_addr; struct device_node *dn; - u32 ddw_avail[3]; + u32 ddw_avail[DDW_APPLICABLE_SIZE]; struct direct_window *window; struct property *win64; struct dynamic_dma_window_prop *ddwprop; struct failed_ddw_pdn *fpdn; + bool default_win_removed = false; mutex_lock(&direct_window_init_mutex); @@ -1029,7 +1151,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) * the property is actually in the parent, not the PE */ ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", - &ddw_avail[0], 3); + &ddw_avail[0], DDW_APPLICABLE_SIZE); if (ret) goto out_failed; @@ -1040,18 +1162,42 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) * of page sizes: supported and supported for migrate-dma. */ dn = pci_device_to_OF_node(dev); - ret = query_ddw(dev, ddw_avail, &query); + ret = query_ddw(dev, ddw_avail, &query, pdn); if (ret != 0) goto out_failed; + /* + * If there is no window available, remove the default DMA window, + * if it's present. This will make all the resources available to the + * new DDW window. + * If anything fails after this, we need to restore it, so also check + * for extensions presence. + */ if (query.windows_available == 0) { - /* - * no additional windows are available for this device. - * We might be able to reallocate the existing window, - * trading in for a larger page size. - */ - dev_dbg(&dev->dev, "no free dynamic windows"); - goto out_failed; + struct property *default_win; + int reset_win_ext; + + default_win = of_find_property(pdn, "ibm,dma-window", NULL); + if (!default_win) + goto out_failed; + + reset_win_ext = ddw_read_ext(pdn, DDW_EXT_RESET_DMA_WIN, NULL); + if (reset_win_ext) + goto out_failed; + + remove_dma_window(pdn, ddw_avail, default_win); + default_win_removed = true; + + /* Query again, to check if the window is available */ + ret = query_ddw(dev, ddw_avail, &query, pdn); + if (ret != 0) + goto out_failed; + + if (query.windows_available == 0) { + /* no windows are available for this device. 
*/ + dev_dbg(&dev->dev, "no free dynamic windows"); + goto out_failed; + } } if (query.page_size & 4) { page_shift = 24; /* 16MB */ @@ -1068,7 +1214,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) /* check largest block * page size > max memory hotplug addr */ max_addr = ddw_memory_hotplug_max(); if (query.largest_available_block < (max_addr >> page_shift)) { - dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u " + dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu " "%llu-sized pages\n", max_addr, query.largest_available_block, 1ULL << page_shift); goto out_failed; @@ -1142,6 +1288,8 @@ out_free_prop: kfree(win64); out_failed: + if (default_win_removed) + reset_dma_window(dev, pdn); fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); if (!fpdn) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index baf24eacd268..764170fdb0f7 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -1724,6 +1724,7 @@ void __init hpte_init_pseries(void) pseries_lpar_register_process_table(0, 0, 0); } +#ifdef CONFIG_PPC_RADIX_MMU void radix_init_pseries(void) { pr_info("Using radix MMU under hypervisor\n"); @@ -1731,6 +1732,7 @@ void radix_init_pseries(void) pseries_lpar_register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); } +#endif #ifdef CONFIG_PPC_SMLPAR #define CMO_FREE_HINT_DEFAULT 1 diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index b8d28ab88178..e278390ab28d 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -136,6 +136,39 @@ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data) return rc; } +static void show_gpci_data(struct seq_file *m) +{ + struct hv_gpci_request_buffer *buf; + unsigned int affinity_score; + long ret; + + buf = kmalloc(sizeof(*buf), GFP_KERNEL); + if (buf == NULL) + return; + + /* + * Show the local LPAR's affinity score. + * + * 0xB1 selects the Affinity_Domain_Info_By_Partition subcall. + * The score is at byte 0xB in the output buffer. 
+ */ + memset(&buf->params, 0, sizeof(buf->params)); + buf->params.counter_request = cpu_to_be32(0xB1); + buf->params.starting_index = cpu_to_be32(-1); /* local LPAR */ + buf->params.counter_info_version_in = 0x5; /* v5+ for score */ + ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(buf), + sizeof(*buf)); + if (ret != H_SUCCESS) { + pr_debug("hcall failed: H_GET_PERF_COUNTER_INFO: %ld, %x\n", + ret, be32_to_cpu(buf->params.detail_rc)); + goto out; + } + affinity_score = buf->bytes[0xB]; + seq_printf(m, "partition_affinity_score=%u\n", affinity_score); +out: + kfree(buf); +} + static unsigned h_pic(unsigned long *pool_idle_time, unsigned long *num_procs) { @@ -487,6 +520,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) partition_active_processors * 100); } + show_gpci_data(m); + seq_printf(m, "partition_active_processors=%d\n", partition_active_processors); diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index a88a707a608a..835163f54244 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -785,7 +785,8 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, static ssize_t perf_stats_show(struct device *dev, struct device_attribute *attr, char *buf) { - int index, rc; + int index; + ssize_t rc; struct seq_buf s; struct papr_scm_perf_stat *stat; struct papr_scm_perf_stats *stats; @@ -820,9 +821,9 @@ static ssize_t perf_stats_show(struct device *dev, free_stats: kfree(stats); - return rc ? rc : seq_buf_used(&s); + return rc ? rc : (ssize_t)seq_buf_used(&s); } -DEVICE_ATTR_ADMIN_RO(perf_stats); +static DEVICE_ATTR_ADMIN_RO(perf_stats); static ssize_t flags_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -897,6 +898,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) p->bus_desc.of_node = p->pdev->dev.of_node; p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL); + /* Set the dimm command family mask to accept PDSMs */ + set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask); + if (!p->bus_desc.provider_name) return -ENOMEM; diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c index bbb97169bf63..6268545947b8 100644 --- a/arch/powerpc/platforms/pseries/rng.c +++ b/arch/powerpc/platforms/pseries/rng.c @@ -36,6 +36,7 @@ static __init int rng_init(void) ppc_md.get_random_seed = pseries_get_random_long; + of_node_put(dn); return 0; } machine_subsys_initcall(pseries, rng_init); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 2f4ee0a90284..633c45ec406d 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -519,9 +519,15 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); + if (result->character & H_CPU_CHAR_BCCTR_LINK_FLUSH_ASSIST) + security_ftr_set(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST); + if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE) security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); + if (result->behaviour & H_CPU_BEHAV_FLUSH_LINK_STACK) + security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); + /* * The features below are enabled by default, so we instead look to see * if firmware has *disabled* them, and clear them if so. 
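The show_gpci_data() hook added to lparcfg.c above ends up as a single extra key in the lparcfg report. A minimal user-space sketch that reads it back, assuming the usual /proc/powerpc/lparcfg location and the key name exactly as printed by this patch:

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/proc/powerpc/lparcfg", "r");
                char line[256];
                unsigned int score;

                if (!f) {
                        perror("lparcfg");
                        return 1;
                }
                while (fgets(line, sizeof(line), f)) {
                        /* Key emitted by show_gpci_data(): partition_affinity_score=<n> */
                        if (sscanf(line, "partition_affinity_score=%u", &score) == 1)
                                printf("partition affinity score: %u\n", score);
                }
                fclose(f);
                return 0;
        }

The value itself is byte 0xB of the H_GET_PERF_COUNTER_INFO output buffer for the Affinity_Domain_Info_By_Partition (0xB1) request, as the kernel-side comment notes; nothing else in the lparcfg format changes.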
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index e6d7a344d9f2..7b739cc7a8a9 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -7,6 +7,7 @@ */ #include <linux/mm.h> +#include <linux/memblock.h> #include <asm/machdep.h> #include <asm/svm.h> #include <asm/swiotlb.h> @@ -35,6 +36,31 @@ static int __init init_svm(void) } machine_early_initcall(pseries, init_svm); +/* + * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it + * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have + * any addressing limitation, we don't need to allocate it in low addresses. + */ +void __init svm_swiotlb_init(void) +{ + unsigned char *vstart; + unsigned long bytes, io_tlb_nslabs; + + io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + + bytes = io_tlb_nslabs << IO_TLB_SHIFT; + + vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE); + if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false)) + return; + + if (io_tlb_start) + memblock_free_early(io_tlb_start, + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + panic("SVM: Cannot allocate SWIOTLB buffer"); +} + int set_memory_encrypted(unsigned long addr, int numpages) { if (!PAGE_ALIGNED(addr)) diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 0487b26f6f1a..b2797cfe4e2b 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -20,7 +20,7 @@ #include <linux/console.h> #include <linux/export.h> #include <linux/mm.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/kobject.h> #include <asm/iommu.h> @@ -608,6 +608,8 @@ static const struct dma_map_ops vio_dma_mapping_ops = { .get_required_mask = dma_iommu_get_required_mask, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, }; /** diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c index ad8117148ea3..21b9d1bf39ff 100644 --- a/arch/powerpc/sysdev/xics/icp-hv.c +++ b/arch/powerpc/sysdev/xics/icp-hv.c @@ -174,6 +174,7 @@ int icp_hv_init(void) icp_ops = &icp_hv_ops; + of_node_put(np); return 0; } diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index f591be9f01f4..a80440af491a 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1565,7 +1565,7 @@ static int __init xive_off(char *arg) } __setup("xive=off", xive_off); -void xive_debug_show_cpu(struct seq_file *m, int cpu) +static void xive_debug_show_cpu(struct seq_file *m, int cpu) { struct xive_cpu *xc = per_cpu(xive_cpu, cpu); @@ -1599,7 +1599,7 @@ void xive_debug_show_cpu(struct seq_file *m, int cpu) seq_puts(m, "\n"); } -void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d) +static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d) { struct irq_chip *chip = irq_data_get_irq_chip(d); int rc; diff --git a/arch/powerpc/tools/checkpatch.sh b/arch/powerpc/tools/checkpatch.sh index 3ce5c093b19d..91c04802ec31 100755 --- a/arch/powerpc/tools/checkpatch.sh +++ b/arch/powerpc/tools/checkpatch.sh @@ -9,7 +9,6 @@ script_base=$(realpath $(dirname $0)) exec $script_base/../../../scripts/checkpatch.pl \ --subjective \ --no-summary \ - --max-line-length=90 \ --show-types \ --ignore ARCH_INCLUDE_LINUX \ --ignore BIT_MACRO \ 
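The one-line of_node_put() additions in rng.c above and in icp-hv.c below fix the same class of leak: a device_node returned by an of_find_*() lookup carries a reference that the caller must drop once it is done with the node, including on the success path. A sketch of the corrected pattern, modelled on the rng.c case (the compatible string and function name are illustrative):

        #include <linux/errno.h>
        #include <linux/init.h>
        #include <linux/of.h>

        static int __init example_probe(void)
        {
                struct device_node *dn;

                dn = of_find_compatible_node(NULL, NULL, "ibm,random");
                if (!dn)
                        return -ENODEV;

                /* ... read properties, install hooks that do not keep dn ... */

                of_node_put(dn);        /* balance the reference taken by the lookup */
                return 0;
        }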
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh index 6e6a30aea3ed..8301efee1e6c 100755 --- a/arch/powerpc/tools/unrel_branch_check.sh +++ b/arch/powerpc/tools/unrel_branch_check.sh @@ -1,60 +1,79 @@ -# Copyright © 2016 IBM Corporation +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# Copyright © 2016,2020 IBM Corporation # -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version -# 2 of the License, or (at your option) any later version. -# -# This script checks the relocations of a vmlinux for "suspicious" -# branches from unrelocated code (head_64.S code). - -# Turn this on if you want more debug output: -# set -x +# This script checks the unrelocated code of a vmlinux for "suspicious" +# branches to relocated code (head_64.S code). -# Have Kbuild supply the path to objdump so we handle cross compilation. +# Have Kbuild supply the path to objdump and nm so we handle cross compilation. objdump="$1" -vmlinux="$2" - -#__end_interrupts should be located within the first 64K - -end_intr=0x$( -$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \ - --stop-address=0xc000000000010000 | -grep '\<__end_interrupts>:' | -awk '{print $1}' -) - -BRANCHES=$( -$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \ - --stop-address=${end_intr} | -grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" | -grep -v '\<__start_initialization_multiplatform>' | -grep -v -e 'b.\?.\?ctr' | -grep -v -e 'b.\?.\?lr' | -sed -e 's/\bbt.\?[[:space:]]*[[:digit:]][[:digit:]]*,/beq/' \ - -e 's/\bbf.\?[[:space:]]*[[:digit:]][[:digit:]]*,/bne/' \ - -e 's/[[:space:]]0x/ /' \ - -e 's/://' | -awk '{ print $1 ":" $6 ":0x" $7 ":" $8 " "}' -) - -for tuple in $BRANCHES -do - from=`echo $tuple | cut -d':' -f1` - branch=`echo $tuple | cut -d':' -f2` - to=`echo $tuple | cut -d':' -f3 | sed 's/cr[0-7],//'` - sym=`echo $tuple | cut -d':' -f4` - - if (( $to > $end_intr )) - then - if [ -z "$bad_branches" ]; then - echo "WARNING: Unrelocated relative branches" - bad_branches="yes" +nm="$2" +vmlinux="$3" + +kstart=0xc000000000000000 + +end_intr=0x$($nm -p "$vmlinux" | + sed -E -n '/\s+[[:alpha:]]\s+__end_interrupts\s*$/{s///p;q}') +if [ "$end_intr" = "0x" ]; then + exit 0 +fi + +# we know that there is a correct branch to +# __start_initialization_multiplatform, so find its address +# so we can exclude it. 
+sim=0x$($nm -p "$vmlinux" | + sed -E -n '/\s+[[:alpha:]]\s+__start_initialization_multiplatform\s*$/{s///p;q}') + +$objdump -D --no-show-raw-insn --start-address="$kstart" --stop-address="$end_intr" "$vmlinux" | +sed -E -n ' +# match lines that start with a kernel address +/^c[0-9a-f]*:\s*b/ { + # drop branches via ctr or lr + /\<b.?.?(ct|l)r/d + # cope with some differences between Clang and GNU objdumps + s/\<bt.?\s*[[:digit:]]+,/beq/ + s/\<bf.?\s*[[:digit:]]+,/bne/ + # tidy up + s/\s0x/ / + s/:// + # format for the loop below + s/^(\S+)\s+(\S+)\s+(\S+)\s*(\S*).*$/\1:\2:\3:\4/ + # strip out condition registers + s/:cr[0-7],/:/ + p +}' | { + +all_good=true +while IFS=: read -r from branch to sym; do + case "$to" in + c*) to="0x$to" + ;; + .+*) + to=${to#.+} + if [ "$branch" = 'b' ]; then + if (( to >= 0x2000000 )); then + to=$(( to - 0x4000000 )) + fi + elif (( to >= 0x8000 )); then + to=$(( to - 0x10000 )) + fi + printf -v to '0x%x' $(( "0x$from" + to )) + ;; + *) printf 'Unkown branch format\n' + ;; + esac + if [ "$to" = "$sim" ]; then + continue + fi + if (( to > end_intr )); then + if $all_good; then + printf '%s\n' 'WARNING: Unrelocated relative branches' + all_good=false fi - echo "$from $branch-> $to $sym" + printf '%s %s-> %s %s\n' "$from" "$branch" "$to" "$sym" fi done -if [ -z "$bad_branches" ]; then - exit 0 -fi +$all_good + +} diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index df7bca00f5ec..55c43a6c9111 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -969,6 +969,7 @@ static void insert_cpu_bpts(void) brk.address = dabr[i].address; brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL; brk.len = 8; + brk.hw_len = 8; __set_breakpoint(i, &brk); } } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 7766e1289468..483fc555fc34 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -38,6 +38,7 @@ config RISCV select GENERIC_ARCH_TOPOLOGY if SMP select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS + select GENERIC_EARLY_IOREMAP select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO select GENERIC_IOREMAP select GENERIC_IRQ_MULTI_HANDLER @@ -334,19 +335,6 @@ menu "Kernel features" source "kernel/Kconfig.hz" -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - config RISCV_SBI_V01 bool "SBI v0.1 support" default y @@ -401,6 +389,28 @@ config CMDLINE_FORCE endchoice +config EFI_STUB + bool + +config EFI + bool "UEFI runtime support" + depends on OF + select LIBFDT + select UCS2_STRING + select EFI_PARAMS_FROM_FDT + select EFI_STUB + select EFI_GENERIC_STUB + select EFI_RUNTIME_WRAPPERS + select RISCV_ISA_C + depends on MMU + default y + help + This option provides support for runtime services provided + by UEFI firmware (such as non-volatile variables, realtime + clock, and platform reset). A UEFI stub is also provided to + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. 
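Stepping back to the unrel_branch_check.sh rewrite above: the ".+offset" handling is plain sign extension of the displacement objdump prints, since an unconditional PowerPC b covers a 26-bit signed range while the conditional forms cover 16 bits. The same computation in standalone C, with an invented displacement to show the wrap-around:

        #include <stdint.h>
        #include <stdio.h>

        /* Mirror of the script's arithmetic: treat the printed displacement as
         * signed, 26-bit for a plain 'b', 16-bit for conditional branches. */
        static int64_t sign_extend(uint64_t disp, int unconditional)
        {
                if (unconditional)
                        return disp >= 0x2000000 ? (int64_t)disp - 0x4000000
                                                 : (int64_t)disp;
                return disp >= 0x8000 ? (int64_t)disp - 0x10000 : (int64_t)disp;
        }

        int main(void)
        {
                uint64_t from = 0xc000000000000100ULL;  /* invented branch site */
                uint64_t disp = 0xfff0;                 /* objdump printed ".+0xfff0" */

                printf("target = 0x%llx\n",
                       (unsigned long long)(from + (uint64_t)sign_extend(disp, 0)));
                return 0;
        }

Here the conditional-branch displacement 0xfff0 resolves to -0x10, so the computed target is 0xc0000000000000f0, which the script then compares against __end_interrupts to decide whether to warn.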
+ endmenu config BUILTIN_DTB @@ -413,3 +423,5 @@ menu "Power management options" source "kernel/power/Kconfig" endmenu + +source "drivers/firmware/Kconfig" diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index fb6e37db836d..10df59f28add 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -80,6 +80,7 @@ head-y := arch/riscv/kernel/head.o core-y += arch/riscv/ libs-y += arch/riscv/lib/ +libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a PHONY += vdso_install vdso_install: diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index d58c93efb603..d222d353d86d 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -130,3 +130,4 @@ CONFIG_DEBUG_BLOCK_EXT_DEVT=y # CONFIG_RUNTIME_TESTING_MENU is not set CONFIG_MEMTEST=y # CONFIG_SYSFS_SYSCALL is not set +CONFIG_EFI=y diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 3d9410bb4de0..59dd7be55005 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +generic-y += early_ioremap.h generic-y += extable.h generic-y += flat.h generic-y += kvm_para.h diff --git a/arch/riscv/include/asm/cacheinfo.h b/arch/riscv/include/asm/cacheinfo.h index 5d9662e9aba8..d1a365215ec0 100644 --- a/arch/riscv/include/asm/cacheinfo.h +++ b/arch/riscv/include/asm/cacheinfo.h @@ -1,4 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 SiFive + */ #ifndef _ASM_RISCV_CACHEINFO_H #define _ASM_RISCV_CACHEINFO_H @@ -11,5 +14,7 @@ struct riscv_cacheinfo_ops { }; void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops); +uintptr_t get_cache_size(u32 level, enum cache_type type); +uintptr_t get_cache_geometry(u32 level, enum cache_type type); #endif /* _ASM_RISCV_CACHEINFO_H */ diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h new file mode 100644 index 000000000000..7542282f1141 --- /dev/null +++ b/arch/riscv/include/asm/efi.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ +#ifndef _ASM_EFI_H +#define _ASM_EFI_H + +#include <asm/csr.h> +#include <asm/io.h> +#include <asm/mmu_context.h> +#include <asm/ptrace.h> +#include <asm/tlbflush.h> + +#ifdef CONFIG_EFI +extern void efi_init(void); +#else +#define efi_init() +#endif + +int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); +int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); + +#define arch_efi_call_virt_setup() efi_virtmap_load() +#define arch_efi_call_virt_teardown() efi_virtmap_unload() + +#define arch_efi_call_virt(p, f, args...) p->f(args) + +#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE) + +/* on RISC-V, the FDT may be located anywhere in system RAM */ +static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr) +{ + return ULONG_MAX; +} + +/* Load initrd at enough distance from DRAM start */ +static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) +{ + return image_addr + SZ_256M; +} + +#define alloc_screen_info(x...) 
(&screen_info) + +static inline void free_screen_info(struct screen_info *si) +{ +} + +static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) +{ +} + +void efi_virtmap_load(void); +void efi_virtmap_unload(void); + +#endif /* _ASM_EFI_H */ diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index d83a4efd052b..5c725e1df58b 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -11,6 +11,7 @@ #include <uapi/asm/elf.h> #include <asm/auxvec.h> #include <asm/byteorder.h> +#include <asm/cacheinfo.h> /* * These are used to set parameters in the core dumps. @@ -61,6 +62,18 @@ extern unsigned long elf_hwcap; do { \ NEW_AUX_ENT(AT_SYSINFO_EHDR, \ (elf_addr_t)current->mm->context.vdso); \ + NEW_AUX_ENT(AT_L1I_CACHESIZE, \ + get_cache_size(1, CACHE_TYPE_INST)); \ + NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, \ + get_cache_geometry(1, CACHE_TYPE_INST)); \ + NEW_AUX_ENT(AT_L1D_CACHESIZE, \ + get_cache_size(1, CACHE_TYPE_DATA)); \ + NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, \ + get_cache_geometry(1, CACHE_TYPE_DATA)); \ + NEW_AUX_ENT(AT_L2_CACHESIZE, \ + get_cache_size(2, CACHE_TYPE_UNIFIED)); \ + NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, \ + get_cache_geometry(2, CACHE_TYPE_UNIFIED)); \ } while (0) #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 1ff075a8dfc7..54cbf07fb4e9 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h @@ -22,14 +22,24 @@ */ enum fixed_addresses { FIX_HOLE, -#define FIX_FDT_SIZE SZ_1M - FIX_FDT_END, - FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1, FIX_PTE, FIX_PMD, FIX_TEXT_POKE1, FIX_TEXT_POKE0, FIX_EARLYCON_MEM_BASE, + + __end_of_permanent_fixed_addresses, + /* + * Temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. 
+ */ +#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE) +#define FIX_BTMAPS_SLOTS 7 +#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) + + FIX_BTMAP_END = __end_of_permanent_fixed_addresses, + FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, + __end_of_fixed_addresses }; diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 3835c3295dc5..c025a746a148 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/pgtable.h> #include <asm/mmiowb.h> +#include <asm/early_ioremap.h> /* * MMIO access functions are separated out to break dependency cycles diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 967eacb01ab5..dabcf2cfb3dc 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -20,6 +20,8 @@ typedef struct { #endif } mm_context_t; +void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, + phys_addr_t sz, pgprot_t prot); #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_MMU_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index eaea1f717010..183f1f4b2ae6 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -100,6 +100,10 @@ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC) +#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) +#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC) +#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \ + | _PAGE_EXEC) #define PAGE_TABLE __pgprot(_PAGE_TABLE) @@ -464,6 +468,7 @@ static inline void __kernel_map_pages(struct page *page, int numpages, int enabl #define kern_addr_valid(addr) (1) /* FIXME */ extern void *dtb_early_va; +extern uintptr_t dtb_early_pa; void setup_bootmem(void); void paging_init(void); diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h new file mode 100644 index 000000000000..3a9971b1210f --- /dev/null +++ b/arch/riscv/include/asm/sections.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + */ +#ifndef __ASM_SECTIONS_H +#define __ASM_SECTIONS_H + +#include <asm-generic/sections.h> + +extern char _start[]; +extern char _start_kernel[]; + +#endif /* __ASM_SECTIONS_H */ diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h index d86cb17bbabe..32c73ba1d531 100644 --- a/arch/riscv/include/uapi/asm/auxvec.h +++ b/arch/riscv/include/uapi/asm/auxvec.h @@ -10,4 +10,28 @@ /* vDSO location */ #define AT_SYSINFO_EHDR 33 +/* + * The set of entries below represent more extensive information + * about the caches, in the form of two entry per cache type, + * one entry containing the cache size in bytes, and the other + * containing the cache line size in bytes in the bottom 16 bits + * and the cache associativity in the next 16 bits. + * + * The associativity is such that if N is the 16-bit value, the + * cache is N way set associative. A value if 0xffff means fully + * associative, a value of 1 means directly mapped. + * + * For all these fields, a value of 0 means that the information + * is not known. 
+ */ +#define AT_L1I_CACHESIZE 40 +#define AT_L1I_CACHEGEOMETRY 41 +#define AT_L1D_CACHESIZE 42 +#define AT_L1D_CACHEGEOMETRY 43 +#define AT_L2_CACHESIZE 44 +#define AT_L2_CACHEGEOMETRY 45 + +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 7 + #endif /* _UAPI_ASM_RISCV_AUXVEC_H */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index dc93710f0b2f..fa896c5f7ccb 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -55,4 +55,6 @@ obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o +obj-$(CONFIG_EFI) += efi.o + clean: diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index bd0f122965c3..de59dd457b41 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -3,7 +3,6 @@ * Copyright (C) 2017 SiFive */ -#include <linux/cacheinfo.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/of_device.h> @@ -25,12 +24,84 @@ cache_get_priv_group(struct cacheinfo *this_leaf) return NULL; } -static void ci_leaf_init(struct cacheinfo *this_leaf, - struct device_node *node, - enum cache_type type, unsigned int level) +static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(smp_processor_id()); + struct cacheinfo *this_leaf; + int index; + + for (index = 0; index < this_cpu_ci->num_leaves; index++) { + this_leaf = this_cpu_ci->info_list + index; + if (this_leaf->level == level && this_leaf->type == type) + return this_leaf; + } + + return NULL; +} + +uintptr_t get_cache_size(u32 level, enum cache_type type) +{ + struct cacheinfo *this_leaf = get_cacheinfo(level, type); + + return this_leaf ? this_leaf->size : 0; +} + +uintptr_t get_cache_geometry(u32 level, enum cache_type type) +{ + struct cacheinfo *this_leaf = get_cacheinfo(level, type); + + return this_leaf ? (this_leaf->ways_of_associativity << 16 | + this_leaf->coherency_line_size) : + 0; +} + +static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type, + unsigned int level, unsigned int size, + unsigned int sets, unsigned int line_size) { this_leaf->level = level; this_leaf->type = type; + this_leaf->size = size; + this_leaf->number_of_sets = sets; + this_leaf->coherency_line_size = line_size; + + /* + * If the cache is fully associative, there is no need to + * check the other properties. + */ + if (sets == 1) + return; + + /* + * Set the ways number for n-ways associative, make sure + * all properties are big than zero. 
+ */ + if (sets > 0 && size > 0 && line_size > 0) + this_leaf->ways_of_associativity = (size / sets) / line_size; +} + +static void fill_cacheinfo(struct cacheinfo **this_leaf, + struct device_node *node, unsigned int level) +{ + unsigned int size, sets, line_size; + + if (!of_property_read_u32(node, "cache-size", &size) && + !of_property_read_u32(node, "cache-block-size", &line_size) && + !of_property_read_u32(node, "cache-sets", &sets)) { + ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size); + } + + if (!of_property_read_u32(node, "i-cache-size", &size) && + !of_property_read_u32(node, "i-cache-sets", &sets) && + !of_property_read_u32(node, "i-cache-block-size", &line_size)) { + ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size); + } + + if (!of_property_read_u32(node, "d-cache-size", &size) && + !of_property_read_u32(node, "d-cache-sets", &sets) && + !of_property_read_u32(node, "d-cache-block-size", &line_size)) { + ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size); + } } static int __init_cache_level(unsigned int cpu) @@ -83,29 +154,24 @@ static int __populate_cache_leaves(unsigned int cpu) struct device_node *prev = NULL; int levels = 1, level = 1; - if (of_property_read_bool(np, "cache-size")) - ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) - ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) - ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); + /* Level 1 caches in cpu node */ + fill_cacheinfo(&this_leaf, np, level); + /* Next level caches in cache nodes */ prev = np; while ((np = of_find_next_cache_node(np))) { of_node_put(prev); prev = np; + if (!of_device_is_compatible(np, "cache")) break; if (of_property_read_u32(np, "cache-level", &level)) break; if (level <= levels) break; - if (of_property_read_bool(np, "cache-size")) - ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) - ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) - ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); + + fill_cacheinfo(&this_leaf, np, level); + levels = level; } of_node_put(np); diff --git a/arch/riscv/kernel/efi-header.S b/arch/riscv/kernel/efi-header.S new file mode 100644 index 000000000000..8e733aa48ba6 --- /dev/null +++ b/arch/riscv/kernel/efi-header.S @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
+ * Adapted from arch/arm64/kernel/efi-header.S + */ + +#include <linux/pe.h> +#include <linux/sizes.h> + + .macro __EFI_PE_HEADER + .long PE_MAGIC +coff_header: +#ifdef CONFIG_64BIT + .short IMAGE_FILE_MACHINE_RISCV64 // Machine +#else + .short IMAGE_FILE_MACHINE_RISCV32 // Machine +#endif + .short section_count // NumberOfSections + .long 0 // TimeDateStamp + .long 0 // PointerToSymbolTable + .long 0 // NumberOfSymbols + .short section_table - optional_header // SizeOfOptionalHeader + .short IMAGE_FILE_DEBUG_STRIPPED | \ + IMAGE_FILE_EXECUTABLE_IMAGE | \ + IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics + +optional_header: +#ifdef CONFIG_64BIT + .short PE_OPT_MAGIC_PE32PLUS // PE32+ format +#else + .short PE_OPT_MAGIC_PE32 // PE32 format +#endif + .byte 0x02 // MajorLinkerVersion + .byte 0x14 // MinorLinkerVersion + .long __pecoff_text_end - efi_header_end // SizeOfCode + .long __pecoff_data_virt_size // SizeOfInitializedData + .long 0 // SizeOfUninitializedData + .long __efistub_efi_pe_entry - _start // AddressOfEntryPoint + .long efi_header_end - _start // BaseOfCode +#ifdef CONFIG_32BIT + .long __pecoff_text_end - _start // BaseOfData +#endif + +extra_header_fields: + .quad 0 // ImageBase + .long PECOFF_SECTION_ALIGNMENT // SectionAlignment + .long PECOFF_FILE_ALIGNMENT // FileAlignment + .short 0 // MajorOperatingSystemVersion + .short 0 // MinorOperatingSystemVersion + .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion + .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion + .short 0 // MajorSubsystemVersion + .short 0 // MinorSubsystemVersion + .long 0 // Win32VersionValue + + .long _end - _start // SizeOfImage + + // Everything before the kernel image is considered part of the header + .long efi_header_end - _start // SizeOfHeaders + .long 0 // CheckSum + .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem + .short 0 // DllCharacteristics + .quad 0 // SizeOfStackReserve + .quad 0 // SizeOfStackCommit + .quad 0 // SizeOfHeapReserve + .quad 0 // SizeOfHeapCommit + .long 0 // LoaderFlags + .long (section_table - .) / 8 // NumberOfRvaAndSizes + + .quad 0 // ExportTable + .quad 0 // ImportTable + .quad 0 // ResourceTable + .quad 0 // ExceptionTable + .quad 0 // CertificationTable + .quad 0 // BaseRelocationTable + + // Section table +section_table: + .ascii ".text\0\0\0" + .long __pecoff_text_end - efi_header_end // VirtualSize + .long efi_header_end - _start // VirtualAddress + .long __pecoff_text_end - efi_header_end // SizeOfRawData + .long efi_header_end - _start // PointerToRawData + + .long 0 // PointerToRelocations + .long 0 // PointerToLineNumbers + .short 0 // NumberOfRelocations + .short 0 // NumberOfLineNumbers + .long IMAGE_SCN_CNT_CODE | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_EXECUTE // Characteristics + + .ascii ".data\0\0\0" + .long __pecoff_data_virt_size // VirtualSize + .long __pecoff_text_end - _start // VirtualAddress + .long __pecoff_data_raw_size // SizeOfRawData + .long __pecoff_text_end - _start // PointerToRawData + + .long 0 // PointerToRelocations + .long 0 // PointerToLineNumbers + .short 0 // NumberOfRelocations + .short 0 // NumberOfLineNumbers + .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_WRITE // Characteristics + + .set section_count, (. 
- section_table) / 40 + + .balign 0x1000 +efi_header_end: + .endm diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c new file mode 100644 index 000000000000..024159298231 --- /dev/null +++ b/arch/riscv/kernel/efi.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + * Adapted from arch/arm64/kernel/efi.c + */ + +#include <linux/efi.h> +#include <linux/init.h> + +#include <asm/efi.h> +#include <asm/pgtable.h> +#include <asm/pgtable-bits.h> + +/* + * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be + * executable, everything else can be mapped with the XN bits + * set. Also take the new (optional) RO/XP bits into account. + */ +static __init pgprot_t efimem_to_pgprot_map(efi_memory_desc_t *md) +{ + u64 attr = md->attribute; + u32 type = md->type; + + if (type == EFI_MEMORY_MAPPED_IO) + return PAGE_KERNEL; + + /* R-- */ + if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) == + (EFI_MEMORY_XP | EFI_MEMORY_RO)) + return PAGE_KERNEL_READ; + + /* R-X */ + if (attr & EFI_MEMORY_RO) + return PAGE_KERNEL_READ_EXEC; + + /* RW- */ + if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) == + EFI_MEMORY_XP) || + type != EFI_RUNTIME_SERVICES_CODE) + return PAGE_KERNEL; + + /* RWX */ + return PAGE_KERNEL_EXEC; +} + +int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) +{ + pgprot_t prot = __pgprot(pgprot_val(efimem_to_pgprot_map(md)) & + ~(_PAGE_GLOBAL)); + int i; + + /* RISC-V maps one page at a time */ + for (i = 0; i < md->num_pages; i++) + create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE, + md->phys_addr + i * PAGE_SIZE, + PAGE_SIZE, prot); + return 0; +} + +static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) +{ + efi_memory_desc_t *md = data; + pte_t pte = READ_ONCE(*ptep); + unsigned long val; + + if (md->attribute & EFI_MEMORY_RO) { + val = pte_val(pte) & ~_PAGE_WRITE; + val = pte_val(pte) | _PAGE_READ; + pte = __pte(val); + } + if (md->attribute & EFI_MEMORY_XP) { + val = pte_val(pte) & ~_PAGE_EXEC; + pte = __pte(val); + } + set_pte(ptep, pte); + + return 0; +} + +int __init efi_set_mapping_permissions(struct mm_struct *mm, + efi_memory_desc_t *md) +{ + BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE && + md->type != EFI_RUNTIME_SERVICES_DATA); + + /* + * Calling apply_to_page_range() is only safe on regions that are + * guaranteed to be mapped down to pages. Since we are only called + * for regions that have been mapped using efi_create_mapping() above + * (and this is checked by the generic Memory Attributes table parsing + * routines), there is no need to check that again here. + */ + return apply_to_page_range(mm, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, + set_permissions, md); +} diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 0a4e81b8dc79..11e2a4fe66e0 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -3,7 +3,6 @@ * Copyright (C) 2012 Regents of the University of California */ -#include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/asm.h> #include <linux/init.h> @@ -13,6 +12,7 @@ #include <asm/csr.h> #include <asm/hwcap.h> #include <asm/image.h> +#include "efi-header.S" __HEAD ENTRY(_start) @@ -22,10 +22,18 @@ ENTRY(_start) * Do not modify it without modifying the structure and all bootloaders * that expects this header format!! */ +#ifdef CONFIG_EFI + /* + * This instruction decodes to "MZ" ASCII required by UEFI. 
+ */ + c.li s4,-13 + j _start_kernel +#else /* jump to start kernel */ j _start_kernel /* reserved */ .word 0 +#endif .balign 8 #if __riscv_xlen == 64 /* Image load offset(2MB) from start of RAM */ @@ -43,7 +51,14 @@ ENTRY(_start) .ascii RISCV_IMAGE_MAGIC .balign 4 .ascii RISCV_IMAGE_MAGIC2 +#ifdef CONFIG_EFI + .word pe_head_start - _start +pe_head_start: + + __EFI_PE_HEADER +#else .word 0 +#endif .align 2 #ifdef CONFIG_MMU @@ -259,7 +274,6 @@ clear_bss_done: #endif /* Start the kernel */ call soc_early_init - call parse_dtb tail start_kernel .Lsecondary_start: diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h index 105fb0496b24..b48dda3d04f6 100644 --- a/arch/riscv/kernel/head.h +++ b/arch/riscv/kernel/head.h @@ -16,6 +16,4 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa); extern void *__cpu_up_stack_pointer[]; extern void *__cpu_up_task_pointer[]; -void __init parse_dtb(void); - #endif /* __ASM_HEAD_H */ diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h new file mode 100644 index 000000000000..8c212efb37a6 --- /dev/null +++ b/arch/riscv/kernel/image-vars.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + * Linker script variables to be set after section resolution, as + * ld.lld does not like variables assigned before SECTIONS is processed. + * Based on arch/arm64/kerne/image-vars.h + */ +#ifndef __RISCV_KERNEL_IMAGE_VARS_H +#define __RISCV_KERNEL_IMAGE_VARS_H + +#ifndef LINKER_SCRIPT +#error This file should only be included in vmlinux.lds.S +#endif + +#ifdef CONFIG_EFI + +/* + * The EFI stub has its own symbol namespace prefixed by __efistub_, to + * isolate it from the kernel proper. The following symbols are legally + * accessed by the stub, so provide some aliases to make them accessible. + * Only include data symbols here, or text symbols of functions that are + * guaranteed to be safe when executed at another offset than they were + * linked at. 
The routines below are all implemented in assembler in a + * position independent manner + */ +__efistub_memcmp = memcmp; +__efistub_memchr = memchr; +__efistub_memcpy = memcpy; +__efistub_memmove = memmove; +__efistub_memset = memset; +__efistub_strlen = strlen; +__efistub_strnlen = strnlen; +__efistub_strcmp = strcmp; +__efistub_strncmp = strncmp; +__efistub_strrchr = strrchr; + +#ifdef CONFIG_KASAN +__efistub___memcpy = memcpy; +__efistub___memmove = memmove; +__efistub___memset = memset; +#endif + +__efistub__start = _start; +__efistub__start_kernel = _start_kernel; +__efistub__end = _end; +__efistub__edata = _edata; +__efistub_screen_info = screen_info; + +#endif + +#endif /* __RISCV_KERNEL_IMAGE_VARS_H */ diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 2c6dd329312b..4c96ac198e14 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -17,19 +17,22 @@ #include <linux/sched/task.h> #include <linux/swiotlb.h> #include <linux/smp.h> +#include <linux/efi.h> #include <asm/cpu_ops.h> +#include <asm/early_ioremap.h> #include <asm/setup.h> #include <asm/sections.h> #include <asm/sbi.h> #include <asm/tlbflush.h> #include <asm/thread_info.h> #include <asm/kasan.h> +#include <asm/efi.h> #include "head.h" -#ifdef CONFIG_DUMMY_CONSOLE -struct screen_info screen_info = { +#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI) +struct screen_info screen_info __section(.data) = { .orig_video_lines = 30, .orig_video_cols = 80, .orig_video_mode = 0, @@ -48,8 +51,9 @@ atomic_t hart_lottery __section(.sdata); unsigned long boot_cpu_hartid; static DEFINE_PER_CPU(struct cpu, cpu_devices); -void __init parse_dtb(void) +static void __init parse_dtb(void) { + /* Early scan of device tree from init memory */ if (early_init_dt_scan(dtb_early_va)) return; @@ -62,6 +66,7 @@ void __init parse_dtb(void) void __init setup_arch(char **cmdline_p) { + parse_dtb(); init_mm.start_code = (unsigned long) _stext; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; @@ -69,14 +74,19 @@ void __init setup_arch(char **cmdline_p) *cmdline_p = boot_command_line; + early_ioremap_setup(); parse_early_param(); + efi_init(); setup_bootmem(); paging_init(); #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); #else - unflatten_device_tree(); + if (early_init_dt_verify(__va(dtb_early_pa))) + unflatten_device_tree(); + else + pr_err("No DTB found in kernel mappings\n"); #endif #ifdef CONFIG_SWIOTLB diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 34d00d9e6eac..3ffbd6cbdb86 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -10,6 +10,7 @@ #include <asm/cache.h> #include <asm/thread_info.h> #include <asm/set_memory.h> +#include "image-vars.h" #include <linux/sizes.h> OUTPUT_ARCH(riscv) @@ -17,6 +18,9 @@ ENTRY(_start) jiffies = jiffies_64; +PECOFF_SECTION_ALIGNMENT = 0x1000; +PECOFF_FILE_ALIGNMENT = 0x200; + SECTIONS { /* Beginning of code and text segment */ @@ -66,6 +70,11 @@ SECTIONS _etext = .; } +#ifdef CONFIG_EFI + . = ALIGN(PECOFF_SECTION_ALIGNMENT); + __pecoff_text_end = .; +#endif + INIT_DATA_SECTION(16) /* Start of data section */ @@ -84,20 +93,31 @@ SECTIONS .sdata : { __global_pointer$ = . + 0x800; *(.sdata*) - /* End of data section */ - _edata = .; } +#ifdef CONFIG_EFI + .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); } + __pecoff_data_raw_size = ABSOLUTE(. 
- __pecoff_text_end); +#endif + + /* End of data section */ + _edata = .; + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) .rel.dyn : { *(.rel.dyn*) } +#ifdef CONFIG_EFI + . = ALIGN(PECOFF_SECTION_ALIGNMENT); + __pecoff_data_virt_size = ABSOLUTE(. - __pecoff_text_end); +#endif _end = .; STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 716d64e36f83..1359e21c0c62 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -19,6 +19,167 @@ #include "../kernel/head.h" +static inline void no_context(struct pt_regs *regs, unsigned long addr) +{ + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + bust_spinlocks(1); + pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", + (addr < PAGE_SIZE) ? "NULL pointer dereference" : + "paging request", addr); + die(regs, "Oops"); + do_exit(SIGKILL); +} + +static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) +{ + if (fault & VM_FAULT_OOM) { + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ + if (!user_mode(regs)) { + no_context(regs, addr); + return; + } + pagefault_out_of_memory(); + return; + } else if (fault & VM_FAULT_SIGBUS) { + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) { + no_context(regs, addr); + return; + } + do_trap(regs, SIGBUS, BUS_ADRERR, addr); + return; + } + BUG(); +} + +static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr) +{ + /* + * Something tried to access memory that isn't in our memory map. + * Fix it, but check if it's kernel or user first. + */ + mmap_read_unlock(mm); + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { + do_trap(regs, SIGSEGV, code, addr); + return; + } + + no_context(regs, addr); +} + +static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr) +{ + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + p4d_t *p4d, *p4d_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + int index; + + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) + return do_trap(regs, SIGSEGV, code, addr); + + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk->active_mm->pgd" here. + * We might be inside an interrupt in the middle + * of a task switch. + */ + index = pgd_index(addr); + pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index; + pgd_k = init_mm.pgd + index; + + if (!pgd_present(*pgd_k)) { + no_context(regs, addr); + return; + } + set_pgd(pgd, *pgd_k); + + p4d = p4d_offset(pgd, addr); + p4d_k = p4d_offset(pgd_k, addr); + if (!p4d_present(*p4d_k)) { + no_context(regs, addr); + return; + } + + pud = pud_offset(p4d, addr); + pud_k = pud_offset(p4d_k, addr); + if (!pud_present(*pud_k)) { + no_context(regs, addr); + return; + } + + /* + * Since the vmalloc area is global, it is unnecessary + * to copy individual PTEs + */ + pmd = pmd_offset(pud, addr); + pmd_k = pmd_offset(pud_k, addr); + if (!pmd_present(*pmd_k)) { + no_context(regs, addr); + return; + } + set_pmd(pmd, *pmd_k); + + /* + * Make sure the actual PTE exists as well to + * catch kernel vmalloc-area accesses to non-mapped + * addresses. If we don't do this, this will just + * silently loop forever. 
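The vmalloc_fault() helper above synchronizes the faulting task's top-level page table with the reference table in init_mm. A rough userspace analogue of that idea (all names below are invented for illustration, not kernel APIs): late mappings land only in a shared reference table, and the fault path copies the missing top-level entry into the per-task copy instead of failing.

#include <stdio.h>

#define TOP_ENTRIES 4

/* the "reference" table (init_mm.pgd in the patch) and a per-task copy */
static const char *reference_table[TOP_ENTRIES];
static const char *task_table[TOP_ENTRIES];

/* a vmalloc-style mapping created late only lands in the reference table */
static void vmalloc_style_map(int idx, const char *what)
{
        reference_table[idx] = what;
}

/* fault path: copy the missing top-level entry instead of reporting an error */
static int handle_vmalloc_fault(int idx)
{
        if (!reference_table[idx])
                return -1;              /* genuinely bad access */
        task_table[idx] = reference_table[idx];
        return 0;
}

int main(void)
{
        vmalloc_style_map(2, "new vmalloc region");

        if (!task_table[2] && handle_vmalloc_fault(2) == 0)
                printf("entry 2 synced from reference table: %s\n", task_table[2]);
        return 0;
}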
+ */ + pte_k = pte_offset_kernel(pmd_k, addr); + if (!pte_present(*pte_k)) { + no_context(regs, addr); + return; + } + + /* + * The kernel assumes that TLBs don't cache invalid + * entries, but in RISC-V, SFENCE.VMA specifies an + * ordering constraint, not a cache flush; it is + * necessary even after writing invalid entries. + */ + local_flush_tlb_page(addr); +} + +static inline bool access_error(unsigned long cause, struct vm_area_struct *vma) +{ + switch (cause) { + case EXC_INST_PAGE_FAULT: + if (!(vma->vm_flags & VM_EXEC)) { + return true; + } + break; + case EXC_LOAD_PAGE_FAULT: + if (!(vma->vm_flags & VM_READ)) { + return true; + } + break; + case EXC_STORE_PAGE_FAULT: + if (!(vma->vm_flags & VM_WRITE)) { + return true; + } + break; + default: + panic("%s: unhandled cause %lu", __func__, cause); + } + return false; +} + /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. @@ -48,8 +209,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs) * only copy the information from the master page table, * nothing more. */ - if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) - goto vmalloc_fault; + if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) { + vmalloc_fault(regs, code, addr); + return; + } /* Enable interrupts if they were enabled in the parent context. */ if (likely(regs->status & SR_PIE)) @@ -59,25 +222,37 @@ asmlinkage void do_page_fault(struct pt_regs *regs) * If we're in an interrupt, have no user context, or are running * in an atomic region, then we must not take the fault. */ - if (unlikely(faulthandler_disabled() || !mm)) - goto no_context; + if (unlikely(faulthandler_disabled() || !mm)) { + no_context(regs, addr); + return; + } if (user_mode(regs)) flags |= FAULT_FLAG_USER; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + if (cause == EXC_STORE_PAGE_FAULT) + flags |= FAULT_FLAG_WRITE; + else if (cause == EXC_INST_PAGE_FAULT) + flags |= FAULT_FLAG_INSTRUCTION; retry: mmap_read_lock(mm); vma = find_vma(mm, addr); - if (unlikely(!vma)) - goto bad_area; + if (unlikely(!vma)) { + bad_area(regs, mm, code, addr); + return; + } if (likely(vma->vm_start <= addr)) goto good_area; - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) - goto bad_area; - if (unlikely(expand_stack(vma, addr))) - goto bad_area; + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { + bad_area(regs, mm, code, addr); + return; + } + if (unlikely(expand_stack(vma, addr))) { + bad_area(regs, mm, code, addr); + return; + } /* * Ok, we have a good vm_area for this memory access, so @@ -86,22 +261,9 @@ retry: good_area: code = SEGV_ACCERR; - switch (cause) { - case EXC_INST_PAGE_FAULT: - if (!(vma->vm_flags & VM_EXEC)) - goto bad_area; - break; - case EXC_LOAD_PAGE_FAULT: - if (!(vma->vm_flags & VM_READ)) - goto bad_area; - break; - case EXC_STORE_PAGE_FAULT: - if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; - flags |= FAULT_FLAG_WRITE; - break; - default: - panic("%s: unhandled cause %lu", __func__, cause); + if (unlikely(access_error(cause, vma))) { + bad_area(regs, mm, code, addr); + return; } /* @@ -119,144 +281,22 @@ good_area: if (fault_signal_pending(fault, regs)) return; - if (unlikely(fault & VM_FAULT_ERROR)) { - if (fault & VM_FAULT_OOM) - goto out_of_memory; - else if (fault & VM_FAULT_SIGBUS) - goto do_sigbus; - BUG(); - } - - if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_RETRY) { - flags |= FAULT_FLAG_TRIED; + if (unlikely((fault & VM_FAULT_RETRY) && (flags & 
FAULT_FLAG_ALLOW_RETRY))) { + flags |= FAULT_FLAG_TRIED; - /* - * No need to mmap_read_unlock(mm) as we would - * have already released it in __lock_page_or_retry - * in mm/filemap.c. - */ - goto retry; - } + /* + * No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + goto retry; } mmap_read_unlock(mm); - return; - /* - * Something tried to access memory that isn't in our memory map. - * Fix it, but check if it's kernel or user first. - */ -bad_area: - mmap_read_unlock(mm); - /* User mode accesses just cause a SIGSEGV */ - if (user_mode(regs)) { - do_trap(regs, SIGSEGV, code, addr); + if (unlikely(fault & VM_FAULT_ERROR)) { + mm_fault_error(regs, addr, fault); return; } - -no_context: - /* Are we prepared to handle this kernel fault? */ - if (fixup_exception(regs)) - return; - - /* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. - */ - bust_spinlocks(1); - pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", - (addr < PAGE_SIZE) ? "NULL pointer dereference" : - "paging request", addr); - die(regs, "Oops"); - do_exit(SIGKILL); - - /* - * We ran out of memory, call the OOM killer, and return the userspace - * (which will retry the fault, or kill us if we got oom-killed). - */ -out_of_memory: - mmap_read_unlock(mm); - if (!user_mode(regs)) - goto no_context; - pagefault_out_of_memory(); - return; - -do_sigbus: - mmap_read_unlock(mm); - /* Kernel mode? Handle exceptions or die */ - if (!user_mode(regs)) - goto no_context; - do_trap(regs, SIGBUS, BUS_ADRERR, addr); return; - -vmalloc_fault: - { - pgd_t *pgd, *pgd_k; - pud_t *pud, *pud_k; - p4d_t *p4d, *p4d_k; - pmd_t *pmd, *pmd_k; - pte_t *pte_k; - int index; - - /* User mode accesses just cause a SIGSEGV */ - if (user_mode(regs)) - return do_trap(regs, SIGSEGV, code, addr); - - /* - * Synchronize this task's top level page-table - * with the 'reference' page table. - * - * Do _not_ use "tsk->active_mm->pgd" here. - * We might be inside an interrupt in the middle - * of a task switch. - */ - index = pgd_index(addr); - pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index; - pgd_k = init_mm.pgd + index; - - if (!pgd_present(*pgd_k)) - goto no_context; - set_pgd(pgd, *pgd_k); - - p4d = p4d_offset(pgd, addr); - p4d_k = p4d_offset(pgd_k, addr); - if (!p4d_present(*p4d_k)) - goto no_context; - - pud = pud_offset(p4d, addr); - pud_k = pud_offset(p4d_k, addr); - if (!pud_present(*pud_k)) - goto no_context; - - /* - * Since the vmalloc area is global, it is unnecessary - * to copy individual PTEs - */ - pmd = pmd_offset(pud, addr); - pmd_k = pmd_offset(pud_k, addr); - if (!pmd_present(*pmd_k)) - goto no_context; - set_pmd(pmd, *pmd_k); - - /* - * Make sure the actual PTE exists as well to - * catch kernel vmalloc-area accesses to non-mapped - * addresses. If we don't do this, this will just - * silently loop forever. - */ - pte_k = pte_offset_kernel(pmd_k, addr); - if (!pte_present(*pte_k)) - goto no_context; - - /* - * The kernel assumes that TLBs don't cache invalid - * entries, but in RISC-V, SFENCE.VMA specifies an - * ordering constraint, not a cache flush; it is - * necessary even after writing invalid entries. 
- */ - local_flush_tlb_page(addr); - - return; - } } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index f750e012dbe5..ea933b789a88 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -28,7 +28,18 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] EXPORT_SYMBOL(empty_zero_page); extern char _start[]; -void *dtb_early_va; +#define DTB_EARLY_BASE_VA PGDIR_SIZE +void *dtb_early_va __initdata; +uintptr_t dtb_early_pa __initdata; + +struct pt_alloc_ops { + pte_t *(*get_pte_virt)(phys_addr_t pa); + phys_addr_t (*alloc_pte)(uintptr_t va); +#ifndef __PAGETABLE_PMD_FOLDED + pmd_t *(*get_pmd_virt)(phys_addr_t pa); + phys_addr_t (*alloc_pmd)(uintptr_t va); +#endif +}; static void __init zone_sizes_init(void) { @@ -141,25 +152,23 @@ disable: } #endif /* CONFIG_BLK_DEV_INITRD */ -static phys_addr_t dtb_early_pa __initdata; - void __init setup_bootmem(void) { - struct memblock_region *reg; phys_addr_t mem_size = 0; phys_addr_t total_mem = 0; - phys_addr_t mem_start, end = 0; + phys_addr_t mem_start, start, end = 0; phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); + u64 i; /* Find the memory region containing the kernel */ - for_each_memblock(memory, reg) { - end = reg->base + reg->size; + for_each_mem_range(i, &start, &end) { + phys_addr_t size = end - start; if (!total_mem) - mem_start = reg->base; - if (reg->base <= vmlinux_start && vmlinux_end <= end) - BUG_ON(reg->size == 0); - total_mem = total_mem + reg->size; + mem_start = start; + if (start <= vmlinux_start && vmlinux_end <= end) + BUG_ON(size == 0); + total_mem = total_mem + size; } /* @@ -191,18 +200,11 @@ void __init setup_bootmem(void) early_init_fdt_scan_reserved_mem(); memblock_allow_resize(); memblock_dump_all(); - - for_each_memblock(memory, reg) { - unsigned long start_pfn = memblock_region_memory_base_pfn(reg); - unsigned long end_pfn = memblock_region_memory_end_pfn(reg); - - memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), - &memblock.memory, 0); - } } #ifdef CONFIG_MMU +static struct pt_alloc_ops pt_ops; + unsigned long va_pa_offset; EXPORT_SYMBOL(va_pa_offset); unsigned long pfn_base; @@ -211,7 +213,6 @@ EXPORT_SYMBOL(pfn_base); pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; -static bool mmu_enabled; #define MAX_EARLY_MAPPING_SIZE SZ_128M @@ -233,27 +234,46 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) local_flush_tlb_page(addr); } -static pte_t *__init get_pte_virt(phys_addr_t pa) +static inline pte_t *__init get_pte_virt_early(phys_addr_t pa) { - if (mmu_enabled) { - clear_fixmap(FIX_PTE); - return (pte_t *)set_fixmap_offset(FIX_PTE, pa); - } else { - return (pte_t *)((uintptr_t)pa); - } + return (pte_t *)((uintptr_t)pa); +} + +static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa) +{ + clear_fixmap(FIX_PTE); + return (pte_t *)set_fixmap_offset(FIX_PTE, pa); +} + +static inline pte_t *get_pte_virt_late(phys_addr_t pa) +{ + return (pte_t *) __va(pa); } -static phys_addr_t __init alloc_pte(uintptr_t va) +static inline phys_addr_t __init alloc_pte_early(uintptr_t va) { /* * We only create PMD or PGD early mappings so we * should never reach here with MMU disabled. 
*/ - BUG_ON(!mmu_enabled); + BUG(); +} +static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va) +{ return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); } +static phys_addr_t alloc_pte_late(uintptr_t va) +{ + unsigned long vaddr; + + vaddr = __get_free_page(GFP_KERNEL); + if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))) + BUG(); + return __pa(vaddr); +} + static void __init create_pte_mapping(pte_t *ptep, uintptr_t va, phys_addr_t pa, phys_addr_t sz, pgprot_t prot) @@ -278,28 +298,46 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; #endif pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE); -static pmd_t *__init get_pmd_virt(phys_addr_t pa) +static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) { - if (mmu_enabled) { - clear_fixmap(FIX_PMD); - return (pmd_t *)set_fixmap_offset(FIX_PMD, pa); - } else { - return (pmd_t *)((uintptr_t)pa); - } + /* Before MMU is enabled */ + return (pmd_t *)((uintptr_t)pa); } -static phys_addr_t __init alloc_pmd(uintptr_t va) +static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa) { - uintptr_t pmd_num; + clear_fixmap(FIX_PMD); + return (pmd_t *)set_fixmap_offset(FIX_PMD, pa); +} - if (mmu_enabled) - return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); +static pmd_t *get_pmd_virt_late(phys_addr_t pa) +{ + return (pmd_t *) __va(pa); +} + +static phys_addr_t __init alloc_pmd_early(uintptr_t va) +{ + uintptr_t pmd_num; pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT; BUG_ON(pmd_num >= NUM_EARLY_PMDS); return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD]; } +static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va) +{ + return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); +} + +static phys_addr_t alloc_pmd_late(uintptr_t va) +{ + unsigned long vaddr; + + vaddr = __get_free_page(GFP_KERNEL); + BUG_ON(!vaddr); + return __pa(vaddr); +} + static void __init create_pmd_mapping(pmd_t *pmdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz, pgprot_t prot) @@ -315,34 +353,34 @@ static void __init create_pmd_mapping(pmd_t *pmdp, } if (pmd_none(pmdp[pmd_idx])) { - pte_phys = alloc_pte(va); + pte_phys = pt_ops.alloc_pte(va); pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE); - ptep = get_pte_virt(pte_phys); + ptep = pt_ops.get_pte_virt(pte_phys); memset(ptep, 0, PAGE_SIZE); } else { pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx])); - ptep = get_pte_virt(pte_phys); + ptep = pt_ops.get_pte_virt(pte_phys); } create_pte_mapping(ptep, va, pa, sz, prot); } #define pgd_next_t pmd_t -#define alloc_pgd_next(__va) alloc_pmd(__va) -#define get_pgd_next_virt(__pa) get_pmd_virt(__pa) +#define alloc_pgd_next(__va) pt_ops.alloc_pmd(__va) +#define get_pgd_next_virt(__pa) pt_ops.get_pmd_virt(__pa) #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \ create_pmd_mapping(__nextp, __va, __pa, __sz, __prot) #define fixmap_pgd_next fixmap_pmd #else #define pgd_next_t pte_t -#define alloc_pgd_next(__va) alloc_pte(__va) -#define get_pgd_next_virt(__pa) get_pte_virt(__pa) +#define alloc_pgd_next(__va) pt_ops.alloc_pte(__va) +#define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa) #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \ create_pte_mapping(__nextp, __va, __pa, __sz, __prot) #define fixmap_pgd_next fixmap_pte #endif -static void __init create_pgd_mapping(pgd_t *pgdp, +void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz, pgprot_t prot) { @@ -398,10 +436,13 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) asmlinkage void __init setup_vm(uintptr_t dtb_pa) { - 
uintptr_t va, end_va; + uintptr_t va, pa, end_va; uintptr_t load_pa = (uintptr_t)(&_start); uintptr_t load_sz = (uintptr_t)(&_end) - load_pa; uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE); +#ifndef __PAGETABLE_PMD_FOLDED + pmd_t fix_bmap_spmd, fix_bmap_epmd; +#endif va_pa_offset = PAGE_OFFSET - load_pa; pfn_base = PFN_DOWN(load_pa); @@ -417,6 +458,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) BUG_ON((load_pa % map_size) != 0); BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE); + pt_ops.alloc_pte = alloc_pte_early; + pt_ops.get_pte_virt = get_pte_virt_early; +#ifndef __PAGETABLE_PMD_FOLDED + pt_ops.alloc_pmd = alloc_pmd_early; + pt_ops.get_pmd_virt = get_pmd_virt_early; +#endif /* Setup early PGD for fixmap */ create_pgd_mapping(early_pg_dir, FIXADDR_START, (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE); @@ -447,42 +494,71 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) load_pa + (va - PAGE_OFFSET), map_size, PAGE_KERNEL_EXEC); - /* Create fixed mapping for early FDT parsing */ - end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE; - for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE) - create_pte_mapping(fixmap_pte, va, - dtb_pa + (va - __fix_to_virt(FIX_FDT)), - PAGE_SIZE, PAGE_KERNEL); - - /* Save pointer to DTB for early FDT parsing */ - dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK); - /* Save physical address for memblock reservation */ + /* Create two consecutive PGD mappings for FDT early scan */ + pa = dtb_pa & ~(PGDIR_SIZE - 1); + create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, + pa, PGDIR_SIZE, PAGE_KERNEL); + create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE, + pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL); + dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1)); dtb_early_pa = dtb_pa; + + /* + * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap + * range can not span multiple pmds. + */ + BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) + != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); + +#ifndef __PAGETABLE_PMD_FOLDED + /* + * Early ioremap fixmap is already created as it lies within first 2MB + * of fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END + * FIX_BTMAP_BEGIN should lie in the same pmd. Verify that and warn + * the user if not. + */ + fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))]; + fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))]; + if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) { + WARN_ON(1); + pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n", + pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd)); + pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", + fix_to_virt(FIX_BTMAP_BEGIN)); + pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", + fix_to_virt(FIX_BTMAP_END)); + + pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); + pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); + } +#endif } static void __init setup_vm_final(void) { uintptr_t va, map_size; phys_addr_t pa, start, end; - struct memblock_region *reg; - - /* Set mmu_enabled flag */ - mmu_enabled = true; + u64 i; + /** + * MMU is enabled at this point. But page table setup is not complete yet. 
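setup_vm() installs the early page-table helpers here, setup_vm_final() below switches to the fixmap-based ones, and the late helpers take over once swapper_pg_dir is live. A small userspace sketch of that pattern (stand-in functions, not the kernel helpers): one ops table that the mapping code uses unconditionally, repointed as each boot phase gains new capabilities.

#include <stdio.h>

struct pt_alloc_ops {
        const char *(*alloc_pte)(void);
};

static const char *alloc_pte_early(void)  { return "statically reserved page"; }
static const char *alloc_pte_fixmap(void) { return "memblock page via fixmap"; }
static const char *alloc_pte_late(void)   { return "page allocator page"; }

static struct pt_alloc_ops pt_ops = { .alloc_pte = alloc_pte_early };

static void create_mapping(const char *what)
{
        /* the mapping code never cares which phase it runs in */
        printf("map %-12s using %s\n", what, pt_ops.alloc_pte());
}

int main(void)
{
        create_mapping("fixmap");               /* early phase, MMU off */

        pt_ops.alloc_pte = alloc_pte_fixmap;    /* MMU on, allocator not ready */
        create_mapping("linear map");

        pt_ops.alloc_pte = alloc_pte_late;      /* normal allocator available */
        create_mapping("vmalloc");
        return 0;
}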
+ * fixmap page table alloc functions should be used at this point + */ + pt_ops.alloc_pte = alloc_pte_fixmap; + pt_ops.get_pte_virt = get_pte_virt_fixmap; +#ifndef __PAGETABLE_PMD_FOLDED + pt_ops.alloc_pmd = alloc_pmd_fixmap; + pt_ops.get_pmd_virt = get_pmd_virt_fixmap; +#endif /* Setup swapper PGD for fixmap */ create_pgd_mapping(swapper_pg_dir, FIXADDR_START, __pa_symbol(fixmap_pgd_next), PGDIR_SIZE, PAGE_TABLE); /* Map all memory banks */ - for_each_memblock(memory, reg) { - start = reg->base; - end = start + reg->size; - + for_each_mem_range(i, &start, &end) { if (start >= end) break; - if (memblock_is_nomap(reg)) - continue; if (start <= __pa(PAGE_OFFSET) && __pa(PAGE_OFFSET) < end) start = __pa(PAGE_OFFSET); @@ -502,6 +578,14 @@ static void __init setup_vm_final(void) /* Move to swapper page table */ csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE); local_flush_tlb_all(); + + /* generic page allocation functions must be used to setup page table */ + pt_ops.alloc_pte = alloc_pte_late; + pt_ops.get_pte_virt = get_pte_virt_late; +#ifndef __PAGETABLE_PMD_FOLDED + pt_ops.alloc_pmd = alloc_pmd_late; + pt_ops.get_pmd_virt = get_pmd_virt_late; +#endif } #else asmlinkage void __init setup_vm(uintptr_t dtb_pa) @@ -545,7 +629,7 @@ static void __init resource_init(void) { struct memblock_region *region; - for_each_memblock(memory, region) { + for_each_mem_region(region) { struct resource *res; res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 87b4ab3d3c77..12ddd1f6bf70 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -85,16 +85,16 @@ static void __init populate(void *start, void *end) void __init kasan_init(void) { - struct memblock_region *reg; - unsigned long i; + phys_addr_t _start, _end; + u64 i; kasan_populate_early_shadow((void *)KASAN_SHADOW_START, (void *)kasan_mem_to_shadow((void *) VMALLOC_END)); - for_each_memblock(memory, reg) { - void *start = (void *)__va(reg->base); - void *end = (void *)__va(reg->base + reg->size); + for_each_mem_range(i, &_start, &_end) { + void *start = (void *)_start; + void *end = (void *)_end; if (start >= end) break; diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index 0831c2e61a8f..ace74dec7492 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -3,6 +3,7 @@ * Copyright (C) 2019 SiFive */ +#include <linux/efi.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/seq_file.h> @@ -49,6 +50,14 @@ struct addr_marker { const char *name; }; +/* Private information for debugfs */ +struct ptd_mm_info { + struct mm_struct *mm; + const struct addr_marker *markers; + unsigned long base_addr; + unsigned long end; +}; + static struct addr_marker address_markers[] = { #ifdef CONFIG_KASAN {KASAN_SHADOW_START, "Kasan shadow start"}, @@ -68,6 +77,28 @@ static struct addr_marker address_markers[] = { {-1, NULL}, }; +static struct ptd_mm_info kernel_ptd_info = { + .mm = &init_mm, + .markers = address_markers, + .base_addr = KERN_VIRT_START, + .end = ULONG_MAX, +}; + +#ifdef CONFIG_EFI +static struct addr_marker efi_addr_markers[] = { + { 0, "UEFI runtime start" }, + { SZ_1G, "UEFI runtime end" }, + { -1, NULL } +}; + +static struct ptd_mm_info efi_ptd_info = { + .mm = &efi_mm, + .markers = efi_addr_markers, + .base_addr = 0, + .end = SZ_2G, +}; +#endif + /* Page Table Entry */ struct prot_bits { u64 mask; @@ -245,22 +276,22 @@ static void note_page(struct ptdump_state *pt_st, unsigned long 
addr, } } -static void ptdump_walk(struct seq_file *s) +static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo) { struct pg_state st = { .seq = s, - .marker = address_markers, + .marker = pinfo->markers, .level = -1, .ptdump = { .note_page = note_page, .range = (struct ptdump_range[]) { - {KERN_VIRT_START, ULONG_MAX}, + {pinfo->base_addr, pinfo->end}, {0, 0} } } }; - ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); + ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL); } void ptdump_check_wx(void) @@ -293,7 +324,7 @@ void ptdump_check_wx(void) static int ptdump_show(struct seq_file *m, void *v) { - ptdump_walk(m); + ptdump_walk(m, m->private); return 0; } @@ -308,8 +339,13 @@ static int ptdump_init(void) for (j = 0; j < ARRAY_SIZE(pte_bits); j++) pg_level[i].mask |= pte_bits[j].mask; - debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, + debugfs_create_file("kernel_page_tables", 0400, NULL, &kernel_ptd_info, &ptdump_fops); +#ifdef CONFIG_EFI + if (efi_enabled(EFI_RUNTIME_SERVICES)) + debugfs_create_file("efi_page_tables", 0400, NULL, &efi_ptd_info, + &ptdump_fops); +#endif return 0; } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b29fcc66ec39..4a00351dec89 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -60,6 +60,7 @@ config S390 def_bool y select ARCH_BINFMT_ELF_STATE select ARCH_HAS_DEBUG_VM_PGTABLE + select ARCH_HAS_DEBUG_WX select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE @@ -73,6 +74,7 @@ config S390 select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VDSO_DATA select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH @@ -118,6 +120,8 @@ config S390 select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_VULNERABILITIES select GENERIC_FIND_FIRST_BIT + select GENERIC_GETTIMEOFDAY + select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select HAVE_ALIGNED_STRUCT_PAGE if SLUB @@ -149,6 +153,7 @@ config S390 select HAVE_FUNCTION_TRACER select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_GCC_PLUGINS + select HAVE_GENERIC_VDSO select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZ4 @@ -185,6 +190,7 @@ config S390 select OLD_SIGSUSPEND3 select PCI_DOMAINS if PCI select PCI_MSI if PCI + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK @@ -791,23 +797,6 @@ config CRASH_DUMP endmenu -config SECCOMP - def_bool y - prompt "Enable seccomp to safely compute untrusted bytecode" - depends on PROC_FS - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via /proc/<pid>/seccomp, it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. 
- config CCW def_bool y diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 761fe2b0b2f6..ab48b694ade8 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug @@ -3,17 +3,5 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y -config S390_PTDUMP - bool "Export kernel pagetable layout to userspace via debugfs" - depends on DEBUG_KERNEL - select DEBUG_FS - help - Say Y here if you want to show the kernel pagetable layout in a - debugfs file. This information is only useful for kernel developers - who are working in architecture specific areas of the kernel. - It is probably not a good idea to enable this feature in a production - kernel. - If in doubt, say "N" - config EARLY_PRINTK def_bool y diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index 45b33b83de08..41a64b8dce25 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -73,7 +73,3 @@ $(obj)/startup.a: $(OBJECTS) FORCE install: sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ System.map "$(INSTALL_PATH)" - -chkbss := $(obj-y) -chkbss-target := startup.a -include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index fa529c5b4486..b235ed95a3d8 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -62,7 +62,3 @@ $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed $(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE $(call if_changed,objcopy) - -chkbss := $(filter-out piggy.o info.o, $(obj-y)) -chkbss-target := vmlinux.bin -include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c index 368fd372c875..3061b11c4d27 100644 --- a/arch/s390/boot/compressed/decompressor.c +++ b/arch/s390/boot/compressed/decompressor.c @@ -16,7 +16,6 @@ * gzip declarations */ #define STATIC static -#define STATIC_RW_DATA static __section(.data) #undef memset #undef memcpy diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S index 44561b2c3712..9427e2cd0c15 100644 --- a/arch/s390/boot/compressed/vmlinux.lds.S +++ b/arch/s390/boot/compressed/vmlinux.lds.S @@ -59,6 +59,19 @@ SECTIONS BOOT_DATA_PRESERVED /* + * This is the BSS section of the decompressor and not of the decompressed Linux kernel. + * It will consume place in the decompressor's image. + */ + . = ALIGN(8); + .bss : { + _bss = . ; + *(.bss) + *(.bss.*) + *(COMMON) + _ebss = .; + } + + /* * uncompressed image info used by the decompressor it should match * struct vmlinux_info. It comes from .vmlinux.info section of * uncompressed vmlinux in a form of info.o @@ -81,15 +94,6 @@ SECTIONS FILL(0xff); . = ALIGN(4096); } - . = ALIGN(256); - .bss : { - _bss = . ; - *(.bss) - *(.bss.*) - *(COMMON) - . = ALIGN(8); /* For convenience during zeroing */ - _ebss = .; - } _end = .; /* Sections to be discarded */ diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S index dae10961d072..1a2c2b1ed964 100644 --- a/arch/s390/boot/head.S +++ b/arch/s390/boot/head.S @@ -360,22 +360,23 @@ ENTRY(startup_kdump) # the save area and does disabled wait with a faulty address. 
# ENTRY(startup_pgm_check_handler) - stmg %r0,%r15,__LC_SAVE_AREA_SYNC - la %r1,4095 - stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1) - mvc __LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC - mvc __LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW + stmg %r8,%r15,__LC_SAVE_AREA_SYNC + la %r8,4095 + stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8) + stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8) + mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC + mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW ni __LC_RETURN_PSW,0xfc # remove IO and EX bits ni __LC_RETURN_PSW+1,0xfb # remove MCHK bit oi __LC_RETURN_PSW+1,0x2 # set wait state bit - larl %r2,.Lold_psw_disabled_wait - stg %r2,__LC_PGM_NEW_PSW+8 - l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2) + larl %r9,.Lold_psw_disabled_wait + stg %r9,__LC_PGM_NEW_PSW+8 + l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9) brasl %r14,print_pgm_check_info .Lold_psw_disabled_wait: - la %r1,4095 - lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) + la %r8,4095 + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8) lpswe __LC_RETURN_PSW # disabled wait .Ldump_info_stack: .long 0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index 8e222a666025..f94b91d72620 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -21,7 +21,7 @@ unsigned long __bootdata(memory_end); int __bootdata(memory_end_set); int __bootdata(noexec_disabled); -int kaslr_enabled __section(.data); +int kaslr_enabled; static inline int __diag308(unsigned long subcode, void *addr) { @@ -70,30 +70,44 @@ static size_t scpdata_length(const u8 *buf, size_t count) static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size, const struct ipl_parameter_block *ipb) { - size_t count; - size_t i; + const __u8 *scp_data; + __u32 scp_data_len; int has_lowercase; + size_t count = 0; + size_t i; + + switch (ipb->pb0_hdr.pbt) { + case IPL_PBT_FCP: + scp_data_len = ipb->fcp.scp_data_len; + scp_data = ipb->fcp.scp_data; + break; + case IPL_PBT_NVME: + scp_data_len = ipb->nvme.scp_data_len; + scp_data = ipb->nvme.scp_data; + break; + default: + goto out; + } - count = min(size - 1, scpdata_length(ipb->fcp.scp_data, - ipb->fcp.scp_data_len)); + count = min(size - 1, scpdata_length(scp_data, scp_data_len)); if (!count) goto out; has_lowercase = 0; for (i = 0; i < count; i++) { - if (!isascii(ipb->fcp.scp_data[i])) { + if (!isascii(scp_data[i])) { count = 0; goto out; } - if (!has_lowercase && islower(ipb->fcp.scp_data[i])) + if (!has_lowercase && islower(scp_data[i])) has_lowercase = 1; } if (has_lowercase) - memcpy(dest, ipb->fcp.scp_data, count); + memcpy(dest, scp_data, count); else for (i = 0; i < count; i++) - dest[i] = tolower(ipb->fcp.scp_data[i]); + dest[i] = tolower(scp_data[i]); out: dest[count] = '\0'; return count; @@ -115,6 +129,7 @@ static void append_ipl_block_parm(void) parm, COMMAND_LINE_SIZE - len - 1, &ipl_block); break; case IPL_PBT_FCP: + case IPL_PBT_NVME: rc = ipl_block_get_ascii_scpdata( parm, COMMAND_LINE_SIZE - len - 1, &ipl_block); break; @@ -209,7 +224,7 @@ static void modify_fac_list(char *str) check_cleared_facilities(); } -static char command_line_buf[COMMAND_LINE_SIZE] __section(.data); +static char command_line_buf[COMMAND_LINE_SIZE]; void parse_boot_command_line(void) { char *param, *val; @@ -230,7 +245,7 @@ void parse_boot_command_line(void) if (!strcmp(param, "vmalloc") && val) vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE); - if 
(!strcmp(param, "dfltcc")) { + if (!strcmp(param, "dfltcc") && val) { if (!strcmp(val, "off")) zlib_dfltcc_support = ZLIB_DFLTCC_DISABLED; else if (!strcmp(val, "on")) @@ -254,17 +269,34 @@ void parse_boot_command_line(void) if (!strcmp(param, "nokaslr")) kaslr_enabled = 0; + +#if IS_ENABLED(CONFIG_KVM) + if (!strcmp(param, "prot_virt")) { + rc = kstrtobool(val, &enabled); + if (!rc && enabled) + prot_virt_host = 1; + } +#endif } } +static inline bool is_ipl_block_dump(void) +{ + if (ipl_block.pb0_hdr.pbt == IPL_PBT_FCP && + ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) + return true; + if (ipl_block.pb0_hdr.pbt == IPL_PBT_NVME && + ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP) + return true; + return false; +} + void setup_memory_end(void) { #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) { kaslr_enabled = 0; - } else if (ipl_block_valid && - ipl_block.pb0_hdr.pbt == IPL_PBT_FCP && - ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) { + } else if (ipl_block_valid && is_ipl_block_dump()) { kaslr_enabled = 0; if (!sclp_early_get_hsa_size(&memory_end) && memory_end) memory_end_set = 1; diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index d4442163ffa9..d844a5ef9089 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -42,7 +42,7 @@ static int check_prng(void) return PRNG_MODE_TDES; } -static unsigned long get_random(unsigned long limit) +static int get_random(unsigned long limit, unsigned long *value) { struct prng_parm prng = { /* initial parameter block for tdes mode, copied from libica */ @@ -84,19 +84,101 @@ static unsigned long get_random(unsigned long limit) (u8 *) &random, sizeof(random)); break; default: - random = 0; + return -1; } - return random % limit; + *value = random % limit; + return 0; +} + +/* + * To randomize kernel base address we have to consider several facts: + * 1. physical online memory might not be continuous and have holes. mem_detect + * info contains list of online memory ranges we should consider. + * 2. we have several memory regions which are occupied and we should not + * overlap and destroy them. Currently safe_addr tells us the border below + * which all those occupied regions are. We are safe to use anything above + * safe_addr. + * 3. the upper limit might apply as well, even if memory above that limit is + * online. Currently those limitations are: + * 3.1. Limit set by "mem=" kernel command line option + * 3.2. memory reserved at the end for kasan initialization. + * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size). + * Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages + * (16 pages when the kernel is built with kasan enabled) + * Assumptions: + * 1. kernel size (including .bss size) and upper memory limit are page aligned. + * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE + * aligned (in practice memory configurations granularity on z/VM and LPAR + * is 1mb). + * + * To guarantee uniform distribution of kernel base address among all suitable + * addresses we generate random value just once. For that we need to build a + * continuous range in which every value would be suitable. We can build this + * range by simply counting all suitable addresses (let's call them positions) + * which would be valid as kernel base address. To count positions we iterate + * over online memory ranges. 
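A compact userspace sketch of the position-counting scheme the comment describes, using two made-up memory ranges and example sizes; the arithmetic mirrors count_valid_kernel_positions() and position_to_address() below.

#include <stdio.h>

#define THREAD_SIZE 0x4000UL            /* example: 16 KiB kernel stack size */

struct range { unsigned long start, end; };

static unsigned long count_positions(const struct range *r, int n,
                                     unsigned long kernel_size)
{
        unsigned long pos = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (r[i].end - r[i].start < kernel_size)
                        continue;
                pos += (r[i].end - r[i].start - kernel_size) / THREAD_SIZE + 1;
        }
        return pos;
}

static unsigned long position_to_addr(const struct range *r, int n,
                                      unsigned long kernel_size,
                                      unsigned long pos)
{
        unsigned long slots;
        int i;

        for (i = 0; i < n; i++) {
                if (r[i].end - r[i].start < kernel_size)
                        continue;
                slots = (r[i].end - r[i].start - kernel_size) / THREAD_SIZE + 1;
                if (pos <= slots)
                        return r[i].start + (pos - 1) * THREAD_SIZE;
                pos -= slots;
        }
        return 0;
}

int main(void)
{
        struct range mem[] = { { 0x10000000, 0x20000000 },
                               { 0x40000000, 0x48000000 } };
        unsigned long kernel_size = 0x1000000;  /* example: 16 MiB image + bss */
        unsigned long max_pos = count_positions(mem, 2, kernel_size);

        /* a PRNG value in [1, max_pos] then picks the base uniformly */
        printf("%lu candidate bases, position 1 -> %#lx\n",
               max_pos, position_to_addr(mem, 2, kernel_size, 1));
        return 0;
}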
For each range which is big enough for the + * kernel image we count all suitable addresses we can put the kernel image at + * that is + * (end - start - kernel_size) / THREAD_SIZE + 1 + * Two functions count_valid_kernel_positions and position_to_address help + * to count positions in memory range given and then convert position back + * to address. + */ +static unsigned long count_valid_kernel_positions(unsigned long kernel_size, + unsigned long _min, + unsigned long _max) +{ + unsigned long start, end, pos = 0; + int i; + + for_each_mem_detect_block(i, &start, &end) { + if (_min >= end) + continue; + if (start >= _max) + break; + start = max(_min, start); + end = min(_max, end); + if (end - start < kernel_size) + continue; + pos += (end - start - kernel_size) / THREAD_SIZE + 1; + } + + return pos; +} + +static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size, + unsigned long _min, unsigned long _max) +{ + unsigned long start, end; + int i; + + for_each_mem_detect_block(i, &start, &end) { + if (_min >= end) + continue; + if (start >= _max) + break; + start = max(_min, start); + end = min(_max, end); + if (end - start < kernel_size) + continue; + if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos) + return start + (pos - 1) * THREAD_SIZE; + pos -= (end - start - kernel_size) / THREAD_SIZE + 1; + } + + return 0; } unsigned long get_random_base(unsigned long safe_addr) { - unsigned long memory_limit = memory_end_set ? memory_end : 0; - unsigned long base, start, end, kernel_size; - unsigned long block_sum, offset; + unsigned long memory_limit = get_mem_detect_end(); + unsigned long base_pos, max_pos, kernel_size; unsigned long kasan_needs; int i; + if (memory_end_set) + memory_limit = min(memory_limit, memory_end); + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) { if (safe_addr < INITRD_START + INITRD_SIZE) safe_addr = INITRD_START + INITRD_SIZE; @@ -126,45 +208,17 @@ unsigned long get_random_base(unsigned long safe_addr) } kernel_size = vmlinux.image_size + vmlinux.bss_size; - block_sum = 0; - for_each_mem_detect_block(i, &start, &end) { - if (memory_limit) { - if (start >= memory_limit) - break; - if (end > memory_limit) - end = memory_limit; - } - if (end - start < kernel_size) - continue; - block_sum += end - start - kernel_size; - } - if (!block_sum) { + if (safe_addr + kernel_size > memory_limit) + return 0; + + max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit); + if (!max_pos) { sclp_early_printk("KASLR disabled: not enough memory\n"); return 0; } - base = get_random(block_sum); - if (base == 0) + /* we need a value in the range [1, base_pos] inclusive */ + if (get_random(max_pos, &base_pos)) return 0; - if (base < safe_addr) - base = safe_addr; - block_sum = offset = 0; - for_each_mem_detect_block(i, &start, &end) { - if (memory_limit) { - if (start >= memory_limit) - break; - if (end > memory_limit) - end = memory_limit; - } - if (end - start < kernel_size) - continue; - block_sum += end - start - kernel_size; - if (base <= block_sum) { - base = start + base - offset; - base = ALIGN_DOWN(base, THREAD_SIZE); - break; - } - offset = block_sum; - } - return base; + return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit); } diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c index 83b5b7915c32..a3c9862bcede 100644 --- a/arch/s390/boot/pgm_check_info.c +++ b/arch/s390/boot/pgm_check_info.c @@ -2,6 +2,7 @@ #include <linux/kernel.h> #include 
<linux/string.h> #include <asm/lowcore.h> +#include <asm/setup.h> #include <asm/sclp.h> #include "boot.h" @@ -32,7 +33,8 @@ void print_pgm_check_info(void) char *p; add_str(buf, "Linux version "); - strlcat(buf, kernel_version, sizeof(buf)); + strlcat(buf, kernel_version, sizeof(buf) - 1); + strlcat(buf, "\n", sizeof(buf)); sclp_early_printk(buf); p = add_str(buf, "Kernel fault: interruption code "); @@ -42,6 +44,13 @@ void print_pgm_check_info(void) add_str(p, "\n"); sclp_early_printk(buf); + if (kaslr_enabled) { + p = add_str(buf, "Kernel random base: "); + p = add_val_as_hex(p, __kaslr_offset); + add_str(p, "\n"); + sclp_early_printk(buf); + } + p = add_str(buf, "PSW : "); p = add_val_as_hex(p, S390_lowcore.psw_save_area.mask); p = add_str(p, " "); diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 3b3a11f95269..90842936545b 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -48,8 +48,6 @@ struct diag_ops __bootdata_preserved(diag_dma_ops) = { }; static struct diag210 _diag210_tmp_dma __section(.dma.data); struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma; -void _swsusp_reset_dma(void); -unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma); void error(char *x) { @@ -120,6 +118,9 @@ static void handle_relocs(unsigned long offset) } } +/* + * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's. + */ static void clear_bss_section(void) { memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size); diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S index 9715715c4c28..f7c77cd518f2 100644 --- a/arch/s390/boot/text_dma.S +++ b/arch/s390/boot/text_dma.S @@ -97,23 +97,6 @@ ENTRY(_diag0c_dma) ENDPROC(_diag0c_dma) /* - * void _swsusp_reset_dma(void) - */ -ENTRY(_swsusp_reset_dma) - larl %r1,restart_entry - larl %r2,.Lrestart_diag308_psw - og %r1,0(%r2) - stg %r1,0(%r0) - lghi %r0,0 - diag %r0,%r0,0x308 -restart_entry: - lhi %r1,1 - sigp %r1,%r0,SIGP_SET_ARCHITECTURE - sam64 - BR_EX_DMA_r14 -ENDPROC(_swsusp_reset_dma) - -/* * void _diag308_reset_dma(void) * * Calls diag 308 subcode 1 and continues execution diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c index f887a479cdc7..a15c033f53ca 100644 --- a/arch/s390/boot/uv.c +++ b/arch/s390/boot/uv.c @@ -7,6 +7,9 @@ #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST int __bootdata_preserved(prot_virt_guest); #endif +#if IS_ENABLED(CONFIG_KVM) +int __bootdata_preserved(prot_virt_host); +#endif struct uv_info __bootdata_preserved(uv_info); void uv_query_info(void) diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 7228aabe9da6..0784bf3caf43 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -775,6 +775,8 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_PAGEALLOC=y CONFIG_PAGE_OWNER=y CONFIG_DEBUG_RODATA_TEST=y +CONFIG_DEBUG_WX=y +CONFIG_PTDUMP_DEBUGFS=y CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_OBJECTS_SELFTEST=y CONFIG_DEBUG_OBJECTS_FREE=y @@ -822,7 +824,6 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_HIST_TRIGGERS=y -CONFIG_S390_PTDUMP=y CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index fab03b7a6932..905bc8c4cfaf 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -759,6 +759,8 @@ CONFIG_GDB_SCRIPTS=y CONFIG_FRAME_WARN=1024 
CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_WX=y +CONFIG_PTDUMP_DEBUGFS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_PANIC_ON_OOPS=y CONFIG_TEST_LOCKUP=m @@ -775,7 +777,6 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_HIST_TRIGGERS=y -CONFIG_S390_PTDUMP=y CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index 3cfe1eb89838..c0be5fe1ddba 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -238,7 +238,10 @@ extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *); struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int); u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx); int ccw_device_pnso(struct ccw_device *cdev, - struct chsc_pnso_area *pnso_area, - struct chsc_pnso_resume_token resume_token, - int cnc); + struct chsc_pnso_area *pnso_area, u8 oc, + struct chsc_pnso_resume_token resume_token, int cnc); +int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid); +int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid); +int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid); +int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid); #endif /* _S390_CCWDEV_H_ */ diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index 6d01c96aeb5c..a8c02cfbc712 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h @@ -13,21 +13,21 @@ #define _S390_CHECKSUM_H #include <linux/uaccess.h> +#include <linux/in6.h> /* - * computes the checksum of a memory block at buff, length len, - * and adds in "sum" (32-bit) + * Computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit). * - * returns a 32-bit number suitable for feeding into itself - * or csum_tcpudp_magic + * Returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic. * - * this function must be called with even lengths, except - * for the last fragment, which may be odd + * This function must be called with even lengths, except + * for the last fragment, which may be odd. * - * it's best to have buff aligned on a 32-bit boundary + * It's best to have buff aligned on a 32-bit boundary. */ -static inline __wsum -csum_partial(const void *buff, int len, __wsum sum) +static inline __wsum csum_partial(const void *buff, int len, __wsum sum) { register unsigned long reg2 asm("2") = (unsigned long) buff; register unsigned long reg3 asm("3") = (unsigned long) len; @@ -39,82 +39,92 @@ csum_partial(const void *buff, int len, __wsum sum) return sum; } -static inline __wsum -csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum) -{ - memcpy(dst,src,len); - return csum_partial(dst, len, sum); -} - /* - * Fold a partial checksum without adding pseudo headers + * Fold a partial checksum without adding pseudo headers. */ static inline __sum16 csum_fold(__wsum sum) { u32 csum = (__force u32) sum; - csum += (csum >> 16) + (csum << 16); + csum += (csum >> 16) | (csum << 16); csum >>= 16; return (__force __sum16) ~csum; } /* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. - * + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksums on 4 octet boundaries. 
*/ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { - return csum_fold(csum_partial(iph, ihl*4, 0)); + __u64 csum = 0; + __u32 *ptr = (u32 *)iph; + + csum += *ptr++; + csum += *ptr++; + csum += *ptr++; + csum += *ptr++; + ihl -= 4; + while (ihl--) + csum += *ptr++; + csum += (csum >> 32) | (csum << 32); + return csum_fold((__force __wsum)(csum >> 32)); } /* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 32-bit checksum + * Computes the checksum of the TCP/UDP pseudo-header. + * Returns a 32-bit checksum. */ -static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, - __wsum sum) +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { - __u32 csum = (__force __u32)sum; + __u64 csum = (__force __u64)sum; csum += (__force __u32)saddr; - if (csum < (__force __u32)saddr) - csum++; - csum += (__force __u32)daddr; - if (csum < (__force __u32)daddr) - csum++; - - csum += len + proto; - if (csum < len + proto) - csum++; - - return (__force __wsum)csum; + csum += len; + csum += proto; + csum += (csum >> 32) | (csum << 32); + return (__force __wsum)(csum >> 32); } /* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented + * Computes the checksum of the TCP/UDP pseudo-header. + * Returns a 16-bit checksum, already complemented. */ - -static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, - __wsum sum) +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } /* - * this routine is used for miscellaneous IP-like checksums, mainly - * in icmp.c + * Used for miscellaneous IP-like checksums, mainly icmp. 
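The rewritten s390 helpers above share one shape: accumulate 32-bit words into a 64-bit sum, fold the carries back in with a rotate, then reduce the upper half to a 16-bit ones' complement checksum. A standalone, architecture-neutral sketch of that fold pattern on example data (plain C, not the kernel's csum_partial):

#include <stdio.h>
#include <stdint.h>

static uint16_t fold32(uint32_t csum)
{
        csum += (csum >> 16) | (csum << 16);    /* add lower half into upper half */
        return (uint16_t)~(csum >> 16);
}

static uint16_t csum32_words(const uint32_t *p, int n)
{
        uint64_t sum = 0;

        while (n--)
                sum += *p++;
        sum += (sum >> 32) | (sum << 32);       /* fold carries into bits 32..63 */
        return fold32((uint32_t)(sum >> 32));
}

int main(void)
{
        /* example data: a 20-byte header as five 32-bit words */
        uint32_t hdr[5] = { 0x45000034, 0x1a2b4000, 0x40060000,
                            0xc0a80001, 0xc0a800c7 };

        printf("checksum: 0x%04x\n", (unsigned int)csum32_words(hdr, 5));
        return 0;
}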
*/ - static inline __sum16 ip_compute_csum(const void *buff, int len) { return csum_fold(csum_partial(buff, len, 0)); } -#endif /* _S390_CHECKSUM_H */ - +#define _HAVE_ARCH_IPV6_CSUM +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + __u64 sum = (__force __u64)csum; + + sum += (__force __u32)saddr->s6_addr32[0]; + sum += (__force __u32)saddr->s6_addr32[1]; + sum += (__force __u32)saddr->s6_addr32[2]; + sum += (__force __u32)saddr->s6_addr32[3]; + sum += (__force __u32)daddr->s6_addr32[0]; + sum += (__force __u32)daddr->s6_addr32[1]; + sum += (__force __u32)daddr->s6_addr32[2]; + sum += (__force __u32)daddr->s6_addr32[3]; + sum += len; + sum += proto; + sum += (sum >> 32) | (sum << 32); + return csum_fold((__force __wsum)(sum >> 32)); +} +#endif /* _S390_CHECKSUM_H */ diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index 36ce2d25a5fc..ae4d2549cd67 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h @@ -12,6 +12,13 @@ #include <uapi/asm/chsc.h> /** + * Operation codes for CHSC PNSO: + * PNSO_OC_NET_BRIDGE_INFO - only addresses that are visible to a bridgeport + * PNSO_OC_NET_ADDR_INFO - all addresses + */ +#define PNSO_OC_NET_BRIDGE_INFO 0 +#define PNSO_OC_NET_ADDR_INFO 3 +/** * struct chsc_pnso_naid_l2 - network address information descriptor * @nit: Network interface token * @addr_lnid: network address and logical network id (VLAN ID) diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index b5bfb3123cb1..5c58756d6476 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -356,7 +356,6 @@ static inline u8 pathmask_to_pos(u8 mask) return 8 - ffs(mask); } -void channel_subsystem_reinit(void); extern void css_schedule_reprobe(void); extern void *cio_dma_zalloc(size_t size); @@ -372,6 +371,7 @@ struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); /* Function from drivers/s390/cio/chsc.c */ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); int chsc_sstpi(void *page, void *result, size_t size); +int chsc_stzi(void *page, void *result, size_t size); int chsc_sgib(u32 origin); #endif diff --git a/arch/s390/include/asm/clocksource.h b/arch/s390/include/asm/clocksource.h new file mode 100644 index 000000000000..03434369fce4 --- /dev/null +++ b/arch/s390/include/asm/clocksource.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* s390-specific clocksource additions */ + +#ifndef _ASM_S390_CLOCKSOURCE_H +#define _ASM_S390_CLOCKSOURCE_H + +#endif /* _ASM_S390_CLOCKSOURCE_H */ diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h index 3925b0f085b7..10919eeb7533 100644 --- a/arch/s390/include/asm/clp.h +++ b/arch/s390/include/asm/clp.h @@ -5,6 +5,9 @@ /* CLP common request & response block size */ #define CLP_BLK_SIZE PAGE_SIZE +/* Call Logical Processor - Command Code */ +#define CLP_SLPC 0x0001 + #define CLP_LPS_BASE 0 #define CLP_LPS_PCI 2 diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 9547cd5d6cdc..ea5b9c34b7be 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -63,8 +63,6 @@ typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s64 compat_s64; -typedef u64 compat_u64; typedef struct { u32 mask; diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index 
480bb02ccacd..638137d46c85 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h @@ -36,7 +36,9 @@ struct css_general_char { u64 alt_ssi : 1; /* bit 108 */ u64 : 1; u64 narf : 1; /* bit 110 */ - u64 : 12; + u64 : 5; + u64 enarf: 1; /* bit 116 */ + u64 : 6; u64 util_str : 1;/* bit 123 */ } __packed; diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index a816fb4734b8..40264f60b0da 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -140,8 +140,6 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte); void gmap_register_pte_notifier(struct gmap_notifier *); void gmap_unregister_pte_notifier(struct gmap_notifier *); -void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *, - unsigned long bits); int gmap_mprotect_notify(struct gmap *, unsigned long start, unsigned long len, int prot); diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index da014e4f8113..28664ee0abc1 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <asm/page.h> +#include <asm/pgtable.h> #include <asm/pci_io.h> #define xlate_dev_mem_ptr xlate_dev_mem_ptr @@ -26,7 +27,10 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define IO_SPACE_LIMIT 0 +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); void __iomem *ioremap(phys_addr_t addr, size_t size); +void __iomem *ioremap_wc(phys_addr_t addr, size_t size); +void __iomem *ioremap_wt(phys_addr_t addr, size_t size); void iounmap(volatile void __iomem *addr); static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) @@ -52,6 +56,10 @@ static inline void ioport_unmap(void __iomem *p) #define pci_iomap_wc pci_iomap_wc #define pci_iomap_wc_range pci_iomap_wc_range +#define ioremap ioremap +#define ioremap_wt ioremap_wt +#define ioremap_wc ioremap_wc + #define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count) #define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count) #define memset_io(dst, val, count) zpci_memset_io(dst, val, count) diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 7d5cfdda5277..a9e2c7295b35 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -66,6 +66,7 @@ enum ipl_type { IPL_TYPE_FCP_DUMP = 8, IPL_TYPE_NSS = 16, IPL_TYPE_NVME = 32, + IPL_TYPE_NVME_DUMP = 64, }; struct ipl_info @@ -94,6 +95,12 @@ extern struct ipl_info ipl_info; extern void setup_ipl(void); extern void set_os_info_reipl_block(void); +static inline bool is_ipl_type_dump(void) +{ + return (ipl_info.type == IPL_TYPE_FCP_DUMP) || + (ipl_info.type == IPL_TYPE_NVME_DUMP); +} + struct ipl_report { struct ipl_parameter_block *ipib; struct list_head components; diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h index 89d6886040c8..e9bf486de136 100644 --- a/arch/s390/include/asm/kasan.h +++ b/arch/s390/include/asm/kasan.h @@ -19,6 +19,7 @@ extern void kasan_early_init(void); extern void kasan_copy_shadow(pgd_t *dst); extern void kasan_free_early_identity(void); +extern unsigned long kasan_vmax; #else static inline void kasan_early_init(void) { } static inline void kasan_copy_shadow(pgd_t *dst) { } diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 99b92c3e46b0..b5380a251df2 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -208,9 +208,8 @@ int zpci_unregister_ioat(struct zpci_dev *, u8); void 
zpci_remove_reserved_devices(void); /* CLP */ +int clp_setup_writeback_mio(void); int clp_scan_pci_devices(void); -int clp_rescan_pci_devices(void); -int clp_rescan_pci_devices_simple(u32 *fid); int clp_add_pci_device(u32, u32, int); int clp_enable_fh(struct zpci_dev *, u8); int clp_disable_fh(struct zpci_dev *); @@ -232,12 +231,10 @@ static inline bool zpci_use_mio(struct zpci_dev *zdev) /* Error handling and recovery */ void zpci_event_error(void *); void zpci_event_availability(void *); -void zpci_rescan(void); bool zpci_is_enabled(void); #else /* CONFIG_PCI */ static inline void zpci_event_error(void *e) {} static inline void zpci_event_availability(void *e) {} -static inline void zpci_rescan(void) {} #endif /* CONFIG_PCI */ #ifdef CONFIG_HOTPLUG_PCI_S390 @@ -282,7 +279,6 @@ int zpci_debug_init(void); void zpci_debug_exit(void); void zpci_debug_init_device(struct zpci_dev *, const char *); void zpci_debug_exit_device(struct zpci_dev *); -void zpci_debug_info(struct zpci_dev *, struct seq_file *); /* Error reporting */ int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *); diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h index eb51272dd2cc..1f4b666e85ee 100644 --- a/arch/s390/include/asm/pci_clp.h +++ b/arch/s390/include/asm/pci_clp.h @@ -7,6 +7,7 @@ /* * Call Logical Processor - Command Codes */ +#define CLP_SLPC 0x0001 #define CLP_LIST_PCI 0x0002 #define CLP_QUERY_PCI_FN 0x0003 #define CLP_QUERY_PCI_FNGRP 0x0004 @@ -51,6 +52,19 @@ struct clp_fh_list_entry { extern bool zpci_unique_uid; +struct clp_rsp_slpc_pci { + struct clp_rsp_hdr hdr; + u32 reserved2[4]; + u32 lpif[8]; + u32 reserved3[4]; + u32 vwb : 1; + u32 : 1; + u32 mio_wb : 6; + u32 : 24; + u32 reserved5[3]; + u32 lpic[8]; +} __packed; + /* List PCI functions request */ struct clp_req_list_pci { struct clp_req_hdr hdr; @@ -172,6 +186,11 @@ struct clp_rsp_set_pci { } __packed; /* Combined request/response block structures used by clp insn */ +struct clp_req_rsp_slpc_pci { + struct clp_req_slpc request; + struct clp_rsp_slpc_pci response; +} __packed; + struct clp_req_rsp_list_pci { struct clp_req_list_pci request; struct clp_rsp_list_pci response; diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 74a352f8c0d1..d1297d6bbdcf 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -146,8 +146,6 @@ static inline void pmd_populate(struct mm_struct *mm, #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) -extern void rcu_table_freelist_finish(void); - void vmem_map_init(void); void *vmem_crst_alloc(unsigned long val); pte_t *vmem_pte_alloc(void); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index b55561cc8786..6b8d8c69b1a1 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -89,6 +89,7 @@ extern unsigned long VMALLOC_START; extern unsigned long VMALLOC_END; #define VMALLOC_DEFAULT_SIZE ((128UL << 30) - MODULES_LEN) extern struct page *vmemmap; +extern unsigned long vmemmap_size; #define VMEM_MAX_PHYS ((unsigned long) vmemmap) @@ -1186,6 +1187,12 @@ void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr); void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr); void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); +#define pgprot_writecombine pgprot_writecombine +pgprot_t pgprot_writecombine(pgprot_t prot); + +#define 
pgprot_writethrough pgprot_writethrough +pgprot_t pgprot_writethrough(pgprot_t prot); + /* * Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following @@ -1209,7 +1216,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { pte_t __pte; - pte_val(__pte) = physpage + pgprot_val(pgprot); + + pte_val(__pte) = physpage | pgprot_val(pgprot); if (!MACHINE_HAS_NX) pte_val(__pte) &= ~_PAGE_NOEXEC; return pte_mkyoung(__pte); diff --git a/arch/s390/include/asm/ptdump.h b/arch/s390/include/asm/ptdump.h new file mode 100644 index 000000000000..f960b2896606 --- /dev/null +++ b/arch/s390/include/asm/ptdump.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_PTDUMP_H +#define _ASM_S390_PTDUMP_H + +void ptdump_check_wx(void); + +static inline void debug_checkwx(void) +{ + if (IS_ENABLED(CONFIG_DEBUG_WX)) + ptdump_check_wx(); +} + +#endif /* _ASM_S390_PTDUMP_H */ diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index e69dbf438f99..19e84c95d1e7 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -26,9 +26,9 @@ /** * struct qdesfmt0 - queue descriptor, format 0 - * @sliba: storage list information block address - * @sla: storage list address - * @slsba: storage list state block address + * @sliba: absolute address of storage list information block + * @sla: absolute address of storage list + * @slsba: absolute address of storage list state block * @akey: access key for SLIB * @bkey: access key for SL * @ckey: access key for SBALs @@ -56,7 +56,7 @@ struct qdesfmt0 { * @oqdcnt: output queue descriptor count * @iqdsz: input queue descriptor size * @oqdsz: output queue descriptor size - * @qiba: queue information block address + * @qiba: absolute address of queue information block * @qkey: queue information block key * @qdf0: queue descriptions */ @@ -327,7 +327,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, * struct qdio_initialize - qdio initialization data * @q_format: queue format * @qdr_ac: feature flags to set - * @adapter_name: name for the adapter * @qib_param_field_format: format for qib_parm_field * @qib_param_field: pointer to 128 bytes or NULL, if no param field * @qib_rflags: rflags to set @@ -347,7 +346,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, struct qdio_initialize { unsigned char q_format; unsigned char qdr_ac; - unsigned char adapter_name[8]; unsigned int qib_param_field_format; unsigned char *qib_param_field; unsigned char qib_rflags; diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index c563f8368b19..a7bdd128d85b 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -114,8 +114,7 @@ int sclp_early_get_core_info(struct sclp_core_info *info); void sclp_early_get_ipl_info(struct sclp_ipl_info *info); void sclp_early_detect(void); void sclp_early_printk(const char *s); -void sclp_early_printk_force(const char *s); -void __sclp_early_printk(const char *s, unsigned int len, unsigned int force); +void __sclp_early_printk(const char *s, unsigned int len); int sclp_early_get_memsize(unsigned long *mem); int sclp_early_get_hsa_size(unsigned long *hsa_size); @@ -129,6 +128,8 @@ int sclp_chp_deconfigure(struct chp_id chpid); int sclp_chp_read_info(struct sclp_chp_info *info); int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); +int 
sclp_ap_configure(u32 apid); +int sclp_ap_deconfigure(u32 apid); int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid); int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count); int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count); diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h index c59a83536c70..a22a5a81811c 100644 --- a/arch/s390/include/asm/set_memory.h +++ b/arch/s390/include/asm/set_memory.h @@ -2,6 +2,10 @@ #ifndef _ASMS390_SET_MEMORY_H #define _ASMS390_SET_MEMORY_H +#include <linux/mutex.h> + +extern struct mutex cpa_mutex; + #define SET_MEMORY_RO 1UL #define SET_MEMORY_RW 2UL #define SET_MEMORY_NX 4UL diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 534f212753d6..bdb242a1544e 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -92,7 +92,9 @@ extern int memory_end_set; extern unsigned long memory_end; extern unsigned long vmalloc_size; extern unsigned long max_physmem_end; -extern unsigned long __swsusp_reset_dma; + +/* The Write Back bit position in the physaddr is given by the SLPC PCI */ +extern unsigned long mio_wb_bit_mask; #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) @@ -119,9 +121,6 @@ extern unsigned int console_mode; extern unsigned int console_devno; extern unsigned int console_irq; -extern char vmhalt_cmd[]; -extern char vmpoff_cmd[]; - #define CONSOLE_IS_UNDEFINED (console_mode == 0) #define CONSOLE_IS_SCLP (console_mode == 1) #define CONSOLE_IS_3215 (console_mode == 2) diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 7e155fb6c254..01e360004481 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -31,7 +31,6 @@ extern void smp_emergency_stop(void); extern int smp_find_processor_id(u16 address); extern int smp_store_status(int cpu); extern void smp_save_dump_cpus(void); -extern int smp_vcpu_scheduled(int cpu); extern void smp_yield_cpu(int cpu); extern void smp_cpu_set_polarization(int cpu, int val); extern int smp_cpu_get_polarization(int cpu); diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h index f0ddefb06ec8..ba07463897c1 100644 --- a/arch/s390/include/asm/stp.h +++ b/arch/s390/include/asm/stp.h @@ -6,43 +6,89 @@ #ifndef __S390_STP_H #define __S390_STP_H +#include <linux/compiler.h> + /* notifier for syncs */ extern struct atomic_notifier_head s390_epoch_delta_notifier; /* STP interruption parameter */ struct stp_irq_parm { - unsigned int _pad0 : 14; - unsigned int tsc : 1; /* Timing status change */ - unsigned int lac : 1; /* Link availability change */ - unsigned int tcpc : 1; /* Time control parameter change */ - unsigned int _pad2 : 15; -} __attribute__ ((packed)); + u32 : 14; + u32 tsc : 1; /* Timing status change */ + u32 lac : 1; /* Link availability change */ + u32 tcpc : 1; /* Time control parameter change */ + u32 : 15; +} __packed; #define STP_OP_SYNC 1 #define STP_OP_CTRL 3 struct stp_sstpi { - unsigned int rsvd0; - unsigned int rsvd1 : 8; - unsigned int stratum : 8; - unsigned int vbits : 16; - unsigned int leaps : 16; - unsigned int tmd : 4; - unsigned int ctn : 4; - unsigned int rsvd2 : 3; - unsigned int c : 1; - unsigned int tst : 4; - unsigned int tzo : 16; - unsigned int dsto : 16; - unsigned int ctrl : 16; - unsigned int rsvd3 : 16; - unsigned int tto; - unsigned int rsvd4; - unsigned int ctnid[3]; - unsigned int rsvd5; - unsigned 
int todoff[4]; - unsigned int rsvd6[48]; -} __attribute__ ((packed)); + u32 : 32; + u32 tu : 1; + u32 lu : 1; + u32 : 6; + u32 stratum : 8; + u32 vbits : 16; + u32 leaps : 16; + u32 tmd : 4; + u32 ctn : 4; + u32 : 3; + u32 c : 1; + u32 tst : 4; + u32 tzo : 16; + u32 dsto : 16; + u32 ctrl : 16; + u32 : 16; + u32 tto; + u32 : 32; + u32 ctnid[3]; + u32 : 32; + u32 todoff[4]; + u32 rsvd[48]; +} __packed; + +struct stp_tzib { + u32 tzan : 16; + u32 : 16; + u32 tzo : 16; + u32 dsto : 16; + u32 stn; + u32 dstn; + u64 dst_on_alg; + u64 dst_off_alg; +} __packed; + +struct stp_tcpib { + u32 atcode : 4; + u32 ntcode : 4; + u32 d : 1; + u32 : 23; + s32 tto; + struct stp_tzib atzib; + struct stp_tzib ntzib; + s32 adst_offset : 16; + s32 ndst_offset : 16; + u32 rsvd1; + u64 ntzib_update; + u64 ndsto_update; +} __packed; + +struct stp_lsoib { + u32 p : 1; + u32 : 31; + s32 also : 16; + s32 nlso : 16; + u64 nlsout; +} __packed; + +struct stp_stzi { + u32 rsvd0[3]; + u64 data_ts; + u32 rsvd1[22]; + struct stp_tcpib tcpib; + struct stp_lsoib lsoib; +} __packed; /* Functions needed by the machine check handler */ int stp_sync_check(void); diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index acce6a08a1fa..6448bb5be10c 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -30,8 +30,6 @@ static inline void __tlb_flush_idte(unsigned long asce) : : "a" (opt), "a" (asce) : "cc"); } -void smp_ptlb_all(void); - /* * Flush all TLB entries on all CPUs. */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index f09444d6aeab..c868e7ee49b3 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -60,6 +60,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n); #define INLINE_COPY_TO_USER #endif +int __put_user_bad(void) __attribute__((noreturn)); +int __get_user_bad(void) __attribute__((noreturn)); + #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES #define __put_get_user_asm(to, from, size, spec) \ @@ -109,6 +112,9 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon (unsigned long *)x, size, spec); break; + default: + __put_user_bad(); + break; } return rc; } @@ -139,6 +145,9 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign (unsigned long __user *)ptr, size, spec); break; + default: + __get_user_bad(); + break; } return rc; } @@ -179,7 +188,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s default: \ __put_user_bad(); \ break; \ - } \ + } \ __builtin_expect(__pu_err, 0); \ }) @@ -190,8 +199,6 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s }) -int __put_user_bad(void) __attribute__((noreturn)); - #define __get_user(x, ptr) \ ({ \ int __gu_err = -EFAULT; \ @@ -238,8 +245,6 @@ int __put_user_bad(void) __attribute__((noreturn)); __get_user(x, ptr); \ }) -int __get_user_bad(void) __attribute__((noreturn)); - unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); @@ -278,4 +283,115 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo int copy_to_user_real(void __user *dest, void *src, unsigned long count); void *s390_kernel_write(void *dst, const void *src, size_t size); +#define HAVE_GET_KERNEL_NOFAULT + +int __noreturn __put_kernel_bad(void); + +#define __put_kernel_asm(val, to, insn) \ +({ \ + int __rc; \ + \ + asm volatile( \ + "0: " insn " %2,%1\n" \ + "1: xr %0,%0\n" \ + 
"2:\n" \ + ".pushsection .fixup, \"ax\"\n" \ + "3: lhi %0,%3\n" \ + " jg 2b\n" \ + ".popsection\n" \ + EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ + : "=d" (__rc), "+Q" (*(to)) \ + : "d" (val), "K" (-EFAULT) \ + : "cc"); \ + __rc; \ +}) + +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + u64 __x = (u64)(*((type *)(src))); \ + int __pk_err; \ + \ + switch (sizeof(type)) { \ + case 1: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \ + break; \ + case 2: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \ + break; \ + case 4: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \ + break; \ + case 8: \ + __pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \ + break; \ + default: \ + __pk_err = __put_kernel_bad(); \ + break; \ + } \ + if (unlikely(__pk_err)) \ + goto err_label; \ +} while (0) + +int __noreturn __get_kernel_bad(void); + +#define __get_kernel_asm(val, from, insn) \ +({ \ + int __rc; \ + \ + asm volatile( \ + "0: " insn " %1,%2\n" \ + "1: xr %0,%0\n" \ + "2:\n" \ + ".pushsection .fixup, \"ax\"\n" \ + "3: lhi %0,%3\n" \ + " jg 2b\n" \ + ".popsection\n" \ + EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ + : "=d" (__rc), "+d" (val) \ + : "Q" (*(from)), "K" (-EFAULT) \ + : "cc"); \ + __rc; \ +}) + +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + int __gk_err; \ + \ + switch (sizeof(type)) { \ + case 1: { \ + u8 __x = 0; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + case 2: { \ + u16 __x = 0; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + case 4: { \ + u32 __x = 0; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + case 8: { \ + u64 __x = 0; \ + \ + __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \ + *((type *)(dst)) = (type)__x; \ + break; \ + }; \ + default: \ + __gk_err = __get_kernel_bad(); \ + break; \ + } \ + if (unlikely(__gk_err)) \ + goto err_label; \ +} while (0) + #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index cff4b4c99b75..0325fc0469b7 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -33,6 +33,7 @@ #define UVC_CMD_DESTROY_SEC_CPU 0x0121 #define UVC_CMD_CONV_TO_SEC_STOR 0x0200 #define UVC_CMD_CONV_FROM_SEC_STOR 0x0201 +#define UVC_CMD_DESTR_SEC_STOR 0x0202 #define UVC_CMD_SET_SEC_CONF_PARAMS 0x0300 #define UVC_CMD_UNPACK_IMG 0x0301 #define UVC_CMD_VERIFY_IMG 0x0302 @@ -344,6 +345,7 @@ static inline int is_prot_virt_host(void) } int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb); +int uv_destroy_page(unsigned long paddr); int uv_convert_from_secure(unsigned long paddr); int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr); @@ -354,6 +356,11 @@ void adjust_to_uv_max(unsigned long *vmax); static inline void setup_uv(void) {} static inline void adjust_to_uv_max(unsigned long *vmax) {} +static inline int uv_destroy_page(unsigned long paddr) +{ + return 0; +} + static inline int uv_convert_from_secure(unsigned long paddr) { return 0; diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 0cd085cdeb4f..29b44a930e71 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -2,6 +2,8 @@ #ifndef __S390_VDSO_H__ #define __S390_VDSO_H__ +#include <vdso/datapage.h> + /* Default link addresses for the vDSOs */ #define VDSO32_LBASE 0 #define VDSO64_LBASE 0 @@ 
-18,30 +20,7 @@ * itself and may change without notice. */ -struct vdso_data { - __u64 tb_update_count; /* Timebase atomicity ctr 0x00 */ - __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */ - __u64 xtime_clock_sec; /* Kernel time 0x10 */ - __u64 xtime_clock_nsec; /* 0x18 */ - __u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */ - __u64 xtime_coarse_nsec; /* 0x28 */ - __u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */ - __u64 wtom_clock_nsec; /* 0x38 */ - __u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */ - __u64 wtom_coarse_nsec; /* 0x48 */ - __u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */ - __u32 tz_dsttime; /* Type of dst correction 0x54 */ - __u32 ectg_available; /* ECTG instruction present 0x58 */ - __u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */ - __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */ - __u32 ts_dir; /* TOD steering direction 0x64 */ - __u64 ts_end; /* TOD steering end 0x68 */ - __u32 hrtimer_res; /* hrtimer resolution 0x70 */ -}; - struct vdso_per_cpu_data { - __u64 ectg_timer_base; - __u64 ectg_user_time; /* * Note: node_id and cpu_nr must be at adjacent memory locations. * VDSO userspace must read both values with a single instruction. @@ -56,9 +35,7 @@ struct vdso_per_cpu_data { }; extern struct vdso_data *vdso_data; -extern struct vdso_data boot_vdso_data; -void vdso_alloc_boot_cpu(struct lowcore *lowcore); int vdso_alloc_per_cpu(struct lowcore *lowcore); void vdso_free_per_cpu(struct lowcore *lowcore); diff --git a/arch/s390/include/asm/vdso/clocksource.h b/arch/s390/include/asm/vdso/clocksource.h new file mode 100644 index 000000000000..a93eda0ce7bb --- /dev/null +++ b/arch/s390/include/asm/vdso/clocksource.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_CLOCKSOURCE_H +#define __ASM_VDSO_CLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES \ + VDSO_CLOCKMODE_TOD + +#endif /* __ASM_VDSO_CLOCKSOURCE_H */ diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h new file mode 100644 index 000000000000..7b3cdb4a5f48 --- /dev/null +++ b/arch/s390/include/asm/vdso/data.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_ASM_VDSO_DATA_H +#define __S390_ASM_VDSO_DATA_H + +#include <linux/types.h> +#include <vdso/datapage.h> + +struct arch_vdso_data { + __u64 tod_steering_delta; + __u64 tod_steering_end; +}; + +#endif /* __S390_ASM_VDSO_DATA_H */ diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h new file mode 100644 index 000000000000..bf123065ad3b --- /dev/null +++ b/arch/s390/include/asm/vdso/gettimeofday.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ASM_VDSO_GETTIMEOFDAY_H +#define ASM_VDSO_GETTIMEOFDAY_H + +#define VDSO_HAS_TIME 1 + +#define VDSO_HAS_CLOCK_GETRES 1 + +#include <asm/timex.h> +#include <asm/unistd.h> +#include <asm/vdso.h> +#include <linux/compiler.h> + +#define vdso_calc_delta __arch_vdso_calc_delta +static __always_inline u64 __arch_vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +{ + return (cycles - last) * mult; +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return _vdso_data; +} + +static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd) +{ + const struct vdso_data *vdso = __arch_get_vdso_data(); + u64 adj, now; + + now = get_tod_clock(); + adj = vdso->arch_data.tod_steering_end - now; + if (unlikely((s64) adj > 0)) + now += (vdso->arch_data.tod_steering_delta < 0) ? 
(adj >> 15) : -(adj >> 15); + return now; +} + +static __always_inline +long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts) +{ + register unsigned long r1 __asm__("r1") = __NR_clock_gettime; + register unsigned long r2 __asm__("r2") = (unsigned long)clkid; + register void *r3 __asm__("r3") = ts; + + asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory"); + return r2; +} + +static __always_inline +long gettimeofday_fallback(register struct __kernel_old_timeval *tv, + register struct timezone *tz) +{ + register unsigned long r1 __asm__("r1") = __NR_gettimeofday; + register unsigned long r2 __asm__("r2") = (unsigned long)tv; + register void *r3 __asm__("r3") = tz; + + asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory"); + return r2; +} + +static __always_inline +long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts) +{ + register unsigned long r1 __asm__("r1") = __NR_clock_getres; + register unsigned long r2 __asm__("r2") = (unsigned long)clkid; + register void *r3 __asm__("r3") = ts; + + asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory"); + return r2; +} + +#endif diff --git a/arch/s390/include/asm/vdso/processor.h b/arch/s390/include/asm/vdso/processor.h new file mode 100644 index 000000000000..cfcc3e117c4c --- /dev/null +++ b/arch/s390/include/asm/vdso/processor.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#define cpu_relax() barrier() + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/s390/include/asm/vdso/vdso.h b/arch/s390/include/asm/vdso/vdso.h new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/arch/s390/include/asm/vdso/vdso.h diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h new file mode 100644 index 000000000000..6c67c08cefdd --- /dev/null +++ b/arch/s390/include/asm/vdso/vsyscall.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include <linux/hrtimer.h> +#include <linux/timekeeper_internal.h> +#include <vdso/datapage.h> +#include <asm/vdso.h> +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. 
+ */ + +static __always_inline struct vdso_data *__s390_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __s390_get_k_vdso_data + +/* The asm-generic header needs to be included after the definitions above */ +#include <asm-generic/vdso/vsyscall.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h index 42f707d1c1e8..e601adaa6320 100644 --- a/arch/s390/include/asm/vtimer.h +++ b/arch/s390/include/asm/vtimer.h @@ -25,8 +25,6 @@ extern void add_virt_timer_periodic(struct vtimer_list *timer); extern int mod_virt_timer(struct vtimer_list *timer, u64 expires); extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires); extern int del_virt_timer(struct vtimer_list *timer); - -extern void init_cpu_vtimer(void); extern void vtime_init(void); #endif /* _ASM_S390_TIMER_H */ diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h index d27d7d329263..7349e96d28a0 100644 --- a/arch/s390/include/uapi/asm/pkey.h +++ b/arch/s390/include/uapi/asm/pkey.h @@ -35,12 +35,16 @@ #define PKEY_KEYTYPE_AES_128 1 #define PKEY_KEYTYPE_AES_192 2 #define PKEY_KEYTYPE_AES_256 3 +#define PKEY_KEYTYPE_ECC 4 /* the newer ioctls use a pkey_key_type enum for type information */ enum pkey_key_type { PKEY_TYPE_CCA_DATA = (__u32) 1, PKEY_TYPE_CCA_CIPHER = (__u32) 2, PKEY_TYPE_EP11 = (__u32) 3, + PKEY_TYPE_CCA_ECC = (__u32) 0x1f, + PKEY_TYPE_EP11_AES = (__u32) 6, + PKEY_TYPE_EP11_ECC = (__u32) 7, }; /* the newer ioctls use a pkey_key_size enum for key size information */ @@ -89,6 +93,20 @@ struct pkey_clrkey { }; /* + * EP11 key blobs of type PKEY_TYPE_EP11_AES and PKEY_TYPE_EP11_ECC + * are ep11 blobs prepended by this header: + */ +struct ep11kblob_header { + __u8 type; /* always 0x00 */ + __u8 hver; /* header version, currently needs to be 0x00 */ + __u16 len; /* total length in bytes (including this header) */ + __u8 version; /* PKEY_TYPE_EP11_AES or PKEY_TYPE_EP11_ECC */ + __u8 res0; /* unused */ + __u16 bitlen; /* clear key bit len, 0 for unknown */ + __u8 res1[8]; /* unused */ +} __packed; + +/* * Generate CCA AES secure key. */ struct pkey_genseck { @@ -304,7 +322,7 @@ struct pkey_verifykey2 { #define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2) /* - * Transform a key blob (of any type) into a protected key, version 2. + * Transform a key blob into a protected key, version 2. * There needs to be a list of apqns given with at least one entry in there. * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain * is not supported. The implementation walks through the list of apqns and @@ -313,6 +331,8 @@ struct pkey_verifykey2 { * list is tried until success (return 0) or the end of the list is reached * (return -1 with errno ENODEV). You may use the PKEY_APQNS4K ioctl to * generate a list of apqns based on the key. + * Deriving ECC protected keys from ECC secure keys is not supported with + * this ioctl, use PKEY_KBLOB2PROTK3 for this purpose. */ struct pkey_kblob2pkey2 { __u8 __user *key; /* in: pointer to key blob */ @@ -326,17 +346,17 @@ struct pkey_kblob2pkey2 { /* * Build a list of APQNs based on a key blob given. 
* Is able to find out which type of secure key is given (CCA AES secure - * key, CCA AES cipher key or EP11 AES key) and tries to find all matching - * crypto cards based on the MKVP and maybe other criterias (like CCA AES - * cipher keys need a CEX5C or higher, EP11 keys with BLOB_PKEY_EXTRACTABLE - * need a CEX7 and EP11 api version 4). The list of APQNs is further filtered - * by the key's mkvp which needs to match to either the current mkvp (CCA and - * EP11) or the alternate mkvp (old mkvp, CCA adapters only) of the apqns. The - * flags argument may be used to limit the matching apqns. If the - * PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of each apqn is - * compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it - * is assumed to return apqns where either the current or the alternate mkvp - * matches. At least one of the matching flags needs to be given. + * key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private + * key) and tries to find all matching crypto cards based on the MKVP and maybe + * other criterias (like CCA AES cipher keys need a CEX5C or higher, EP11 keys + * with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of + * APQNs is further filtered by the key's mkvp which needs to match to either + * the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters + * only) of the apqns. The flags argument may be used to limit the matching + * apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of + * each apqn is compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both + * are given, it is assumed to return apqns where either the current or the + * alternate mkvp matches. At least one of the matching flags needs to be given. * The flags argument for EP11 keys has no further action and is currently * ignored (but needs to be given as PKEY_FLAGS_MATCH_CUR_MKVP) as there is only * the wkvp from the key to match against the apqn's wkvp. @@ -365,9 +385,10 @@ struct pkey_apqns4key { * restrict the list by given master key verification patterns. * For different key types there may be different ways to match the * master key verification patterns. For CCA keys (CCA data key and CCA - * cipher key) the first 8 bytes of cur_mkvp refer to the current mkvp value - * of the apqn and the first 8 bytes of the alt_mkvp refer to the old mkvp. - * The flags argument controls if the apqns current and/or alternate mkvp + * cipher key) the first 8 bytes of cur_mkvp refer to the current AES mkvp value + * of the apqn and the first 8 bytes of the alt_mkvp refer to the old AES mkvp. + * For CCA ECC keys it is similar but the match is against the APKA current/old + * mkvp. The flags argument controls if the apqns current and/or alternate mkvp * should match. If the PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current * mkvp of each apqn is compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. * If both are given, it is assumed to return apqns where either the @@ -397,4 +418,30 @@ struct pkey_apqns4keytype { }; #define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype) +/* + * Transform a key blob into a protected key, version 3. + * The difference to version 2 of this ioctl is that the protected key + * buffer is now explicitly and not within a struct pkey_protkey any more. + * So this ioctl is also able to handle EP11 and CCA ECC secure keys and + * provide ECC protected keys. + * There needs to be a list of apqns given with at least one entry in there. 
+ * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain + * is not supported. The implementation walks through the list of apqns and + * tries to send the request to each apqn without any further checking (like + * card type or online state). If the apqn fails, simple the next one in the + * list is tried until success (return 0) or the end of the list is reached + * (return -1 with errno ENODEV). You may use the PKEY_APQNS4K ioctl to + * generate a list of apqns based on the key. + */ +struct pkey_kblob2pkey3 { + __u8 __user *key; /* in: pointer to key blob */ + __u32 keylen; /* in: key blob size */ + struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */ + __u32 apqn_entries; /* in: # of apqn target list entries */ + __u32 pkeytype; /* out: prot key type (enum pkey_key_type) */ + __u32 pkeylen; /* in/out: size of pkey buffer/actual len of pkey */ + __u8 __user *pkey; /* in: pkey blob buffer space ptr */ +}; +#define PKEY_KBLOB2PROTK3 _IOWR(PKEY_IOCTL_MAGIC, 0x1D, struct pkey_kblob2pkey3) + #endif /* _UAPI_PKEY_H */ diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index 6ca1e68d7103..ede318653c87 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h @@ -29,7 +29,7 @@ { 0x13, "SIGP conditional emergency signal" }, \ { 0x15, "SIGP sense running" }, \ { 0x16, "SIGP set multithreading"}, \ - { 0x17, "SIGP store additional status ait address"} + { 0x17, "SIGP store additional status at address"} #define icpt_prog_codes \ { 0x0001, "Prog Operation" }, \ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index efca70970761..dd73b7f07423 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_COMPAT) += $(compat-obj-y) obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_KPROBES) += kprobes_insn_page.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UPROBES) += uprobes.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 5d8cc1864566..ece58f2217cb 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -59,26 +59,6 @@ int main(void) OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]); OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]); BLANK(); - /* timeval/timezone offsets for use by vdso */ - OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count); - OFFSET(__VDSO_XTIME_STAMP, vdso_data, xtime_tod_stamp); - OFFSET(__VDSO_XTIME_SEC, vdso_data, xtime_clock_sec); - OFFSET(__VDSO_XTIME_NSEC, vdso_data, xtime_clock_nsec); - OFFSET(__VDSO_XTIME_CRS_SEC, vdso_data, xtime_coarse_sec); - OFFSET(__VDSO_XTIME_CRS_NSEC, vdso_data, xtime_coarse_nsec); - OFFSET(__VDSO_WTOM_SEC, vdso_data, wtom_clock_sec); - OFFSET(__VDSO_WTOM_NSEC, vdso_data, wtom_clock_nsec); - OFFSET(__VDSO_WTOM_CRS_SEC, vdso_data, wtom_coarse_sec); - OFFSET(__VDSO_WTOM_CRS_NSEC, vdso_data, wtom_coarse_nsec); - OFFSET(__VDSO_TIMEZONE, vdso_data, tz_minuteswest); - OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available); - OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult); - OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift); - OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir); - OFFSET(__VDSO_TS_END, vdso_data, ts_end); - OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res); - OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); - OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); 
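Editor's aside on the PKEY_KBLOB2PROTK3 documentation above: the flow is easier to see from the caller's side. The following userspace sketch is not part of the patch; it assumes the updated <asm/pkey.h> UAPI header, a /dev/pkey misc device node, and uses a made-up APQN and buffer size.

/*
 * Editor's sketch, not part of the patch: driving the new
 * PKEY_KBLOB2PROTK3 ioctl from userspace. APQN, device node and the
 * protected-key buffer size are hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>

int kblob_to_protkey(const unsigned char *blob, unsigned int bloblen)
{
	struct pkey_apqn apqn = { .card = 0x02, .domain = 0x29 };	/* hypothetical APQN */
	unsigned char pkeybuf[256];	/* hypothetical size, must hold the protected key */
	struct pkey_kblob2pkey3 req;
	int fd, rc;

	memset(&req, 0, sizeof(req));
	req.key = (unsigned char *)blob;	/* in: secure key blob */
	req.keylen = bloblen;
	req.apqns = &apqn;			/* in: exact APQN targets */
	req.apqn_entries = 1;
	req.pkey = pkeybuf;			/* out: protected key */
	req.pkeylen = sizeof(req.pkeybuf) == 0 ? 0 : sizeof(pkeybuf);	/* in/out */

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, PKEY_KBLOB2PROTK3, &req);	/* 0 on success */
	if (!rc)
		printf("protected key type %u, %u bytes\n", req.pkeytype, req.pkeylen);
	close(fd);
	return rc;
}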
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val); BLANK(); /* constants used by the vdso */ diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index c42ce348103c..205b2e2648aa 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -141,7 +141,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count) while (count) { from = __pa(src); if (!OLDMEM_BASE && from < sclp.hsa_size) { - /* Copy from zfcpdump HSA area */ + /* Copy from zfcp/nvme dump HSA area */ len = min(count, sclp.hsa_size - from); rc = memcpy_hsa_kernel(dst, from, len); if (rc) @@ -184,7 +184,7 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count) while (count) { from = __pa(src); if (!OLDMEM_BASE && from < sclp.hsa_size) { - /* Copy from zfcpdump HSA area */ + /* Copy from zfcp/nvme dump HSA area */ len = min(count, sclp.hsa_size - from); rc = memcpy_hsa_user(dst, from, len); if (rc) @@ -258,7 +258,7 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma, } /* - * Remap "oldmem" for zfcpdump + * Remap "oldmem" for zfcp/nvme dump * * We only map available memory above HSA size. Memory below HSA size * is read on demand using the copy_oldmem_page() function. @@ -283,7 +283,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma, } /* - * Remap "oldmem" for kdump or zfcpdump + * Remap "oldmem" for kdump or zfcp/nvme dump */ int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot) @@ -632,11 +632,11 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) u32 alloc_size; u64 hdr_off; - /* If we are not in kdump or zfcpdump mode return */ - if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP) + /* If we are not in kdump or zfcp/nvme dump mode return */ + if (!OLDMEM_BASE && !is_ipl_type_dump()) return 0; - /* If we cannot get HSA size for zfcpdump return error */ - if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size) + /* If we cannot get HSA size for zfcp/nvme dump return error */ + if (is_ipl_type_dump() && !sclp.hsa_size) return -ENODEV; /* For kdump, exclude previous crashkernel memory */ diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index ccba63aaeb47..b8b0cd7b008f 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -104,18 +104,7 @@ static const struct seq_operations show_diag_stat_sops = { .show = show_diag_stat, }; -static int show_diag_stat_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &show_diag_stat_sops); -} - -static const struct file_operations show_diag_stat_fops = { - .open = show_diag_stat_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - +DEFINE_SEQ_ATTRIBUTE(show_diag_stat); static int __init show_diag_stat_init(void) { diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index f304802ecf7b..a7eab7be4db0 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -482,31 +482,37 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) return (int) (ptr - buffer); } +static int copy_from_regs(struct pt_regs *regs, void *dst, void *src, int len) +{ + if (user_mode(regs)) { + if (copy_from_user(dst, (char __user *)src, len)) + return -EFAULT; + } else { + if (copy_from_kernel_nofault(dst, src, len)) + return -EFAULT; + } + return 0; +} + void show_code(struct pt_regs *regs) { char *mode = user_mode(regs) ? 
"User" : "Krnl"; unsigned char code[64]; char buffer[128], *ptr; - mm_segment_t old_fs; unsigned long addr; int start, end, opsize, hops, i; /* Get a snapshot of the 64 bytes surrounding the fault address. */ - old_fs = get_fs(); - set_fs(user_mode(regs) ? USER_DS : KERNEL_DS); for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { addr = regs->psw.addr - 34 + start; - if (__copy_from_user(code + start - 2, - (char __user *) addr, 2)) + if (copy_from_regs(regs, code + start - 2, (void *)addr, 2)) break; } for (end = 32; end < 64; end += 2) { addr = regs->psw.addr + end - 32; - if (__copy_from_user(code + end, - (char __user *) addr, 2)) + if (copy_from_regs(regs, code + end, (void *)addr, 2)) break; } - set_fs(old_fs); /* Code snapshot useable ? */ if ((regs->psw.addr & 1) || start >= end) { printk("%s Code: Bad PSW.\n", mode); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 078277231858..705844f73934 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -274,19 +274,6 @@ static int __init disable_vector_extension(char *str) } early_param("novx", disable_vector_extension); -static int __init cad_setup(char *str) -{ - bool enabled; - int rc; - - rc = kstrtobool(str, &enabled); - if (!rc && enabled && test_facility(128)) - /* Enable problem state CAD. */ - __ctl_set_bit(2, 3); - return rc; -} -early_param("cad", cad_setup); - char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; static void __init setup_boot_command_line(void) { diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c index 6f24d83bc5dc..d9d53f44008a 100644 --- a/arch/s390/kernel/early_printk.c +++ b/arch/s390/kernel/early_printk.c @@ -10,7 +10,7 @@ static void sclp_early_write(struct console *con, const char *s, unsigned int len) { - __sclp_early_printk(s, len, 0); + __sclp_early_printk(s, len); } static struct console sclp_early_console = { diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 23edf196d3dc..86235919c2d1 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -435,10 +435,8 @@ ENTRY(system_call) jz .Lsysc_skip_fpu brasl %r14,load_fpu_regs .Lsysc_skip_fpu: - lg %r14,__LC_VDSO_PER_CPU mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) stpt __LC_EXIT_TIMER - mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER lmg %r0,%r15,__PT_R0(%r11) b __LC_RETURN_LPSWE @@ -797,13 +795,11 @@ ENTRY(io_int_handler) TRACE_IRQS_ON 0: #endif - lg %r14,__LC_VDSO_PER_CPU mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) tm __PT_PSW+1(%r11),0x01 # returning to user ? jno .Lio_exit_kernel BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER - mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER .Lio_exit_kernel: lmg %r0,%r15,__PT_R0(%r11) b __LC_RETURN_LPSWE @@ -1213,14 +1209,12 @@ ENTRY(mcck_int_handler) brasl %r14,s390_handle_mcck TRACE_IRQS_ON .Lmcck_return: - lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
jno 0f BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER - mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 0: lmg %r11,%r15,__PT_R11(%r11) b __LC_RETURN_MCCK_LPSWE diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index a44ddc2f2dec..d2ca3fe51f8e 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -9,7 +9,6 @@ #include <asm/idle.h> extern void *restart_stack; -extern unsigned long suspend_zero_pages; void system_call(void); void pgm_check_handler(void); @@ -17,7 +16,6 @@ void ext_int_handler(void); void io_int_handler(void); void mcck_int_handler(void); void restart_int_handler(void); -void restart_call_handler(void); asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); @@ -62,12 +60,10 @@ void do_notify_resume(struct pt_regs *regs); void __init init_IRQ(void); void do_IRQ(struct pt_regs *regs, int irq); void do_restart(void); -void __init startup_init_nobss(void); void __init startup_init(void); void die(struct pt_regs *regs, const char *str); int setup_profiling_timer(unsigned int multiplier); void __init time_init(void); -void s390_early_resume(void); unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip); struct s390_mmap_arg_struct; @@ -92,4 +88,6 @@ void set_fs_fixup(void); unsigned long stack_alloc(void); void stack_free(unsigned long stack); +extern char kprobes_insn_page[]; + #endif /* _ENTRY_H */ diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 90a2a17239b0..98b3aca1de8e 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -40,10 +40,12 @@ #define IPL_FCP_STR "fcp" #define IPL_FCP_DUMP_STR "fcp_dump" #define IPL_NVME_STR "nvme" +#define IPL_NVME_DUMP_STR "nvme_dump" #define IPL_NSS_STR "nss" #define DUMP_CCW_STR "ccw" #define DUMP_FCP_STR "fcp" +#define DUMP_NVME_STR "nvme" #define DUMP_NONE_STR "none" /* @@ -96,6 +98,8 @@ static char *ipl_type_str(enum ipl_type type) return IPL_NSS_STR; case IPL_TYPE_NVME: return IPL_NVME_STR; + case IPL_TYPE_NVME_DUMP: + return IPL_NVME_DUMP_STR; case IPL_TYPE_UNKNOWN: default: return IPL_UNKNOWN_STR; @@ -106,6 +110,7 @@ enum dump_type { DUMP_TYPE_NONE = 1, DUMP_TYPE_CCW = 2, DUMP_TYPE_FCP = 4, + DUMP_TYPE_NVME = 8, }; static char *dump_type_str(enum dump_type type) @@ -117,6 +122,8 @@ static char *dump_type_str(enum dump_type type) return DUMP_CCW_STR; case DUMP_TYPE_FCP: return DUMP_FCP_STR; + case DUMP_TYPE_NVME: + return DUMP_NVME_STR; default: return NULL; } @@ -144,10 +151,12 @@ static struct ipl_parameter_block *reipl_block_actual; static int dump_capabilities = DUMP_TYPE_NONE; static enum dump_type dump_type = DUMP_TYPE_NONE; static struct ipl_parameter_block *dump_block_fcp; +static struct ipl_parameter_block *dump_block_nvme; static struct ipl_parameter_block *dump_block_ccw; static struct sclp_ipl_info sclp_ipl_info; +static bool reipl_nvme_clear; static bool reipl_fcp_clear; static bool reipl_ccw_clear; @@ -266,7 +275,10 @@ static __init enum ipl_type get_ipl_type(void) else return IPL_TYPE_FCP; case IPL_PBT_NVME: - return IPL_TYPE_NVME; + if (ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP) + return IPL_TYPE_NVME_DUMP; + else + return IPL_TYPE_NVME; } return IPL_TYPE_UNKNOWN; } @@ -324,6 +336,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj, case IPL_TYPE_FCP_DUMP: return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno); case IPL_TYPE_NVME: + case IPL_TYPE_NVME_DUMP: return sprintf(page, "%08ux\n", ipl_block.nvme.fid); default: return 0; @@ 
-531,6 +544,7 @@ static int __init ipl_init(void) rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); break; case IPL_TYPE_NVME: + case IPL_TYPE_NVME_DUMP: rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nvme_attr_group); break; default: @@ -873,6 +887,24 @@ static struct attribute_group reipl_nvme_attr_group = { .bin_attrs = reipl_nvme_bin_attrs }; +static ssize_t reipl_nvme_clear_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + return sprintf(page, "%u\n", reipl_nvme_clear); +} + +static ssize_t reipl_nvme_clear_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + if (strtobool(buf, &reipl_nvme_clear) < 0) + return -EINVAL; + return len; +} + +static struct kobj_attribute sys_reipl_nvme_clear_attr = + __ATTR(clear, 0644, reipl_nvme_clear_show, reipl_nvme_clear_store); + /* CCW reipl device attributes */ DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw); @@ -1099,7 +1131,10 @@ static void __reipl_run(void *unused) break; case IPL_TYPE_NVME: diag308(DIAG308_SET, reipl_block_nvme); - diag308(DIAG308_LOAD_CLEAR, NULL); + if (reipl_nvme_clear) + diag308(DIAG308_LOAD_CLEAR, NULL); + else + diag308(DIAG308_LOAD_NORMAL, NULL); break; case IPL_TYPE_NSS: diag308(DIAG308_SET, reipl_block_nss); @@ -1109,6 +1144,7 @@ static void __reipl_run(void *unused) diag308(DIAG308_LOAD_CLEAR, NULL); break; case IPL_TYPE_FCP_DUMP: + case IPL_TYPE_NVME_DUMP: break; } disabled_wait(); @@ -1219,8 +1255,9 @@ static int __init reipl_fcp_init(void) &sys_reipl_fcp_clear_attr.attr); if (rc) goto out2; - } else + } else { reipl_fcp_clear = true; + } if (ipl_info.type == IPL_TYPE_FCP) { memcpy(reipl_block_fcp, &ipl_block, sizeof(ipl_block)); @@ -1266,10 +1303,16 @@ static int __init reipl_nvme_init(void) } rc = sysfs_create_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group); - if (rc) { - kset_unregister(reipl_nvme_kset); - free_page((unsigned long) reipl_block_nvme); - return rc; + if (rc) + goto out1; + + if (test_facility(141)) { + rc = sysfs_create_file(&reipl_nvme_kset->kobj, + &sys_reipl_nvme_clear_attr.attr); + if (rc) + goto out2; + } else { + reipl_nvme_clear = true; } if (ipl_info.type == IPL_TYPE_NVME) { @@ -1290,6 +1333,13 @@ static int __init reipl_nvme_init(void) } reipl_capabilities |= IPL_TYPE_NVME; return 0; + +out2: + sysfs_remove_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group); +out1: + kset_unregister(reipl_nvme_kset); + free_page((unsigned long) reipl_block_nvme); + return rc; } static int __init reipl_type_init(void) @@ -1382,6 +1432,29 @@ static struct attribute_group dump_fcp_attr_group = { .attrs = dump_fcp_attrs, }; +/* NVME dump device attributes */ +DEFINE_IPL_ATTR_RW(dump_nvme, fid, "0x%08llx\n", "%llx\n", + dump_block_nvme->nvme.fid); +DEFINE_IPL_ATTR_RW(dump_nvme, nsid, "0x%08llx\n", "%llx\n", + dump_block_nvme->nvme.nsid); +DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n", + dump_block_nvme->nvme.bootprog); +DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n", + dump_block_nvme->nvme.br_lba); + +static struct attribute *dump_nvme_attrs[] = { + &sys_dump_nvme_fid_attr.attr, + &sys_dump_nvme_nsid_attr.attr, + &sys_dump_nvme_bootprog_attr.attr, + &sys_dump_nvme_br_lba_attr.attr, + NULL, +}; + +static struct attribute_group dump_nvme_attr_group = { + .name = IPL_NVME_STR, + .attrs = dump_nvme_attrs, +}; + /* CCW dump device attributes */ DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ccw); @@ -1423,6 +1496,8 @@ static ssize_t dump_type_store(struct kobject *kobj, rc = 
dump_set_type(DUMP_TYPE_CCW); else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) rc = dump_set_type(DUMP_TYPE_FCP); + else if (strncmp(buf, DUMP_NVME_STR, strlen(DUMP_NVME_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_NVME); return (rc != 0) ? rc : len; } @@ -1450,6 +1525,9 @@ static void __dump_run(void *unused) case DUMP_TYPE_FCP: diag308_dump(dump_block_fcp); break; + case DUMP_TYPE_NVME: + diag308_dump(dump_block_nvme); + break; default: break; } @@ -1506,6 +1584,29 @@ static int __init dump_fcp_init(void) return 0; } +static int __init dump_nvme_init(void) +{ + int rc; + + if (!sclp_ipl_info.has_dump) + return 0; /* LDIPL DUMP is not installed */ + dump_block_nvme = (void *) get_zeroed_page(GFP_KERNEL); + if (!dump_block_nvme) + return -ENOMEM; + rc = sysfs_create_group(&dump_kset->kobj, &dump_nvme_attr_group); + if (rc) { + free_page((unsigned long)dump_block_nvme); + return rc; + } + dump_block_nvme->hdr.len = IPL_BP_NVME_LEN; + dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION; + dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN; + dump_block_nvme->fcp.pbt = IPL_PBT_NVME; + dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP; + dump_capabilities |= DUMP_TYPE_NVME; + return 0; +} + static int __init dump_init(void) { int rc; @@ -1524,6 +1625,9 @@ static int __init dump_init(void) rc = dump_fcp_init(); if (rc) return rc; + rc = dump_nvme_init(); + if (rc) + return rc; dump_set_type(DUMP_TYPE_NONE); return 0; } @@ -1956,6 +2060,7 @@ void __init setup_ipl(void) ipl_info.data.fcp.lun = ipl_block.fcp.lun; break; case IPL_TYPE_NVME: + case IPL_TYPE_NVME_DUMP: ipl_info.data.nvme.fid = ipl_block.nvme.fid; ipl_info.data.nvme.nsid = ipl_block.nvme.nsid; break; diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index d2a71d872638..aae24dc75df6 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -7,6 +7,7 @@ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com> */ +#include <linux/moduleloader.h> #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/preempt.h> @@ -21,6 +22,7 @@ #include <asm/set_memory.h> #include <asm/sections.h> #include <asm/dis.h> +#include "entry.h" DEFINE_PER_CPU(struct kprobe *, current_kprobe); DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); @@ -30,19 +32,32 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = { }; DEFINE_INSN_CACHE_OPS(s390_insn); static int insn_page_in_use; -static char insn_page[PAGE_SIZE] __aligned(PAGE_SIZE); + +void *alloc_insn_page(void) +{ + void *page; + + page = module_alloc(PAGE_SIZE); + if (!page) + return NULL; + __set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X); + return page; +} + +void free_insn_page(void *page) +{ + module_memfree(page); +} static void *alloc_s390_insn_page(void) { if (xchg(&insn_page_in_use, 1) == 1) return NULL; - set_memory_x((unsigned long) &insn_page, 1); - return &insn_page; + return &kprobes_insn_page; } static void free_s390_insn_page(void *page) { - set_memory_nx((unsigned long) page, 1); xchg(&insn_page_in_use, 0); } @@ -56,25 +71,29 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = { static void copy_instruction(struct kprobe *p) { + kprobe_opcode_t insn[MAX_INSN_SIZE]; s64 disp, new_disp; u64 addr, new_addr; + unsigned int len; - memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8)); - p->opcode = p->ainsn.insn[0]; - if (!probe_is_insn_relative_long(p->ainsn.insn)) - return; - /* - * For pc-relative instructions in RIL-b or RIL-c format patch the - * RI2 displacement field. 
We have already made sure that the insn - * slot for the patched instruction is within the same 2GB area - * as the original instruction (either kernel image or module area). - * Therefore the new displacement will always fit. - */ - disp = *(s32 *)&p->ainsn.insn[1]; - addr = (u64)(unsigned long)p->addr; - new_addr = (u64)(unsigned long)p->ainsn.insn; - new_disp = ((addr + (disp * 2)) - new_addr) / 2; - *(s32 *)&p->ainsn.insn[1] = new_disp; + len = insn_length(*p->addr >> 8); + memcpy(&insn, p->addr, len); + p->opcode = insn[0]; + if (probe_is_insn_relative_long(&insn[0])) { + /* + * For pc-relative instructions in RIL-b or RIL-c format patch + * the RI2 displacement field. We have already made sure that + * the insn slot for the patched instruction is within the same + * 2GB area as the original instruction (either kernel image or + * module area). Therefore the new displacement will always fit. + */ + disp = *(s32 *)&insn[1]; + addr = (u64)(unsigned long)p->addr; + new_addr = (u64)(unsigned long)p->ainsn.insn; + new_disp = ((addr + (disp * 2)) - new_addr) / 2; + *(s32 *)&insn[1] = new_disp; + } + s390_kernel_write(p->ainsn.insn, &insn, len); } NOKPROBE_SYMBOL(copy_instruction); @@ -228,6 +247,7 @@ NOKPROBE_SYMBOL(pop_kprobe); void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->gprs[14] = (unsigned long) &kretprobe_trampoline; @@ -331,83 +351,7 @@ static void __used kretprobe_trampoline_holder(void) */ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address; - unsigned long trampoline_address; - kprobe_opcode_t *correct_ret_addr; - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - ri = NULL; - orig_ret_address = 0; - correct_ret_addr = NULL; - trampoline_address = (unsigned long) &kretprobe_trampoline; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long) ri->ret_addr; - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. 
Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - orig_ret_address = (unsigned long) ri->ret_addr; - - if (ri->rp && ri->rp->handler) { - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - regs->psw.addr = orig_ret_address; - - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } + regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler diff --git a/arch/s390/kernel/kprobes_insn_page.S b/arch/s390/kernel/kprobes_insn_page.S new file mode 100644 index 000000000000..f6cb022ef8c8 --- /dev/null +++ b/arch/s390/kernel/kprobes_insn_page.S @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <linux/linkage.h> + +/* + * insn_page is a special 4k aligned dummy function for kprobes. + * It will contain all kprobed instructions that are out-of-line executed. + * The page must be within the kernel image to guarantee that the + * out-of-line instructions are within 2GB distance of their original + * location. Using a dummy function ensures that the insn_page is within + * the text section of the kernel and mapped read-only/executable from + * the beginning on, thus avoiding to split large mappings if the page + * would be in the data section instead. + */ + .section .kprobes.text, "ax" + .align 4096 +ENTRY(kprobes_insn_page) + .rept 2048 + .word 0x07fe + .endr +ENDPROC(kprobes_insn_page) + .previous diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c2c1b4e723ea..4d843e64496f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -37,7 +37,7 @@ #include <linux/root_dev.h> #include <linux/console.h> #include <linux/kernel_stat.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/device.h> #include <linux/notifier.h> #include <linux/pfn.h> @@ -102,7 +102,6 @@ struct mem_detect_info __bootdata(mem_detect); struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table); struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table); -unsigned long __bootdata_preserved(__swsusp_reset_dma); unsigned long __bootdata_preserved(__stext_dma); unsigned long __bootdata_preserved(__etext_dma); unsigned long __bootdata_preserved(__sdma); @@ -119,6 +118,7 @@ EXPORT_SYMBOL(VMALLOC_END); struct page *vmemmap; EXPORT_SYMBOL(vmemmap); +unsigned long vmemmap_size; unsigned long MODULES_VADDR; unsigned long MODULES_END; @@ -128,6 +128,12 @@ struct lowcore *lowcore_ptr[NR_CPUS]; EXPORT_SYMBOL(lowcore_ptr); /* + * The Write Back bit position in the physaddr is given by the SLPC PCI. + * Leaving the mask zero always uses write through which is safe + */ +unsigned long mio_wb_bit_mask __ro_after_init; + +/* * This is set up by the setup-routine at boot-time * for S390 need to find out, what we have to setup * using address 0x10400 ... 
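Editor's illustration for the reworked copy_instruction() above, not part of the patch: the RI2 field of a RIL-b/RIL-c instruction counts halfwords relative to the instruction address, so copying the instruction into a kprobes slot means re-expressing the same absolute target relative to the slot address. The stand-alone sketch below redoes that arithmetic with made-up addresses.

/*
 * Editor's illustration, not part of the patch: re-targeting the RI2
 * halfword displacement when an instruction is copied from its original
 * address into a kprobes insn slot. Addresses are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t retarget_ri2(uint64_t orig_addr, uint64_t slot_addr, int32_t disp)
{
	uint64_t target = orig_addr + 2 * (int64_t)disp;	/* RI2 counts halfwords */

	/* same absolute target, now relative to the slot address */
	return (int32_t)(((int64_t)target - (int64_t)slot_addr) / 2);
}

int main(void)
{
	uint64_t orig = 0x100000UL, slot = 0x180000UL;
	int32_t disp = 0x200;	/* original target: orig + 0x400 = 0x100400 */

	/* prints -261632, i.e. -0x3fe00 halfwords back from the slot */
	printf("new RI2 displacement: %d halfwords\n", retarget_ri2(orig, slot, disp));
	return 0;
}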
@@ -245,7 +251,7 @@ static void __init conmode_default(void) #ifdef CONFIG_CRASH_DUMP static void __init setup_zfcpdump(void) { - if (ipl_info.type != IPL_TYPE_FCP_DUMP) + if (!is_ipl_type_dump()) return; if (OLDMEM_BASE) return; @@ -300,7 +306,7 @@ void machine_power_off(void) void (*pm_power_off)(void) = machine_power_off; EXPORT_SYMBOL_GPL(pm_power_off); -void *restart_stack __section(.data); +void *restart_stack; unsigned long stack_alloc(void) { @@ -366,8 +372,12 @@ void __init arch_call_rest_init(void) static void __init setup_lowcore_dat_off(void) { + unsigned long int_psw_mask = PSW_KERNEL_BITS; struct lowcore *lc; + if (IS_ENABLED(CONFIG_KASAN)) + int_psw_mask |= PSW_MASK_DAT; + /* * Setup lowcore for boot cpu */ @@ -379,15 +389,15 @@ static void __init setup_lowcore_dat_off(void) lc->restart_psw.mask = PSW_KERNEL_BITS; lc->restart_psw.addr = (unsigned long) restart_int_handler; - lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; + lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; lc->external_new_psw.addr = (unsigned long) ext_int_handler; - lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; + lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; lc->svc_new_psw.addr = (unsigned long) system_call; - lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; + lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; lc->program_new_psw.addr = (unsigned long) pgm_check_handler; lc->mcck_new_psw.mask = PSW_KERNEL_BITS; lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; - lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; + lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK; lc->io_new_psw.addr = (unsigned long) io_int_handler; lc->clock_comparator = clock_comparator_max; lc->nodat_stack = ((unsigned long) &init_thread_union) @@ -402,7 +412,6 @@ static void __init setup_lowcore_dat_off(void) memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, sizeof(lc->alt_stfle_fac_list)); nmi_alloc_boot_cpu(lc); - vdso_alloc_boot_cpu(lc); lc->sync_enter_timer = S390_lowcore.sync_enter_timer; lc->async_enter_timer = S390_lowcore.async_enter_timer; lc->exit_timer = S390_lowcore.exit_timer; @@ -484,8 +493,9 @@ static struct resource __initdata *standard_resources[] = { static void __init setup_resources(void) { struct resource *res, *std_res, *sub_res; - struct memblock_region *reg; + phys_addr_t start, end; int j; + u64 i; code_resource.start = (unsigned long) _text; code_resource.end = (unsigned long) _etext - 1; @@ -494,7 +504,7 @@ static void __init setup_resources(void) bss_resource.start = (unsigned long) __bss_start; bss_resource.end = (unsigned long) __bss_stop - 1; - for_each_memblock(memory, reg) { + for_each_mem_range(i, &start, &end) { res = memblock_alloc(sizeof(*res), 8); if (!res) panic("%s: Failed to allocate %zu bytes align=0x%x\n", @@ -502,8 +512,13 @@ static void __init setup_resources(void) res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; res->name = "System RAM"; - res->start = reg->base; - res->end = reg->base + reg->size - 1; + res->start = start; + /* + * In memblock, end points to the first byte after the + * range while in resourses, end points to the last byte in + * the range. + */ + res->end = end - 1; request_resource(&iomem_resource, res); for (j = 0; j < ARRAY_SIZE(standard_resources); j++) { @@ -546,22 +561,17 @@ static void __init setup_memory_end(void) unsigned long vmax, tmp; /* Choose kernel address space layout: 3 or 4 levels. 
*/ - if (IS_ENABLED(CONFIG_KASAN)) { - vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) - ? _REGION1_SIZE - : _REGION2_SIZE; - } else { - tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; - tmp = tmp * (sizeof(struct page) + PAGE_SIZE); - if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE) - vmax = _REGION2_SIZE; /* 3-level kernel page table */ - else - vmax = _REGION1_SIZE; /* 4-level kernel page table */ - } - + tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; + tmp = tmp * (sizeof(struct page) + PAGE_SIZE); + if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE) + vmax = _REGION2_SIZE; /* 3-level kernel page table */ + else + vmax = _REGION1_SIZE; /* 4-level kernel page table */ if (is_prot_virt_host()) adjust_to_uv_max(&vmax); - +#ifdef CONFIG_KASAN + vmax = kasan_vmax; +#endif /* module area is at the end of the kernel address space. */ MODULES_END = vmax; MODULES_VADDR = MODULES_END - MODULES_LEN; @@ -580,9 +590,14 @@ static void __init setup_memory_end(void) /* Take care that memory_end is set and <= vmemmap */ memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap); #ifdef CONFIG_KASAN - /* fit in kasan shadow memory region between 1:1 and vmemmap */ memory_end = min(memory_end, KASAN_SHADOW_START); - vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END); +#endif + vmemmap_size = SECTION_ALIGN_UP(memory_end / PAGE_SIZE) * sizeof(struct page); +#ifdef CONFIG_KASAN + /* move vmemmap above kasan shadow only if stands in a way */ + if (KASAN_SHADOW_END > (unsigned long)vmemmap && + (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START) + vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END); #endif max_pfn = max_low_pfn = PFN_DOWN(memory_end); memblock_remove(memory_end, ULONG_MAX); @@ -776,8 +791,8 @@ static void __init memblock_add_mem_detect_info(void) unsigned long start, end; int i; - memblock_dbg("physmem info source: %s (%hhd)\n", - get_mem_info_source(), mem_detect.info_source); + pr_debug("physmem info source: %s (%hhd)\n", + get_mem_info_source(), mem_detect.info_source); /* keep memblock lists close to the kernel */ memblock_set_bottom_up(true); for_each_mem_detect_block(i, &start, &end) { @@ -819,14 +834,15 @@ static void __init reserve_kernel(void) static void __init setup_memory(void) { - struct memblock_region *reg; + phys_addr_t start, end; + u64 i; /* * Init storage key for present memory */ - for_each_memblock(memory, reg) { - storage_key_init_range(reg->base, reg->base + reg->size); - } + for_each_mem_range(i, &start, &end) + storage_key_init_range(start, end); + psw_set_key(PAGE_DEFAULT_KEY); /* Only cosmetics */ @@ -1126,8 +1142,7 @@ void __init setup_arch(char **cmdline_p) free_mem_detect_info(); remove_oldmem(); - if (is_prot_virt_host()) - setup_uv(); + setup_uv(); setup_memory_end(); setup_memory(); dma_contiguous_reserve(memory_end); @@ -1171,7 +1186,7 @@ void __init setup_arch(char **cmdline_p) if (IS_ENABLED(CONFIG_EXPOLINE)) nospec_init_branches(); - /* Setup zfcpdump support */ + /* Setup zfcp/nvme dump support */ setup_zfcpdump(); /* Add system specific data to the random pool */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 85700bd85f98..ebfe86d097f0 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -606,14 +606,14 @@ int smp_store_status(int cpu) /* * Collect CPU state of the previous, crashed system. 
* There are four cases: - * 1) standard zfcp dump - * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP + * 1) standard zfcp/nvme dump + * condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true * The state for all CPUs except the boot CPU needs to be collected * with sigp stop-and-store-status. The boot CPU state is located in * the absolute lowcore of the memory stored in the HSA. The zcore code * will copy the boot CPU state from the HSA. - * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory) - * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP + * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory) + * condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true * The state for all CPUs except the boot CPU needs to be collected * with sigp stop-and-store-status. The firmware or the boot-loader * stored the registers of the boot CPU in the absolute lowcore in the @@ -660,7 +660,7 @@ void __init smp_save_dump_cpus(void) unsigned long page; bool is_boot_cpu; - if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP)) + if (!(OLDMEM_BASE || is_ipl_type_dump())) /* No previous system present, normal boot. */ return; /* Allocate a page as dumping area for the store status sigps */ @@ -686,7 +686,7 @@ void __init smp_save_dump_cpus(void) /* Get the vector registers */ smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page); /* - * For a zfcp dump OLDMEM_BASE == NULL and the registers + * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers * of the boot CPU are stored in the HSA. To retrieve * these registers an SCLP request is required which is * done by drivers/s390/char/zcore.c:init_cpu_info() diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index fc5419ac64c8..7f1266c24f6b 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -19,7 +19,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, unwind_for_each_frame(&state, task, regs, 0) { addr = unwind_get_return_address(&state); - if (!addr || !consume_entry(cookie, addr, false)) + if (!addr || !consume_entry(cookie, addr)) break; } } @@ -56,7 +56,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, return -EINVAL; #endif - if (!consume_entry(cookie, addr, false)) + if (!consume_entry(cookie, addr)) return -EINVAL; } diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 10456bc936fb..28c168000483 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -26,7 +26,7 @@ 16 32 lchown - sys_lchown16 19 common lseek sys_lseek compat_sys_lseek 20 common getpid sys_getpid sys_getpid -21 common mount sys_mount compat_sys_mount +21 common mount sys_mount sys_mount 22 common umount sys_oldumount sys_oldumount 23 32 setuid - sys_setuid16 24 32 getuid - sys_getuid16 @@ -134,8 +134,8 @@ 142 64 select sys_select - 143 common flock sys_flock sys_flock 144 common msync sys_msync sys_msync -145 common readv sys_readv compat_sys_readv -146 common writev sys_writev compat_sys_writev +145 common readv sys_readv sys_readv +146 common writev sys_writev sys_writev 147 common getsid sys_getsid sys_getsid 148 common fdatasync sys_fdatasync sys_fdatasync 149 common _sysctl - - @@ -316,7 +316,7 @@ 306 common splice sys_splice sys_splice 307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range 308 common tee sys_tee sys_tee -309 common vmsplice sys_vmsplice compat_sys_vmsplice +309 common vmsplice 
sys_vmsplice sys_vmsplice 310 common move_pages sys_move_pages compat_sys_move_pages 311 common getcpu sys_getcpu sys_getcpu 312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait @@ -347,8 +347,8 @@ 337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32 338 common syncfs sys_syncfs sys_syncfs 339 common setns sys_setns sys_setns -340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv +341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev 342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr 343 common kcmp sys_kcmp sys_kcmp 344 common finit_module sys_finit_module sys_finit_module @@ -442,3 +442,4 @@ 437 common openat2 sys_openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise sys_process_madvise diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 513e59d08a55..0ac30ee2c633 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -41,6 +41,9 @@ #include <linux/gfp.h> #include <linux/kprobes.h> #include <linux/uaccess.h> +#include <vdso/vsyscall.h> +#include <vdso/clocksource.h> +#include <vdso/helpers.h> #include <asm/facility.h> #include <asm/delay.h> #include <asm/div64.h> @@ -84,7 +87,7 @@ void __init time_early_init(void) /* Initialize TOD steering parameters */ tod_steering_end = *(unsigned long long *) &tod_clock_base[1]; - vdso_data->ts_end = tod_steering_end; + vdso_data->arch_data.tod_steering_end = tod_steering_end; if (!test_facility(28)) return; @@ -257,6 +260,7 @@ static struct clocksource clocksource_tod = { .mult = 1000, .shift = 12, .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .vdso_clock_mode = VDSO_CLOCKMODE_TOD, }; struct clocksource * __init clocksource_default_clock(void) @@ -264,56 +268,6 @@ struct clocksource * __init clocksource_default_clock(void) return &clocksource_tod; } -void update_vsyscall(struct timekeeper *tk) -{ - u64 nsecps; - - if (tk->tkr_mono.clock != &clocksource_tod) - return; - - /* Make userspace gettimeofday spin until we're done. 
*/ - ++vdso_data->tb_update_count; - smp_wmb(); - vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last; - vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; - vdso_data->wtom_clock_sec = - tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec + - + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift); - nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift; - while (vdso_data->wtom_clock_nsec >= nsecps) { - vdso_data->wtom_clock_nsec -= nsecps; - vdso_data->wtom_clock_sec++; - } - - vdso_data->xtime_coarse_sec = tk->xtime_sec; - vdso_data->xtime_coarse_nsec = - (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); - vdso_data->wtom_coarse_sec = - vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec; - vdso_data->wtom_coarse_nsec = - vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec; - while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) { - vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC; - vdso_data->wtom_coarse_sec++; - } - - vdso_data->tk_mult = tk->tkr_mono.mult; - vdso_data->tk_shift = tk->tkr_mono.shift; - vdso_data->hrtimer_res = hrtimer_resolution; - smp_wmb(); - ++vdso_data->tb_update_count; -} - -extern struct timezone sys_tz; - -void update_vsyscall_tz(void) -{ - vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; - vdso_data->tz_dsttime = sys_tz.tz_dsttime; -} - /* * Initialize the TOD clock and the CPU timer of * the boot cpu. @@ -342,11 +296,12 @@ void __init time_init(void) } static DEFINE_PER_CPU(atomic_t, clock_sync_word); -static DEFINE_MUTEX(clock_sync_mutex); +static DEFINE_MUTEX(stp_mutex); static unsigned long clock_sync_flags; -#define CLOCK_SYNC_HAS_STP 0 -#define CLOCK_SYNC_STP 1 +#define CLOCK_SYNC_HAS_STP 0 +#define CLOCK_SYNC_STP 1 +#define CLOCK_SYNC_STPINFO_VALID 2 /* * The get_clock function for the physical clock. It will get the current @@ -431,7 +386,6 @@ static void clock_sync_global(unsigned long long delta) /* Epoch overflow */ tod_clock_base[0]++; /* Adjust TOD steering parameters. */ - vdso_data->tb_update_count++; now = get_tod_clock(); adj = tod_steering_end - now; if (unlikely((s64) adj >= 0)) @@ -443,9 +397,8 @@ static void clock_sync_global(unsigned long long delta) panic("TOD clock sync offset %lli is too large to drift\n", tod_steering_delta); tod_steering_end = now + (abs(tod_steering_delta) << 15); - vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1; - vdso_data->ts_end = tod_steering_end; - vdso_data->tb_update_count++; + vdso_data->arch_data.tod_steering_end = tod_steering_end; + /* Update LPAR offset. 
*/ if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0) lpar_offset = qto.tod_epoch_difference; @@ -492,7 +445,6 @@ static struct stp_sstpi stp_info; static void *stp_page; static void stp_work_fn(struct work_struct *work); -static DEFINE_MUTEX(stp_work_mutex); static DECLARE_WORK(stp_work, stp_work_fn); static struct timer_list stp_timer; @@ -583,10 +535,26 @@ void stp_queue_work(void) queue_work(time_sync_wq, &stp_work); } +static int __store_stpinfo(void) +{ + int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); + + if (rc) + clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); + else + set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); + return rc; +} + +static int stpinfo_valid(void) +{ + return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags); +} + static int stp_sync_clock(void *data) { struct clock_sync_data *sync = data; - unsigned long long clock_delta; + unsigned long long clock_delta, flags; static int first; int rc; @@ -599,16 +567,17 @@ static int stp_sync_clock(void *data) if (stp_info.todoff[0] || stp_info.todoff[1] || stp_info.todoff[2] || stp_info.todoff[3] || stp_info.tmd != 2) { + flags = vdso_update_begin(); rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta); if (rc == 0) { sync->clock_delta = clock_delta; clock_sync_global(clock_delta); - rc = chsc_sstpi(stp_page, &stp_info, - sizeof(struct stp_sstpi)); + rc = __store_stpinfo(); if (rc == 0 && stp_info.tmd != 2) rc = -EAGAIN; } + vdso_update_end(flags); } sync->in_sync = rc ? -EAGAIN : 1; xchg(&first, 0); @@ -628,6 +597,81 @@ static int stp_sync_clock(void *data) return 0; } +static int stp_clear_leap(void) +{ + struct __kernel_timex txc; + int ret; + + memset(&txc, 0, sizeof(txc)); + + ret = do_adjtimex(&txc); + if (ret < 0) + return ret; + + txc.modes = ADJ_STATUS; + txc.status &= ~(STA_INS|STA_DEL); + return do_adjtimex(&txc); +} + +static void stp_check_leap(void) +{ + struct stp_stzi stzi; + struct stp_lsoib *lsoib = &stzi.lsoib; + struct __kernel_timex txc; + int64_t timediff; + int leapdiff, ret; + + if (!stp_info.lu || !check_sync_clock()) { + /* + * Either a scheduled leap second was removed by the operator, + * or STP is out of sync. In both cases, clear the leap second + * kernel flags. + */ + if (stp_clear_leap() < 0) + pr_err("failed to clear leap second flags\n"); + return; + } + + if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) { + pr_err("stzi failed\n"); + return; + } + + timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC; + leapdiff = lsoib->nlso - lsoib->also; + + if (leapdiff != 1 && leapdiff != -1) { + pr_err("Cannot schedule %d leap seconds\n", leapdiff); + return; + } + + if (timediff < 0) { + if (stp_clear_leap() < 0) + pr_err("failed to clear leap second flags\n"); + } else if (timediff < 7200) { + memset(&txc, 0, sizeof(txc)); + ret = do_adjtimex(&txc); + if (ret < 0) + return; + + txc.modes = ADJ_STATUS; + if (leapdiff > 0) + txc.status |= STA_INS; + else + txc.status |= STA_DEL; + ret = do_adjtimex(&txc); + if (ret < 0) + pr_err("failed to set leap second flags\n"); + /* arm Timer to clear leap second flags */ + mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC)); + } else { + /* The day the leap second is scheduled for hasn't been reached. Retry + * in one hour. + */ + mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC)); + } +} + /* * STP work. Check for the STP state and take over the clock * synchronization if the STP clock source is usable. 
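
The leap-second handling added above only touches the timekeeping status flags inside a two-hour window: if the scheduled insertion or deletion lies in the past the flags are cleared, if it is less than 7200 seconds away STA_INS or STA_DEL is set via do_adjtimex() and a timer is armed to clear the flag again afterwards, and anything further out simply re-queues the check an hour later. A stand-alone sketch of just that decision window (illustrative only, not the kernel function):

    /* Simplified sketch of the scheduling window used by stp_check_leap(). */
    #include <stdio.h>

    enum leap_action { LEAP_CLEAR, LEAP_ARM, LEAP_RETRY_LATER };

    static enum leap_action classify(long long seconds_until_leap)
    {
            if (seconds_until_leap < 0)
                    return LEAP_CLEAR;          /* event passed: clear STA_INS/STA_DEL */
            if (seconds_until_leap < 7200)
                    return LEAP_ARM;            /* within two hours: set the flag now */
            return LEAP_RETRY_LATER;            /* too early: re-check in an hour */
    }

    int main(void)
    {
            const long long samples[] = { -5, 3600, 86400 };

            for (int i = 0; i < 3; i++)
                    printf("%lld -> %d\n", samples[i], classify(samples[i]));
            return 0;
    }
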
@@ -638,7 +682,7 @@ static void stp_work_fn(struct work_struct *work) int rc; /* prevent multiple execution. */ - mutex_lock(&stp_work_mutex); + mutex_lock(&stp_mutex); if (!stp_online) { chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL); @@ -646,23 +690,22 @@ static void stp_work_fn(struct work_struct *work) goto out_unlock; } - rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0, NULL); + rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL); if (rc) goto out_unlock; - rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); + rc = __store_stpinfo(); if (rc || stp_info.c == 0) goto out_unlock; /* Skip synchronization if the clock is already in sync. */ - if (check_sync_clock()) - goto out_unlock; - - memset(&stp_sync, 0, sizeof(stp_sync)); - cpus_read_lock(); - atomic_set(&stp_sync.cpus, num_online_cpus() - 1); - stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask); - cpus_read_unlock(); + if (!check_sync_clock()) { + memset(&stp_sync, 0, sizeof(stp_sync)); + cpus_read_lock(); + atomic_set(&stp_sync.cpus, num_online_cpus() - 1); + stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask); + cpus_read_unlock(); + } if (!check_sync_clock()) /* @@ -670,9 +713,11 @@ static void stp_work_fn(struct work_struct *work) * Retry after a second. */ mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC)); + else if (stp_info.lu) + stp_check_leap(); out_unlock: - mutex_unlock(&stp_work_mutex); + mutex_unlock(&stp_mutex); } /* @@ -687,10 +732,14 @@ static ssize_t ctn_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%016llx\n", - *(unsigned long long *) stp_info.ctnid); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%016llx\n", + *(unsigned long long *) stp_info.ctnid); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(ctn_id); @@ -699,9 +748,13 @@ static ssize_t ctn_type_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", stp_info.ctn); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", stp_info.ctn); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(ctn_type); @@ -710,9 +763,13 @@ static ssize_t dst_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x2000)) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x2000)) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(dst_offset); @@ -721,20 +778,56 @@ static ssize_t leap_seconds_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x8000)) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x8000)) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(leap_seconds); +static ssize_t leap_seconds_scheduled_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct stp_stzi stzi; + ssize_t ret; + + mutex_lock(&stp_mutex); + if (!stpinfo_valid() || !(stp_info.vbits & 0x8000) || !stp_info.lu) { + 
mutex_unlock(&stp_mutex); + return -ENODATA; + } + + ret = chsc_stzi(stp_page, &stzi, sizeof(stzi)); + mutex_unlock(&stp_mutex); + if (ret < 0) + return ret; + + if (!stzi.lsoib.p) + return sprintf(buf, "0,0\n"); + + return sprintf(buf, "%llu,%d\n", + tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC, + stzi.lsoib.nlso - stzi.lsoib.also); +} + +static DEVICE_ATTR_RO(leap_seconds_scheduled); + static ssize_t stratum_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(stratum); @@ -743,9 +836,13 @@ static ssize_t time_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x0800)) - return -ENODATA; - return sprintf(buf, "%i\n", (int) stp_info.tto); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x0800)) + ret = sprintf(buf, "%i\n", (int) stp_info.tto); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(time_offset); @@ -754,9 +851,13 @@ static ssize_t time_zone_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online || !(stp_info.vbits & 0x4000)) - return -ENODATA; - return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid() && (stp_info.vbits & 0x4000)) + ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(time_zone_offset); @@ -765,9 +866,13 @@ static ssize_t timing_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", stp_info.tmd); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", stp_info.tmd); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(timing_mode); @@ -776,9 +881,13 @@ static ssize_t timing_state_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (!stp_online) - return -ENODATA; - return sprintf(buf, "%i\n", stp_info.tst); + ssize_t ret = -ENODATA; + + mutex_lock(&stp_mutex); + if (stpinfo_valid()) + ret = sprintf(buf, "%i\n", stp_info.tst); + mutex_unlock(&stp_mutex); + return ret; } static DEVICE_ATTR_RO(timing_state); @@ -801,14 +910,14 @@ static ssize_t online_store(struct device *dev, return -EINVAL; if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) return -EOPNOTSUPP; - mutex_lock(&clock_sync_mutex); + mutex_lock(&stp_mutex); stp_online = value; if (stp_online) set_bit(CLOCK_SYNC_STP, &clock_sync_flags); else clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); queue_work(time_sync_wq, &stp_work); - mutex_unlock(&clock_sync_mutex); + mutex_unlock(&stp_mutex); return count; } @@ -824,6 +933,7 @@ static struct device_attribute *stp_attributes[] = { &dev_attr_dst_offset, &dev_attr_leap_seconds, &dev_attr_online, + &dev_attr_leap_seconds_scheduled, &dev_attr_stratum, &dev_attr_time_offset, &dev_attr_time_zone_offset, diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index c296e5c8dbf9..14bd9d58edc9 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -26,33 +26,10 @@ int __bootdata_preserved(prot_virt_guest); struct uv_info __bootdata_preserved(uv_info); #if 
IS_ENABLED(CONFIG_KVM) -int prot_virt_host; +int __bootdata_preserved(prot_virt_host); EXPORT_SYMBOL(prot_virt_host); EXPORT_SYMBOL(uv_info); -static int __init prot_virt_setup(char *val) -{ - bool enabled; - int rc; - - rc = kstrtobool(val, &enabled); - if (!rc && enabled) - prot_virt_host = 1; - - if (is_prot_virt_guest() && prot_virt_host) { - prot_virt_host = 0; - pr_warn("Protected virtualization not available in protected guests."); - } - - if (prot_virt_host && !test_facility(158)) { - prot_virt_host = 0; - pr_warn("Protected virtualization not supported by the hardware."); - } - - return rc; -} -early_param("prot_virt", prot_virt_setup); - static int __init uv_init(unsigned long stor_base, unsigned long stor_len) { struct uv_cb_init uvcb = { @@ -74,6 +51,24 @@ void __init setup_uv(void) { unsigned long uv_stor_base; + /* + * keep these conditions in line with kasan init code has_uv_sec_stor_limit() + */ + if (!is_prot_virt_host()) + return; + + if (is_prot_virt_guest()) { + prot_virt_host = 0; + pr_warn("Protected virtualization not available in protected guests."); + return; + } + + if (!test_facility(158)) { + prot_virt_host = 0; + pr_warn("Protected virtualization not supported by the hardware."); + return; + } + uv_stor_base = (unsigned long)memblock_alloc_try_nid( uv_info.uv_base_stor_len, SZ_1M, SZ_2G, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); @@ -98,7 +93,8 @@ fail: void adjust_to_uv_max(unsigned long *vmax) { - *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr); + if (uv_info.max_sec_stor_addr) + *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr); } /* @@ -119,6 +115,26 @@ static int uv_pin_shared(unsigned long paddr) } /* + * Requests the Ultravisor to destroy a guest page and make it + * accessible to the host. The destroy clears the page instead of + * exporting. + * + * @paddr: Absolute host address of page to be destroyed + */ +int uv_destroy_page(unsigned long paddr) +{ + struct uv_cb_cfs uvcb = { + .header.cmd = UVC_CMD_DESTR_SEC_STOR, + .header.len = sizeof(uvcb), + .paddr = paddr + }; + + if (uv_call(0, (u64)&uvcb)) + return -EINVAL; + return 0; +} + +/* * Requests the Ultravisor to encrypt a guest page and make it * accessible to the host for paging (export). * diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index c4baefaa6e34..f9da5b149141 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -20,6 +20,8 @@ #include <linux/security.h> #include <linux/memblock.h> #include <linux/compat.h> +#include <linux/binfmts.h> +#include <vdso/datapage.h> #include <asm/asm-offsets.h> #include <asm/processor.h> #include <asm/mmu.h> @@ -96,35 +98,12 @@ static union { struct vdso_data data; u8 page[PAGE_SIZE]; } vdso_data_store __page_aligned_data; -struct vdso_data *vdso_data = &vdso_data_store.data; - -/* - * Setup vdso data page. - */ -static void __init vdso_init_data(struct vdso_data *vd) -{ - vd->ectg_available = test_facility(31); -} - +struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data; /* * Allocate/free per cpu vdso data. */ #define SEGMENT_ORDER 2 -/* - * The initial vdso_data structure for the boot CPU. Eventually - * it is replaced with a properly allocated structure in vdso_init. - * This is necessary because a valid S390_lowcore.vdso_per_cpu_data - * pointer is required to be able to return from an interrupt or - * program check. See the exit paths in entry.S. 
- */ -struct vdso_data boot_vdso_data __initdata; - -void __init vdso_alloc_boot_cpu(struct lowcore *lowcore) -{ - lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data; -} - int vdso_alloc_per_cpu(struct lowcore *lowcore) { unsigned long segment_table, page_table, page_frame; @@ -246,8 +225,6 @@ static int __init vdso_init(void) { int i; - vdso_init_data(vdso_data); - /* Calculate the size of the 64 bit vDSO */ vdso64_pages = ((&vdso64_end - &vdso64_start + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 4a66a1cb919b..3d3303283181 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -1,17 +1,23 @@ # SPDX-License-Identifier: GPL-2.0 -# List of files in the vdso, has to be asm only for now +# List of files in the vdso KCOV_INSTRUMENT := n +ARCH_REL_TYPE_ABS := R_390_COPY|R_390_GLOB_DAT|R_390_JMP_SLOT|R_390_RELATIVE +ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT -obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o +include $(srctree)/lib/vdso/Makefile +obj-vdso64 = vdso_user_wrapper.o note.o getcpu.o +obj-cvdso64 = vdso64_generic.o +CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) # Build rules -targets := $(obj-vdso64) vdso64.so vdso64.so.dbg +targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so vdso64.so.dbg obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) +obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64)) KBUILD_AFLAGS += -DBUILD_VDSO -KBUILD_CFLAGS += -DBUILD_VDSO +KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS)) KBUILD_AFLAGS_64 += -m64 -s @@ -37,7 +43,7 @@ KASAN_SANITIZE := n $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE +$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE $(call if_changed,ld) # strip rule for the .so file @@ -49,9 +55,14 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE $(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) +$(obj-cvdso64): %.o: %.c FORCE + $(call if_changed_dep,vdso64cc) + # actual build commands quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< +quiet_cmd_vdso64cc = VDSO64C $@ + cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $< # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S deleted file mode 100644 index 0c79caa32b59..000000000000 --- a/arch/s390/kernel/vdso64/clock_getres.S +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Userland implementation of clock_getres() for 64 bits processes in a - * s390 kernel for use in the vDSO - * - * Copyright IBM Corp. 
2008 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - */ -#include <asm/vdso.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> -#include <asm/dwarf.h> - - .text - .align 4 - .globl __kernel_clock_getres - .type __kernel_clock_getres,@function -__kernel_clock_getres: - CFI_STARTPROC - larl %r1,3f - lg %r0,0(%r1) - cghi %r2,__CLOCK_REALTIME_COARSE - je 0f - cghi %r2,__CLOCK_MONOTONIC_COARSE - je 0f - larl %r1,_vdso_data - llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1) - cghi %r2,__CLOCK_REALTIME - je 0f - cghi %r2,__CLOCK_MONOTONIC - je 0f - cghi %r2,__CLOCK_THREAD_CPUTIME_ID - je 0f - cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ - jne 2f - larl %r5,_vdso_data - icm %r0,15,__LC_ECTG_OK(%r5) - jz 2f -0: ltgr %r3,%r3 - jz 1f /* res == NULL */ - xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ - stg %r0,8(%r3) /* store tp->tv_usec */ -1: lghi %r2,0 - br %r14 -2: lghi %r1,__NR_clock_getres /* fallback to svc */ - svc 0 - br %r14 - CFI_ENDPROC -3: .quad __CLOCK_COARSE_RES - .size __kernel_clock_getres,.-__kernel_clock_getres diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S deleted file mode 100644 index 9d2ee79b90f2..000000000000 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ /dev/null @@ -1,163 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Userland implementation of clock_gettime() for 64 bits processes in a - * s390 kernel for use in the vDSO - * - * Copyright IBM Corp. 2008 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - */ -#include <asm/vdso.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> -#include <asm/dwarf.h> -#include <asm/ptrace.h> - - .text - .align 4 - .globl __kernel_clock_gettime - .type __kernel_clock_gettime,@function -__kernel_clock_gettime: - CFI_STARTPROC - aghi %r15,-16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD - larl %r5,_vdso_data - cghi %r2,__CLOCK_REALTIME_COARSE - je 4f - cghi %r2,__CLOCK_REALTIME - je 5f - cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ - je 9f - cghi %r2,__CLOCK_MONOTONIC_COARSE - je 3f - cghi %r2,__CLOCK_MONOTONIC - jne 12f - - /* CLOCK_MONOTONIC */ -0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ - tmll %r4,0x0001 /* pending update ? loop */ - jnz 0b - stcke 0(%r15) /* Store TOD clock */ - lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ - lg %r0,__VDSO_WTOM_SEC(%r5) - lg %r1,1(%r15) - sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ - alg %r1,__VDSO_WTOM_NSEC(%r5) - srlg %r1,%r1,0(%r2) /* >> tk->shift */ - clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ - jne 0b - larl %r5,13f -1: clg %r1,0(%r5) - jl 2f - slg %r1,0(%r5) - aghi %r0,1 - j 1b -2: stg %r0,0(%r3) /* store tp->tv_sec */ - stg %r1,8(%r3) /* store tp->tv_nsec */ - lghi %r2,0 - aghi %r15,16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD - CFI_RESTORE 15 - br %r14 - - /* CLOCK_MONOTONIC_COARSE */ - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD -3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ - tmll %r4,0x0001 /* pending update ? loop */ - jnz 3b - lg %r0,__VDSO_WTOM_CRS_SEC(%r5) - lg %r1,__VDSO_WTOM_CRS_NSEC(%r5) - clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ - jne 3b - j 2b - - /* CLOCK_REALTIME_COARSE */ -4: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ - tmll %r4,0x0001 /* pending update ? 
loop */ - jnz 4b - lg %r0,__VDSO_XTIME_CRS_SEC(%r5) - lg %r1,__VDSO_XTIME_CRS_NSEC(%r5) - clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ - jne 4b - j 7f - - /* CLOCK_REALTIME */ -5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ - tmll %r4,0x0001 /* pending update ? loop */ - jnz 5b - stcke 0(%r15) /* Store TOD clock */ - lg %r1,1(%r15) - lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */ - slgr %r0,%r1 /* now - ts_steering_end */ - ltgr %r0,%r0 /* past end of steering ? */ - jm 17f - srlg %r0,%r0,15 /* 1 per 2^16 */ - tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */ - jz 18f - lcgr %r0,%r0 /* negative TOD offset */ -18: algr %r1,%r0 /* add steering offset */ -17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ - sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ - srlg %r1,%r1,0(%r2) /* >> tk->shift */ - lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ - clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ - jne 5b - larl %r5,13f -6: clg %r1,0(%r5) - jl 7f - slg %r1,0(%r5) - aghi %r0,1 - j 6b -7: stg %r0,0(%r3) /* store tp->tv_sec */ - stg %r1,8(%r3) /* store tp->tv_nsec */ - lghi %r2,0 - aghi %r15,16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD - CFI_RESTORE 15 - br %r14 - - /* CPUCLOCK_VIRT for this thread */ - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD -9: lghi %r4,0 - icm %r0,15,__VDSO_ECTG_OK(%r5) - jz 12f - sacf 256 /* Magic ectg instruction */ - .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4 - sacf 0 - algr %r1,%r0 /* r1 = cputime as TOD value */ - mghi %r1,1000 /* convert to nanoseconds */ - srlg %r1,%r1,12 /* r1 = cputime in nanosec */ - lgr %r4,%r1 - larl %r5,13f - srlg %r1,%r1,9 /* divide by 1000000000 */ - mlg %r0,8(%r5) - srlg %r0,%r0,11 /* r0 = tv_sec */ - stg %r0,0(%r3) - msg %r0,0(%r5) /* calculate tv_nsec */ - slgr %r4,%r0 /* r4 = tv_nsec */ - stg %r4,8(%r3) - lghi %r2,0 - aghi %r15,16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD - CFI_RESTORE 15 - br %r14 - - /* Fallback to system call */ - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD -12: lghi %r1,__NR_clock_gettime - svc 0 - aghi %r15,16 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD - CFI_RESTORE 15 - br %r14 - CFI_ENDPROC - -13: .quad 1000000000 -14: .quad 19342813113834067 - .size __kernel_clock_gettime,.-__kernel_clock_gettime diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S deleted file mode 100644 index aebe10dc7c99..000000000000 --- a/arch/s390/kernel/vdso64/gettimeofday.S +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Userland implementation of gettimeofday() for 64 bits processes in a - * s390 kernel for use in the vDSO - * - * Copyright IBM Corp. 2008 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - */ -#include <asm/vdso.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> -#include <asm/dwarf.h> -#include <asm/ptrace.h> - - .text - .align 4 - .globl __kernel_gettimeofday - .type __kernel_gettimeofday,@function -__kernel_gettimeofday: - CFI_STARTPROC - aghi %r15,-16 - CFI_ADJUST_CFA_OFFSET 16 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD - larl %r5,_vdso_data -0: ltgr %r3,%r3 /* check if tz is NULL */ - je 1f - mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) -1: ltgr %r2,%r2 /* check if tv is NULL */ - je 4f - lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ - tmll %r4,0x0001 /* pending update ? 
loop */ - jnz 0b - stcke 0(%r15) /* Store TOD clock */ - lg %r1,1(%r15) - lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */ - slgr %r0,%r1 /* now - ts_steering_end */ - ltgr %r0,%r0 /* past end of steering ? */ - jm 6f - srlg %r0,%r0,15 /* 1 per 2^16 */ - tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */ - jz 7f - lcgr %r0,%r0 /* negative TOD offset */ -7: algr %r1,%r0 /* add steering offset */ -6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ - lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ - clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ - jne 0b - lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ - srlg %r1,%r1,0(%r5) /* >> tk->shift */ - larl %r5,5f -2: clg %r1,0(%r5) - jl 3f - slg %r1,0(%r5) - aghi %r0,1 - j 2b -3: stg %r0,0(%r2) /* store tv->tv_sec */ - slgr %r0,%r0 /* tv_nsec -> tv_usec */ - ml %r0,8(%r5) - srlg %r0,%r0,6 - stg %r0,8(%r2) /* store tv->tv_usec */ -4: lghi %r2,0 - aghi %r15,16 - CFI_ADJUST_CFA_OFFSET -16 - CFI_RESTORE 15 - br %r14 - CFI_ENDPROC -5: .quad 1000000000 - .long 274877907 - .size __kernel_gettimeofday,.-__kernel_gettimeofday diff --git a/arch/s390/kernel/vdso64/vdso64_generic.c b/arch/s390/kernel/vdso64/vdso64_generic.c new file mode 100644 index 000000000000..a8cef7e4d137 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso64_generic.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "../../../../lib/vdso/gettimeofday.c" + +int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts) +{ + return __cvdso_clock_getres(clock, ts); +} diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S new file mode 100644 index 000000000000..a775d7e52872 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <asm/vdso.h> +#include <asm/unistd.h> +#include <asm/asm-offsets.h> +#include <asm/dwarf.h> +#include <asm/ptrace.h> + +#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8) + +/* + * Older glibc version called vdso without allocating a stackframe. This wrapper + * is just used to allocate a stackframe. See + * https://sourceware.org/git/?p=glibc.git;a=commit;h=478593e6374f3818da39332260dc453cb19cfa1e + * for details. + */ +.macro vdso_func func + .globl __kernel_\func + .type __kernel_\func,@function + .align 8 +__kernel_\func: + CFI_STARTPROC + aghi %r15,-WRAPPER_FRAME_SIZE + CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE) + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD + stg %r14,STACK_FRAME_OVERHEAD(%r15) + brasl %r14,__s390_vdso_\func + lg %r14,STACK_FRAME_OVERHEAD(%r15) + aghi %r15,WRAPPER_FRAME_SIZE + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD + CFI_RESTORE 15 + br %r14 + CFI_ENDPROC + .size __kernel_\func,.-__kernel_\func +.endm + +vdso_func gettimeofday +vdso_func clock_getres +vdso_func clock_gettime diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 37695499717d..177ccfbda40a 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -181,6 +181,7 @@ SECTIONS /* Debugging sections. 
*/ STABS_DEBUG DWARF_DEBUG + ELF_DETAILS /* Sections to be discarded */ DISCARDS diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index 0e30e6e43b0c..93b3209b94a2 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -333,7 +333,7 @@ EXPORT_SYMBOL(memchr); * memcmp - Compare two areas of memory * @s1: One area of memory * @s2: Another area of memory - * @count: The size of the area. + * @n: The size of the area. */ #ifdef __HAVE_ARCH_MEMCMP int memcmp(const void *s1, const void *s2, size_t n) diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index 3175413186b9..cd67e94c16aa 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile @@ -8,7 +8,7 @@ obj-y += page-states.o pageattr.o pgtable.o pgalloc.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o +obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o obj-$(CONFIG_PGSTE) += gmap.o KASAN_SANITIZE_kasan_init.o := n diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index c2ac9b8ae612..8f9ff7e7187d 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -1,9 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/set_memory.h> +#include <linux/ptdump.h> #include <linux/seq_file.h> #include <linux/debugfs.h> -#include <linux/sched.h> #include <linux/mm.h> #include <linux/kasan.h> +#include <asm/ptdump.h> #include <asm/kasan.h> #include <asm/sections.h> @@ -15,264 +17,235 @@ struct addr_marker { }; enum address_markers_idx { - IDENTITY_NR = 0, + IDENTITY_BEFORE_NR = 0, + IDENTITY_BEFORE_END_NR, KERNEL_START_NR, KERNEL_END_NR, + IDENTITY_AFTER_NR, + IDENTITY_AFTER_END_NR, #ifdef CONFIG_KASAN KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, #endif VMEMMAP_NR, + VMEMMAP_END_NR, VMALLOC_NR, + VMALLOC_END_NR, MODULES_NR, + MODULES_END_NR, }; static struct addr_marker address_markers[] = { - [IDENTITY_NR] = {0, "Identity Mapping"}, + [IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"}, + [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"}, [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"}, [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"}, + [IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"}, + [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"}, #ifdef CONFIG_KASAN [KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"}, [KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"}, #endif - [VMEMMAP_NR] = {0, "vmemmap Area"}, - [VMALLOC_NR] = {0, "vmalloc Area"}, - [MODULES_NR] = {0, "Modules Area"}, + [VMEMMAP_NR] = {0, "vmemmap Area Start"}, + [VMEMMAP_END_NR] = {0, "vmemmap Area End"}, + [VMALLOC_NR] = {0, "vmalloc Area Start"}, + [VMALLOC_END_NR] = {0, "vmalloc Area End"}, + [MODULES_NR] = {0, "Modules Area Start"}, + [MODULES_END_NR] = {0, "Modules Area End"}, { -1, NULL } }; struct pg_state { + struct ptdump_state ptdump; + struct seq_file *seq; int level; unsigned int current_prot; + bool check_wx; + unsigned long wx_pages; unsigned long start_address; - unsigned long current_address; const struct addr_marker *marker; }; +#define pt_dump_seq_printf(m, fmt, args...) 
\ +({ \ + struct seq_file *__m = (m); \ + \ + if (__m) \ + seq_printf(__m, fmt, ##args); \ +}) + +#define pt_dump_seq_puts(m, fmt) \ +({ \ + struct seq_file *__m = (m); \ + \ + if (__m) \ + seq_printf(__m, fmt); \ +}) + static void print_prot(struct seq_file *m, unsigned int pr, int level) { static const char * const level_name[] = { "ASCE", "PGD", "PUD", "PMD", "PTE" }; - seq_printf(m, "%s ", level_name[level]); + pt_dump_seq_printf(m, "%s ", level_name[level]); if (pr & _PAGE_INVALID) { - seq_printf(m, "I\n"); + pt_dump_seq_printf(m, "I\n"); return; } - seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW "); - seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n"); + pt_dump_seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW "); + pt_dump_seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n"); } -static void note_page(struct seq_file *m, struct pg_state *st, - unsigned int new_prot, int level) +static void note_prot_wx(struct pg_state *st, unsigned long addr) +{ +#ifdef CONFIG_DEBUG_WX + if (!st->check_wx) + return; + if (st->current_prot & _PAGE_INVALID) + return; + if (st->current_prot & _PAGE_PROTECT) + return; + if (st->current_prot & _PAGE_NOEXEC) + return; + /* The first lowcore page is currently still W+X. */ + if (addr == PAGE_SIZE) + return; + WARN_ONCE(1, "s390/mm: Found insecure W+X mapping at address %pS\n", + (void *)st->start_address); + st->wx_pages += (addr - st->start_address) / PAGE_SIZE; +#endif /* CONFIG_DEBUG_WX */ +} + +static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val) { - static const char units[] = "KMGTPE"; int width = sizeof(unsigned long) * 2; + static const char units[] = "KMGTPE"; const char *unit = units; - unsigned int prot, cur; unsigned long delta; + struct pg_state *st; + struct seq_file *m; + unsigned int prot; - /* - * If we have a "break" in the series, we need to flush the state - * that we have now. "break" is either changing perms, levels or - * address space marker. - */ - prot = new_prot; - cur = st->current_prot; - - if (!st->level) { - /* First entry */ - st->current_prot = new_prot; + st = container_of(pt_st, struct pg_state, ptdump); + m = st->seq; + prot = val & (_PAGE_PROTECT | _PAGE_NOEXEC); + if (level == 4 && (val & _PAGE_INVALID)) + prot = _PAGE_INVALID; + /* For pmd_none() & friends val gets passed as zero. */ + if (level != 4 && !val) + prot = _PAGE_INVALID; + /* Final flush from generic code. 
*/ + if (level == -1) + addr = max_addr; + if (st->level == -1) { + pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); + st->start_address = addr; + st->current_prot = prot; st->level = level; - st->marker = address_markers; - seq_printf(m, "---[ %s ]---\n", st->marker->name); - } else if (prot != cur || level != st->level || - st->current_address >= st->marker[1].start_address) { - /* Print the actual finished series */ - seq_printf(m, "0x%0*lx-0x%0*lx ", - width, st->start_address, - width, st->current_address); - delta = (st->current_address - st->start_address) >> 10; + } else if (prot != st->current_prot || level != st->level || + addr >= st->marker[1].start_address) { + note_prot_wx(st, addr); + pt_dump_seq_printf(m, "0x%0*lx-0x%0*lx ", + width, st->start_address, + width, addr); + delta = (addr - st->start_address) >> 10; while (!(delta & 0x3ff) && unit[1]) { delta >>= 10; unit++; } - seq_printf(m, "%9lu%c ", delta, *unit); + pt_dump_seq_printf(m, "%9lu%c ", delta, *unit); print_prot(m, st->current_prot, st->level); - while (st->current_address >= st->marker[1].start_address) { + while (addr >= st->marker[1].start_address) { st->marker++; - seq_printf(m, "---[ %s ]---\n", st->marker->name); + pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name); } - st->start_address = st->current_address; - st->current_prot = new_prot; + st->start_address = addr; + st->current_prot = prot; st->level = level; } } -#ifdef CONFIG_KASAN -static void note_kasan_early_shadow_page(struct seq_file *m, - struct pg_state *st) -{ - unsigned int prot; - - prot = pte_val(*kasan_early_shadow_pte) & - (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC); - note_page(m, st, prot, 4); -} -#endif - -/* - * The actual page table walker functions. In order to keep the - * implementation of print_prot() short, we only check and pass - * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region, - * segment or page table entry is invalid or read-only. - * After all it's just a hint that the current level being walked - * contains an invalid or read-only entry. 
- */ -static void walk_pte_level(struct seq_file *m, struct pg_state *st, - pmd_t *pmd, unsigned long addr) -{ - unsigned int prot; - pte_t *pte; - int i; - - for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) { - st->current_address = addr; - pte = pte_offset_kernel(pmd, addr); - prot = pte_val(*pte) & - (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC); - note_page(m, st, prot, 4); - addr += PAGE_SIZE; - } -} - -static void walk_pmd_level(struct seq_file *m, struct pg_state *st, - pud_t *pud, unsigned long addr) -{ - unsigned int prot; - pmd_t *pmd; - int i; - -#ifdef CONFIG_KASAN - if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) { - note_kasan_early_shadow_page(m, st); - return; - } -#endif - - pmd = pmd_offset(pud, addr); - for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) { - st->current_address = addr; - if (!pmd_none(*pmd)) { - if (pmd_large(*pmd)) { - prot = pmd_val(*pmd) & - (_SEGMENT_ENTRY_PROTECT | - _SEGMENT_ENTRY_NOEXEC); - note_page(m, st, prot, 3); - } else - walk_pte_level(m, st, pmd, addr); - } else - note_page(m, st, _PAGE_INVALID, 3); - addr += PMD_SIZE; - } -} - -static void walk_pud_level(struct seq_file *m, struct pg_state *st, - p4d_t *p4d, unsigned long addr) +#ifdef CONFIG_DEBUG_WX +void ptdump_check_wx(void) { - unsigned int prot; - pud_t *pud; - int i; + struct pg_state st = { + .ptdump = { + .note_page = note_page, + .range = (struct ptdump_range[]) { + {.start = 0, .end = max_addr}, + {.start = 0, .end = 0}, + } + }, + .seq = NULL, + .level = -1, + .current_prot = 0, + .check_wx = true, + .wx_pages = 0, + .start_address = 0, + .marker = (struct addr_marker[]) { + { .start_address = 0, .name = NULL}, + { .start_address = -1, .name = NULL}, + }, + }; -#ifdef CONFIG_KASAN - if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) { - note_kasan_early_shadow_page(m, st); + if (!MACHINE_HAS_NX) return; - } -#endif - - pud = pud_offset(p4d, addr); - for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) { - st->current_address = addr; - if (!pud_none(*pud)) - if (pud_large(*pud)) { - prot = pud_val(*pud) & - (_REGION_ENTRY_PROTECT | - _REGION_ENTRY_NOEXEC); - note_page(m, st, prot, 2); - } else - walk_pmd_level(m, st, pud, addr); - else - note_page(m, st, _PAGE_INVALID, 2); - addr += PUD_SIZE; - } + ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); + if (st.wx_pages) + pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages); + else + pr_info("Checked W+X mappings: passed, no unexpected W+X pages found\n"); } +#endif /* CONFIG_DEBUG_WX */ -static void walk_p4d_level(struct seq_file *m, struct pg_state *st, - pgd_t *pgd, unsigned long addr) +#ifdef CONFIG_PTDUMP_DEBUGFS +static int ptdump_show(struct seq_file *m, void *v) { - p4d_t *p4d; - int i; - -#ifdef CONFIG_KASAN - if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) { - note_kasan_early_shadow_page(m, st); - return; - } -#endif + struct pg_state st = { + .ptdump = { + .note_page = note_page, + .range = (struct ptdump_range[]) { + {.start = 0, .end = max_addr}, + {.start = 0, .end = 0}, + } + }, + .seq = m, + .level = -1, + .current_prot = 0, + .check_wx = false, + .wx_pages = 0, + .start_address = 0, + .marker = address_markers, + }; - p4d = p4d_offset(pgd, addr); - for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) { - st->current_address = addr; - if (!p4d_none(*p4d)) - walk_pud_level(m, st, p4d, addr); - else - note_page(m, st, _PAGE_INVALID, 2); - addr += P4D_SIZE; - } + get_online_mems(); + mutex_lock(&cpa_mutex); + 
ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); + mutex_unlock(&cpa_mutex); + put_online_mems(); + return 0; } +DEFINE_SHOW_ATTRIBUTE(ptdump); +#endif /* CONFIG_PTDUMP_DEBUGFS */ -static void walk_pgd_level(struct seq_file *m) +/* + * Heapsort from lib/sort.c is not a stable sorting algorithm, do a simple + * insertion sort to preserve the original order of markers with the same + * start address. + */ +static void sort_address_markers(void) { - unsigned long addr = 0; - struct pg_state st; - pgd_t *pgd; - int i; + struct addr_marker tmp; + int i, j; - memset(&st, 0, sizeof(st)); - for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) { - st.current_address = addr; - pgd = pgd_offset_k(addr); - if (!pgd_none(*pgd)) - walk_p4d_level(m, &st, pgd, addr); - else - note_page(m, &st, _PAGE_INVALID, 1); - addr += PGDIR_SIZE; - cond_resched(); + for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) { + tmp = address_markers[i]; + for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--) + address_markers[j + 1] = address_markers[j]; + address_markers[j + 1] = tmp; } - /* Flush out the last page */ - st.current_address = max_addr; - note_page(m, &st, 0, 0); } -static int ptdump_show(struct seq_file *m, void *v) -{ - walk_pgd_level(m); - return 0; -} - -static int ptdump_open(struct inode *inode, struct file *filp) -{ - return single_open(filp, ptdump_show, NULL); -} - -static const struct file_operations ptdump_fops = { - .open = ptdump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int pt_dump_init(void) { /* @@ -282,10 +255,17 @@ static int pt_dump_init(void) */ max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; max_addr = 1UL << (max_addr * 11 + 31); + address_markers[IDENTITY_AFTER_END_NR].start_address = memory_end; address_markers[MODULES_NR].start_address = MODULES_VADDR; + address_markers[MODULES_END_NR].start_address = MODULES_END; address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; + address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size; address_markers[VMALLOC_NR].start_address = VMALLOC_START; + address_markers[VMALLOC_END_NR].start_address = VMALLOC_END; + sort_address_markers(); +#ifdef CONFIG_PTDUMP_DEBUGFS debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); +#endif /* CONFIG_PTDUMP_DEBUGFS */ return 0; } device_initcall(pt_dump_init); diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 373542ca1113..cfb0017f33a7 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2679,7 +2679,7 @@ static int __s390_reset_acc(pte_t *ptep, unsigned long addr, pte_t pte = READ_ONCE(*ptep); if (pte_present(pte)) - WARN_ON_ONCE(uv_convert_from_secure(pte_val(pte) & PAGE_MASK)); + WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK)); return 0; } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0d282081dc1f..d3ddb4361361 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -34,6 +34,7 @@ #include <asm/processor.h> #include <linux/uaccess.h> #include <asm/pgalloc.h> +#include <asm/ptdump.h> #include <asm/dma.h> #include <asm/lowcore.h> #include <asm/tlb.h> @@ -129,6 +130,7 @@ void mark_rodata_ro(void) set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT); pr_info("Write protected read-only-after-init data: %luk\n", size >> 10); + debug_checkwx(); } int set_memory_encrypted(unsigned long addr, int numpages) diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c index 
99dd1c63a065..5646b39c728a 100644 --- a/arch/s390/mm/kasan_init.c +++ b/arch/s390/mm/kasan_init.c @@ -11,7 +11,9 @@ #include <asm/facility.h> #include <asm/sections.h> #include <asm/setup.h> +#include <asm/uv.h> +unsigned long kasan_vmax; static unsigned long segment_pos __initdata; static unsigned long segment_low __initdata; static unsigned long pgalloc_pos __initdata; @@ -99,8 +101,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address, pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO); if (!has_nx) pgt_prot_zero &= ~_PAGE_NOEXEC; - pgt_prot = pgprot_val(PAGE_KERNEL_EXEC); - sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC); + pgt_prot = pgprot_val(PAGE_KERNEL); + sgt_prot = pgprot_val(SEGMENT_KERNEL); + if (!has_nx || mode == POPULATE_ONE2ONE) { + pgt_prot &= ~_PAGE_NOEXEC; + sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC; + } while (address < end) { pg_dir = pgd_offset_k(address); @@ -252,14 +258,31 @@ static void __init kasan_early_detect_facilities(void) } } +static bool __init has_uv_sec_stor_limit(void) +{ + /* + * keep these conditions in line with setup_uv() + */ + if (!is_prot_virt_host()) + return false; + + if (is_prot_virt_guest()) + return false; + + if (!test_facility(158)) + return false; + + return !!uv_info.max_sec_stor_addr; +} + void __init kasan_early_init(void) { unsigned long untracked_mem_end; unsigned long shadow_alloc_size; + unsigned long vmax_unlimited; unsigned long initrd_end; unsigned long asce_type; unsigned long memsize; - unsigned long vmax; unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO); pte_t pte_z; pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY); @@ -287,7 +310,9 @@ void __init kasan_early_init(void) BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE)); crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY); - untracked_mem_end = vmax = _REGION1_SIZE; + untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE; + if (has_uv_sec_stor_limit()) + kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr); asce_type = _ASCE_TYPE_REGION2; } else { /* 3 level paging */ @@ -295,7 +320,7 @@ void __init kasan_early_init(void) BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE)); crst_table_init((unsigned long *)early_pg_dir, _REGION3_ENTRY_EMPTY); - untracked_mem_end = vmax = _REGION2_SIZE; + untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION2_SIZE; asce_type = _ASCE_TYPE_REGION3; } @@ -365,17 +390,20 @@ void __init kasan_early_init(void) /* populate kasan shadow (for identity mapping and zero page mapping) */ kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP); if (IS_ENABLED(CONFIG_MODULES)) - untracked_mem_end = vmax - MODULES_LEN; + untracked_mem_end = kasan_vmax - MODULES_LEN; if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { - untracked_mem_end = vmax - vmalloc_size - MODULES_LEN; + untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN; /* shallowly populate kasan shadow for vmalloc and modules */ kasan_early_vmemmap_populate(__sha(untracked_mem_end), - __sha(vmax), POPULATE_SHALLOW); + __sha(kasan_vmax), POPULATE_SHALLOW); } /* populate kasan shadow for untracked memory */ kasan_early_vmemmap_populate(__sha(max_physmem_end), __sha(untracked_mem_end), POPULATE_ZERO_SHADOW); + kasan_early_vmemmap_populate(__sha(kasan_vmax), + __sha(vmax_unlimited), + POPULATE_ZERO_SHADOW); /* memory allocated for identity mapping structs will be freed later */ pgalloc_freeable = pgalloc_pos; /* populate identity mapping */ diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index 
fc141893d028..567c69f3069e 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -183,9 +183,9 @@ static void mark_kernel_pgd(void) void __init cmma_init_nodat(void) { - struct memblock_region *reg; struct page *page; unsigned long start, end, ix; + int i; if (cmma_flag < 2) return; @@ -193,9 +193,7 @@ void __init cmma_init_nodat(void) mark_kernel_pgd(); /* Set all kernel pages not used for page tables to stable/no-dat */ - for_each_memblock(memory, reg) { - start = memblock_region_memory_base_pfn(reg); - end = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { page = pfn_to_page(start); for (ix = start; ix < end; ix++, page++) { if (__test_and_clear_bit(PG_arch_1, &page->flags)) diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index c5c52ec2b46f..ed8e5b3575d5 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -278,7 +278,7 @@ static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end, return rc; } -static DEFINE_MUTEX(cpa_mutex); +DEFINE_MUTEX(cpa_mutex); static int change_page_attr(unsigned long addr, unsigned long end, unsigned long flags) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 0d25f743b270..18205f851c24 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -24,6 +24,26 @@ #include <asm/mmu_context.h> #include <asm/page-states.h> +pgprot_t pgprot_writecombine(pgprot_t prot) +{ + /* + * mio_wb_bit_mask may be set on a different CPU, but it is only set + * once at init and only read afterwards. + */ + return __pgprot(pgprot_val(prot) | mio_wb_bit_mask); +} +EXPORT_SYMBOL_GPL(pgprot_writecombine); + +pgprot_t pgprot_writethrough(pgprot_t prot) +{ + /* + * mio_wb_bit_mask may be set on a different CPU, but it is only set + * once at init and only read afterwards. 
+ */ + return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask); +} +EXPORT_SYMBOL_GPL(pgprot_writethrough); + static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int nodat) { diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index eddf71c22875..b239f2ba93b0 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -555,10 +555,11 @@ int vmem_add_mapping(unsigned long start, unsigned long size) */ void __init vmem_map_init(void) { - struct memblock_region *reg; + phys_addr_t base, end; + u64 i; - for_each_memblock(memory, reg) - vmem_add_range(reg->base, reg->size); + for_each_mem_range(i, &base, &end) + vmem_add_range(base, end - base); __set_memory((unsigned long)_stext, (unsigned long)(_etext - _stext) >> PAGE_SHIFT, SET_MEMORY_RO | SET_MEMORY_X); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index be4b8532dd3c..0a4182792876 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -50,7 +50,6 @@ struct bpf_jit { int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ int tail_call_start; /* Tail call start offset */ int excnt; /* Number of exception table entries */ - int labels[1]; /* Labels for local jumps */ }; #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ @@ -229,18 +228,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) REG_SET_SEEN(b3); \ }) -#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \ +#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \ ({ \ - int rel = (jit->labels[label] - jit->prg) >> 1; \ + unsigned int rel = (int)((target) - jit->prg) / 2; \ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ (op2) | (mask) << 12); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) -#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \ +#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \ ({ \ - int rel = (jit->labels[label] - jit->prg) >> 1; \ + unsigned int rel = (int)((target) - jit->prg) / 2; \ _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ REG_SET_SEEN(b1); \ @@ -1282,7 +1281,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4(0xb9040000, BPF_REG_0, REG_2); break; } - case BPF_JMP | BPF_TAIL_CALL: + case BPF_JMP | BPF_TAIL_CALL: { + int patch_1_clrj, patch_2_clij, patch_3_brc; + /* * Implicit input: * B1: pointer to ctx @@ -1300,16 +1301,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, offsetof(struct bpf_array, map.max_entries)); /* if ((u32)%b3 >= (u32)%w1) goto out; */ - if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { - /* clrj %b3,%w1,0xa,label0 */ - EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, - REG_W1, 0, 0xa); - } else { - /* clr %b3,%w1 */ - EMIT2(0x1500, BPF_REG_3, REG_W1); - /* brcl 0xa,label0 */ - EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]); - } + /* clrj %b3,%w1,0xa,out */ + patch_1_clrj = jit->prg; + EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa, + jit->prg); /* * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) @@ -1324,16 +1319,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7080000, REG_W0, 1); /* laal %w1,%w0,off(%r15) */ EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); - if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { - /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ - EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, - 
MAX_TAIL_CALL_CNT, 0, 0x2); - } else { - /* clfi %w1,MAX_TAIL_CALL_CNT */ - EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT); - /* brcl 0x2,label0 */ - EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]); - } + /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */ + patch_2_clij = jit->prg; + EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT, + 2, jit->prg); /* * prog = array->ptrs[index]; @@ -1348,13 +1337,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* ltg %r1,prog(%b2,%r1) */ EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2, REG_1, offsetof(struct bpf_array, ptrs)); - if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { - /* brc 0x8,label0 */ - EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]); - } else { - /* brcl 0x8,label0 */ - EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]); - } + /* brc 0x8,out */ + patch_3_brc = jit->prg; + EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg); /* * Restore registers before calling function @@ -1371,8 +1356,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* bc 0xf,tail_call_start(%r1) */ _EMIT4(0x47f01000 + jit->tail_call_start); /* out: */ - jit->labels[0] = jit->prg; + if (jit->prg_buf) { + *(u16 *)(jit->prg_buf + patch_1_clrj + 2) = + (jit->prg - patch_1_clrj) >> 1; + *(u16 *)(jit->prg_buf + patch_2_clij + 2) = + (jit->prg - patch_2_clij) >> 1; + *(u16 *)(jit->prg_buf + patch_3_brc + 2) = + (jit->prg - patch_3_brc) >> 1; + } break; + } case BPF_JMP | BPF_EXIT: /* return b0 */ last = (i == fp->len - 1) ? 1 : 0; if (last) diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index b4e3c84772a1..bf557a1b789c 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \ pci_event.o pci_debug.o pci_insn.o pci_mmio.o \ pci_bus.o +obj-$(CONFIG_PCI_IOV) += pci_iov.o diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 1804230dd8d8..570016ae8bcd 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -37,6 +37,7 @@ #include <asm/pci_dma.h> #include "pci_bus.h" +#include "pci_iov.h" /* list of all detected zpci devices */ static LIST_HEAD(zpci_list); @@ -226,7 +227,7 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count) zpci_memcpy_toio(to, from, count); } -void __iomem *ioremap(phys_addr_t addr, size_t size) +static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot) { unsigned long offset, vaddr; struct vm_struct *area; @@ -247,14 +248,37 @@ void __iomem *ioremap(phys_addr_t addr, size_t size) return NULL; vaddr = (unsigned long) area->addr; - if (ioremap_page_range(vaddr, vaddr + size, addr, PAGE_KERNEL)) { + if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) { free_vm_area(area); return NULL; } return (void __iomem *) ((unsigned long) area->addr + offset); } + +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot) +{ + return __ioremap(addr, size, __pgprot(prot)); +} +EXPORT_SYMBOL(ioremap_prot); + +void __iomem *ioremap(phys_addr_t addr, size_t size) +{ + return __ioremap(addr, size, PAGE_KERNEL); +} EXPORT_SYMBOL(ioremap); +void __iomem *ioremap_wc(phys_addr_t addr, size_t size) +{ + return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL)); +} +EXPORT_SYMBOL(ioremap_wc); + +void __iomem *ioremap_wt(phys_addr_t addr, size_t size) +{ + return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL)); +} +EXPORT_SYMBOL(ioremap_wt); + void iounmap(volatile void __iomem *addr) { if 
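The tail-call rework above drops the jit->labels[] array: each forward branch records its own emit offset (patch_1_clrj and friends), and once the out: location is known the 16-bit halfword displacement is written back into the instruction. A self-contained model of that record-and-back-patch scheme; the buffer layout and opcode bytes are invented, only the (target - site) / 2 displacement arithmetic mirrors the JIT:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint8_t buf[64];
static unsigned int prg;                /* current emit offset in bytes */

/* emit a branch whose target is not known yet, remember where it sits */
static unsigned int emit_forward_branch(void)
{
        unsigned int site = prg;
        uint16_t placeholder = 0;

        buf[prg++] = 0xec;              /* pretend opcode bytes */
        buf[prg++] = 0x00;
        memcpy(&buf[prg], &placeholder, sizeof(placeholder));
        prg += 2;
        return site;
}

int main(void)
{
        unsigned int site = emit_forward_branch();
        uint16_t disp;

        prg += 12;                      /* stand-in for other emitted code */

        /* target reached: displacement is counted in halfwords */
        disp = (prg - site) / 2;
        memcpy(&buf[site + 2], &disp, sizeof(disp));

        printf("branch at offset %u jumps %u halfwords forward\n", site, disp);
        return 0;
}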
(static_branch_likely(&have_mio)) @@ -390,15 +414,6 @@ static struct pci_ops pci_root_ops = { .write = pci_write, }; -#ifdef CONFIG_PCI_IOV -static struct resource iov_res = { - .name = "PCI IOV res", - .start = 0, - .end = -1, - .flags = IORESOURCE_MEM, -}; -#endif - static void zpci_map_resources(struct pci_dev *pdev) { struct zpci_dev *zdev = to_zpci(pdev); @@ -419,16 +434,7 @@ static void zpci_map_resources(struct pci_dev *pdev) pdev->resource[i].end = pdev->resource[i].start + len - 1; } -#ifdef CONFIG_PCI_IOV - for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - int bar = i + PCI_IOV_RESOURCES; - - len = pci_resource_len(pdev, bar); - if (!len) - continue; - pdev->resource[bar].parent = &iov_res; - } -#endif + zpci_iov_map_resources(pdev); } static void zpci_unmap_resources(struct pci_dev *pdev) @@ -684,7 +690,7 @@ void zpci_remove_device(struct zpci_dev *zdev) pdev = pci_get_slot(zbus->bus, zdev->devfn); if (pdev) { if (pdev->is_virtfn) - return zpci_remove_virtfn(pdev, zdev->vfn); + return zpci_iov_remove_virtfn(pdev, zdev->vfn); pci_stop_and_remove_bus_device_locked(pdev); } } @@ -788,6 +794,9 @@ static int zpci_mem_init(void) if (!zpci_iomap_bitmap) goto error_iomap_bitmap; + if (static_branch_likely(&have_mio)) + clp_setup_writeback_mio(); + return 0; error_iomap_bitmap: kfree(zpci_iomap_start); @@ -885,9 +894,3 @@ out: return rc; } subsys_initcall_sync(pci_base_init); - -void zpci_rescan(void) -{ - if (zpci_is_enabled()) - clp_rescan_pci_devices_simple(NULL); -} diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index 5967f3014156..0c0db7c3a404 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -24,6 +24,7 @@ #include <asm/pci_dma.h> #include "pci_bus.h" +#include "pci_iov.h" static LIST_HEAD(zbus_list); static DEFINE_SPINLOCK(zbus_list_lock); @@ -126,69 +127,6 @@ static struct zpci_bus *zpci_bus_alloc(int pchid) return zbus; } -#ifdef CONFIG_PCI_IOV -static int zpci_bus_link_virtfn(struct pci_dev *pdev, - struct pci_dev *virtfn, int vfid) -{ - int rc; - - rc = pci_iov_sysfs_link(pdev, virtfn, vfid); - if (rc) - return rc; - - virtfn->is_virtfn = 1; - virtfn->multifunction = 0; - virtfn->physfn = pci_dev_get(pdev); - - return 0; -} - -static int zpci_bus_setup_virtfn(struct zpci_bus *zbus, - struct pci_dev *virtfn, int vfn) -{ - int i, cand_devfn; - struct zpci_dev *zdev; - struct pci_dev *pdev; - int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/ - int rc = 0; - - if (!zbus->multifunction) - return 0; - - /* If the parent PF for the given VF is also configured in the - * instance, it must be on the same zbus. - * We can then identify the parent PF by checking what - * devfn the VF would have if it belonged to that PF using the PF's - * stride and offset. Only if this candidate devfn matches the - * actual devfn will we link both functions. 
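zpci_bus_setup_virtfn() (moved into pci_iov.c later in this series) finds a VF's parent PF by computing the devfn the VF would have under each candidate PF and comparing it with the VF's actual devfn. Per the SR-IOV capability, VF routing IDs derive from the PF's First VF Offset and VF Stride; a toy version of that arithmetic with invented field values and bus crossings ignored:

#include <stdio.h>

struct pf_info {
        unsigned int devfn;             /* the PF's own devfn */
        unsigned int vf_offset;         /* "First VF Offset" */
        unsigned int vf_stride;         /* "VF Stride" */
};

/* devfn the vfid-th VF of this PF would occupy (same bus assumed) */
static unsigned int candidate_devfn(const struct pf_info *pf, int vfid)
{
        return (pf->devfn + pf->vf_offset + vfid * pf->vf_stride) & 0xff;
}

int main(void)
{
        struct pf_info parent = { .devfn = 0x00, .vf_offset = 1, .vf_stride = 1 };
        unsigned int vf_devfn = 0x03;   /* VF we are trying to link */
        int vfid;

        for (vfid = 0; vfid < 8; vfid++) {
                if (candidate_devfn(&parent, vfid) == vf_devfn) {
                        printf("devfn %#x is vfid %d of PF devfn %#x\n",
                               vf_devfn, vfid, parent.devfn);
                        break;
                }
        }
        return 0;
}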
- */ - for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) { - zdev = zbus->function[i]; - if (zdev && zdev->is_physfn) { - pdev = pci_get_slot(zbus->bus, zdev->devfn); - if (!pdev) - continue; - cand_devfn = pci_iov_virtfn_devfn(pdev, vfid); - if (cand_devfn == virtfn->devfn) { - rc = zpci_bus_link_virtfn(pdev, virtfn, vfid); - /* balance pci_get_slot() */ - pci_dev_put(pdev); - break; - } - /* balance pci_get_slot() */ - pci_dev_put(pdev); - } - } - return rc; -} -#else -static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus, - struct pci_dev *virtfn, int vfn) -{ - return 0; -} -#endif - void pcibios_bus_add_device(struct pci_dev *pdev) { struct zpci_dev *zdev = to_zpci(pdev); @@ -198,7 +136,7 @@ void pcibios_bus_add_device(struct pci_dev *pdev) * perform PF/VF linking. */ if (zdev->vfn) - zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn); + zpci_iov_setup_virtfn(zdev->zbus, pdev, zdev->vfn); } diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index 4972433df458..f8dfac0b5b71 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -9,7 +9,6 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops); void zpci_bus_device_unregister(struct zpci_dev *zdev); -int zpci_bus_init(void); void zpci_release_device(struct kref *kref); static inline void zpci_zdev_put(struct zpci_dev *zdev) @@ -30,15 +29,3 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus, return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn]; } -#ifdef CONFIG_PCI_IOV -static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) -{ - - pci_lock_rescan_remove(); - /* Linux' vfid's start at 0 vfn at 1 */ - pci_iov_remove_virtfn(pdev->physfn, vfn - 1); - pci_unlock_rescan_remove(); -} -#else /* CONFIG_PCI_IOV */ -static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {} -#endif /* CONFIG_PCI_IOV */ diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 7e735f41a0a6..5a34a1359dc5 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -244,6 +244,7 @@ error: return rc; } +static int clp_refresh_fh(u32 fid); /* * Enable/Disable a given PCI function and update its function handle if * necessary @@ -286,7 +287,41 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command) } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY && rrb->response.fh == 0) { /* Function is already in desired state - update handle */ - rc = clp_rescan_pci_devices_simple(&fid); + rc = clp_refresh_fh(fid); + } + clp_free_block(rrb); + return rc; +} + +int clp_setup_writeback_mio(void) +{ + struct clp_req_rsp_slpc_pci *rrb; + u8 wb_bit_pos; + int rc; + + rrb = clp_alloc_block(GFP_KERNEL); + if (!rrb) + return -ENOMEM; + + memset(rrb, 0, sizeof(*rrb)); + rrb->request.hdr.len = sizeof(rrb->request); + rrb->request.hdr.cmd = CLP_SLPC; + rrb->response.hdr.len = sizeof(rrb->response); + + rc = clp_req(rrb, CLP_LPS_PCI); + if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) { + if (rrb->response.vwb) { + wb_bit_pos = rrb->response.mio_wb; + set_bit_inv(wb_bit_pos, &mio_wb_bit_mask); + zpci_dbg(3, "wb bit: %d\n", wb_bit_pos); + } else { + zpci_dbg(3, "wb bit: n.a.\n"); + } + + } else { + zpci_err("SLPC PCI:\n"); + zpci_err_clp(rrb->response.hdr.rsp, rc); + rc = -EIO; } clp_free_block(rrb); return rc; @@ -374,24 +409,6 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) clp_add_pci_device(entry->fid, entry->fh, entry->config_state); } -static void __clp_update(struct clp_fh_list_entry *entry, void 
*data) -{ - struct zpci_dev *zdev; - u32 *fid = data; - - if (!entry->vendor_id) - return; - - if (fid && *fid != entry->fid) - return; - - zdev = get_zdev_by_fid(entry->fid); - if (!zdev) - return; - - zdev->fh = entry->fh; -} - int clp_scan_pci_devices(void) { struct clp_req_rsp_list_pci *rrb; @@ -407,27 +424,25 @@ int clp_scan_pci_devices(void) return rc; } -int clp_rescan_pci_devices(void) +static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data) { - struct clp_req_rsp_list_pci *rrb; - int rc; - - zpci_remove_reserved_devices(); + struct zpci_dev *zdev; + u32 fid = *((u32 *)data); - rrb = clp_alloc_block(GFP_KERNEL); - if (!rrb) - return -ENOMEM; + if (!entry->vendor_id || fid != entry->fid) + return; - rc = clp_list_pci(rrb, NULL, __clp_add); + zdev = get_zdev_by_fid(fid); + if (!zdev) + return; - clp_free_block(rrb); - return rc; + zdev->fh = entry->fh; } -/* Rescan PCI functions and refresh function handles. If fid is non-NULL only - * refresh the handle of the function matching @fid +/* + * Refresh the function handle of the function matching @fid */ -int clp_rescan_pci_devices_simple(u32 *fid) +static int clp_refresh_fh(u32 fid) { struct clp_req_rsp_list_pci *rrb; int rc; @@ -436,7 +451,7 @@ int clp_rescan_pci_devices_simple(u32 *fid) if (!rrb) return -ENOMEM; - rc = clp_list_pci(rrb, fid, __clp_update); + rc = clp_list_pci(rrb, &fid, __clp_refresh_fh); clp_free_block(rrb); return rc; @@ -495,7 +510,7 @@ static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb) } } -static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb) +static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb) { unsigned long limit = PAGE_SIZE - sizeof(lpcb->request); diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 64b1399a73f0..ebc9a49523aa 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -10,7 +10,7 @@ #include <linux/slab.h> #include <linux/export.h> #include <linux/iommu-helper.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/vmalloc.h> #include <linux/pci.h> #include <asm/pci_dma.h> @@ -261,13 +261,11 @@ static unsigned long __dma_alloc_iommu(struct device *dev, unsigned long start, int size) { struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); - unsigned long boundary_size; - boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - PAGE_SIZE) >> PAGE_SHIFT; return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, start, size, zdev->start_dma >> PAGE_SHIFT, - boundary_size, 0); + dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT), + 0); } static dma_addr_t dma_alloc_address(struct device *dev, int size) @@ -670,6 +668,8 @@ const struct dma_map_ops s390_pci_dma_ops = { .unmap_page = s390_dma_unmap_pages, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, /* dma_supported is unconditionally true without a callback */ }; EXPORT_SYMBOL_GPL(s390_pci_dma_ops); diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index d9ae7456dd4c..d33f21545dfd 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -152,7 +152,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) } break; case 0x0306: /* 0x308 or 0x302 for multiple devices */ - clp_rescan_pci_devices(); + zpci_remove_reserved_devices(); + clp_scan_pci_devices(); break; case 0x0308: /* Standby -> Reserved */ if (!zdev) diff --git a/arch/s390/pci/pci_iov.c 
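The __dma_alloc_iommu() hunk above is one of several in this series that replace the open-coded ALIGN(dma_get_seg_boundary(dev) + 1, 1 << shift) >> shift pattern with dma_get_seg_boundary_nr_pages(). A standalone sketch of the arithmetic the helper is assumed to perform, converting a segment-boundary mask into a page count:

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

static unsigned long boundary_nr_pages(unsigned long boundary_mask,
                                       unsigned int page_shift)
{
        return ALIGN(boundary_mask + 1, 1UL << page_shift) >> page_shift;
}

int main(void)
{
        /* 4 GiB boundary (mask 0xffffffff) with 4 KiB pages: 0x100000 pages */
        printf("%#lx\n", boundary_nr_pages(0xffffffffUL, 12));
        /* a boundary smaller than the page size still yields one page */
        printf("%#lx\n", boundary_nr_pages(0x7fffUL, 16));
        return 0;
}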
b/arch/s390/pci/pci_iov.c new file mode 100644 index 000000000000..ead062bf2b41 --- /dev/null +++ b/arch/s390/pci/pci_iov.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2020 + * + * Author(s): + * Niklas Schnelle <schnelle@linux.ibm.com> + * + */ + +#define KMSG_COMPONENT "zpci" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/pci.h> + +#include "pci_iov.h" + +static struct resource iov_res = { + .name = "PCI IOV res", + .start = 0, + .end = -1, + .flags = IORESOURCE_MEM, +}; + +void zpci_iov_map_resources(struct pci_dev *pdev) +{ + resource_size_t len; + int i; + + for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { + int bar = i + PCI_IOV_RESOURCES; + + len = pci_resource_len(pdev, bar); + if (!len) + continue; + pdev->resource[bar].parent = &iov_res; + } +} + +void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) +{ + pci_lock_rescan_remove(); + /* Linux' vfid's start at 0 vfn at 1 */ + pci_iov_remove_virtfn(pdev->physfn, vfn - 1); + pci_unlock_rescan_remove(); +} + +static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, int vfid) +{ + int rc; + + rc = pci_iov_sysfs_link(pdev, virtfn, vfid); + if (rc) + return rc; + + virtfn->is_virtfn = 1; + virtfn->multifunction = 0; + virtfn->physfn = pci_dev_get(pdev); + + return 0; +} + +int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn) +{ + int i, cand_devfn; + struct zpci_dev *zdev; + struct pci_dev *pdev; + int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/ + int rc = 0; + + if (!zbus->multifunction) + return 0; + + /* If the parent PF for the given VF is also configured in the + * instance, it must be on the same zbus. + * We can then identify the parent PF by checking what + * devfn the VF would have if it belonged to that PF using the PF's + * stride and offset. Only if this candidate devfn matches the + * actual devfn will we link both functions. + */ + for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) { + zdev = zbus->function[i]; + if (zdev && zdev->is_physfn) { + pdev = pci_get_slot(zbus->bus, zdev->devfn); + if (!pdev) + continue; + cand_devfn = pci_iov_virtfn_devfn(pdev, vfid); + if (cand_devfn == virtfn->devfn) { + rc = zpci_iov_link_virtfn(pdev, virtfn, vfid); + /* balance pci_get_slot() */ + pci_dev_put(pdev); + break; + } + /* balance pci_get_slot() */ + pci_dev_put(pdev); + } + } + return rc; +} diff --git a/arch/s390/pci/pci_iov.h b/arch/s390/pci/pci_iov.h new file mode 100644 index 000000000000..b2c828003bad --- /dev/null +++ b/arch/s390/pci/pci_iov.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2020 + * + * Author(s): + * Niklas Schnelle <schnelle@linux.ibm.com> + * + */ + +#ifndef __S390_PCI_IOV_H +#define __S390_PCI_IOV_H + +#ifdef CONFIG_PCI_IOV +void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn); + +void zpci_iov_map_resources(struct pci_dev *pdev); + +int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn); + +#else /* CONFIG_PCI_IOV */ +static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {} + +static inline void zpci_iov_map_resources(struct pci_dev *pdev) {} + +static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn) +{ + return 0; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* __S390_PCI_IOV_h */ diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss deleted file mode 100644 index f4f4c2c6dee9..000000000000 --- a/arch/s390/scripts/Makefile.chkbss +++ /dev/null @@ -1,20 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -chkbss-target ?= built-in.a -$(obj)/$(chkbss-target): chkbss - -chkbss-files := $(addsuffix .chkbss, $(chkbss)) -clean-files += $(chkbss-files) - -PHONY += chkbss -chkbss: $(addprefix $(obj)/, $(chkbss-files)) - -quiet_cmd_chkbss = CHKBSS $< - cmd_chkbss = \ - if ! $(OBJSIZE) --common $< | $(AWK) 'END { if ($$3) exit 1 }'; then \ - echo "error: $< .bss section is not empty" >&2; exit 1; \ - fi; \ - touch $@; - -$(obj)/%.o.chkbss: $(obj)/%.o - $(call cmd,chkbss) diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index d20927128fce..18278152c91c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -600,22 +600,6 @@ config PHYSICAL_START where the fail safe kernel needs to run at a different address than the panic-ed kernel. -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on PROC_FS - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl, it cannot be disabled and the task is only - allowed to execute a few safe syscalls defined by each seccomp - mode. - - If unsure, say N. 
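The per-arch SECCOMP prompt removed above is superseded by the generic one, enabled at run time through prctl(PR_SET_SECCOMP) or the seccomp() syscall. A minimal user-space demonstration of mode 1 (strict), under which only read, write, exit and sigreturn remain permitted:

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>

int main(void)
{
        const char msg[] = "now restricted to read/write/exit/sigreturn\n";

        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
                perror("prctl(PR_SET_SECCOMP)");
                return 1;
        }

        /* write(2) is still allowed... */
        write(STDOUT_FILENO, msg, sizeof(msg) - 1);

        /*
         * ...but nearly everything else now raises SIGKILL.  Leave via the
         * plain exit syscall: glibc's exit()/_exit() use exit_group, which
         * strict mode does not permit.
         */
        syscall(SYS_exit, 0);
        return 0;                       /* not reached */
}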
- config SMP bool "Symmetric multi-processing support" depends on SYS_SUPPORTS_SMP diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index 665cad452798..bac8a058ebd7 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -13,6 +13,7 @@ #include <cpu/sh7723.h> +#include <linux/dma-map-ops.h> #include <linux/clkdev.h> #include <linux/delay.h> #include <linux/device.h> diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index dd427bac5cde..bab91a99124e 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -36,6 +36,7 @@ #include <linux/usb/r8a66597.h> #include <linux/usb/renesas_usbhs.h> #include <linux/videodev2.h> +#include <linux/dma-map-ops.h> #include <media/drv-intf/renesas-ceu.h> #include <media/i2c/mt9t112.h> diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 96538ba3aa32..eeb5ce341efd 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -14,7 +14,6 @@ #include <linux/clkdev.h> #include <linux/delay.h> -#include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/gpio/machine.h> #include <linux/i2c.h> @@ -33,6 +32,7 @@ #include <linux/sh_intc.h> #include <linux/usb/r8a66597.h> #include <linux/videodev2.h> +#include <linux/dma-map-ops.h> #include <mach/kfr2r09.h> diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 9ed369dad62d..6703a2122c0d 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -5,7 +5,7 @@ * Copyright (C) 2008 Magnus Damm */ #include <linux/clkdev.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 32f5dd944889..8d6541ba0186 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -32,6 +32,7 @@ #include <linux/smc91x.h> #include <linux/usb/r8a66597.h> #include <linux/videodev2.h> +#include <linux/dma-map-ops.h> #include <mach-se/mach/se7724.h> #include <media/drv-intf/renesas-ceu.h> diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c index 7be8694c0d13..41e4daee8f04 100644 --- a/arch/sh/drivers/pci/fixups-dreamcast.c +++ b/arch/sh/drivers/pci/fixups-dreamcast.c @@ -19,7 +19,7 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/pci.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 6ab0b7377f66..a3903304f33f 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c @@ -13,7 +13,6 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/types.h> -#include <linux/dma-debug.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/spinlock.h> diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c index e0b568aaa701..4468289ab2ca 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.c +++ b/arch/sh/drivers/pci/pcie-sh7786.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/async.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/sh_clk.h> @@ -31,6 +32,8 @@ struct sh7786_pcie_port { static struct sh7786_pcie_port 
*sh7786_pcie_ports; static unsigned int nr_ports; static unsigned long dma_pfn_offset; +size_t memsize; +u64 memstart; static struct sh7786_pcie_hwops { int (*core_init)(void); @@ -301,7 +304,6 @@ static int __init pcie_init(struct sh7786_pcie_port *port) struct pci_channel *chan = port->hose; unsigned int data; phys_addr_t memstart, memend; - size_t memsize; int ret, i, win; /* Begin initialization */ @@ -368,8 +370,6 @@ static int __init pcie_init(struct sh7786_pcie_port *port) memstart = ALIGN_DOWN(memstart, memsize); memsize = roundup_pow_of_two(memend - memstart); - dma_pfn_offset = memstart >> PAGE_SHIFT; - /* * If there's more than 512MB of memory, we need to roll over to * LAR1/LAMR1. @@ -487,7 +487,8 @@ int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) void pcibios_bus_add_device(struct pci_dev *pdev) { - pdev->dev.dma_pfn_offset = dma_pfn_offset; + dma_direct_set_offset(&pdev->dev, __pa(memory_start), + __pa(memory_start) - memstart, memsize); } static int __init sh7786_pcie_core_init(void) diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h index 91571a42e44e..1a391e3a7659 100644 --- a/arch/sh/include/asm/checksum_32.h +++ b/arch/sh/include/asm/checksum_32.h @@ -30,10 +30,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); * better 64-bit) boundary */ -asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len); +#define _HAVE_ARCH_CSUM_AND_COPY /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. @@ -42,23 +41,18 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, * access_ok(). 
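The checksum conversions in this series drop the explicit sum and error-pointer arguments: the helpers now return a raw 32-bit partial sum (0 when the user access fails) which the generic code folds to 16 bits. A user-space sketch of the accumulate-and-fold arithmetic; byte order is handled explicitly here, unlike the native-endian word loads in the real assembly:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t csum_partial(const void *buff, size_t len, uint32_t sum)
{
        const uint8_t *p = buff;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];      /* 16-bit words */
                p += 2;
                len -= 2;
        }
        if (len)                                        /* trailing odd byte */
                sum += (uint32_t)p[0] << 8;

        while (sum >> 16)                               /* keep carries folded in */
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint8_t data[] = { 0x45, 0x00, 0x00, 0x1c, 0x00, 0x01 };

        printf("checksum: %#06x\n", csum_fold(csum_partial(data, sizeof(data), 0)));
        return 0;
}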
*/ static inline -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { - return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len); } #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER static inline -__wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len) { - if (access_ok(src, len)) - return csum_partial_copy_generic((__force const void *)src, dst, - len, sum, err_ptr, NULL); - if (len) - *err_ptr = -EFAULT; - return sum; + if (!access_ok(src, len)) + return 0; + return csum_partial_copy_generic((__force const void *)src, dst, len); } /* @@ -199,16 +193,10 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, #define HAVE_CSUM_COPY_USER static inline __wsum csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum sum, - int *err_ptr) + int len) { - if (access_ok(dst, len)) - return csum_partial_copy_generic((__force const void *)src, - dst, len, sum, NULL, err_ptr); - - if (len) - *err_ptr = -EFAULT; - - return (__force __wsum)-1; /* invalid checksum */ + if (!access_ok(dst, len)) + return 0; + return csum_partial_copy_generic((__force const void *)src, dst, len); } #endif /* __ASM_SH_CHECKSUM_H */ diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c index cd46a9825e3c..6a44c0e7ba40 100644 --- a/arch/sh/kernel/dma-coherent.c +++ b/arch/sh/kernel/dma-coherent.c @@ -3,7 +3,7 @@ * Copyright (C) 2004 - 2007 Paul Mundt */ #include <linux/mm.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <asm/cacheflush.h> #include <asm/addrspace.h> diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 318296f48f1a..756100b01e84 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -204,6 +204,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->pr; + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->pr = (unsigned long)kretprobe_trampoline; @@ -302,62 +303,9 @@ static void __used kretprobe_trampoline_holder(void) */ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + regs->pc = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more then one return - * return probe was registered for a target function. 
- * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, NULL); - } - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - regs->pc = orig_ret_address; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - - return orig_ret_address; + return 1; } static int __kprobes post_kprobe_handler(struct pt_regs *regs) diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index ae0a00beea5f..783738448ff5 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -442,3 +442,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index bde7a6c01aaf..3161b9ccd2a5 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -76,6 +76,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S index 97b5c2d9fec4..3e07074e0098 100644 --- a/arch/sh/lib/checksum.S +++ b/arch/sh/lib/checksum.S @@ -173,47 +173,27 @@ ENTRY(csum_partial) mov r6, r0 /* -unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, - int sum, int *src_err_ptr, int *dst_err_ptr) +unsigned int csum_partial_copy_generic (const char *src, char *dst, int len) */ /* - * Copy from ds while checksumming, otherwise like csum_partial - * - * The macros SRC and DST specify the type of access for the instruction. - * thus we can call a custom exception handler for all access types. - * - * FIXME: could someone double-check whether I haven't mixed up some SRC and - * DST definitions? It's damn hard to trigger all cases. I hope I got - * them all but there's no guarantee. + * Copy from ds while checksumming, otherwise like csum_partial with initial + * sum being ~0U */ -#define SRC(...) \ +#define EXC(...) \ 9999: __VA_ARGS__ ; \ .section __ex_table, "a"; \ .long 9999b, 6001f ; \ .previous -#define DST(...) \ - 9999: __VA_ARGS__ ; \ - .section __ex_table, "a"; \ - .long 9999b, 6002f ; \ - .previous - ! ! r4: const char *SRC ! r5: char *DST ! r6: int LEN -! r7: int SUM -! -! on stack: -! int *SRC_ERR_PTR -! int *DST_ERR_PTR ! ENTRY(csum_partial_copy_generic) - mov.l r5,@-r15 - mov.l r6,@-r15 - + mov #-1,r7 mov #3,r0 ! 
Check src and dest are equally aligned mov r4,r1 and r0,r1 @@ -243,11 +223,11 @@ ENTRY(csum_partial_copy_generic) clrt .align 2 5: -SRC( mov.b @r4+,r1 ) -SRC( mov.b @r4+,r0 ) +EXC( mov.b @r4+,r1 ) +EXC( mov.b @r4+,r0 ) extu.b r1,r1 -DST( mov.b r1,@r5 ) -DST( mov.b r0,@(1,r5) ) +EXC( mov.b r1,@r5 ) +EXC( mov.b r0,@(1,r5) ) extu.b r0,r0 add #2,r5 @@ -276,8 +256,8 @@ DST( mov.b r0,@(1,r5) ) ! Handle first two bytes as a special case .align 2 1: -SRC( mov.w @r4+,r0 ) -DST( mov.w r0,@r5 ) +EXC( mov.w @r4+,r0 ) +EXC( mov.w r0,@r5 ) add #2,r5 extu.w r0,r0 addc r0,r7 @@ -292,32 +272,32 @@ DST( mov.w r0,@r5 ) clrt .align 2 1: -SRC( mov.l @r4+,r0 ) -SRC( mov.l @r4+,r1 ) +EXC( mov.l @r4+,r0 ) +EXC( mov.l @r4+,r1 ) addc r0,r7 -DST( mov.l r0,@r5 ) -DST( mov.l r1,@(4,r5) ) +EXC( mov.l r0,@r5 ) +EXC( mov.l r1,@(4,r5) ) addc r1,r7 -SRC( mov.l @r4+,r0 ) -SRC( mov.l @r4+,r1 ) +EXC( mov.l @r4+,r0 ) +EXC( mov.l @r4+,r1 ) addc r0,r7 -DST( mov.l r0,@(8,r5) ) -DST( mov.l r1,@(12,r5) ) +EXC( mov.l r0,@(8,r5) ) +EXC( mov.l r1,@(12,r5) ) addc r1,r7 -SRC( mov.l @r4+,r0 ) -SRC( mov.l @r4+,r1 ) +EXC( mov.l @r4+,r0 ) +EXC( mov.l @r4+,r1 ) addc r0,r7 -DST( mov.l r0,@(16,r5) ) -DST( mov.l r1,@(20,r5) ) +EXC( mov.l r0,@(16,r5) ) +EXC( mov.l r1,@(20,r5) ) addc r1,r7 -SRC( mov.l @r4+,r0 ) -SRC( mov.l @r4+,r1 ) +EXC( mov.l @r4+,r0 ) +EXC( mov.l @r4+,r1 ) addc r0,r7 -DST( mov.l r0,@(24,r5) ) -DST( mov.l r1,@(28,r5) ) +EXC( mov.l r0,@(24,r5) ) +EXC( mov.l r1,@(28,r5) ) addc r1,r7 add #32,r5 movt r0 @@ -335,9 +315,9 @@ DST( mov.l r1,@(28,r5) ) clrt shlr2 r6 3: -SRC( mov.l @r4+,r0 ) +EXC( mov.l @r4+,r0 ) addc r0,r7 -DST( mov.l r0,@r5 ) +EXC( mov.l r0,@r5 ) add #4,r5 movt r0 dt r6 @@ -353,8 +333,8 @@ DST( mov.l r0,@r5 ) mov #2,r1 cmp/hs r1,r6 bf 5f -SRC( mov.w @r4+,r0 ) -DST( mov.w r0,@r5 ) +EXC( mov.w @r4+,r0 ) +EXC( mov.w r0,@r5 ) extu.w r0,r0 add #2,r5 cmp/eq r1,r6 @@ -363,8 +343,8 @@ DST( mov.w r0,@r5 ) shll16 r0 addc r0,r7 5: -SRC( mov.b @r4+,r0 ) -DST( mov.b r0,@r5 ) +EXC( mov.b @r4+,r0 ) +EXC( mov.b r0,@r5 ) extu.b r0,r0 #ifndef __LITTLE_ENDIAN__ shll8 r0 @@ -373,42 +353,13 @@ DST( mov.b r0,@r5 ) mov #0,r0 addc r0,r7 7: -5000: # Exception handler: .section .fixup, "ax" 6001: - mov.l @(8,r15),r0 ! src_err_ptr - mov #-EFAULT,r1 - mov.l r1,@r0 - - ! zero the complete destination - computing the rest - ! is too much work - mov.l @(4,r15),r5 ! dst - mov.l @r15,r6 ! len - mov #0,r7 -1: mov.b r7,@r5 - dt r6 - bf/s 1b - add #1,r5 - mov.l 8000f,r0 - jmp @r0 - nop - .align 2 -8000: .long 5000b - -6002: - mov.l @(12,r15),r0 ! dst_err_ptr - mov #-EFAULT,r1 - mov.l r1,@r0 - mov.l 8001f,r0 - jmp @r0 - nop - .align 2 -8001: .long 5000b - + rts + mov #0,r0 .previous - add #8,r15 rts mov r7,r0 diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 4735176ab811..3348e0c4d769 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -226,15 +226,12 @@ void __init allocate_pgdat(unsigned int nid) static void __init do_init_bootmem(void) { - struct memblock_region *reg; + unsigned long start_pfn, end_pfn; + int i; /* Add active regions with valid PFNs. 
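do_init_bootmem() above switches from walking raw memblock regions to for_each_mem_pfn_range(), which hands back page-frame-number ranges clipped to fully backed pages. A standalone illustration of that rounding, with local stand-ins for the kernel's PFN_UP()/PFN_DOWN() macros:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

int main(void)
{
        unsigned long base = 0x100000UL;        /* region start */
        unsigned long size = 0x3800UL;          /* deliberately not page aligned */

        unsigned long start_pfn = PFN_UP(base);
        unsigned long end_pfn   = PFN_DOWN(base + size);

        printf("pfns %#lx..%#lx (%lu fully backed pages)\n",
               start_pfn, end_pfn - 1, end_pfn - start_pfn);
        return 0;
}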
*/ - for_each_memblock(memory, reg) { - unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_memory_base_pfn(reg); - end_pfn = memblock_region_memory_end_pfn(reg); + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) __add_active_range(0, start_pfn, end_pfn); - } /* All of system RAM sits in node 0 for the non-NUMA case */ allocate_pgdat(0); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index efeff2c896a5..096530eac8e1 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -23,6 +23,7 @@ config SPARC select HAVE_OPROFILE select HAVE_ARCH_KGDB if !SMP || SPARC64 select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_SECCOMP if SPARC64 select HAVE_EXIT_THREAD select HAVE_PCI select SYSCTL_EXCEPTION_TRACE @@ -43,6 +44,7 @@ config SPARC select GENERIC_STRNLEN_USER select MODULES_USE_ELF_RELA select PCI_SYSCALL if PCI + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select ODD_RT_SIGACTION select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS @@ -226,23 +228,6 @@ config EARLYFB help Say Y here to enable a faster early framebuffer boot console. -config SECCOMP - bool "Enable seccomp to safely compute untrusted bytecode" - depends on SPARC64 && PROC_FS - default y - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via /proc/<pid>/seccomp, it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. - config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SPARC64 && SMP diff --git a/arch/sparc/include/asm/checksum.h b/arch/sparc/include/asm/checksum.h index a6256cb6fc5c..f2ac13323b6d 100644 --- a/arch/sparc/include/asm/checksum.h +++ b/arch/sparc/include/asm/checksum.h @@ -1,7 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ___ASM_SPARC_CHECKSUM_H #define ___ASM_SPARC_CHECKSUM_H +#define _HAVE_ARCH_CSUM_AND_COPY #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define HAVE_CSUM_COPY_USER #if defined(__sparc__) && defined(__arch64__) #include <asm/checksum_64.h> #else diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h index 479a0b812af5..ce11e0ad80c7 100644 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@ -42,7 +42,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum); unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *); static inline __wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len) { register unsigned int ret asm("o0") = (unsigned int)src; register char *d asm("o1") = dst; @@ -50,9 +50,9 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) __asm__ __volatile__ ( "call __csum_partial_copy_sparc_generic\n\t" - " mov %6, %%g7\n" + " mov -1, %%g7\n" : "=&r" (ret), "=&r" (d), "=&r" (l) - : "0" (ret), "1" (d), "2" (l), "r" (sum) + : "0" (ret), "1" (d), "2" (l) : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5", "g7", "memory", "cc"); @@ -60,65 +60,19 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) } static inline __wsum -csum_and_copy_from_user(const void 
__user *src, void *dst, int len, - __wsum sum, int *err) - { - register unsigned long ret asm("o0") = (unsigned long)src; - register char *d asm("o1") = dst; - register int l asm("g1") = len; - register __wsum s asm("g7") = sum; - - if (unlikely(!access_ok(src, len))) { - if (len) - *err = -EFAULT; - return sum; - } - - __asm__ __volatile__ ( - ".section __ex_table,#alloc\n\t" - ".align 4\n\t" - ".word 1f,2\n\t" - ".previous\n" - "1:\n\t" - "call __csum_partial_copy_sparc_generic\n\t" - " st %8, [%%sp + 64]\n" - : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s) - : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err) - : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5", - "cc", "memory"); - return (__force __wsum)ret; +csum_and_copy_from_user(const void __user *src, void *dst, int len) +{ + if (unlikely(!access_ok(src, len))) + return 0; + return csum_partial_copy_nocheck((__force void *)src, dst, len); } -#define HAVE_CSUM_COPY_USER - static inline __wsum -csum_and_copy_to_user(const void *src, void __user *dst, int len, - __wsum sum, int *err) +csum_and_copy_to_user(const void *src, void __user *dst, int len) { - if (!access_ok(dst, len)) { - *err = -EFAULT; - return sum; - } else { - register unsigned long ret asm("o0") = (unsigned long)src; - register char __user *d asm("o1") = dst; - register int l asm("g1") = len; - register __wsum s asm("g7") = sum; - - __asm__ __volatile__ ( - ".section __ex_table,#alloc\n\t" - ".align 4\n\t" - ".word 1f,1\n\t" - ".previous\n" - "1:\n\t" - "call __csum_partial_copy_sparc_generic\n\t" - " st %8, [%%sp + 64]\n" - : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s) - : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err) - : "o2", "o3", "o4", "o5", "o7", - "g2", "g3", "g4", "g5", - "cc", "memory"); - return (__force __wsum)ret; - } + if (!access_ok(dst, len)) + return 0; + return csum_partial_copy_nocheck(src, (__force void *)dst, len); } /* ihl is always 5 or greater, almost always is 5, and iph is word aligned diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h index 0fa4433f5662..d6b59461e064 100644 --- a/arch/sparc/include/asm/checksum_64.h +++ b/arch/sparc/include/asm/checksum_64.h @@ -38,42 +38,9 @@ __wsum csum_partial(const void * buff, int len, __wsum sum); * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum); - -long __csum_partial_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum); - -static inline __wsum -csum_and_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum, int *err) -{ - long ret = __csum_partial_copy_from_user(src, dst, len, sum); - if (ret < 0) - *err = -EFAULT; - return (__force __wsum) ret; -} - -/* - * Copy and checksum to user - */ -#define HAVE_CSUM_COPY_USER -long __csum_partial_copy_to_user(const void *src, - void __user *dst, int len, - __wsum sum); - -static inline __wsum -csum_and_copy_to_user(const void *src, - void __user *dst, int len, - __wsum sum, int *err) -{ - long ret = __csum_partial_copy_to_user(src, dst, len, sum); - if (ret < 0) - *err = -EFAULT; - return (__force __wsum) ret; -} +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len); /* ihl is always 5 or greater, almost always is 5, and iph is word aligned * the majority of the time. 
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 40a267b3bd52..b85842cda99f 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -21,8 +21,7 @@ typedef s16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s64 compat_s64; -typedef u64 compat_u64; + struct compat_stat { compat_dev_t st_dev; compat_ino_t st_ino; diff --git a/arch/sparc/kernel/iommu-common.c b/arch/sparc/kernel/iommu-common.c index 59cb16691322..23ca75f09277 100644 --- a/arch/sparc/kernel/iommu-common.c +++ b/arch/sparc/kernel/iommu-common.c @@ -166,13 +166,6 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, } } - if (dev) - boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - 1 << iommu->table_shift); - else - boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift); - - boundary_size = boundary_size >> iommu->table_shift; /* * if the skip_span_boundary_check had been set during init, we set * things up so that iommu_is_span_boundary() merely checks if the @@ -181,6 +174,9 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) { shift = 0; boundary_size = iommu->poolsize * iommu->nr_pools; + } else { + boundary_size = dma_get_seg_boundary_nr_pages(dev, + iommu->table_shift); } n = iommu_area_alloc(iommu->map, limit, start, npages, shift, boundary_size, align_mask); diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 4ae7388b1bff..a034f571d869 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -10,7 +10,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/errno.h> #include <linux/iommu-helper.h> #include <linux/bitmap.h> @@ -472,8 +472,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, outs->dma_length = 0; max_seg_size = dma_get_max_seg_size(dev); - seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - IO_PAGE_SIZE) >> IO_PAGE_SHIFT; + seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT); base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; for_each_sg(sglist, s, nelems, i) { unsigned long paddr, npages, entry, out_entry = 0, slen; diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index d6874c9b639f..8e1d72a16759 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -38,7 +38,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/scatterlist.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/of_device.h> #include <asm/io.h> diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index dfbca2470536..217c21a6986a 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -453,6 +453,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8); + ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->u_regs[UREG_RETPC] = @@ -465,58 +466,12 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address 
=(unsigned long)&kretprobe_trampoline; + unsigned long orig_ret_address = 0; - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); - - /* - * It is possible to have multiple instances associated with a given - * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more than one return - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always inserted at the head of the list - * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline - */ - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - - if (ri->rp && ri->rp->handler) - ri->rp->handler(ri, regs); - - orig_ret_address = (unsigned long)ri->ret_addr; - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); + orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL); regs->tpc = orig_ret_address; regs->tnpc = orig_ret_address + 4; - kretprobe_hash_unlock(current, &flags); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 14b93c5564e3..9de57e88f7a1 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/log2.h> #include <linux/of_device.h> +#include <linux/dma-map-ops.h> #include <asm/iommu-common.h> #include <asm/iommu.h> @@ -508,8 +509,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, iommu_batch_start(dev, prot, ~0UL); max_seg_size = dma_get_max_seg_size(dev); - seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - IO_PAGE_SIZE) >> IO_PAGE_SHIFT; + seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT); mask = *dev->dma_mask; if (!iommu_use_atu(iommu, mask)) diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 5234b5ccc0b9..0442ab00518d 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c @@ -25,7 +25,7 @@ asmlinkage long sparc_fork(struct pt_regs *regs) .stack = regs->u_regs[UREG_FP], }; - ret = _do_fork(&args); + ret = kernel_clone(&args); /* If we get an error and potentially restart the system * call, we're screwed because copy_thread() clobbered @@ -50,7 +50,7 @@ asmlinkage long sparc_vfork(struct pt_regs *regs) .stack = regs->u_regs[UREG_FP], }; - ret = _do_fork(&args); + ret = kernel_clone(&args); /* If we get an error and potentially restart the system * call, we're screwed because copy_thread() clobbered @@ -96,7 +96,7 @@ asmlinkage long sparc_clone(struct pt_regs *regs) else args.stack = regs->u_regs[UREG_FP]; - ret = _do_fork(&args); + ret = kernel_clone(&args); /* If we get an error and potentially restart the system * call, we're screwed because copy_thread() clobbered diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index e286e2badc8a..e38d8bf454e8 100644 --- 
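Both the sh and sparc kretprobe hunks above delete the hand-rolled hash-list walk and let the generic __kretprobe_trampoline_handler() recover the original return address. The scheme relies on last-in-first-out bookkeeping: an instance is pushed when a probed function is entered, and the most recent one is popped when the trampoline is hit. A toy model of just that bookkeeping, unrelated to the kernel's real data structures:

#include <stdio.h>

#define TRAMPOLINE 0xdeadbeefUL

static unsigned long saved_ret[16];     /* real return addresses, LIFO */
static int depth;

/* entry side: remember the real return address, divert to the trampoline */
static unsigned long divert(unsigned long real_ret)
{
        saved_ret[depth++] = real_ret;
        return TRAMPOLINE;
}

/* trampoline side: the most recently diverted call unwinds first */
static unsigned long trampoline_hit(void)
{
        return saved_ret[--depth];
}

int main(void)
{
        unsigned long inner, outer;

        divert(0x1000);                 /* outer probed call */
        divert(0x2000);                 /* nested probed call */

        inner = trampoline_hit();       /* 0x2000 */
        outer = trampoline_hit();       /* 0x1000 */
        printf("resume at %#lx, then %#lx\n", inner, outer);
        return 0;
}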
a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void) * are flush_tlb_*() routines, and these run after flush_cache_*() * which performs the flushw. * - * The SMP TLB coherency scheme we use works as follows: - * - * 1) mm->cpu_vm_mask is a bit mask of which cpus an address - * space has (potentially) executed on, this is the heuristic - * we use to avoid doing cross calls. - * - * Also, for flushing from kswapd and also for clones, we - * use cpu_vm_mask as the list of cpus to make run the TLB. - * - * 2) TLB context numbers are shared globally across all processors - * in the system, this allows us to play several games to avoid - * cross calls. - * - * One invariant is that when a cpu switches to a process, and - * that processes tsk->active_mm->cpu_vm_mask does not have the - * current cpu's bit set, that tlb context is flushed locally. - * - * If the address space is non-shared (ie. mm->count == 1) we avoid - * cross calls when we want to flush the currently running process's - * tlb state. This is done by clearing all cpu bits except the current - * processor's in current->mm->cpu_vm_mask and performing the - * flush locally only. This will force any subsequent cpus which run - * this task to flush the context from the local tlb if the process - * migrates to another cpu (again). - * - * 3) For shared address spaces (threads) and swapping we bite the - * bullet for most cases and perform the cross call (but only to - * the cpus listed in cpu_vm_mask). - * - * The performance gain from "optimizing" away the cross call for threads is - * questionable (in theory the big win for threads is the massive sharing of - * address space state across processors). + * mm->cpu_vm_mask is a bit mask of which cpus an address + * space has (potentially) executed on, this is the heuristic + * we use to limit cross calls. 
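The trimmed comment above keeps only the core heuristic: mm->cpu_vm_mask records which CPUs an address space has (potentially) run on, and TLB shootdown cross-calls are limited to those CPUs. A toy model of masked cross-calls, with a plain bitmask standing in for cpumask_t and printf standing in for the IPI:

#include <stdio.h>

#define NR_CPUS 8

/* bit n set: CPU n may hold TLB entries for this address space */
static unsigned int cpu_vm_mask;

static void run_on_cpu(int cpu)
{
        cpu_vm_mask |= 1u << cpu;
}

static void flush_tlb_mm(void)
{
        int cpu;

        /* cross-call only the CPUs recorded in the mask, not all of them */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_vm_mask & (1u << cpu))
                        printf("IPI: flush TLB on cpu %d\n", cpu);
}

int main(void)
{
        run_on_cpu(1);
        run_on_cpu(5);
        flush_tlb_mm();                 /* flushes CPUs 1 and 5 only */
        return 0;
}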
*/ /* This currently is only used by the hugetlb arch pre-fault @@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void) void smp_flush_tlb_mm(struct mm_struct *mm) { u32 ctx = CTX_HWBITS(mm->context); - int cpu = get_cpu(); - if (atomic_read(&mm->mm_users) == 1) { - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - goto local_flush_and_out; - } + get_cpu(); smp_cross_call_masked(&xcall_flush_tlb_mm, ctx, 0, 0, mm_cpumask(mm)); -local_flush_and_out: __flush_tlb_mm(ctx, SECONDARY_CONTEXT); put_cpu(); @@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long { u32 ctx = CTX_HWBITS(mm->context); struct tlb_pending_info info; - int cpu = get_cpu(); + + get_cpu(); info.ctx = ctx; info.nr = nr; info.vaddrs = vaddrs; - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - else - smp_call_function_many(mm_cpumask(mm), tlb_pending_func, - &info, 1); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); @@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) { unsigned long context = CTX_HWBITS(mm->context); - int cpu = get_cpu(); - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - else - smp_cross_call_masked(&xcall_flush_tlb_page, - context, vaddr, 0, - mm_cpumask(mm)); + get_cpu(); + + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); put_cpu(); diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 4af114e84f20..78160260991b 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -38,7 +38,7 @@ 23 64 setuid sys_setuid 24 32 getuid sys_getuid16 24 64 getuid sys_getuid -25 common vmsplice sys_vmsplice compat_sys_vmsplice +25 common vmsplice sys_vmsplice 26 common ptrace sys_ptrace compat_sys_ptrace 27 common alarm sys_alarm 28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack @@ -149,8 +149,8 @@ 117 common getrusage sys_getrusage compat_sys_getrusage 118 common getsockopt sys_getsockopt sys_getsockopt 119 common getcwd sys_getcwd -120 common readv sys_readv compat_sys_readv -121 common writev sys_writev compat_sys_writev +120 common readv sys_readv +121 common writev sys_writev 122 common settimeofday sys_settimeofday compat_sys_settimeofday 123 32 fchown sys_fchown16 123 64 fchown sys_fchown @@ -201,7 +201,7 @@ 164 64 utrap_install sys_utrap_install 165 common quotactl sys_quotactl 166 common set_tid_address sys_set_tid_address -167 common mount sys_mount compat_sys_mount +167 common mount sys_mount 168 common ustat sys_ustat compat_sys_ustat 169 common setxattr sys_setxattr 170 common lsetxattr sys_lsetxattr @@ -406,8 +406,8 @@ 335 common syncfs sys_syncfs 336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 337 common setns sys_setns -338 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -339 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +338 common process_vm_readv sys_process_vm_readv +339 common process_vm_writev sys_process_vm_writev 340 32 kern_features sys_ni_syscall sys_kern_features 340 64 kern_features sys_kern_features 341 common kcmp sys_kcmp @@ -485,3 +485,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common 
faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index f99e99e58075..d55ae65a07ad 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -187,6 +187,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 6a5469c97246..7488d130faf7 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -144,44 +144,21 @@ cpte: bne csum_partial_end_cruft ! yep, handle it cpout: retl ! get outta here mov %o2, %o0 ! return computed csum - .globl __csum_partial_copy_start, __csum_partial_copy_end -__csum_partial_copy_start: - /* Work around cpp -rob */ #define ALLOC #alloc #define EXECINSTR #execinstr -#define EX(x,y,a,b) \ -98: x,y; \ - .section .fixup,ALLOC,EXECINSTR; \ - .align 4; \ -99: ba 30f; \ - a, b, %o3; \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word 98b, 99b; \ - .text; \ - .align 4 - -#define EX2(x,y) \ -98: x,y; \ - .section __ex_table,ALLOC; \ - .align 4; \ - .word 98b, 30f; \ - .text; \ - .align 4 - -#define EX3(x,y) \ +#define EX(x,y) \ 98: x,y; \ .section __ex_table,ALLOC; \ .align 4; \ - .word 98b, 96f; \ + .word 98b, cc_fault; \ .text; \ .align 4 -#define EXT(start,end,handler) \ +#define EXT(start,end) \ .section __ex_table,ALLOC; \ .align 4; \ - .word start, 0, end, handler; \ + .word start, 0, end, cc_fault; \ .text; \ .align 4 @@ -252,21 +229,21 @@ __csum_partial_copy_start: cc_end_cruft: be 1f andcc %o3, 4, %g0 - EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf) + EX(ldd [%o0 + 0x00], %g2) add %o1, 8, %o1 addcc %g2, %g7, %g7 add %o0, 8, %o0 addxcc %g3, %g7, %g7 - EX2(st %g2, [%o1 - 0x08]) + EX(st %g2, [%o1 - 0x08]) addx %g0, %g7, %g7 andcc %o3, 4, %g0 - EX2(st %g3, [%o1 - 0x04]) + EX(st %g3, [%o1 - 0x04]) 1: be 1f andcc %o3, 3, %o3 - EX(ld [%o0 + 0x00], %g2, add %o3, 4) + EX(ld [%o0 + 0x00], %g2) add %o1, 4, %o1 addcc %g2, %g7, %g7 - EX2(st %g2, [%o1 - 0x04]) + EX(st %g2, [%o1 - 0x04]) addx %g0, %g7, %g7 andcc %o3, 3, %g0 add %o0, 4, %o0 @@ -276,14 +253,14 @@ cc_end_cruft: subcc %o3, 2, %o3 b 4f or %g0, %g0, %o4 -2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2) +2: EX(lduh [%o0 + 0x00], %o4) add %o0, 2, %o0 - EX2(sth %o4, [%o1 + 0x00]) + EX(sth %o4, [%o1 + 0x00]) be 6f add %o1, 2, %o1 sll %o4, 16, %o4 -4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1) - EX2(stb %o5, [%o1 + 0x00]) +4: EX(ldub [%o0 + 0x00], %o5) + EX(stb %o5, [%o1 + 0x00]) sll %o5, 8, %o5 or %o5, %o4, %o4 6: addcc %o4, %g7, %g7 @@ -306,9 +283,9 @@ cc_dword_align: andcc %o0, 0x2, %g0 be 1f andcc %o0, 0x4, %g0 - EX(lduh [%o0 + 0x00], %g4, add %g1, 0) + EX(lduh [%o0 + 0x00], %g4) sub %g1, 2, %g1 - EX2(sth %g4, [%o1 + 0x00]) + EX(sth %g4, [%o1 + 0x00]) add %o0, 2, %o0 sll %g4, 16, %g4 addcc %g4, %g7, %g7 @@ -322,9 +299,9 @@ cc_dword_align: or %g3, %g7, %g7 1: be 3f andcc %g1, 0xffffff80, %g0 - EX(ld [%o0 + 0x00], %g4, add %g1, 0) + EX(ld [%o0 + 0x00], %g4) sub %g1, 4, %g1 - EX2(st %g4, [%o1 + 0x00]) + EX(st %g4, [%o1 + 0x00]) add %o0, 4, %o0 addcc %g4, %g7, %g7 add %o1, 4, %o1 @@ -354,7 +331,7 @@ __csum_partial_copy_sparc_generic: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) -10: EXT(5b, 10b, 20f) ! note for exception handling +10: EXT(5b, 10b) ! note for exception handling sub %g1, 128, %g1 ! detract from length addx %g0, %g7, %g7 ! 
add in last carry bit andcc %g1, 0xffffff80, %g0 ! more to csum? @@ -379,7 +356,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5) CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5) -12: EXT(cctbl, 12b, 22f) ! note for exception table handling +12: EXT(cctbl, 12b) ! note for exception table handling addx %g0, %g7, %g7 andcc %o3, 0xf, %g0 ! check for low bits set ccte: bne cc_end_cruft ! something left, handle it out of band @@ -390,7 +367,7 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) -11: EXT(ccdbl, 11b, 21f) ! note for exception table handling +11: EXT(ccdbl, 11b) ! note for exception table handling sub %g1, 128, %g1 ! detract from length addx %g0, %g7, %g7 ! add in last carry bit andcc %g1, 0xffffff80, %g0 ! more to csum? @@ -407,9 +384,9 @@ ccslow: cmp %g1, 0 be,a 1f srl %g1, 1, %g4 sub %g1, 1, %g1 - EX(ldub [%o0], %g5, add %g1, 1) + EX(ldub [%o0], %g5) add %o0, 1, %o0 - EX2(stb %g5, [%o1]) + EX(stb %g5, [%o1]) srl %g1, 1, %g4 add %o1, 1, %o1 1: cmp %g4, 0 @@ -418,34 +395,34 @@ ccslow: cmp %g1, 0 andcc %o0, 2, %g0 be,a 1f srl %g4, 1, %g4 - EX(lduh [%o0], %o4, add %g1, 0) + EX(lduh [%o0], %o4) sub %g1, 2, %g1 srl %o4, 8, %g2 sub %g4, 1, %g4 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) add %o4, %g5, %g5 - EX2(stb %o4, [%o1 + 1]) + EX(stb %o4, [%o1 + 1]) add %o0, 2, %o0 srl %g4, 1, %g4 add %o1, 2, %o1 1: cmp %g4, 0 be,a 2f andcc %g1, 2, %g0 - EX3(ld [%o0], %o4) + EX(ld [%o0], %o4) 5: srl %o4, 24, %g2 srl %o4, 16, %g3 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) srl %o4, 8, %g2 - EX2(stb %g3, [%o1 + 1]) + EX(stb %g3, [%o1 + 1]) add %o0, 4, %o0 - EX2(stb %g2, [%o1 + 2]) + EX(stb %g2, [%o1 + 2]) addcc %o4, %g5, %g5 - EX2(stb %o4, [%o1 + 3]) + EX(stb %o4, [%o1 + 3]) addx %g5, %g0, %g5 ! I am now to lazy to optimize this (question it add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl subcc %g4, 1, %g4 ! tricks bne,a 5b - EX3(ld [%o0], %o4) + EX(ld [%o0], %o4) sll %g5, 16, %g2 srl %g5, 16, %g5 srl %g2, 16, %g2 @@ -453,19 +430,19 @@ ccslow: cmp %g1, 0 add %g2, %g5, %g5 2: be,a 3f andcc %g1, 1, %g0 - EX(lduh [%o0], %o4, and %g1, 3) + EX(lduh [%o0], %o4) andcc %g1, 1, %g0 srl %o4, 8, %g2 add %o0, 2, %o0 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) add %g5, %o4, %g5 - EX2(stb %o4, [%o1 + 1]) + EX(stb %o4, [%o1 + 1]) add %o1, 2, %o1 3: be,a 1f sll %g5, 16, %o4 - EX(ldub [%o0], %g2, add %g0, 1) + EX(ldub [%o0], %g2) sll %g2, 8, %o4 - EX2(stb %g2, [%o1]) + EX(stb %g2, [%o1]) add %g5, %o4, %g5 sll %g5, 16, %o4 1: addcc %o4, %g5, %g5 @@ -481,113 +458,10 @@ ccslow: cmp %g1, 0 4: addcc %g7, %g5, %g7 retl addx %g0, %g7, %o0 -__csum_partial_copy_end: /* We do these strange calculations for the csum_*_from_user case only, ie. * we only bother with faults on loads... */ -/* o2 = ((g2%20)&3)*8 - * o3 = g1 - (g2/20)*32 - o2 */ -20: - cmp %g2, 20 - blu,a 1f - and %g2, 3, %o2 - sub %g1, 32, %g1 - b 20b - sub %g2, 20, %g2 -1: - sll %o2, 3, %o2 - b 31f - sub %g1, %o2, %o3 - -/* o2 = (!(g2 & 15) ? 
0 : (((g2 & 15) + 1) & ~1)*8) - * o3 = g1 - (g2/16)*32 - o2 */ -21: - andcc %g2, 15, %o3 - srl %g2, 4, %g2 - be,a 1f - clr %o2 - add %o3, 1, %o3 - and %o3, 14, %o3 - sll %o3, 3, %o2 -1: - sll %g2, 5, %g2 - sub %g1, %g2, %o3 - b 31f - sub %o3, %o2, %o3 - -/* o0 += (g2/10)*16 - 0x70 - * 01 += (g2/10)*16 - 0x70 - * o2 = (g2 % 10) ? 8 : 0 - * o3 += 0x70 - (g2/10)*16 - o2 */ -22: - cmp %g2, 10 - blu,a 1f - sub %o0, 0x70, %o0 - add %o0, 16, %o0 - add %o1, 16, %o1 - sub %o3, 16, %o3 - b 22b - sub %g2, 10, %g2 -1: - sub %o1, 0x70, %o1 - add %o3, 0x70, %o3 - clr %o2 - tst %g2 - bne,a 1f - mov 8, %o2 -1: - b 31f - sub %o3, %o2, %o3 -96: - and %g1, 3, %g1 - sll %g4, 2, %g4 - add %g1, %g4, %o3 -30: -/* %o1 is dst - * %o3 is # bytes to zero out - * %o4 is faulting address - * %o5 is %pc where fault occurred */ - clr %o2 -31: -/* %o0 is src - * %o1 is dst - * %o2 is # of bytes to copy from src to dst - * %o3 is # bytes to zero out - * %o4 is faulting address - * %o5 is %pc where fault occurred */ - save %sp, -104, %sp - mov %i5, %o0 - mov %i7, %o1 - mov %i4, %o2 - call lookup_fault - mov %g7, %i4 - cmp %o0, 2 - bne 1f - add %g0, -EFAULT, %i5 - tst %i2 - be 2f - mov %i0, %o1 - mov %i1, %o0 -5: - call memcpy - mov %i2, %o2 - tst %o0 - bne,a 2f - add %i3, %i2, %i3 - add %i1, %i2, %i1 -2: - mov %i1, %o0 -6: - call __bzero - mov %i3, %o1 -1: - ld [%sp + 168], %o2 ! struct_ptr of parent - st %i5, [%o2] +cc_fault: ret - restore - - .section __ex_table,#alloc - .align 4 - .word 5b,2 - .word 6b,2 + clr %o0 diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S index 26c644ba3ecb..0c0268e77155 100644 --- a/arch/sparc/lib/csum_copy.S +++ b/arch/sparc/lib/csum_copy.S @@ -68,9 +68,10 @@ .globl FUNC_NAME .type FUNC_NAME,#function EXPORT_SYMBOL(FUNC_NAME) -FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ +FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */ LOAD(prefetch, %o0 + 0x000, #n_reads) xor %o0, %o1, %g1 + mov 1, %o3 clr %o4 andcc %g1, 0x3, %g0 bne,pn %icc, 95f diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S index d20b9594f0c7..b0ba8d4dd439 100644 --- a/arch/sparc/lib/csum_copy_from_user.S +++ b/arch/sparc/lib/csum_copy_from_user.S @@ -9,14 +9,14 @@ .section .fixup, "ax"; \ .align 4; \ 99: retl; \ - mov -1, %o0; \ + mov 0, %o0; \ .section __ex_table,"a";\ .align 4; \ .word 98b, 99b; \ .text; \ .align 4; -#define FUNC_NAME __csum_partial_copy_from_user +#define FUNC_NAME csum_and_copy_from_user #define LOAD(type,addr,dest) type##a [addr] %asi, dest #include "csum_copy.S" diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S index d71c0c81e8ab..91ba36dbf7d2 100644 --- a/arch/sparc/lib/csum_copy_to_user.S +++ b/arch/sparc/lib/csum_copy_to_user.S @@ -9,14 +9,14 @@ .section .fixup,"ax"; \ .align 4; \ 99: retl; \ - mov -1, %o0; \ + mov 0, %o0; \ .section __ex_table,"a";\ .align 4; \ .word 98b, 99b; \ .text; \ .align 4; -#define FUNC_NAME __csum_partial_copy_to_user +#define FUNC_NAME csum_and_copy_to_user #define STORE(type,src,addr) type##a src, [addr] %asi #include "csum_copy.S" diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 8071bfd72349..40ce087dfecf 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -288,8 +288,6 @@ no_context: if (fixup > 10) { extern const unsigned int __memset_start[]; extern const unsigned int __memset_end[]; - extern const unsigned int __csum_partial_copy_start[]; - extern const unsigned int __csum_partial_copy_end[]; #ifdef DEBUG_EXCEPTIONS printk("Exception: 
PC<%08lx> faddr<%08lx>\n", @@ -298,9 +296,7 @@ no_context: regs->pc, fixup, g2); #endif if ((regs->pc >= (unsigned long)__memset_start && - regs->pc < (unsigned long)__memset_end) || - (regs->pc >= (unsigned long)__csum_partial_copy_start && - regs->pc < (unsigned long)__csum_partial_copy_end)) { + regs->pc < (unsigned long)__memset_end)) { regs->u_regs[UREG_I4] = address; regs->u_regs[UREG_I5] = regs->pc; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index fad6d3129904..96edf64d4fb3 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1192,18 +1192,14 @@ int of_node_to_nid(struct device_node *dp) static void __init add_node_ranges(void) { - struct memblock_region *reg; + phys_addr_t start, end; unsigned long prev_max; + u64 i; memblock_resized: prev_max = memblock.memory.max; - for_each_memblock(memory, reg) { - unsigned long size = reg->size; - unsigned long start, end; - - start = reg->base; - end = start + size; + for_each_mem_range(i, &start, &end) { while (start < end) { unsigned long this_end; int nid; @@ -1211,7 +1207,7 @@ memblock_resized: this_end = memblock_nid_range(start, end, &nid); numadbg("Setting memblock NUMA node nid[%d] " - "start[%lx] end[%lx]\n", + "start[%llx] end[%lx]\n", nid, start, this_end); memblock_set_node(start, this_end - start, diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index 430a47a1b6ae..bf3e6d2fe5d9 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -11,7 +11,7 @@ #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/bitops.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/of.h> #include <linux/of_device.h> diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 3a388b1c5d4b..0c0342e5b10d 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -12,7 +12,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/of.h> #include <linux/of_device.h> diff --git a/arch/um/Kconfig b/arch/um/Kconfig index eb51fec75948..16187211d059 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -62,12 +62,12 @@ config NR_CPUS source "arch/$(HEADER_ARCH)/um/Kconfig" -config FORBID_STATIC_LINK - bool +config MAY_HAVE_RUNTIME_DEPS + bool config STATIC_LINK bool "Force a static link" - depends on !FORBID_STATIC_LINK + depends on CC_CAN_LINK_STATIC_NO_RUNTIME_DEPS || !MAY_HAVE_RUNTIME_DEPS help This option gives you the ability to force a static link of UML. Normally, UML is linked as a shared binary. This is inconvenient for @@ -173,22 +173,6 @@ config PGTABLE_LEVELS default 3 if 3_LEVEL_PGTABLES default 2 -config SECCOMP - def_bool y - prompt "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. - config UML_TIME_TRAVEL_SUPPORT bool prompt "Support time-travel mode (e.g. 
for test execution)" diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig index 9160ead56e33..2e7b8e0e7194 100644 --- a/arch/um/drivers/Kconfig +++ b/arch/um/drivers/Kconfig @@ -234,7 +234,7 @@ config UML_NET_DAEMON config UML_NET_VECTOR bool "Vector I/O high performance network devices" depends on UML_NET - select FORBID_STATIC_LINK + select MAY_HAVE_RUNTIME_DEPS help This User-Mode Linux network driver uses multi-message send and receive functions. The host running the UML guest must have @@ -246,7 +246,7 @@ config UML_NET_VECTOR config UML_NET_VDE bool "VDE transport (obsolete)" depends on UML_NET - select FORBID_STATIC_LINK + select MAY_HAVE_RUNTIME_DEPS help This User-Mode Linux network transport allows one or more running UMLs on a single host to communicate with each other and also @@ -294,7 +294,7 @@ config UML_NET_MCAST config UML_NET_PCAP bool "pcap transport (obsolete)" depends on UML_NET - select FORBID_STATIC_LINK + select MAY_HAVE_RUNTIME_DEPS help The pcap transport makes a pcap packet stream on the host look like an ethernet device inside UML. This is useful for making diff --git a/arch/um/drivers/daemon_user.c b/arch/um/drivers/daemon_user.c index 3695821d06a2..785baedc3555 100644 --- a/arch/um/drivers/daemon_user.c +++ b/arch/um/drivers/daemon_user.c @@ -7,6 +7,7 @@ */ #include <stdint.h> +#include <string.h> #include <unistd.h> #include <errno.h> #include <sys/types.h> diff --git a/arch/um/drivers/pcap_user.c b/arch/um/drivers/pcap_user.c index bbd20638788a..52ddda3e3b10 100644 --- a/arch/um/drivers/pcap_user.c +++ b/arch/um/drivers/pcap_user.c @@ -32,7 +32,7 @@ static int pcap_user_init(void *data, void *dev) return 0; } -static int pcap_open(void *data) +static int pcap_user_open(void *data) { struct pcap_data *pri = data; __u32 netmask; @@ -44,14 +44,14 @@ static int pcap_open(void *data) if (pri->filter != NULL) { err = dev_netmask(pri->dev, &netmask); if (err < 0) { - printk(UM_KERN_ERR "pcap_open : dev_netmask failed\n"); + printk(UM_KERN_ERR "pcap_user_open : dev_netmask failed\n"); return -EIO; } pri->compiled = uml_kmalloc(sizeof(struct bpf_program), UM_GFP_KERNEL); if (pri->compiled == NULL) { - printk(UM_KERN_ERR "pcap_open : kmalloc failed\n"); + printk(UM_KERN_ERR "pcap_user_open : kmalloc failed\n"); return -ENOMEM; } @@ -59,14 +59,14 @@ static int pcap_open(void *data) (struct bpf_program *) pri->compiled, pri->filter, pri->optimize, netmask); if (err < 0) { - printk(UM_KERN_ERR "pcap_open : pcap_compile failed - " + printk(UM_KERN_ERR "pcap_user_open : pcap_compile failed - " "'%s'\n", pcap_geterr(pri->pcap)); goto out; } err = pcap_setfilter(pri->pcap, pri->compiled); if (err < 0) { - printk(UM_KERN_ERR "pcap_open : pcap_setfilter " + printk(UM_KERN_ERR "pcap_user_open : pcap_setfilter " "failed - '%s'\n", pcap_geterr(pri->pcap)); goto out; } @@ -127,7 +127,7 @@ int pcap_user_read(int fd, void *buffer, int len, struct pcap_data *pri) const struct net_user_info pcap_user_info = { .init = pcap_user_init, - .open = pcap_open, + .open = pcap_user_open, .close = NULL, .remove = pcap_remove, .add_address = NULL, diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c index 8016d32b6809..482a19c5105c 100644 --- a/arch/um/drivers/slip_user.c +++ b/arch/um/drivers/slip_user.c @@ -9,7 +9,7 @@ #include <errno.h> #include <fcntl.h> #include <string.h> -#include <sys/termios.h> +#include <termios.h> #include <sys/wait.h> #include <net_user.h> #include <os.h> diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 
8735c468230a..555203e3e7b4 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -1403,7 +1403,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev, kfree(vp->bpf->filter); vp->bpf->filter = NULL; } else { - vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL); + vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC); if (vp->bpf == NULL) { netdev_err(dev, "failed to allocate memory for firmware\n"); goto flash_fail; @@ -1415,7 +1415,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev, if (request_firmware(&fw, efl->data, &vdevice->pdev.dev)) goto flash_fail; - vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL); + vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC); if (!vp->bpf->filter) goto free_buffer; diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c index c4a0f26b2824..bae53220ce26 100644 --- a/arch/um/drivers/vector_user.c +++ b/arch/um/drivers/vector_user.c @@ -18,9 +18,7 @@ #include <fcntl.h> #include <sys/socket.h> #include <sys/un.h> -#include <net/ethernet.h> #include <netinet/ip.h> -#include <netinet/ether.h> #include <linux/if_ether.h> #include <linux/if_packet.h> #include <sys/wait.h> @@ -39,6 +37,7 @@ #define ID_MAX 2 #define TOKEN_IFNAME "ifname" +#define TOKEN_SCRIPT "ifup" #define TRANS_RAW "raw" #define TRANS_RAW_LEN strlen(TRANS_RAW) @@ -55,6 +54,9 @@ #define MAX_UN_LEN 107 +static const char padchar[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; +static const char *template = "tapXXXXXX"; + /* This is very ugly and brute force lookup, but it is done * only once at initialization so not worth doing hashes or * anything more intelligent @@ -191,16 +193,21 @@ raw_fd_cleanup: return err; } + static struct vector_fds *user_init_tap_fds(struct arglist *ifspec) { - int fd = -1; + int fd = -1, i; char *iface; struct vector_fds *result = NULL; + bool dynamic = false; + char dynamic_ifname[IFNAMSIZ]; + char *argv[] = {NULL, NULL, NULL, NULL}; iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME); if (iface == NULL) { - printk(UM_KERN_ERR "uml_tap: failed to parse interface spec\n"); - goto tap_cleanup; + dynamic = true; + iface = dynamic_ifname; + srand(getpid()); } result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL); @@ -214,14 +221,30 @@ static struct vector_fds *user_init_tap_fds(struct arglist *ifspec) result->remote_addr_size = 0; /* TAP */ + do { + if (dynamic) { + strcpy(iface, template); + for (i = 0; i < strlen(iface); i++) { + if (iface[i] == 'X') { + iface[i] = padchar[rand() % strlen(padchar)]; + } + } + } + fd = create_tap_fd(iface); + if ((fd < 0) && (!dynamic)) { + printk(UM_KERN_ERR "uml_tap: failed to create tun interface\n"); + goto tap_cleanup; + } + result->tx_fd = fd; + result->rx_fd = fd; + } while (fd < 0); - fd = create_tap_fd(iface); - if (fd < 0) { - printk(UM_KERN_ERR "uml_tap: failed to create tun interface\n"); - goto tap_cleanup; + argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT); + if (argv[0]) { + argv[1] = iface; + run_helper(NULL, NULL, argv); } - result->tx_fd = fd; - result->rx_fd = fd; + return result; tap_cleanup: printk(UM_KERN_ERR "user_init_tap: init failed, error %d", fd); @@ -233,6 +256,7 @@ static struct vector_fds *user_init_hybrid_fds(struct arglist *ifspec) { char *iface; struct vector_fds *result = NULL; + char *argv[] = {NULL, NULL, NULL, NULL}; iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME); if (iface == NULL) { @@ -266,6 +290,12 @@ static struct vector_fds *user_init_hybrid_fds(struct arglist *ifspec) "uml_tap: failed to 
create paired raw socket: %i\n", result->rx_fd); goto hybrid_cleanup; } + + argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT); + if (argv[0]) { + argv[1] = iface; + run_helper(NULL, NULL, argv); + } return result; hybrid_cleanup: printk(UM_KERN_ERR "user_init_hybrid: init failed"); @@ -332,7 +362,7 @@ static struct vector_fds *user_init_unix_fds(struct arglist *ifspec, int id) } switch (id) { case ID_BESS: - if (connect(fd, remote_addr, sizeof(struct sockaddr_un)) < 0) { + if (connect(fd, (const struct sockaddr *) remote_addr, sizeof(struct sockaddr_un)) < 0) { printk(UM_KERN_ERR "bess open:cannot connect to %s %i", remote_addr->sun_path, -errno); goto unix_cleanup; } @@ -399,8 +429,7 @@ static struct vector_fds *user_init_fd_fds(struct arglist *ifspec) fd_cleanup: if (fd >= 0) os_close_file(fd); - if (result != NULL) - kfree(result); + kfree(result); return NULL; } @@ -410,6 +439,7 @@ static struct vector_fds *user_init_raw_fds(struct arglist *ifspec) int err = -ENOMEM; char *iface; struct vector_fds *result = NULL; + char *argv[] = {NULL, NULL, NULL, NULL}; iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME); if (iface == NULL) @@ -432,6 +462,11 @@ static struct vector_fds *user_init_raw_fds(struct arglist *ifspec) result->remote_addr = NULL; result->remote_addr_size = 0; } + argv[0] = uml_vector_fetch_arg(ifspec, TOKEN_SCRIPT); + if (argv[0]) { + argv[1] = iface; + run_helper(NULL, NULL, argv); + } return result; raw_cleanup: printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err); @@ -789,10 +824,12 @@ void *uml_vector_user_bpf(char *filename) return false; } bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL); - if (bpf_prog != NULL) { - bpf_prog->len = statbuf.st_size / sizeof(struct sock_filter); - bpf_prog->filter = NULL; + if (bpf_prog == NULL) { + printk(KERN_ERR "Failed to allocate bpf prog buffer"); + return NULL; } + bpf_prog->len = statbuf.st_size / sizeof(struct sock_filter); + bpf_prog->filter = NULL; ffd = os_open_file(filename, of_read(OPENFLAGS()), 0); if (ffd < 0) { printk(KERN_ERR "Error %d opening bpf file", -errno); diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index f5001481010c..dacbfabf66d8 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -164,8 +164,8 @@ SECTIONS PROVIDE (end = .); STABS_DEBUG - DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c index 10c99e058fca..d1cffc2a7f21 100644 --- a/arch/um/kernel/sigio.c +++ b/arch/um/kernel/sigio.c @@ -35,14 +35,14 @@ int write_sigio_irq(int fd) } /* These are called from os-Linux/sigio.c to protect its pollfds arrays. 
*/ -static DEFINE_SPINLOCK(sigio_spinlock); +static DEFINE_MUTEX(sigio_mutex); void sigio_lock(void) { - spin_lock(&sigio_spinlock); + mutex_lock(&sigio_mutex); } void sigio_unlock(void) { - spin_unlock(&sigio_spinlock); + mutex_unlock(&sigio_mutex); } diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index acbc879d2773..7452f70d50d0 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -47,12 +47,10 @@ void show_stack(struct task_struct *task, unsigned long *stack, if (kstack_end(stack)) break; if (i && ((i % STACKSLOTS_PER_LINE) == 0)) - printk("%s\n", loglvl); + pr_cont("\n"); pr_cont(" %08lx", *stack++); } - printk("%s\n", loglvl); printk("%sCall Trace:\n", loglvl); dump_trace(current, &stackops, (void *)loglvl); - printk("%s\n", loglvl); } diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 25eaa6a0c658..3d109ff3309b 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -70,13 +70,17 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg, * read of the message and write of the ACK. */ if (mode != TTMH_READ) { + bool disabled = irqs_disabled(); + + BUG_ON(mode == TTMH_IDLE && !disabled); + + if (disabled) + local_irq_enable(); while (os_poll(1, &time_travel_ext_fd) != 0) { - if (mode == TTMH_IDLE) { - BUG_ON(!irqs_disabled()); - local_irq_enable(); - local_irq_disable(); - } + /* nothing */ } + if (disabled) + local_irq_disable(); } ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg)); @@ -102,6 +106,7 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg, break; } + resp.seq = msg->seq; os_write_file(time_travel_ext_fd, &resp, sizeof(resp)); } diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 3b6dab3d4501..45d957d7004c 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -108,8 +108,8 @@ SECTIONS PROVIDE (end = .); STABS_DEBUG - DWARF_DEBUG + ELF_DETAILS DISCARDS } diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c index 9e16078a4bf8..1d7558dac75f 100644 --- a/arch/um/os-Linux/umid.c +++ b/arch/um/os-Linux/umid.c @@ -97,7 +97,7 @@ static int remove_files_and_dir(char *dir) while ((ent = readdir(directory)) != NULL) { if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) continue; - len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1; + len = strlen(dir) + strlen("/") + strlen(ent->d_name) + 1; if (len > sizeof(file)) { ret = -E2BIG; goto out; @@ -135,7 +135,7 @@ out: */ static inline int is_umdir_used(char *dir) { - char pid[sizeof("nnnnn\0")], *end, *file; + char pid[sizeof("nnnnnnnnn")], *end, *file; int dead, fd, p, n, err; size_t filelen; @@ -217,10 +217,10 @@ static int umdir_take_if_dead(char *dir) static void __init create_pid_file(void) { - char pid[sizeof("nnnnn\0")], *file; + char pid[sizeof("nnnnnnnnn")], *file; int fd, n; - n = strlen(uml_dir) + UMID_LEN + sizeof("/pid\0"); + n = strlen(uml_dir) + UMID_LEN + sizeof("/pid"); file = malloc(n); if (!file) return; diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c index ecf2f390fad2..07327425d06e 100644 --- a/arch/um/os-Linux/util.c +++ b/arch/um/os-Linux/util.c @@ -10,7 +10,7 @@ #include <signal.h> #include <string.h> #include <termios.h> -#include <wait.h> +#include <sys/wait.h> #include <sys/mman.h> #include <sys/utsname.h> #include <init.h> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7101ac64bb20..f6946b81f74a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -75,7 +75,7 @@ config X86 select ARCH_HAS_PTE_DEVMAP if X86_64 select 
ARCH_HAS_PTE_SPECIAL select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 - select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE + select ARCH_HAS_COPY_MC if X86_64 select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_STRICT_KERNEL_RWX @@ -215,6 +215,8 @@ config X86 select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR select HAVE_STACK_VALIDATION if X86_64 + select HAVE_STATIC_CALL + select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK @@ -230,6 +232,7 @@ config X86 select RTC_MC146818_LIB select SPARSE_IRQ select SRCU + select STACK_VALIDATION if HAVE_STACK_VALIDATION && (HAVE_STATIC_CALL_INLINE || RETPOLINE) select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select USER_STACKTRACE_SUPPORT @@ -451,7 +454,6 @@ config GOLDFISH config RETPOLINE bool "Avoid speculative indirect branches in kernel" default y - select STACK_VALIDATION if HAVE_STACK_VALIDATION help Compile kernel with the retpoline compiler options to guard against kernel-to-user data leaks by avoiding speculative indirect @@ -1521,6 +1523,7 @@ config AMD_MEM_ENCRYPT select DYNAMIC_PHYSICAL_MASK select ARCH_USE_MEMREMAP_PROT select ARCH_HAS_FORCE_DMA_UNENCRYPTED + select INSTRUCTION_DECODER help Say yes to enable support for the encryption of system memory. This requires an AMD processor that supports Secure Memory @@ -1968,22 +1971,6 @@ config EFI_MIXED If unsure, say N. -config SECCOMP - def_bool y - prompt "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - - If unsure, say Y. Only embedded should say N here. - source "kernel/Kconfig.hz" config KEXEC diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index ee1d3c5834c6..27b5e2bc6a01 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -62,7 +62,7 @@ config EARLY_PRINTK_USB_XDBC You should normally say N here, unless you want to debug early crashes or need a very simple printk logging facility. -config MCSAFE_TEST +config COPY_MC_TEST def_bool n config EFI_PGT_DUMP diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 4346ffb2e39f..154259f18b8b 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -209,6 +209,10 @@ ifdef CONFIG_X86_64 LDFLAGS_vmlinux += -z max-page-size=0x200000 endif +# We never want expected sections to be placed heuristically by the +# linker. All sections should be explicitly named in the linker script. 
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) + archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/x86/tools relocs diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index ff7894f39e0e..ee249088cbfe 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -29,10 +29,10 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst KBUILD_CFLAGS := -m$(BITS) -O2 -KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC) +KBUILD_CFLAGS += -fno-strict-aliasing -fPIE KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING cflags-$(CONFIG_X86_32) := -march=i386 -cflags-$(CONFIG_X86_64) := -mcmodel=small +cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone KBUILD_CFLAGS += $(cflags-y) KBUILD_CFLAGS += -mno-mmx -mno-sse KBUILD_CFLAGS += -ffreestanding @@ -45,24 +45,24 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += -D__DISABLE_EXPORTS # Disable relocation relaxation in case the link is not PIE. KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) +KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h + +# sev-es.c indirectly inludes inat-table.h which is generated during +# compilation and stored in $(objtree). Add the directory to the includes so +# that the compiler finds it even with out-of-tree builds (make O=/some/path). +CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n UBSAN_SANITIZE :=n KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) +KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info) # Compressed kernel should be built as PIE since it may be loaded at any # address by the bootloader. -ifeq ($(CONFIG_X86_32),y) -KBUILD_LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) -else -# To build 64-bit compressed kernel as PIE, we disable relocation -# overflow check to avoid relocation overflow error with a new linker -# command-line option, -z noreloc-overflow. -KBUILD_LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ - && echo "-z noreloc-overflow -pie --no-dynamic-linker") -endif -LDFLAGS_vmlinux := -T +LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker) +LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) +LDFLAGS_vmlinux += -T hostprogs := mkpiggy HOST_EXTRACFLAGS += -I$(srctree)/tools/include @@ -86,9 +86,11 @@ vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/kernel_info.o $(obj)/head_$(BITS).o vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o ifdef CONFIG_X86_64 - vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr_64.o + vmlinux-objs-y += $(obj)/ident_map_64.o + vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o vmlinux-objs-y += $(obj)/mem_encrypt.o vmlinux-objs-y += $(obj)/pgtable_64.o + vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o endif vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o @@ -96,30 +98,8 @@ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a -# The compressed kernel is built with -fPIC/-fPIE so that a boot loader -# can place it anywhere in memory and it will still run. 
However, since -# it is executed as-is without any ELF relocation processing performed -# (and has already had all relocation sections stripped from the binary), -# none of the code can use data relocations (e.g. static assignments of -# pointer values), since they will be meaningless at runtime. This check -# will refuse to link the vmlinux if any of these relocations are found. -quiet_cmd_check_data_rel = DATAREL $@ -define cmd_check_data_rel - for obj in $(filter %.o,$^); do \ - $(READELF) -S $$obj | grep -qF .rel.local && { \ - echo "error: $$obj has data relocations!" >&2; \ - exit 1; \ - } || true; \ - done -endef - -# We need to run two commands under "if_changed", so merge them into a -# single invocation. -quiet_cmd_check-and-link-vmlinux = LD $@ - cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) - $(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE - $(call if_changed,check-and-link-vmlinux) + $(call if_changed,ld) OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c index 6448a8196d32..0cc1323896d1 100644 --- a/arch/x86/boot/compressed/cpuflags.c +++ b/arch/x86/boot/compressed/cpuflags.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef CONFIG_RANDOMIZE_BASE - #include "../cpuflags.c" bool has_cpuflag(int flag) @@ -9,5 +7,3 @@ bool has_cpuflag(int flag) return test_bit(flag, cpu.flags); } - -#endif diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 03557f2174bf..659fad53ca82 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -33,32 +33,13 @@ #include <asm/bootparam.h> /* - * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X - * relocation to get the symbol address in PIC. When the compressed x86 - * kernel isn't built as PIC, the linker optimizes R_386_GOT32X - * relocations to their fixed symbol addresses. However, when the - * compressed x86 kernel is loaded at a different address, it leads - * to the following load failure: - * - * Failed to allocate space for phdrs - * - * during the decompression stage. - * - * If the compressed x86 kernel is relocatable at run-time, it should be - * compiled with -fPIE, instead of -fPIC, if possible and should be built as - * Position Independent Executable (PIE) so that linker won't optimize - * R_386_GOT32X relocation to its fixed symbol address. Older - * linkers generate R_386_32 relocations against locally defined symbols, - * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less - * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle - * R_386_32 relocations when relocating the kernel. To generate - * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as - * hidden: + * These symbols needed to be marked as .hidden to prevent the BFD linker from + * generating R_386_32 (rather than R_386_RELATIVE) relocations for them when + * the 32-bit compressed kernel is linked as PIE. This is no longer necessary, + * but it doesn't hurt to keep them .hidden. 
*/ .hidden _bss .hidden _ebss - .hidden _got - .hidden _egot .hidden _end __HEAD @@ -77,10 +58,10 @@ SYM_FUNC_START(startup_32) leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %edx - subl $1b, %edx + addl $_GLOBAL_OFFSET_TABLE_+(.-1b), %edx /* Load new GDT */ - leal gdt(%edx), %eax + leal gdt@GOTOFF(%edx), %eax movl %eax, 2(%eax) lgdt (%eax) @@ -93,14 +74,16 @@ SYM_FUNC_START(startup_32) movl %eax, %ss /* - * %edx contains the address we are loaded at by the boot loader and %ebx - * contains the address where we should move the kernel image temporarily - * for safe in-place decompression. %ebp contains the address that the kernel - * will be decompressed to. + * %edx contains the address we are loaded at by the boot loader (plus the + * offset to the GOT). The below code calculates %ebx to be the address where + * we should move the kernel image temporarily for safe in-place decompression + * (again, plus the offset to the GOT). + * + * %ebp is calculated to be the address that the kernel will be decompressed to. */ #ifdef CONFIG_RELOCATABLE - movl %edx, %ebx + leal startup_32@GOTOFF(%edx), %ebx #ifdef CONFIG_EFI_STUB /* @@ -111,7 +94,7 @@ SYM_FUNC_START(startup_32) * image_offset = startup_32 - image_base * Otherwise image_offset will be zero and has no effect on the calculations. */ - subl image_offset(%edx), %ebx + subl image_offset@GOTOFF(%edx), %ebx #endif movl BP_kernel_alignment(%esi), %eax @@ -128,10 +111,10 @@ SYM_FUNC_START(startup_32) movl %ebx, %ebp // Save the output address for later /* Target address to relocate to for decompression */ addl BP_init_size(%esi), %ebx - subl $_end, %ebx + subl $_end@GOTOFF, %ebx /* Set up the stack */ - leal boot_stack_end(%ebx), %esp + leal boot_stack_end@GOTOFF(%ebx), %esp /* Zero EFLAGS */ pushl $0 @@ -142,8 +125,8 @@ SYM_FUNC_START(startup_32) * where decompression in place becomes safe. */ pushl %esi - leal (_bss-4)(%edx), %esi - leal (_bss-4)(%ebx), %edi + leal (_bss@GOTOFF-4)(%edx), %esi + leal (_bss@GOTOFF-4)(%ebx), %edi movl $(_bss - startup_32), %ecx shrl $2, %ecx std @@ -156,14 +139,14 @@ SYM_FUNC_START(startup_32) * during extract_kernel below. To avoid any issues, repoint the GDTR * to the new copy of the GDT. */ - leal gdt(%ebx), %eax + leal gdt@GOTOFF(%ebx), %eax movl %eax, 2(%eax) lgdt (%eax) /* * Jump to the relocated address. */ - leal .Lrelocated(%ebx), %eax + leal .Lrelocated@GOTOFF(%ebx), %eax jmp *%eax SYM_FUNC_END(startup_32) @@ -173,7 +156,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry) add $0x4, %esp movl 8(%esp), %esi /* save boot_params pointer */ call efi_main - leal startup_32(%eax), %eax + /* efi_main returns the possibly relocated address of startup_32 */ jmp *%eax SYM_FUNC_END(efi32_stub_entry) SYM_FUNC_END_ALIAS(efi_stub_entry) @@ -186,40 +169,26 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) * Clear BSS (stack is currently empty) */ xorl %eax, %eax - leal _bss(%ebx), %edi - leal _ebss(%ebx), %ecx + leal _bss@GOTOFF(%ebx), %edi + leal _ebss@GOTOFF(%ebx), %ecx subl %edi, %ecx shrl $2, %ecx rep stosl /* - * Adjust our own GOT - */ - leal _got(%ebx), %edx - leal _egot(%ebx), %ecx -1: - cmpl %ecx, %edx - jae 2f - addl %ebx, (%edx) - addl $4, %edx - jmp 1b -2: - -/* * Do the extraction, and jump to the new kernel.. 
*/ - /* push arguments for extract_kernel: */ - pushl $z_output_len /* decompressed length, end of relocs */ - - pushl %ebp /* output address */ - - pushl $z_input_len /* input_len */ - leal input_data(%ebx), %eax - pushl %eax /* input_data */ - leal boot_heap(%ebx), %eax - pushl %eax /* heap area */ - pushl %esi /* real mode pointer */ - call extract_kernel /* returns kernel location in %eax */ + /* push arguments for extract_kernel: */ + + pushl output_len@GOTOFF(%ebx) /* decompressed length, end of relocs */ + pushl %ebp /* output address */ + pushl input_len@GOTOFF(%ebx) /* input_len */ + leal input_data@GOTOFF(%ebx), %eax + pushl %eax /* input_data */ + leal boot_heap@GOTOFF(%ebx), %eax + pushl %eax /* heap area */ + pushl %esi /* real mode pointer */ + call extract_kernel /* returns kernel location in %eax */ addl $24, %esp /* diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 97d37f0a34f5..1c80f1738fd9 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -33,6 +33,7 @@ #include <asm/processor-flags.h> #include <asm/asm-offsets.h> #include <asm/bootparam.h> +#include <asm/desc_defs.h> #include "pgtable.h" /* @@ -40,11 +41,35 @@ */ .hidden _bss .hidden _ebss - .hidden _got - .hidden _egot .hidden _end __HEAD + +/* + * This macro gives the relative virtual address of X, i.e. the offset of X + * from startup_32. This is the same as the link-time virtual address of X, + * since startup_32 is at 0, but defining it this way tells the + * assembler/linker that we do not want the actual run-time address of X. This + * prevents the linker from trying to create unwanted run-time relocation + * entries for the reference when the compressed kernel is linked as PIE. + * + * A reference X(%reg) will result in the link-time VA of X being stored with + * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that + * adds the 64-bit base address where the kernel is loaded. + * + * Replacing it with (X-startup_32)(%reg) results in the offset being stored, + * and no run-time relocation. + * + * The macro should be used as a displacement with a base register containing + * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate + * [$ rva(X)]. + * + * This macro can only be used from within the .head.text section, since the + * expression requires startup_32 to be in the same section as the code being + * assembled. + */ +#define rva(X) ((X) - startup_32) + .code32 SYM_FUNC_START(startup_32) /* @@ -67,10 +92,10 @@ SYM_FUNC_START(startup_32) leal (BP_scratch+4)(%esi), %esp call 1f 1: popl %ebp - subl $1b, %ebp + subl $ rva(1b), %ebp /* Load new GDT with the 64bit segments using 32bit descriptor */ - leal gdt(%ebp), %eax + leal rva(gdt)(%ebp), %eax movl %eax, 2(%eax) lgdt (%eax) @@ -83,7 +108,7 @@ SYM_FUNC_START(startup_32) movl %eax, %ss /* setup a stack and make sure cpu supports long mode. */ - leal boot_stack_end(%ebp), %esp + leal rva(boot_stack_end)(%ebp), %esp call verify_cpu testl %eax, %eax @@ -110,7 +135,7 @@ SYM_FUNC_START(startup_32) * image_offset = startup_32 - image_base * Otherwise image_offset will be zero and has no effect on the calculations. 
*/ - subl image_offset(%ebp), %ebx + subl rva(image_offset)(%ebp), %ebx #endif movl BP_kernel_alignment(%esi), %eax @@ -126,7 +151,7 @@ SYM_FUNC_START(startup_32) /* Target address to relocate to for decompression */ addl BP_init_size(%esi), %ebx - subl $_end, %ebx + subl $ rva(_end), %ebx /* * Prepare for entering 64 bit mode @@ -154,19 +179,19 @@ SYM_FUNC_START(startup_32) 1: /* Initialize Page tables to 0 */ - leal pgtable(%ebx), %edi + leal rva(pgtable)(%ebx), %edi xorl %eax, %eax movl $(BOOT_INIT_PGT_SIZE/4), %ecx rep stosl /* Build Level 4 */ - leal pgtable + 0(%ebx), %edi + leal rva(pgtable + 0)(%ebx), %edi leal 0x1007 (%edi), %eax movl %eax, 0(%edi) addl %edx, 4(%edi) /* Build Level 3 */ - leal pgtable + 0x1000(%ebx), %edi + leal rva(pgtable + 0x1000)(%ebx), %edi leal 0x1007(%edi), %eax movl $4, %ecx 1: movl %eax, 0x00(%edi) @@ -177,7 +202,7 @@ SYM_FUNC_START(startup_32) jnz 1b /* Build Level 2 */ - leal pgtable + 0x2000(%ebx), %edi + leal rva(pgtable + 0x2000)(%ebx), %edi movl $0x00000183, %eax movl $2048, %ecx 1: movl %eax, 0(%edi) @@ -188,7 +213,7 @@ SYM_FUNC_START(startup_32) jnz 1b /* Enable the boot page tables */ - leal pgtable(%ebx), %eax + leal rva(pgtable)(%ebx), %eax movl %eax, %cr3 /* Enable Long mode in EFER (Extended Feature Enable Register) */ @@ -213,14 +238,14 @@ SYM_FUNC_START(startup_32) * We place all of the values on our mini stack so lret can * used to perform that far jump. */ - leal startup_64(%ebp), %eax + leal rva(startup_64)(%ebp), %eax #ifdef CONFIG_EFI_MIXED - movl efi32_boot_args(%ebp), %edi + movl rva(efi32_boot_args)(%ebp), %edi cmp $0, %edi jz 1f - leal efi64_stub_entry(%ebp), %eax - movl efi32_boot_args+4(%ebp), %esi - movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer + leal rva(efi64_stub_entry)(%ebp), %eax + movl rva(efi32_boot_args+4)(%ebp), %esi + movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer cmpl $0, %edx jnz 1f /* @@ -231,7 +256,7 @@ SYM_FUNC_START(startup_32) * the correct stack alignment for entry. */ subl $40, %esp - leal efi_pe_entry(%ebp), %eax + leal rva(efi_pe_entry)(%ebp), %eax movl %edi, %ecx // MS calling convention movl %esi, %edx 1: @@ -257,18 +282,18 @@ SYM_FUNC_START(efi32_stub_entry) call 1f 1: pop %ebp - subl $1b, %ebp + subl $ rva(1b), %ebp - movl %esi, efi32_boot_args+8(%ebp) + movl %esi, rva(efi32_boot_args+8)(%ebp) SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL) - movl %ecx, efi32_boot_args(%ebp) - movl %edx, efi32_boot_args+4(%ebp) - movb $0, efi_is64(%ebp) + movl %ecx, rva(efi32_boot_args)(%ebp) + movl %edx, rva(efi32_boot_args+4)(%ebp) + movb $0, rva(efi_is64)(%ebp) /* Save firmware GDTR and code/data selectors */ - sgdtl efi32_boot_gdt(%ebp) - movw %cs, efi32_boot_cs(%ebp) - movw %ds, efi32_boot_ds(%ebp) + sgdtl rva(efi32_boot_gdt)(%ebp) + movw %cs, rva(efi32_boot_cs)(%ebp) + movw %ds, rva(efi32_boot_ds)(%ebp) /* Disable paging */ movl %cr0, %eax @@ -347,30 +372,11 @@ SYM_CODE_START(startup_64) /* Target address to relocate to for decompression */ movl BP_init_size(%rsi), %ebx - subl $_end, %ebx + subl $ rva(_end), %ebx addq %rbp, %rbx /* Set up the stack */ - leaq boot_stack_end(%rbx), %rsp - - /* - * paging_prepare() and cleanup_trampoline() below can have GOT - * references. Adjust the table with address we are running at. - * - * Zero RAX for adjust_got: the GOT was not adjusted before; - * there's no adjustment to undo. - */ - xorq %rax, %rax - - /* - * Calculate the address the binary is loaded at and use it as - * a GOT adjustment. 
- */ - call 1f -1: popq %rdi - subq $1b, %rdi - - call .Ladjust_got + leaq rva(boot_stack_end)(%rbx), %rsp /* * At this point we are in long mode with 4-level paging enabled, @@ -410,6 +416,10 @@ SYM_CODE_START(startup_64) .Lon_kernel_cs: + pushq %rsi + call load_stage1_idt + popq %rsi + /* * paging_prepare() sets up the trampoline and checks if we need to * enable 5-level paging. @@ -444,7 +454,7 @@ SYM_CODE_START(startup_64) lretq trampoline_return: /* Restore the stack, the 32-bit trampoline uses its own stack */ - leaq boot_stack_end(%rbx), %rsp + leaq rva(boot_stack_end)(%rbx), %rsp /* * cleanup_trampoline() would restore trampoline memory. @@ -456,7 +466,7 @@ trampoline_return: * this function call. */ pushq %rsi - leaq top_pgtable(%rbx), %rdi + leaq rva(top_pgtable)(%rbx), %rdi call cleanup_trampoline popq %rsi @@ -464,30 +474,15 @@ trampoline_return: pushq $0 popfq - /* - * Previously we've adjusted the GOT with address the binary was - * loaded at. Now we need to re-adjust for relocation address. - * - * Calculate the address the binary is loaded at, so that we can - * undo the previous GOT adjustment. - */ - call 1f -1: popq %rax - subq $1b, %rax - - /* The new adjustment is the relocation address */ - movq %rbx, %rdi - call .Ladjust_got - /* * Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. */ pushq %rsi leaq (_bss-8)(%rip), %rsi - leaq (_bss-8)(%rbx), %rdi - movq $_bss /* - $startup_32 */, %rcx - shrq $3, %rcx + leaq rva(_bss-8)(%rbx), %rdi + movl $(_bss - startup_32), %ecx + shrl $3, %ecx std rep movsq cld @@ -498,15 +493,15 @@ trampoline_return: * during extract_kernel below. To avoid any issues, repoint the GDTR * to the new copy of the GDT. */ - leaq gdt64(%rbx), %rax - leaq gdt(%rbx), %rdx + leaq rva(gdt64)(%rbx), %rax + leaq rva(gdt)(%rbx), %rdx movq %rdx, 2(%rax) lgdt (%rax) /* * Jump to the relocated address. */ - leaq .Lrelocated(%rbx), %rax + leaq rva(.Lrelocated)(%rbx), %rax jmp *%rax SYM_CODE_END(startup_64) @@ -518,7 +513,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry) movq %rdx, %rbx /* save boot_params pointer */ call efi_main movq %rbx,%rsi - leaq startup_64(%rax), %rax + leaq rva(startup_64)(%rax), %rax jmp *%rax SYM_FUNC_END(efi64_stub_entry) SYM_FUNC_END_ALIAS(efi_stub_entry) @@ -538,15 +533,30 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) rep stosq /* + * If running as an SEV guest, the encryption mask is required in the + * page-table setup code below. When the guest also has SEV-ES enabled + * set_sev_encryption_mask() will cause #VC exceptions, but the stage2 + * handler can't map its GHCB because the page-table is not set up yet. + * So set up the encryption mask here while still on the stage1 #VC + * handler. Then load stage2 IDT and switch to the kernel's own + * page-table. + */ + pushq %rsi + call set_sev_encryption_mask + call load_stage2_idt + call initialize_identity_maps + popq %rsi + +/* * Do the extraction, and jump to the new kernel.. 
*/ pushq %rsi /* Save the real mode argument */ movq %rsi, %rdi /* real mode address */ leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ leaq input_data(%rip), %rdx /* input_data */ - movl $z_input_len, %ecx /* input_len */ + movl input_len(%rip), %ecx /* input_len */ movq %rbp, %r8 /* output target address */ - movl $z_output_len, %r9d /* decompressed length, end of relocs */ + movl output_len(%rip), %r9d /* decompressed length, end of relocs */ call extract_kernel /* returns kernel location in %rax */ popq %rsi @@ -556,27 +566,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) jmp *%rax SYM_FUNC_END(.Lrelocated) -/* - * Adjust the global offset table - * - * RAX is the previous adjustment of the table to undo (use 0 if it's the - * first time we touch GOT). - * RDI is the new adjustment to apply. - */ -.Ladjust_got: - /* Walk through the GOT adding the address to the entries */ - leaq _got(%rip), %rdx - leaq _egot(%rip), %rcx -1: - cmpq %rcx, %rdx - jae 2f - subq %rax, (%rdx) /* Undo previous adjustment */ - addq %rdi, (%rdx) /* Apply the new adjustment */ - addq $8, %rdx - jmp 1b -2: - ret - .code32 /* * This is the 32-bit trampoline that will be copied over to low memory. @@ -690,10 +679,21 @@ SYM_DATA_START_LOCAL(gdt) .quad 0x0000000000000000 /* TS continued */ SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) +SYM_DATA_START(boot_idt_desc) + .word boot_idt_end - boot_idt - 1 + .quad 0 +SYM_DATA_END(boot_idt_desc) + .balign 8 +SYM_DATA_START(boot_idt) + .rept BOOT_IDT_ENTRIES + .quad 0 + .quad 0 + .endr +SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end) + #ifdef CONFIG_EFI_STUB SYM_DATA(image_offset, .long 0) #endif - #ifdef CONFIG_EFI_MIXED SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0) SYM_DATA(efi_is64, .byte 1) @@ -702,7 +702,7 @@ SYM_DATA(efi_is64, .byte 1) #define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol) #define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base) - .text + __HEAD .code32 SYM_FUNC_START(efi32_pe_entry) /* @@ -724,12 +724,12 @@ SYM_FUNC_START(efi32_pe_entry) call 1f 1: pop %ebx - subl $1b, %ebx + subl $ rva(1b), %ebx /* Get the loaded image protocol pointer from the image handle */ leal -4(%ebp), %eax pushl %eax // &loaded_image - leal loaded_image_proto(%ebx), %eax + leal rva(loaded_image_proto)(%ebx), %eax pushl %eax // pass the GUID address pushl 8(%ebp) // pass the image handle @@ -764,7 +764,7 @@ SYM_FUNC_START(efi32_pe_entry) * use it before we get to the 64-bit efi_pe_entry() in C code. */ subl %esi, %ebx - movl %ebx, image_offset(%ebp) // save image_offset + movl %ebx, rva(image_offset)(%ebp) // save image_offset jmp efi32_pe_stub_entry 2: popl %edi // restore callee-save registers diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c new file mode 100644 index 000000000000..063a60edcf99 --- /dev/null +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code is used on x86_64 to create page table identity mappings on + * demand by building up a new set of page tables (or appending to the + * existing ones), and then switching over to them when ready. + * + * Copyright (C) 2015-2016 Yinghai Lu + * Copyright (C) 2016 Kees Cook + */ + +/* + * Since we're dealing with identity mappings, physical and virtual + * addresses are the same, so override these defines which are ultimately + * used by the headers in misc.h. 
+ */ +#define __pa(x) ((unsigned long)(x)) +#define __va(x) ((void *)((unsigned long)(x))) + +/* No PAGE_TABLE_ISOLATION support needed either: */ +#undef CONFIG_PAGE_TABLE_ISOLATION + +#include "error.h" +#include "misc.h" + +/* These actually do the work of building the kernel identity maps. */ +#include <linux/pgtable.h> +#include <asm/cmpxchg.h> +#include <asm/trap_pf.h> +#include <asm/trapnr.h> +#include <asm/init.h> +/* Use the static base for this part of the boot process */ +#undef __PAGE_OFFSET +#define __PAGE_OFFSET __PAGE_OFFSET_BASE +#include "../../mm/ident_map.c" + +#ifdef CONFIG_X86_5LEVEL +unsigned int __pgtable_l5_enabled; +unsigned int pgdir_shift = 39; +unsigned int ptrs_per_p4d = 1; +#endif + +/* Used by PAGE_KERN* macros: */ +pteval_t __default_kernel_pte_mask __read_mostly = ~0; + +/* Used to track our page table allocation area. */ +struct alloc_pgt_data { + unsigned char *pgt_buf; + unsigned long pgt_buf_size; + unsigned long pgt_buf_offset; +}; + +/* + * Allocates space for a page table entry, using struct alloc_pgt_data + * above. Besides the local callers, this is used as the allocation + * callback in mapping_info below. + */ +static void *alloc_pgt_page(void *context) +{ + struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context; + unsigned char *entry; + + /* Validate there is space available for a new page. */ + if (pages->pgt_buf_offset >= pages->pgt_buf_size) { + debug_putstr("out of pgt_buf in " __FILE__ "!?\n"); + debug_putaddr(pages->pgt_buf_offset); + debug_putaddr(pages->pgt_buf_size); + return NULL; + } + + entry = pages->pgt_buf + pages->pgt_buf_offset; + pages->pgt_buf_offset += PAGE_SIZE; + + return entry; +} + +/* Used to track our allocated page tables. */ +static struct alloc_pgt_data pgt_data; + +/* The top level page table entry pointer. */ +static unsigned long top_level_pgt; + +phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1; + +/* + * Mapping information structure passed to kernel_ident_mapping_init(). + * Due to relocation, pointers must be assigned at run time not build time. + */ +static struct x86_mapping_info mapping_info; + +/* + * Adds the specified range to the identity mappings. + */ +static void add_identity_map(unsigned long start, unsigned long end) +{ + int ret; + + /* Align boundary to 2M. */ + start = round_down(start, PMD_SIZE); + end = round_up(end, PMD_SIZE); + if (start >= end) + return; + + /* Build the mapping. */ + ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, start, end); + if (ret) + error("Error: kernel_ident_mapping_init() failed\n"); +} + +/* Locates and clears a region for a new top level page table. */ +void initialize_identity_maps(void) +{ + /* Exclude the encryption mask from __PHYSICAL_MASK */ + physical_mask &= ~sme_me_mask; + + /* Init mapping_info with run-time function/buffer pointers. */ + mapping_info.alloc_pgt_page = alloc_pgt_page; + mapping_info.context = &pgt_data; + mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask; + mapping_info.kernpg_flag = _KERNPG_TABLE; + + /* + * It should be impossible for this not to already be true, + * but since calling this a second time would rewind the other + * counters, let's just make sure this is reset too. + */ + pgt_data.pgt_buf_offset = 0; + + /* + * If we came here via startup_32(), cr3 will be _pgtable already + * and we must append to the existing area instead of entirely + * overwriting it. 
+ * + * With 5-level paging, we use '_pgtable' to allocate the p4d page table, + * the top-level page table is allocated separately. + * + * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level + * cases. On 4-level paging it's equal to 'top_level_pgt'. + */ + top_level_pgt = read_cr3_pa(); + if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) { + pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE; + pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE; + memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); + } else { + pgt_data.pgt_buf = _pgtable; + pgt_data.pgt_buf_size = BOOT_PGT_SIZE; + memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); + top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data); + } + + /* + * New page-table is set up - map the kernel image and load it + * into cr3. + */ + add_identity_map((unsigned long)_head, (unsigned long)_end); + write_cr3(top_level_pgt); +} + +/* + * This switches the page tables to the new level4 that has been built + * via calls to add_identity_map() above. If booted via startup_32(), + * this is effectively a no-op. + */ +void finalize_identity_maps(void) +{ + write_cr3(top_level_pgt); +} + +static pte_t *split_large_pmd(struct x86_mapping_info *info, + pmd_t *pmdp, unsigned long __address) +{ + unsigned long page_flags; + unsigned long address; + pte_t *pte; + pmd_t pmd; + int i; + + pte = (pte_t *)info->alloc_pgt_page(info->context); + if (!pte) + return NULL; + + address = __address & PMD_MASK; + /* No large page - clear PSE flag */ + page_flags = info->page_flag & ~_PAGE_PSE; + + /* Populate the PTEs */ + for (i = 0; i < PTRS_PER_PMD; i++) { + set_pte(&pte[i], __pte(address | page_flags)); + address += PAGE_SIZE; + } + + /* + * Ideally we need to clear the large PMD first and do a TLB + * flush before we write the new PMD. But the 2M range of the + * PMD might contain the code we execute and/or the stack + * we are on, so we can't do that. But that should be safe here + * because we are going from large to small mappings and we are + * also the only user of the page-table, so there is no chance + * of a TLB multihit. + */ + pmd = __pmd((unsigned long)pte | info->kernpg_flag); + set_pmd(pmdp, pmd); + /* Flush TLB to establish the new PMD */ + write_cr3(top_level_pgt); + + return pte + pte_index(__address); +} + +static void clflush_page(unsigned long address) +{ + unsigned int flush_size; + char *cl, *start, *end; + + /* + * Hardcode cl-size to 64 - CPUID can't be used here because that might + * cause another #VC exception and the GHCB is not ready to use yet. + */ + flush_size = 64; + start = (char *)(address & PAGE_MASK); + end = start + PAGE_SIZE; + + /* + * First make sure there are no pending writes on the cache-lines to + * flush. + */ + asm volatile("mfence" : : : "memory"); + + for (cl = start; cl != end; cl += flush_size) + clflush(cl); +} + +static int set_clr_page_flags(struct x86_mapping_info *info, + unsigned long address, + pteval_t set, pteval_t clr) +{ + pgd_t *pgdp = (pgd_t *)top_level_pgt; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep, pte; + + /* + * First make sure there is a PMD mapping for 'address'. + * It should already exist, but keep things generic. + * + * To map the page just read from it and fault it in if there is no + * mapping yet. add_identity_map() can't be called here because that + * would unconditionally map the address on PMD level, destroying any + * PTE-level mappings that might already exist. Use assembly here so + * the access won't be optimized away. 
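The only job of the inline assembly that follows is to force an actual load from 'address' so that, if the page is not mapped yet, the new boot #PF handler gets a chance to map it; a plain C dereference whose result is unused could be optimized away. For comparison, a more conventional way to force such a read (illustrative only, the patch deliberately uses asm instead):

static inline void touch_page(unsigned long address)
{
        /* The volatile access cannot be elided; its value is intentionally dropped. */
        (void)*(volatile unsigned long *)address;
}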
+ */ + asm volatile("mov %[address], %%r9" + :: [address] "g" (*(unsigned long *)address) + : "r9", "memory"); + + /* + * The page is mapped at least with PMD size - so skip checks and walk + * directly to the PMD. + */ + p4dp = p4d_offset(pgdp, address); + pudp = pud_offset(p4dp, address); + pmdp = pmd_offset(pudp, address); + + if (pmd_large(*pmdp)) + ptep = split_large_pmd(info, pmdp, address); + else + ptep = pte_offset_kernel(pmdp, address); + + if (!ptep) + return -ENOMEM; + + /* + * Changing encryption attributes of a page requires to flush it from + * the caches. + */ + if ((set | clr) & _PAGE_ENC) + clflush_page(address); + + /* Update PTE */ + pte = *ptep; + pte = pte_set_flags(pte, set); + pte = pte_clear_flags(pte, clr); + set_pte(ptep, pte); + + /* Flush TLB after changing encryption attribute */ + write_cr3(top_level_pgt); + + return 0; +} + +int set_page_decrypted(unsigned long address) +{ + return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC); +} + +int set_page_encrypted(unsigned long address) +{ + return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0); +} + +int set_page_non_present(unsigned long address) +{ + return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT); +} + +static void do_pf_error(const char *msg, unsigned long error_code, + unsigned long address, unsigned long ip) +{ + error_putstr(msg); + + error_putstr("\nError Code: "); + error_puthex(error_code); + error_putstr("\nCR2: 0x"); + error_puthex(address); + error_putstr("\nRIP relative to _head: 0x"); + error_puthex(ip - (unsigned long)_head); + error_putstr("\n"); + + error("Stopping.\n"); +} + +void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code) +{ + unsigned long address = native_read_cr2(); + unsigned long end; + bool ghcb_fault; + + ghcb_fault = sev_es_check_ghcb_fault(address); + + address &= PMD_MASK; + end = address + PMD_SIZE; + + /* + * Check for unexpected error codes. Unexpected are: + * - Faults on present pages + * - User faults + * - Reserved bits set + */ + if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD)) + do_pf_error("Unexpected page-fault:", error_code, address, regs->ip); + else if (ghcb_fault) + do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip); + + /* + * Error code is sane - now identity map the 2M region around + * the faulting address. 
+ */ + add_identity_map(address, end); +} diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c new file mode 100644 index 000000000000..804a502ee0d2 --- /dev/null +++ b/arch/x86/boot/compressed/idt_64.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <asm/trap_pf.h> +#include <asm/segment.h> +#include <asm/trapnr.h> +#include "misc.h" + +static void set_idt_entry(int vector, void (*handler)(void)) +{ + unsigned long address = (unsigned long)handler; + gate_desc entry; + + memset(&entry, 0, sizeof(entry)); + + entry.offset_low = (u16)(address & 0xffff); + entry.segment = __KERNEL_CS; + entry.bits.type = GATE_TRAP; + entry.bits.p = 1; + entry.offset_middle = (u16)((address >> 16) & 0xffff); + entry.offset_high = (u32)(address >> 32); + + memcpy(&boot_idt[vector], &entry, sizeof(entry)); +} + +/* Have this here so we don't need to include <asm/desc.h> */ +static void load_boot_idt(const struct desc_ptr *dtr) +{ + asm volatile("lidt %0"::"m" (*dtr)); +} + +/* Setup IDT before kernel jumping to .Lrelocated */ +void load_stage1_idt(void) +{ + boot_idt_desc.address = (unsigned long)boot_idt; + + + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) + set_idt_entry(X86_TRAP_VC, boot_stage1_vc); + + load_boot_idt(&boot_idt_desc); +} + +/* Setup IDT after kernel jumping to .Lrelocated */ +void load_stage2_idt(void) +{ + boot_idt_desc.address = (unsigned long)boot_idt; + + set_idt_entry(X86_TRAP_PF, boot_page_fault); + +#ifdef CONFIG_AMD_MEM_ENCRYPT + set_idt_entry(X86_TRAP_VC, boot_stage2_vc); +#endif + + load_boot_idt(&boot_idt_desc); +} diff --git a/arch/x86/boot/compressed/idt_handlers_64.S b/arch/x86/boot/compressed/idt_handlers_64.S new file mode 100644 index 000000000000..22890e199f5b --- /dev/null +++ b/arch/x86/boot/compressed/idt_handlers_64.S @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Early IDT handler entry points + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#include <asm/segment.h> + +/* For ORIG_RAX */ +#include "../../entry/calling.h" + +.macro EXCEPTION_HANDLER name function error_code=0 +SYM_FUNC_START(\name) + + /* Build pt_regs */ + .if \error_code == 0 + pushq $0 + .endif + + pushq %rdi + pushq %rsi + pushq %rdx + pushq %rcx + pushq %rax + pushq %r8 + pushq %r9 + pushq %r10 + pushq %r11 + pushq %rbx + pushq %rbp + pushq %r12 + pushq %r13 + pushq %r14 + pushq %r15 + + /* Call handler with pt_regs */ + movq %rsp, %rdi + /* Error code is second parameter */ + movq ORIG_RAX(%rsp), %rsi + call \function + + /* Restore regs */ + popq %r15 + popq %r14 + popq %r13 + popq %r12 + popq %rbp + popq %rbx + popq %r11 + popq %r10 + popq %r9 + popq %r8 + popq %rax + popq %rcx + popq %rdx + popq %rsi + popq %rdi + + /* Remove error code and return */ + addq $8, %rsp + + iretq +SYM_FUNC_END(\name) + .endm + + .text + .code64 + +EXCEPTION_HANDLER boot_page_fault do_boot_page_fault error_code=1 + +#ifdef CONFIG_AMD_MEM_ENCRYPT +EXCEPTION_HANDLER boot_stage1_vc do_vc_no_ghcb error_code=1 +EXCEPTION_HANDLER boot_stage2_vc do_boot_stage2_vc error_code=1 +#endif diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index dde7cb3724df..b59547ce5b19 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -36,17 +36,12 @@ #define STATIC #include <linux/decompress/mm.h> -#ifdef CONFIG_X86_5LEVEL -unsigned int __pgtable_l5_enabled; -unsigned int pgdir_shift __ro_after_init = 39; -unsigned int ptrs_per_p4d __ro_after_init = 1; -#endif +#define 
_SETUP +#include <asm/setup.h> /* For COMMAND_LINE_SIZE */ +#undef _SETUP extern unsigned long get_cmd_line_ptr(void); -/* Used by PAGE_KERN* macros: */ -pteval_t __default_kernel_pte_mask __read_mostly = ~0; - /* Simplified build-specific string for starting entropy. */ static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; @@ -87,8 +82,11 @@ static unsigned long get_boot_seed(void) static bool memmap_too_large; -/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */ -static unsigned long long mem_limit = ULLONG_MAX; +/* + * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit. + * It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options. + */ +static u64 mem_limit; /* Number of immovable memory regions */ static int num_immovable_mem; @@ -131,8 +129,7 @@ enum parse_mode { }; static int -parse_memmap(char *p, unsigned long long *start, unsigned long long *size, - enum parse_mode mode) +parse_memmap(char *p, u64 *start, u64 *size, enum parse_mode mode) { char *oldp; @@ -162,7 +159,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size, */ *size = 0; } else { - unsigned long long flags; + u64 flags; /* * efi_fake_mem=nn@ss:attr the attr specifies @@ -201,7 +198,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str) while (str && (i < MAX_MEMMAP_REGIONS)) { int rc; - unsigned long long start, size; + u64 start, size; char *k = strchr(str, ','); if (k) @@ -214,7 +211,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str) if (start == 0) { /* Store the specified memory limit if size > 0 */ - if (size > 0) + if (size > 0 && size < mem_limit) mem_limit = size; continue; @@ -261,15 +258,15 @@ static void parse_gb_huge_pages(char *param, char *val) static void handle_mem_options(void) { char *args = (char *)get_cmd_line_ptr(); - size_t len = strlen((char *)args); + size_t len; char *tmp_cmdline; char *param, *val; u64 mem_size; - if (!strstr(args, "memmap=") && !strstr(args, "mem=") && - !strstr(args, "hugepages")) + if (!args) return; + len = strnlen(args, COMMAND_LINE_SIZE-1); tmp_cmdline = malloc(len + 1); if (!tmp_cmdline) error("Failed to allocate space for tmp_cmdline"); @@ -284,14 +281,12 @@ static void handle_mem_options(void) while (*args) { args = next_arg(args, ¶m, &val); /* Stop at -- */ - if (!val && strcmp(param, "--") == 0) { - warn("Only '--' specified in cmdline"); - goto out; - } + if (!val && strcmp(param, "--") == 0) + break; if (!strcmp(param, "memmap")) { mem_avoid_memmap(PARSE_MEMMAP, val); - } else if (strstr(param, "hugepages")) { + } else if (IS_ENABLED(CONFIG_X86_64) && strstr(param, "hugepages")) { parse_gb_huge_pages(param, val); } else if (!strcmp(param, "mem")) { char *p = val; @@ -300,21 +295,23 @@ static void handle_mem_options(void) continue; mem_size = memparse(p, &p); if (mem_size == 0) - goto out; + break; - mem_limit = mem_size; + if (mem_size < mem_limit) + mem_limit = mem_size; } else if (!strcmp(param, "efi_fake_mem")) { mem_avoid_memmap(PARSE_EFI, val); } } -out: free(tmp_cmdline); return; } /* - * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T). + * In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM) + * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit. + * * The mem_avoid array is used to store the ranges that need to be avoided * when KASLR searches for an appropriate random address. 
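handle_mem_options() above leans on memparse() to turn strings like "mem=512M" into byte counts before they are compared against mem_limit. A reduced sketch of that suffix handling (the real memparse() lives in lib/cmdline.c and also understands T, P and E suffixes):

#include <stdlib.h>

static unsigned long long parse_size(const char *s, char **retp)
{
        unsigned long long v = strtoull(s, retp, 0);

        switch (**retp) {
        case 'G': case 'g':
                v <<= 10;
                /* fall through */
        case 'M': case 'm':
                v <<= 10;
                /* fall through */
        case 'K': case 'k':
                v <<= 10;
                (*retp)++;
                break;
        }
        return v;
}

/* parse_size("512M", &end) == 512ULL << 20, with end pointing past the 'M'. */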
We must avoid any * regions that are unsafe to overlap with during decompression, and other @@ -392,8 +389,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, { unsigned long init_size = boot_params->hdr.init_size; u64 initrd_start, initrd_size; - u64 cmd_line, cmd_line_size; - char *ptr; + unsigned long cmd_line, cmd_line_size; /* * Avoid the region that is unsafe to overlap during @@ -401,8 +397,6 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, */ mem_avoid[MEM_AVOID_ZO_RANGE].start = input; mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input; - add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start, - mem_avoid[MEM_AVOID_ZO_RANGE].size); /* Avoid initrd. */ initrd_start = (u64)boot_params->ext_ramdisk_image << 32; @@ -414,22 +408,17 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, /* No need to set mapping for initrd, it will be handled in VO. */ /* Avoid kernel command line. */ - cmd_line = (u64)boot_params->ext_cmd_line_ptr << 32; - cmd_line |= boot_params->hdr.cmd_line_ptr; + cmd_line = get_cmd_line_ptr(); /* Calculate size of cmd_line. */ - ptr = (char *)(unsigned long)cmd_line; - for (cmd_line_size = 0; ptr[cmd_line_size++];) - ; - mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line; - mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size; - add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start, - mem_avoid[MEM_AVOID_CMDLINE].size); + if (cmd_line) { + cmd_line_size = strnlen((char *)cmd_line, COMMAND_LINE_SIZE-1) + 1; + mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line; + mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size; + } /* Avoid boot parameters. */ mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params; mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params); - add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start, - mem_avoid[MEM_AVOID_BOOTPARAMS].size); /* We don't need to set a mapping for setup_data. */ @@ -438,11 +427,6 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, /* Enumerate the immovable memory regions */ num_immovable_mem = count_immovable_mem_regions(); - -#ifdef CONFIG_X86_VERBOSE_BOOTUP - /* Make sure video RAM can be used. 
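The command-line handling above also becomes more defensive: lengths are now taken with strnlen() capped at COMMAND_LINE_SIZE-1 instead of an open-coded scan, so a command line missing its NUL terminator can no longer be walked past its buffer. The bounded measurement in isolation (CAP is a stand-in value, not the real constant):

#include <string.h>

#define CAP     2048    /* stand-in for COMMAND_LINE_SIZE */

static unsigned long cmdline_bytes(const char *cmdline)
{
        /* At most CAP-1 payload bytes are counted; +1 covers the terminating NUL. */
        return strnlen(cmdline, CAP - 1) + 1;
}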
*/ - add_identity_map(0, PMD_SIZE); -#endif } /* @@ -454,7 +438,7 @@ static bool mem_avoid_overlap(struct mem_vector *img, { int i; struct setup_data *ptr; - unsigned long earliest = img->start + img->size; + u64 earliest = img->start + img->size; bool is_overlapping = false; for (i = 0; i < MEM_AVOID_MAX; i++) { @@ -499,18 +483,16 @@ static bool mem_avoid_overlap(struct mem_vector *img, } struct slot_area { - unsigned long addr; - int num; + u64 addr; + unsigned long num; }; #define MAX_SLOT_AREA 100 static struct slot_area slot_areas[MAX_SLOT_AREA]; - +static unsigned int slot_area_index; static unsigned long slot_max; -static unsigned long slot_area_index; - static void store_slot_info(struct mem_vector *region, unsigned long image_size) { struct slot_area slot_area; @@ -519,13 +501,10 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size) return; slot_area.addr = region->start; - slot_area.num = (region->size - image_size) / - CONFIG_PHYSICAL_ALIGN + 1; + slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN; - if (slot_area.num > 0) { - slot_areas[slot_area_index++] = slot_area; - slot_max += slot_area.num; - } + slot_areas[slot_area_index++] = slot_area; + slot_max += slot_area.num; } /* @@ -535,57 +514,53 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size) static void process_gb_huge_pages(struct mem_vector *region, unsigned long image_size) { - unsigned long addr, size = 0; + u64 pud_start, pud_end; + unsigned long gb_huge_pages; struct mem_vector tmp; - int i = 0; - if (!max_gb_huge_pages) { + if (!IS_ENABLED(CONFIG_X86_64) || !max_gb_huge_pages) { store_slot_info(region, image_size); return; } - addr = ALIGN(region->start, PUD_SIZE); - /* Did we raise the address above the passed in memory entry? */ - if (addr < region->start + region->size) - size = region->size - (addr - region->start); - - /* Check how many 1GB huge pages can be filtered out: */ - while (size > PUD_SIZE && max_gb_huge_pages) { - size -= PUD_SIZE; - max_gb_huge_pages--; - i++; - } + /* Are there any 1GB pages in the region? */ + pud_start = ALIGN(region->start, PUD_SIZE); + pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE); /* No good 1GB huge pages found: */ - if (!i) { + if (pud_start >= pud_end) { store_slot_info(region, image_size); return; } - /* - * Skip those 'i'*1GB good huge pages, and continue checking and - * processing the remaining head or tail part of the passed region - * if available. - */ - - if (addr >= region->start + image_size) { + /* Check if the head part of the region is usable. */ + if (pud_start >= region->start + image_size) { tmp.start = region->start; - tmp.size = addr - region->start; + tmp.size = pud_start - region->start; store_slot_info(&tmp, image_size); } - size = region->size - (addr - region->start) - i * PUD_SIZE; - if (size >= image_size) { - tmp.start = addr + i * PUD_SIZE; - tmp.size = size; + /* Skip the good 1GB pages. */ + gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT; + if (gb_huge_pages > max_gb_huge_pages) { + pud_end = pud_start + (max_gb_huge_pages << PUD_SHIFT); + max_gb_huge_pages = 0; + } else { + max_gb_huge_pages -= gb_huge_pages; + } + + /* Check if the tail part of the region is usable. 
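The reworked store_slot_info() above counts candidate load addresses as 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN: an exact-fit region still yields one slot, and every further alignment step adds one. A quick numeric check of that formula (2 MiB is used here as the usual x86-64 CONFIG_PHYSICAL_ALIGN; the snippet is illustrative, not from the patch):

#include <assert.h>

#define ALIGN_STEP      (2UL << 20)     /* assumed CONFIG_PHYSICAL_ALIGN */

static unsigned long nr_slots(unsigned long region_size, unsigned long image_size)
{
        return 1 + (region_size - image_size) / ALIGN_STEP;
}

int main(void)
{
        unsigned long image = 32UL << 20;               /* 32 MiB decompressed image */

        assert(nr_slots(image, image) == 1);            /* exact fit: one slot       */
        assert(nr_slots(image + ALIGN_STEP, image) == 2);
        assert(nr_slots(128UL << 20, image) == 49);     /* (128 - 32) / 2 + 1        */
        return 0;
}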
*/ + if (region->start + region->size >= pud_end + image_size) { + tmp.start = pud_end; + tmp.size = region->start + region->size - pud_end; store_slot_info(&tmp, image_size); } } -static unsigned long slots_fetch_random(void) +static u64 slots_fetch_random(void) { unsigned long slot; - int i; + unsigned int i; /* Handle case of no slots stored. */ if (slot_max == 0) @@ -598,7 +573,7 @@ static unsigned long slots_fetch_random(void) slot -= slot_areas[i].num; continue; } - return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN; + return slot_areas[i].addr + ((u64)slot * CONFIG_PHYSICAL_ALIGN); } if (i == slot_area_index) @@ -611,49 +586,23 @@ static void __process_mem_region(struct mem_vector *entry, unsigned long image_size) { struct mem_vector region, overlap; - unsigned long start_orig, end; - struct mem_vector cur_entry; + u64 region_end; - /* On 32-bit, ignore entries entirely above our maximum. */ - if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE) - return; - - /* Ignore entries entirely below our minimum. */ - if (entry->start + entry->size < minimum) - return; - - /* Ignore entries above memory limit */ - end = min(entry->size + entry->start, mem_limit); - if (entry->start >= end) - return; - cur_entry.start = entry->start; - cur_entry.size = end - entry->start; - - region.start = cur_entry.start; - region.size = cur_entry.size; + /* Enforce minimum and memory limit. */ + region.start = max_t(u64, entry->start, minimum); + region_end = min(entry->start + entry->size, mem_limit); /* Give up if slot area array is full. */ while (slot_area_index < MAX_SLOT_AREA) { - start_orig = region.start; - - /* Potentially raise address to minimum location. */ - if (region.start < minimum) - region.start = minimum; - /* Potentially raise address to meet alignment needs. */ region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); /* Did we raise the address above the passed in memory entry? */ - if (region.start > cur_entry.start + cur_entry.size) + if (region.start > region_end) return; /* Reduce size by any delta from the original address. */ - region.size -= region.start - start_orig; - - /* On 32-bit, reduce region size to fit within max size. */ - if (IS_ENABLED(CONFIG_X86_32) && - region.start + region.size > KERNEL_IMAGE_SIZE) - region.size = KERNEL_IMAGE_SIZE - region.start; + region.size = region_end - region.start; /* Return if region can't contain decompressed kernel */ if (region.size < image_size) @@ -666,27 +615,19 @@ static void __process_mem_region(struct mem_vector *entry, } /* Store beginning of region if holds at least image_size. */ - if (overlap.start > region.start + image_size) { - struct mem_vector beginning; - - beginning.start = region.start; - beginning.size = overlap.start - region.start; - process_gb_huge_pages(&beginning, image_size); + if (overlap.start >= region.start + image_size) { + region.size = overlap.start - region.start; + process_gb_huge_pages(®ion, image_size); } - /* Return if overlap extends to or past end of region. */ - if (overlap.start + overlap.size >= region.start + region.size) - return; - /* Clip off the overlapping region and start over. 
*/ - region.size -= overlap.start - region.start + overlap.size; region.start = overlap.start + overlap.size; } } static bool process_mem_region(struct mem_vector *region, - unsigned long long minimum, - unsigned long long image_size) + unsigned long minimum, + unsigned long image_size) { int i; /* @@ -709,7 +650,7 @@ static bool process_mem_region(struct mem_vector *region, * immovable memory and @region. */ for (i = 0; i < num_immovable_mem; i++) { - unsigned long long start, end, entry_end, region_end; + u64 start, end, entry_end, region_end; struct mem_vector entry; if (!mem_overlaps(region, &immovable_mem[i])) @@ -736,8 +677,8 @@ static bool process_mem_region(struct mem_vector *region, #ifdef CONFIG_EFI /* - * Returns true if mirror region found (and must have been processed - * for slots adding) + * Returns true if we processed the EFI memmap, which we prefer over the E820 + * table if it is available. */ static bool process_efi_entries(unsigned long minimum, unsigned long image_size) @@ -839,20 +780,30 @@ static void process_e820_entries(unsigned long minimum, static unsigned long find_random_phys_addr(unsigned long minimum, unsigned long image_size) { + u64 phys_addr; + + /* Bail out early if it's impossible to succeed. */ + if (minimum + image_size > mem_limit) + return 0; + /* Check if we had too many memmaps. */ if (memmap_too_large) { debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n"); return 0; } - /* Make sure minimum is aligned. */ - minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); + if (!process_efi_entries(minimum, image_size)) + process_e820_entries(minimum, image_size); + + phys_addr = slots_fetch_random(); - if (process_efi_entries(minimum, image_size)) - return slots_fetch_random(); + /* Perform a final check to make sure the address is in range. */ + if (phys_addr < minimum || phys_addr + image_size > mem_limit) { + warn("Invalid physical address chosen!\n"); + return 0; + } - process_e820_entries(minimum, image_size); - return slots_fetch_random(); + return (unsigned long)phys_addr; } static unsigned long find_random_virt_addr(unsigned long minimum, @@ -860,18 +811,12 @@ static unsigned long find_random_virt_addr(unsigned long minimum, { unsigned long slots, random_addr; - /* Make sure minimum is aligned. */ - minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); - /* Align image_size for easy slot calculations. */ - image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN); - /* * There are how many CONFIG_PHYSICAL_ALIGN-sized slots * that can hold image_size within the range of minimum to * KERNEL_IMAGE_SIZE? */ - slots = (KERNEL_IMAGE_SIZE - minimum - image_size) / - CONFIG_PHYSICAL_ALIGN + 1; + slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN; random_addr = kaslr_get_random_long("Virtual") % slots; @@ -905,8 +850,10 @@ void choose_random_location(unsigned long input, boot_params->hdr.loadflags |= KASLR_FLAG; - /* Prepare to add new identity pagetables on demand. */ - initialize_identity_maps(); + if (IS_ENABLED(CONFIG_X86_32)) + mem_limit = KERNEL_IMAGE_SIZE; + else + mem_limit = MAXMEM; /* Record the various known unsafe memory ranges. */ mem_avoid_init(input, input_size, *output); @@ -917,6 +864,8 @@ void choose_random_location(unsigned long input, * location: */ min_addr = min(*output, 512UL << 20); + /* Make sure minimum is aligned. */ + min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN); /* Walk available memory entries to find a random address. 
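slots_fetch_random() above draws one random index across all regions and then walks slot_areas[] to turn it back into an address, subtracting each area's slot count until the index lands inside one. A compact model of that translation (the rnd argument and the 2 MiB step are assumptions for the example, not values taken from the patch):

struct area {
        unsigned long long addr;        /* first candidate address in this region */
        unsigned long num;              /* number of slots the region provides    */
};

static unsigned long long pick_slot(const struct area *areas, int n_areas,
                                    unsigned long total, unsigned long rnd)
{
        unsigned long slot = rnd % total;
        int i;

        for (i = 0; i < n_areas; i++) {
                if (slot >= areas[i].num) {
                        slot -= areas[i].num;   /* index lies beyond this region */
                        continue;
                }
                return areas[i].addr + slot * (2ULL << 20);
        }
        return 0;       /* not reached if 'total' equals the sum of all 'num' fields */
}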
*/ random_addr = find_random_phys_addr(min_addr, output_size); @@ -924,19 +873,8 @@ void choose_random_location(unsigned long input, warn("Physical KASLR disabled: no suitable memory region!"); } else { /* Update the new physical address location. */ - if (*output != random_addr) { - add_identity_map(random_addr, output_size); + if (*output != random_addr) *output = random_addr; - } - - /* - * This loads the identity mapping page table. - * This should only be done if a new physical address - * is found for the kernel, otherwise we should keep - * the old page table to make it be like the "nokaslr" - * case. - */ - finalize_identity_maps(); } diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c deleted file mode 100644 index f9c5c13d979b..000000000000 --- a/arch/x86/boot/compressed/kaslr_64.c +++ /dev/null @@ -1,153 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This code is used on x86_64 to create page table identity mappings on - * demand by building up a new set of page tables (or appending to the - * existing ones), and then switching over to them when ready. - * - * Copyright (C) 2015-2016 Yinghai Lu - * Copyright (C) 2016 Kees Cook - */ - -/* - * Since we're dealing with identity mappings, physical and virtual - * addresses are the same, so override these defines which are ultimately - * used by the headers in misc.h. - */ -#define __pa(x) ((unsigned long)(x)) -#define __va(x) ((void *)((unsigned long)(x))) - -/* No PAGE_TABLE_ISOLATION support needed either: */ -#undef CONFIG_PAGE_TABLE_ISOLATION - -#include "misc.h" - -/* These actually do the work of building the kernel identity maps. */ -#include <linux/pgtable.h> -#include <asm/init.h> -/* Use the static base for this part of the boot process */ -#undef __PAGE_OFFSET -#define __PAGE_OFFSET __PAGE_OFFSET_BASE -#include "../../mm/ident_map.c" - -/* Used to track our page table allocation area. */ -struct alloc_pgt_data { - unsigned char *pgt_buf; - unsigned long pgt_buf_size; - unsigned long pgt_buf_offset; -}; - -/* - * Allocates space for a page table entry, using struct alloc_pgt_data - * above. Besides the local callers, this is used as the allocation - * callback in mapping_info below. - */ -static void *alloc_pgt_page(void *context) -{ - struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context; - unsigned char *entry; - - /* Validate there is space available for a new page. */ - if (pages->pgt_buf_offset >= pages->pgt_buf_size) { - debug_putstr("out of pgt_buf in " __FILE__ "!?\n"); - debug_putaddr(pages->pgt_buf_offset); - debug_putaddr(pages->pgt_buf_size); - return NULL; - } - - entry = pages->pgt_buf + pages->pgt_buf_offset; - pages->pgt_buf_offset += PAGE_SIZE; - - return entry; -} - -/* Used to track our allocated page tables. */ -static struct alloc_pgt_data pgt_data; - -/* The top level page table entry pointer. */ -static unsigned long top_level_pgt; - -phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1; - -/* - * Mapping information structure passed to kernel_ident_mapping_init(). - * Due to relocation, pointers must be assigned at run time not build time. - */ -static struct x86_mapping_info mapping_info; - -/* Locates and clears a region for a new top level page table. */ -void initialize_identity_maps(void) -{ - /* If running as an SEV guest, the encryption mask is required. 
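One line worth highlighting, both in the old kaslr_64.c being removed here and in its replacement ident_map_64.c, is physical_mask &= ~sme_me_mask: with SME/SEV the encryption bit is stolen from the physical address bits and must not be treated as part of the address. Numerically, with the 52-bit physical mask and an assumed C-bit at position 47 (the real bit position is CPUID-reported, so this is only an example):

#include <stdint.h>

int main(void)
{
        uint64_t physical_mask = (1ULL << 52) - 1;      /* __PHYSICAL_MASK_SHIFT == 52 */
        uint64_t sme_me_mask   = 1ULL << 47;            /* assumed C-bit position      */

        physical_mask &= ~sme_me_mask;
        /* 0xfffffffffffff -> 0xf7fffffffffff: bit 47 is no longer an address bit. */
        return physical_mask == 0xf7fffffffffffULL ? 0 : 1;
}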
*/ - set_sev_encryption_mask(); - - /* Exclude the encryption mask from __PHYSICAL_MASK */ - physical_mask &= ~sme_me_mask; - - /* Init mapping_info with run-time function/buffer pointers. */ - mapping_info.alloc_pgt_page = alloc_pgt_page; - mapping_info.context = &pgt_data; - mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask; - mapping_info.kernpg_flag = _KERNPG_TABLE; - - /* - * It should be impossible for this not to already be true, - * but since calling this a second time would rewind the other - * counters, let's just make sure this is reset too. - */ - pgt_data.pgt_buf_offset = 0; - - /* - * If we came here via startup_32(), cr3 will be _pgtable already - * and we must append to the existing area instead of entirely - * overwriting it. - * - * With 5-level paging, we use '_pgtable' to allocate the p4d page table, - * the top-level page table is allocated separately. - * - * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level - * cases. On 4-level paging it's equal to 'top_level_pgt'. - */ - top_level_pgt = read_cr3_pa(); - if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) { - debug_putstr("booted via startup_32()\n"); - pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE; - pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE; - memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); - } else { - debug_putstr("booted via startup_64()\n"); - pgt_data.pgt_buf = _pgtable; - pgt_data.pgt_buf_size = BOOT_PGT_SIZE; - memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); - top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data); - } -} - -/* - * Adds the specified range to what will become the new identity mappings. - * Once all ranges have been added, the new mapping is activated by calling - * finalize_identity_maps() below. - */ -void add_identity_map(unsigned long start, unsigned long size) -{ - unsigned long end = start + size; - - /* Align boundary to 2M. */ - start = round_down(start, PMD_SIZE); - end = round_up(end, PMD_SIZE); - if (start >= end) - return; - - /* Build the mapping. */ - kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, - start, end); -} - -/* - * This switches the page tables to the new level4 that has been built - * via calls to add_identity_map() above. If booted via startup_32(), - * this is effectively a no-op. - */ -void finalize_identity_maps(void) -{ - write_cr3(top_level_pgt); -} diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index e478e40fbe5a..267e7f93050e 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -442,6 +442,13 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, parse_elf(output); handle_relocations(output, output_len, virt_addr); debug_putstr("done.\nBooting the kernel.\n"); + + /* + * Flush GHCB from cache and map it encrypted again when running as + * SEV-ES guest. 
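The sev_es_shutdown_ghcb() call this comment introduces is the counterpart of the early GHCB setup added in boot/compressed/sev-es.c further down: the GHCB page is mapped decrypted (shared with the hypervisor) while #VC exits are being handled, then flushed, re-encrypted and finally marked not-present so any late #VC touching it is caught as a bug. Condensed into one sketch built on the helpers this series introduces (error handling is simplified, so treat it as an outline rather than the real flow):

/* Outline only; boot_ghcb_page and the set_page_*() helpers come from this series. */
static int ghcb_outline(void)
{
        unsigned long page = (unsigned long)&boot_ghcb_page;

        if (set_page_decrypted(page))           /* share the page with the hypervisor */
                return -1;
        memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));

        /* ... #VC exits use the GHCB here, via do_boot_stage2_vc() ... */

        if (set_page_encrypted(page))           /* make it guest-private again */
                return -1;
        return set_page_non_present(page);      /* catch stray references later */
}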
+ */ + sev_es_shutdown_ghcb(); + return output; } diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 726e264410ff..6d31f1b4c4d1 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -23,6 +23,7 @@ #include <asm/page.h> #include <asm/boot.h> #include <asm/bootparam.h> +#include <asm/desc_defs.h> #define BOOT_CTYPE_H #include <linux/acpi.h> @@ -36,6 +37,9 @@ #define memptr unsigned #endif +/* boot/compressed/vmlinux start and end markers */ +extern char _head[], _end[]; + /* misc.c */ extern memptr free_mem_ptr; extern memptr free_mem_end_ptr; @@ -70,8 +74,8 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize); int cmdline_find_option_bool(const char *option); struct mem_vector { - unsigned long long start; - unsigned long long size; + u64 start; + u64 size; }; #if CONFIG_RANDOMIZE_BASE @@ -81,8 +85,6 @@ void choose_random_location(unsigned long input, unsigned long *output, unsigned long output_size, unsigned long *virt_addr); -/* cpuflags.c */ -bool has_cpuflag(int flag); #else static inline void choose_random_location(unsigned long input, unsigned long input_size, @@ -93,18 +95,14 @@ static inline void choose_random_location(unsigned long input, } #endif +/* cpuflags.c */ +bool has_cpuflag(int flag); + #ifdef CONFIG_X86_64 -void initialize_identity_maps(void); -void add_identity_map(unsigned long start, unsigned long size); -void finalize_identity_maps(void); +extern int set_page_decrypted(unsigned long address); +extern int set_page_encrypted(unsigned long address); +extern int set_page_non_present(unsigned long address); extern unsigned char _pgtable[]; -#else -static inline void initialize_identity_maps(void) -{ } -static inline void add_identity_map(unsigned long start, unsigned long size) -{ } -static inline void finalize_identity_maps(void) -{ } #endif #ifdef CONFIG_EARLY_PRINTK @@ -119,6 +117,17 @@ static inline void console_init(void) void set_sev_encryption_mask(void); +#ifdef CONFIG_AMD_MEM_ENCRYPT +void sev_es_shutdown_ghcb(void); +extern bool sev_es_check_ghcb_fault(unsigned long address); +#else +static inline void sev_es_shutdown_ghcb(void) { } +static inline bool sev_es_check_ghcb_fault(unsigned long address) +{ + return false; +} +#endif + /* acpi.c */ #ifdef CONFIG_ACPI acpi_physical_address get_rsdp_addr(void); @@ -133,4 +142,21 @@ int count_immovable_mem_regions(void); static inline int count_immovable_mem_regions(void) { return 0; } #endif +/* ident_map_64.c */ +#ifdef CONFIG_X86_5LEVEL +extern unsigned int __pgtable_l5_enabled, pgdir_shift, ptrs_per_p4d; +#endif + +/* Used by PAGE_KERN* macros: */ +extern pteval_t __default_kernel_pte_mask; + +/* idt_64.c */ +extern gate_desc boot_idt[BOOT_IDT_ENTRIES]; +extern struct desc_ptr boot_idt_desc; + +/* IDT Entry Points */ +void boot_page_fault(void); +void boot_stage1_vc(void); +void boot_stage2_vc(void); + #endif /* BOOT_COMPRESSED_MISC_H */ diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index 7e01248765b2..52aa56cdbacc 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -60,6 +60,12 @@ int main(int argc, char *argv[]) printf(".incbin \"%s\"\n", argv[1]); printf("input_data_end:\n"); + printf(".section \".rodata\",\"a\",@progbits\n"); + printf(".globl input_len\n"); + printf("input_len:\n\t.long %lu\n", ilen); + printf(".globl output_len\n"); + printf("output_len:\n\t.long %lu\n", (unsigned long)olen); + retval = 0; bail: if (f) diff --git 
a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index c8862696a47b..7d0394f4ebf9 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -5,15 +5,6 @@ #include "pgtable.h" #include "../string.h" -/* - * __force_order is used by special_insns.h asm code to force instruction - * serialization. - * - * It is not referenced from the code, but GCC < 5 with -fPIE would fail - * due to an undefined symbol. Define it to make these ancient GCCs work. - */ -unsigned long __force_order; - #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */ #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */ diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c new file mode 100644 index 000000000000..954cb2702e23 --- /dev/null +++ b/arch/x86/boot/compressed/sev-es.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Encrypted Register State Support + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +/* + * misc.h needs to be first because it knows how to include the other kernel + * headers in the pre-decompression code in a way that does not break + * compilation. + */ +#include "misc.h" + +#include <asm/pgtable_types.h> +#include <asm/sev-es.h> +#include <asm/trapnr.h> +#include <asm/trap_pf.h> +#include <asm/msr-index.h> +#include <asm/fpu/xcr.h> +#include <asm/ptrace.h> +#include <asm/svm.h> + +#include "error.h" + +struct ghcb boot_ghcb_page __aligned(PAGE_SIZE); +struct ghcb *boot_ghcb; + +/* + * Copy a version of this function here - insn-eval.c can't be used in + * pre-decompression code. + */ +static bool insn_has_rep_prefix(struct insn *insn) +{ + int i; + + insn_get_prefixes(insn); + + for (i = 0; i < insn->prefixes.nbytes; i++) { + insn_byte_t p = insn->prefixes.bytes[i]; + + if (p == 0xf2 || p == 0xf3) + return true; + } + + return false; +} + +/* + * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and + * doesn't use segments. + */ +static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) +{ + return 0UL; +} + +static inline u64 sev_es_rd_ghcb_msr(void) +{ + unsigned long low, high; + + asm volatile("rdmsr" : "=a" (low), "=d" (high) : + "c" (MSR_AMD64_SEV_ES_GHCB)); + + return ((high << 32) | low); +} + +static inline void sev_es_wr_ghcb_msr(u64 val) +{ + u32 low, high; + + low = val & 0xffffffffUL; + high = val >> 32; + + asm volatile("wrmsr" : : "c" (MSR_AMD64_SEV_ES_GHCB), + "a"(low), "d" (high) : "memory"); +} + +static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + enum es_result ret; + + memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); + + insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1); + insn_get_length(&ctxt->insn); + + ret = ctxt->insn.immediate.got ? 
ES_OK : ES_DECODE_FAILED; + + return ret; +} + +static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, + void *dst, char *buf, size_t size) +{ + memcpy(dst, buf, size); + + return ES_OK; +} + +static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, + void *src, char *buf, size_t size) +{ + memcpy(buf, src, size); + + return ES_OK; +} + +#undef __init +#undef __pa +#define __init +#define __pa(x) ((unsigned long)(x)) + +#define __BOOT_COMPRESSED + +/* Basic instruction decoding support needed */ +#include "../../lib/inat.c" +#include "../../lib/insn.c" + +/* Include code for early handlers */ +#include "../../kernel/sev-es-shared.c" + +static bool early_setup_sev_es(void) +{ + if (!sev_es_negotiate_protocol()) + sev_es_terminate(GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED); + + if (set_page_decrypted((unsigned long)&boot_ghcb_page)) + return false; + + /* Page is now mapped decrypted, clear it */ + memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page)); + + boot_ghcb = &boot_ghcb_page; + + /* Initialize lookup tables for the instruction decoder */ + inat_init_tables(); + + return true; +} + +void sev_es_shutdown_ghcb(void) +{ + if (!boot_ghcb) + return; + + if (!sev_es_check_cpu_features()) + error("SEV-ES CPU Features missing."); + + /* + * GHCB Page must be flushed from the cache and mapped encrypted again. + * Otherwise the running kernel will see strange cache effects when + * trying to use that page. + */ + if (set_page_encrypted((unsigned long)&boot_ghcb_page)) + error("Can't map GHCB page encrypted"); + + /* + * GHCB page is mapped encrypted again and flushed from the cache. + * Mark it non-present now to catch bugs when #VC exceptions trigger + * after this point. + */ + if (set_page_non_present((unsigned long)&boot_ghcb_page)) + error("Can't unmap GHCB page"); +} + +bool sev_es_check_ghcb_fault(unsigned long address) +{ + /* Check whether the fault was on the GHCB page */ + return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page); +} + +void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code) +{ + struct es_em_ctxt ctxt; + enum es_result result; + + if (!boot_ghcb && !early_setup_sev_es()) + sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST); + + vc_ghcb_invalidate(boot_ghcb); + result = vc_init_em_ctxt(&ctxt, regs, exit_code); + if (result != ES_OK) + goto finish; + + switch (exit_code) { + case SVM_EXIT_RDTSC: + case SVM_EXIT_RDTSCP: + result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code); + break; + case SVM_EXIT_IOIO: + result = vc_handle_ioio(boot_ghcb, &ctxt); + break; + case SVM_EXIT_CPUID: + result = vc_handle_cpuid(boot_ghcb, &ctxt); + break; + default: + result = ES_UNSUPPORTED; + break; + } + +finish: + if (result == ES_OK) { + vc_finish_insn(&ctxt); + } else if (result != ES_RETRY) { + /* + * For now, just halt the machine. That makes debugging easier, + * later we just call sev_es_terminate() here. + */ + while (true) + asm volatile("hlt\n"); + } +} diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S index 8f1025d1f681..112b2375d021 100644 --- a/arch/x86/boot/compressed/vmlinux.lds.S +++ b/arch/x86/boot/compressed/vmlinux.lds.S @@ -42,12 +42,6 @@ SECTIONS *(.rodata.*) _erodata = . ; } - .got : { - _got = .; - KEEP(*(.got.plt)) - KEEP(*(.got)) - _egot = .; - } .data : { _data = . ; *(.data) @@ -75,5 +69,49 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); /* keep ZO size page aligned */ _end = .; + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + DISCARDS + /DISCARD/ : { + *(.dynamic) *(.dynsym) *(.dynstr) *(.dynbss) + *(.hash) *(.gnu.hash) + *(.note.*) + } + + .got.plt (INFO) : { + *(.got.plt) + } + ASSERT(SIZEOF(.got.plt) == 0 || +#ifdef CONFIG_X86_64 + SIZEOF(.got.plt) == 0x18, +#else + SIZEOF(.got.plt) == 0xc, +#endif + "Unexpected GOT/PLT entries detected!") + + /* + * Sections that should stay zero sized, which is safer to + * explicitly check instead of blindly discarding. + */ + .got : { + *(.got) + } + ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!") + + .plt : { + *(.plt) *(.plt.*) + } + ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") + + .rel.dyn : { + *(.rel.*) *(.rel_*) + } + ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!") + + .rela.dyn : { + *(.rela.*) *(.rela_*) + } + ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") } diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld index 24c95522f231..49546c247ae2 100644 --- a/arch/x86/boot/setup.ld +++ b/arch/x86/boot/setup.ld @@ -20,7 +20,7 @@ SECTIONS .initdata : { *(.initdata) } __end_init = .; - .text : { *(.text) } + .text : { *(.text .text.*) } .text32 : { *(.text32) } . = ALIGN(16); diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index c8b8c1a8d1fc..a3725ad46c5a 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -416,8 +416,6 @@ int main(int argc, char ** argv) /* Set the default root device */ put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); - printf("Setup is %d bytes (padded to %d bytes).\n", c, i); - /* Open and stat the kernel file */ fd = open(argv[2], O_RDONLY); if (fd < 0) @@ -425,7 +423,6 @@ int main(int argc, char ** argv) if (fstat(fd, &sb)) die("Unable to stat `%s': %m", argv[2]); sz = sb.st_size; - printf("System is %d kB\n", (sz+1023)/1024); kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0); if (kernel == MAP_FAILED) die("Unable to mmap '%s': %m", argv[2]); @@ -488,7 +485,6 @@ int main(int argc, char ** argv) } /* Write the CRC */ - printf("CRC %x\n", crc); put_unaligned_le32(crc, buf); if (fwrite(buf, 1, 4, dest) != 4) die("Writing CRC failed"); diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c index 6737bcea1fa1..c025a01cf708 100644 --- a/arch/x86/crypto/blake2s-glue.c +++ b/arch/x86/crypto/blake2s-glue.c @@ -11,6 +11,7 @@ #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/sizes.h> #include <asm/cpufeature.h> #include <asm/fpu/api.h> diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c index e67a59130025..7b3a1cf0984b 100644 --- a/arch/x86/crypto/chacha_glue.c +++ b/arch/x86/crypto/chacha_glue.c @@ -12,6 +12,7 @@ #include <crypto/internal/skcipher.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/sizes.h> #include <asm/simd.h> asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index d2d069bd459b..feccb5254c7e 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -28,9 +28,9 @@ #define SCALE_F sizeof(unsigned long) #ifdef CONFIG_X86_64 -#define REX_PRE "0x48, " +#define CRC32_INST "crc32q %1, %q0" #else -#define REX_PRE +#define CRC32_INST "crc32l %1, %0" #endif #ifdef CONFIG_X86_64 @@ -48,11 +48,8 @@ asmlinkage 
unsigned int crc_pcl(const u8 *buffer, int len, static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length) { while (length--) { - __asm__ __volatile__( - ".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1" - :"=S"(crc) - :"0"(crc), "c"(*data) - ); + asm("crc32b %1, %0" + : "+r" (crc) : "rm" (*data)); data++; } @@ -66,11 +63,8 @@ static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len unsigned long *ptmp = (unsigned long *)p; while (iquotient--) { - __asm__ __volatile__( - ".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;" - :"=S"(crc) - :"0"(crc), "c"(*ptmp) - ); + asm(CRC32_INST + : "+r" (crc) : "rm" (*ptmp)); ptmp++; } diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c index 8acbb6584a37..5af8021b98ce 100644 --- a/arch/x86/crypto/curve25519-x86_64.c +++ b/arch/x86/crypto/curve25519-x86_64.c @@ -11,6 +11,7 @@ #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/scatterlist.h> #include <asm/cpufeature.h> #include <asm/processor.h> @@ -45,11 +46,11 @@ static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2) asm volatile( /* Clear registers to propagate the carry bit */ - " xor %%r8, %%r8;" - " xor %%r9, %%r9;" - " xor %%r10, %%r10;" - " xor %%r11, %%r11;" - " xor %1, %1;" + " xor %%r8d, %%r8d;" + " xor %%r9d, %%r9d;" + " xor %%r10d, %%r10d;" + " xor %%r11d, %%r11d;" + " xor %k1, %k1;" /* Begin addition chain */ " addq 0(%3), %0;" @@ -93,7 +94,7 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2) " cmovc %0, %%rax;" /* Step 2: Add carry*38 to the original sum */ - " xor %%rcx, %%rcx;" + " xor %%ecx, %%ecx;" " add %%rax, %%r8;" " adcx %%rcx, %%r9;" " movq %%r9, 8(%1);" @@ -165,28 +166,28 @@ static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) /* Compute src1[0] * src2 */ " movq 0(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" + " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" /* Compute src1[1] * src2 */ " movq 8(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" + " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" /* Compute src1[2] * src2 */ " movq 16(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" + " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" /* Compute src1[3] * src2 */ " movq 24(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" + " mulxq 0(%3), %%r8, %%r9;" " 
xor %%r10d, %%r10d;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;" @@ -200,7 +201,7 @@ static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ " mov $38, %%rdx;" " mulxq 32(%1), %%r8, %%r13;" - " xor %3, %3;" + " xor %k3, %k3;" " adoxq 0(%1), %%r8;" " mulxq 40(%1), %%r9, %%rbx;" " adcx %%r13, %%r9;" @@ -246,28 +247,28 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) /* Compute src1[0] * src2 */ " movq 0(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" + " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" /* Compute src1[1] * src2 */ " movq 8(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" + " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" /* Compute src1[2] * src2 */ " movq 16(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" + " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" /* Compute src1[3] * src2 */ " movq 24(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" + " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);" " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;" " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;" @@ -277,29 +278,29 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) /* Compute src1[0] * src2 */ " movq 32(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);" - " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);" + " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 64(%0);" + " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);" " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" /* Compute src1[1] * src2 */ " 
movq 40(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);" - " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);" + " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);" + " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);" " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" /* Compute src1[2] * src2 */ " movq 48(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);" - " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);" + " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);" + " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);" " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" /* Compute src1[3] * src2 */ " movq 56(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);" - " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);" + " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);" + " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);" " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%0);" " mov $0, %%r8;" " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%0);" " mov $0, %%rax;" " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);" @@ -312,7 +313,7 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ " mov $38, %%rdx;" " mulxq 32(%1), %%r8, %%r13;" - " xor %3, %3;" + " xor %k3, %k3;" " adoxq 0(%1), %%r8;" " mulxq 40(%1), %%r9, %%rbx;" " adcx %%r13, %%r9;" @@ -345,7 +346,7 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ " mov $38, %%rdx;" " mulxq 96(%1), %%r8, %%r13;" - " xor %3, %3;" + " xor %k3, %k3;" " adoxq 64(%1), %%r8;" " mulxq 104(%1), %%r9, %%rbx;" " adcx %%r13, %%r9;" @@ -516,7 +517,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp) /* Step 1: Compute all partial products */ " movq 0(%1), %%rdx;" /* f[0] */ - " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ + " mulxq 8(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */ " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ " movq 24(%1), %%rdx;" /* f[3] */ @@ -526,7 +527,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp) " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ /* Step 2: Compute two parallel carry chains */ - " xor %%r15, %%r15;" + " xor %%r15d, %%r15d;" " adox %%rax, %%r10;" " adcx %%r8, %%r8;" " adox %%rcx, %%r11;" @@ -563,7 +564,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp) /* Step 1: Compute dst + 
carry == tmp_hi * 38 + tmp_lo */ " mov $38, %%rdx;" " mulxq 32(%1), %%r8, %%r13;" - " xor %%rcx, %%rcx;" + " xor %%ecx, %%ecx;" " adoxq 0(%1), %%r8;" " mulxq 40(%1), %%r9, %%rbx;" " adcx %%r13, %%r9;" @@ -607,7 +608,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) asm volatile( /* Step 1: Compute all partial products */ " movq 0(%1), %%rdx;" /* f[0] */ - " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ + " mulxq 8(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */ " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ " movq 24(%1), %%rdx;" /* f[3] */ @@ -617,7 +618,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ /* Step 2: Compute two parallel carry chains */ - " xor %%r15, %%r15;" + " xor %%r15d, %%r15d;" " adox %%rax, %%r10;" " adcx %%r8, %%r8;" " adox %%rcx, %%r11;" @@ -647,7 +648,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) /* Step 1: Compute all partial products */ " movq 32(%1), %%rdx;" /* f[0] */ - " mulxq 40(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ + " mulxq 40(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */ " mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ " mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ " movq 56(%1), %%rdx;" /* f[3] */ @@ -657,7 +658,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) " mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ /* Step 2: Compute two parallel carry chains */ - " xor %%r15, %%r15;" + " xor %%r15d, %%r15d;" " adox %%rax, %%r10;" " adcx %%r8, %%r8;" " adox %%rcx, %%r11;" @@ -692,7 +693,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ " mov $38, %%rdx;" " mulxq 32(%1), %%r8, %%r13;" - " xor %%rcx, %%rcx;" + " xor %%ecx, %%ecx;" " adoxq 0(%1), %%r8;" " mulxq 40(%1), %%r9, %%rbx;" " adcx %%r13, %%r9;" @@ -725,7 +726,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ " mov $38, %%rdx;" " mulxq 96(%1), %%r8, %%r13;" - " xor %%rcx, %%rcx;" + " xor %%ecx, %%ecx;" " adoxq 64(%1), %%r8;" " mulxq 104(%1), %%r9, %%rbx;" " adcx %%r13, %%r9;" diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c index 80fcb85736e1..8ea5ab0f1ca7 100644 --- a/arch/x86/crypto/nhpoly1305-avx2-glue.c +++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c @@ -10,6 +10,7 @@ #include <crypto/internal/simd.h> #include <crypto/nhpoly1305.h> #include <linux/module.h> +#include <linux/sizes.h> #include <asm/simd.h> asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len, diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c index cc6b7c1a2705..2b353d42ed13 100644 --- a/arch/x86/crypto/nhpoly1305-sse2-glue.c +++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c @@ -10,6 +10,7 @@ #include <crypto/internal/simd.h> #include <crypto/nhpoly1305.h> #include <linux/module.h> +#include <linux/sizes.h> #include <asm/simd.h> asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len, diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl index 137edcf038cb..7d568012cc15 100644 --- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl +++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl @@ -246,7 +246,7 @@ 
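The xor rewrites running through these crypto files (curve25519, poly1305) are all the same micro-optimization: writing a 32-bit register on x86-64 zero-extends into the full 64-bit register, so "xor %eax, %eax" clears RAX exactly like "xor %rax, %rax" but without the REX prefix byte. A minimal way to see both forms side by side (illustrative inline asm; the encodings are noted in the comments):

static inline unsigned long zero_rax_32(void)
{
        unsigned long v;

        asm ("xor %%eax, %%eax" : "=a" (v));    /* 31 c0    - 2 bytes, RAX == 0 */
        return v;
}

static inline unsigned long zero_rax_64(void)
{
        unsigned long v;

        asm ("xor %%rax, %%rax" : "=a" (v));    /* 48 31 c0 - 3 bytes, RAX == 0 */
        return v;
}

Note that the forms touching %r8d-%r15d still need a REX prefix to select the extended register, so for those the rewrite does not change the encoding size.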
$code.=<<___ if (!$kernel); ___ &declare_function("poly1305_init_x86_64", 32, 3); $code.=<<___; - xor %rax,%rax + xor %eax,%eax mov %rax,0($ctx) # initialize hash value mov %rax,8($ctx) mov %rax,16($ctx) @@ -2853,7 +2853,7 @@ $code.=<<___; .type poly1305_init_base2_44,\@function,3 .align 32 poly1305_init_base2_44: - xor %rax,%rax + xor %eax,%eax mov %rax,0($ctx) # initialize hash value mov %rax,8($ctx) mov %rax,16($ctx) @@ -3947,7 +3947,7 @@ xor128_decrypt_n_pad: mov \$16,$len sub %r10,$len xor %eax,%eax - xor %r11,%r11 + xor %r11d,%r11d .Loop_dec_byte: mov ($inp,$otp),%r11b mov ($otp),%al @@ -4085,7 +4085,7 @@ avx_handler: .long 0xa548f3fc # cld; rep movsq mov $disp,%rsi - xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER + xor %ecx,%ecx # arg1, UNW_FLAG_NHANDLER mov 8(%rsi),%rdx # arg2, disp->ImageBase mov 0(%rsi),%r8 # arg3, disp->ControlPc mov 16(%rsi),%r9 # arg4, disp->FunctionEntry diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index dfe921efa9b2..e508dbd91813 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -11,6 +11,7 @@ #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/sizes.h> #include <asm/intel-family.h> #include <asm/simd.h> @@ -157,9 +158,6 @@ static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx, dctx->s[1] = get_unaligned_le32(&inp[4]); dctx->s[2] = get_unaligned_le32(&inp[8]); dctx->s[3] = get_unaligned_le32(&inp[12]); - inp += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - acc += POLY1305_BLOCK_SIZE; dctx->sset = true; } } diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index ae9b0d4615b3..07a9331d55e7 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -6,7 +6,6 @@ #include <asm/percpu.h> #include <asm/asm-offsets.h> #include <asm/processor-flags.h> -#include <asm/inst.h> /* diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index d977079a7d02..cad08703c4ad 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -46,13 +46,13 @@ .code64 .section .entry.text, "ax" -#ifdef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT_XXL SYM_CODE_START(native_usergs_sysret64) UNWIND_HINT_EMPTY swapgs sysretq SYM_CODE_END(native_usergs_sysret64) -#endif /* CONFIG_PARAVIRT */ +#endif /* CONFIG_PARAVIRT_XXL */ /* * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers. @@ -101,6 +101,8 @@ SYM_CODE_START(entry_SYSCALL_64) SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp +SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL) + /* Construct struct pt_regs on stack */ pushq $__USER_DS /* pt_regs->ss */ pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */ @@ -446,6 +448,84 @@ _ASM_NOKPROBE(\asmsym) SYM_CODE_END(\asmsym) .endm +#ifdef CONFIG_AMD_MEM_ENCRYPT +/** + * idtentry_vc - Macro to generate entry stub for #VC + * @vector: Vector number + * @asmsym: ASM symbol for the entry point + * @cfunc: C function to be called + * + * The macro emits code to set up the kernel context for #VC. The #VC handler + * runs on an IST stack and needs to be able to cause nested #VC exceptions. + * + * To make this work the #VC entry code tries its best to pretend it doesn't use + * an IST stack by switching to the task stack if coming from user-space (which + * includes early SYSCALL entry path) or back to the stack in the IRET frame if + * entered from kernel-mode. 
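Summarized as a sketch, the stack policy this comment describes (the kernel-mode case is spelled out in the remainder of the comment just below) looks roughly like the following. This is illustrative only; task_stack_top(), stack_is_safe() and vc2_stack_top() are invented names, not kernel helpers:

#include <linux/types.h>
#include <linux/ptrace.h>

/* Invented helpers, for illustration only. */
extern unsigned long task_stack_top(void);
extern unsigned long vc2_stack_top(void);
extern bool stack_is_safe(unsigned long sp);

/* Which stack the #VC C handler ends up running on. */
static unsigned long vc_pick_stack(struct pt_regs *regs)
{
	if (user_mode(regs))
		return task_stack_top();	/* from user space: regular task stack */

	if (stack_is_safe(regs->sp))
		return regs->sp;		/* safe: back to the interrupted stack */

	return vc2_stack_top();		/* unsafe (e.g. entry stack): VC2 fall-back */
}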
+ * + * If entered from kernel-mode the return stack is validated first, and if it is + * not safe to use (e.g. because it points to the entry stack) the #VC handler + * will switch to a fall-back stack (VC2) and call a special handler function. + * + * The macro is only used for one vector, but it is planned to be extended in + * the future for the #HV exception. + */ +.macro idtentry_vc vector asmsym cfunc +SYM_CODE_START(\asmsym) + UNWIND_HINT_IRET_REGS + ASM_CLAC + + /* + * If the entry is from userspace, switch stacks and treat it as + * a normal entry. + */ + testb $3, CS-ORIG_RAX(%rsp) + jnz .Lfrom_usermode_switch_stack_\@ + + /* + * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX. + * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS + */ + call paranoid_entry + + UNWIND_HINT_REGS + + /* + * Switch off the IST stack to make it free for nested exceptions. The + * vc_switch_off_ist() function will switch back to the interrupted + * stack if it is safe to do so. If not it switches to the VC fall-back + * stack. + */ + movq %rsp, %rdi /* pt_regs pointer */ + call vc_switch_off_ist + movq %rax, %rsp /* Switch to new stack */ + + UNWIND_HINT_REGS + + /* Update pt_regs */ + movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/ + movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ + + movq %rsp, %rdi /* pt_regs pointer */ + + call \cfunc + + /* + * No need to switch back to the IST stack. The current stack is either + * identical to the stack in the IRET frame or the VC fall-back stack, + * so it is definitly mapped even with PTI enabled. + */ + jmp paranoid_exit + + /* Switch to the regular task stack */ +.Lfrom_usermode_switch_stack_\@: + idtentry_body safe_stack_\cfunc, has_error_code=1 + +_ASM_NOKPROBE(\asmsym) +SYM_CODE_END(\asmsym) +.endm +#endif + /* * Double fault entry. Straight paranoid. No checks from which context * this comes because for the espfix induced #DF this would do the wrong @@ -842,8 +922,9 @@ SYM_CODE_START_LOCAL(paranoid_entry) * retrieve and set the current CPUs kernel GSBASE. The stored value * has to be restored in paranoid_exit unconditionally. * - * The MSR write ensures that no subsequent load is based on a - * mispredicted GSBASE. No extra FENCE required. + * The unconditional write to GS base below ensures that no subsequent + * loads based on a mispredicted GS base can happen, therefore no LFENCE + * is needed here. */ SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx ret diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c index 1583831f61a9..f2fe0a33bcfd 100644 --- a/arch/x86/entry/syscall_x32.c +++ b/arch/x86/entry/syscall_x32.c @@ -12,8 +12,13 @@ * Reuse the 64-bit entry points for the x32 versions that occupy different * slots in the syscall table. 
*/ +#define __x32_sys_readv __x64_sys_readv +#define __x32_sys_writev __x64_sys_writev #define __x32_sys_getsockopt __x64_sys_getsockopt #define __x32_sys_setsockopt __x64_sys_setsockopt +#define __x32_sys_vmsplice __x64_sys_vmsplice +#define __x32_sys_process_vm_readv __x64_sys_process_vm_readv +#define __x32_sys_process_vm_writev __x64_sys_process_vm_writev #define __SYSCALL_64(nr, sym) diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 9d1102873666..0d0667a9fbd7 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -32,7 +32,7 @@ 18 i386 oldstat sys_stat 19 i386 lseek sys_lseek compat_sys_lseek 20 i386 getpid sys_getpid -21 i386 mount sys_mount compat_sys_mount +21 i386 mount sys_mount 22 i386 umount sys_oldumount 23 i386 setuid sys_setuid16 24 i386 getuid sys_getuid16 @@ -142,7 +142,7 @@ 128 i386 init_module sys_init_module 129 i386 delete_module sys_delete_module 130 i386 get_kernel_syms -131 i386 quotactl sys_quotactl compat_sys_quotactl32 +131 i386 quotactl sys_quotactl 132 i386 getpgid sys_getpgid 133 i386 fchdir sys_fchdir 134 i386 bdflush sys_bdflush @@ -156,8 +156,8 @@ 142 i386 _newselect sys_select compat_sys_select 143 i386 flock sys_flock 144 i386 msync sys_msync -145 i386 readv sys_readv compat_sys_readv -146 i386 writev sys_writev compat_sys_writev +145 i386 readv sys_readv +146 i386 writev sys_writev 147 i386 getsid sys_getsid 148 i386 fdatasync sys_fdatasync 149 i386 _sysctl sys_ni_syscall @@ -327,7 +327,7 @@ 313 i386 splice sys_splice 314 i386 sync_file_range sys_ia32_sync_file_range 315 i386 tee sys_tee -316 i386 vmsplice sys_vmsplice compat_sys_vmsplice +316 i386 vmsplice sys_vmsplice 317 i386 move_pages sys_move_pages compat_sys_move_pages 318 i386 getcpu sys_getcpu 319 i386 epoll_pwait sys_epoll_pwait @@ -358,8 +358,8 @@ 344 i386 syncfs sys_syncfs 345 i386 sendmmsg sys_sendmmsg compat_sys_sendmmsg 346 i386 setns sys_setns -347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev +347 i386 process_vm_readv sys_process_vm_readv +348 i386 process_vm_writev sys_process_vm_writev 349 i386 kcmp sys_kcmp 350 i386 finit_module sys_finit_module 351 i386 sched_setattr sys_sched_setattr @@ -444,3 +444,4 @@ 437 i386 openat2 sys_openat2 438 i386 pidfd_getfd sys_pidfd_getfd 439 i386 faccessat2 sys_faccessat2 +440 i386 process_madvise sys_process_madvise diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index f30d6ae9a688..1f47e24fb65c 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -361,6 +361,7 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise # # x32-specific system call numbers start at 512 to avoid cache impact @@ -371,8 +372,8 @@ 512 x32 rt_sigaction compat_sys_rt_sigaction 513 x32 rt_sigreturn compat_sys_x32_rt_sigreturn 514 x32 ioctl compat_sys_ioctl -515 x32 readv compat_sys_readv -516 x32 writev compat_sys_writev +515 x32 readv sys_readv +516 x32 writev sys_writev 517 x32 recvfrom compat_sys_recvfrom 518 x32 sendmsg compat_sys_sendmsg 519 x32 recvmsg compat_sys_recvmsg @@ -388,15 +389,15 @@ 529 x32 waitid compat_sys_waitid 530 x32 set_robust_list compat_sys_set_robust_list 531 x32 get_robust_list compat_sys_get_robust_list -532 x32 vmsplice compat_sys_vmsplice +532 x32 vmsplice 
sys_vmsplice 533 x32 move_pages compat_sys_move_pages 534 x32 preadv compat_sys_preadv64 535 x32 pwritev compat_sys_pwritev64 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 537 x32 recvmmsg compat_sys_recvmmsg_time64 538 x32 sendmmsg compat_sys_sendmmsg -539 x32 process_vm_readv compat_sys_process_vm_readv -540 x32 process_vm_writev compat_sys_process_vm_writev +539 x32 process_vm_readv sys_process_vm_readv +540 x32 process_vm_writev sys_process_vm_writev 541 x32 setsockopt sys_setsockopt 542 x32 getsockopt sys_getsockopt 543 x32 io_setup compat_sys_io_setup diff --git a/arch/x86/entry/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c index 84a4a73f77f7..283ed9d00426 100644 --- a/arch/x86/entry/vdso/vdso32/vclock_gettime.c +++ b/arch/x86/entry/vdso/vdso32/vclock_gettime.c @@ -14,6 +14,7 @@ #undef CONFIG_ILLEGAL_POINTER_VALUE #undef CONFIG_SPARSEMEM_VMEMMAP #undef CONFIG_NR_CPUS +#undef CONFIG_PARAVIRT_XXL #define CONFIG_X86_32 1 #define CONFIG_PGTABLE_LEVELS 2 diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 26c36357c4c9..40669eac9d6d 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -89,6 +89,7 @@ struct perf_ibs { u64 max_period; unsigned long offset_mask[1]; int offset_max; + unsigned int fetch_count_reset_broken : 1; struct cpu_perf_ibs __percpu *pcpu; struct attribute **format_attrs; @@ -334,11 +335,18 @@ static u64 get_ibs_op_count(u64 config) { u64 count = 0; - if (config & IBS_OP_VAL) - count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */ - - if (ibs_caps & IBS_CAPS_RDWROPCNT) - count += (config & IBS_OP_CUR_CNT) >> 32; + /* + * If the internal 27-bit counter rolled over, the count is MaxCnt + * and the lower 7 bits of CurCnt are randomized. + * Otherwise CurCnt has the full 27-bit current counter value. + */ + if (config & IBS_OP_VAL) { + count = (config & IBS_OP_MAX_CNT) << 4; + if (ibs_caps & IBS_CAPS_OPCNTEXT) + count += config & IBS_OP_MAX_CNT_EXT_MASK; + } else if (ibs_caps & IBS_CAPS_RDWROPCNT) { + count = (config & IBS_OP_CUR_CNT) >> 32; + } return count; } @@ -363,7 +371,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs, struct hw_perf_event *hwc, u64 config) { - wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask); + u64 tmp = hwc->config | config; + + if (perf_ibs->fetch_count_reset_broken) + wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask); + + wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask); } /* @@ -394,7 +407,7 @@ static void perf_ibs_start(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu); struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); - u64 period; + u64 period, config = 0; if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; @@ -403,13 +416,19 @@ static void perf_ibs_start(struct perf_event *event, int flags) hwc->state = 0; perf_ibs_set_period(perf_ibs, hwc, &period); + if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) { + config |= period & IBS_OP_MAX_CNT_EXT_MASK; + period &= ~IBS_OP_MAX_CNT_EXT_MASK; + } + config |= period >> 4; + /* * Set STARTED before enabling the hardware, such that a subsequent NMI * must observe it. 
*/ set_bit(IBS_STARTED, pcpu->state); clear_bit(IBS_STOPPING, pcpu->state); - perf_ibs_enable_event(perf_ibs, hwc, period >> 4); + perf_ibs_enable_event(perf_ibs, hwc, config); perf_event_update_userpage(event); } @@ -577,7 +596,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) struct perf_ibs_data ibs_data; int offset, size, check_rip, offset_max, throttle = 0; unsigned int msr; - u64 *buf, *config, period; + u64 *buf, *config, period, new_config = 0; if (!test_bit(IBS_STARTED, pcpu->state)) { fail: @@ -626,18 +645,24 @@ fail: perf_ibs->offset_max, offset + 1); } while (offset < offset_max); + /* + * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately + * depending on their availability. + * Can't add to offset_max as they are staggered + */ if (event->attr.sample_type & PERF_SAMPLE_RAW) { - /* - * Read IbsBrTarget and IbsOpData4 separately - * depending on their availability. - * Can't add to offset_max as they are staggered - */ - if (ibs_caps & IBS_CAPS_BRNTRGT) { - rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); - size++; + if (perf_ibs == &perf_ibs_op) { + if (ibs_caps & IBS_CAPS_BRNTRGT) { + rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); + size++; + } + if (ibs_caps & IBS_CAPS_OPDATA4) { + rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + size++; + } } - if (ibs_caps & IBS_CAPS_OPDATA4) { - rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) { + rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++); size++; } } @@ -666,13 +691,17 @@ out: if (throttle) { perf_ibs_stop(event, 0); } else { - period >>= 4; - - if ((ibs_caps & IBS_CAPS_RDWROPCNT) && - (*config & IBS_OP_CNT_CTL)) - period |= *config & IBS_OP_CUR_CNT_RAND; + if (perf_ibs == &perf_ibs_op) { + if (ibs_caps & IBS_CAPS_OPCNTEXT) { + new_config = period & IBS_OP_MAX_CNT_EXT_MASK; + period &= ~IBS_OP_MAX_CNT_EXT_MASK; + } + if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL)) + new_config |= *config & IBS_OP_CUR_CNT_RAND; + } + new_config |= period >> 4; - perf_ibs_enable_event(perf_ibs, hwc, period); + perf_ibs_enable_event(perf_ibs, hwc, new_config); } perf_event_update_userpage(event); @@ -733,12 +762,26 @@ static __init void perf_event_ibs_init(void) { struct attribute **attr = ibs_op_format_attrs; + /* + * Some chips fail to reset the fetch count when it is written; instead + * they need a 0-1 transition of IbsFetchEn. 
+ */ + if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18) + perf_ibs_fetch.fetch_count_reset_broken = 1; + perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); if (ibs_caps & IBS_CAPS_OPCNT) { perf_ibs_op.config_mask |= IBS_OP_CNT_CTL; *attr++ = &format_attr_cnt_ctl.attr; } + + if (ibs_caps & IBS_CAPS_OPCNTEXT) { + perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK; + perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK; + perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; + } + perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index fb616203ce42..be50ef8572cc 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -379,7 +379,7 @@ static __init int _init_events_attrs(void) while (amd_iommu_v2_event_descs[i].attr.attr.name) i++; - attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL); + attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL); if (!attrs) return -ENOMEM; diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 76400c052b0e..7f014d450bc2 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -181,28 +181,28 @@ static void amd_uncore_del(struct perf_event *event, int flags) } /* - * Convert logical CPU number to L3 PMC Config ThreadMask format + * Return a full thread and slice mask unless user + * has provided them */ -static u64 l3_thread_slice_mask(int cpu) +static u64 l3_thread_slice_mask(u64 config) { - u64 thread_mask, core = topology_core_id(cpu); - unsigned int shift, thread = 0; + if (boot_cpu_data.x86 <= 0x18) + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); - if (topology_smt_supported() && !topology_is_primary_thread(cpu)) - thread = 1; - - if (boot_cpu_data.x86 <= 0x18) { - shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread; - thread_mask = BIT_ULL(shift); - - return AMD64_L3_SLICE_MASK | thread_mask; - } - - core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK; - shift = AMD64_L3_THREAD_SHIFT + thread; - thread_mask = BIT_ULL(shift); + /* + * If the user doesn't specify a threadmask, they're not trying to + * count core 0, so we enable all cores & threads. + * We'll also assume that they want to count slice 0 if they specify + * a threadmask and leave sliceid and enallslices unpopulated. + */ + if (!(config & AMD64_L3_F19H_THREAD_MASK)) + return AMD64_L3_F19H_THREAD_MASK | AMD64_L3_EN_ALL_SLICES | + AMD64_L3_EN_ALL_CORES; - return AMD64_L3_EN_ALL_SLICES | core | thread_mask; + return config & (AMD64_L3_F19H_THREAD_MASK | AMD64_L3_SLICEID_MASK | + AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES | + AMD64_L3_COREID_MASK); } static int amd_uncore_event_init(struct perf_event *event) @@ -232,7 +232,7 @@ static int amd_uncore_event_init(struct perf_event *event) * For other events, the two fields do not affect the count. 
*/ if (l3_mask && is_llc_event(event)) - hwc->config |= l3_thread_slice_mask(event->cpu); + hwc->config |= l3_thread_slice_mask(event->attr.config); uncore = event_to_amd_uncore(event); if (!uncore) @@ -274,47 +274,72 @@ static struct attribute_group amd_uncore_attr_group = { .attrs = amd_uncore_attrs, }; -/* - * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based - * on family - */ -#define AMD_FORMAT_ATTR(_dev, _name, _format) \ -static ssize_t \ -_dev##_show##_name(struct device *dev, \ - struct device_attribute *attr, \ - char *page) \ -{ \ - BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ - return sprintf(page, _format "\n"); \ -} \ -static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev); - -/* Used for each uncore counter type */ -#define AMD_ATTRIBUTE(_name) \ -static struct attribute *amd_uncore_format_attr_##_name[] = { \ - &format_attr_event_##_name.attr, \ - &format_attr_umask.attr, \ - NULL, \ -}; \ -static struct attribute_group amd_uncore_format_group_##_name = { \ - .name = "format", \ - .attrs = amd_uncore_format_attr_##_name, \ -}; \ -static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \ - &amd_uncore_attr_group, \ - &amd_uncore_format_group_##_name, \ - NULL, \ +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct kobj_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35"); +DEFINE_UNCORE_FORMAT_ATTR(event14, event, "config:0-7,32-35,59-60"); /* F17h+ DF */ +DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3 */ +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(coreid, coreid, "config:42-44"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(slicemask, slicemask, "config:48-51"); /* F17h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask8, threadmask, "config:56-63"); /* F17h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */ + +static struct attribute *amd_uncore_df_format_attr[] = { + &format_attr_event12.attr, /* event14 if F17h+ */ + &format_attr_umask.attr, + NULL, +}; + +static struct attribute *amd_uncore_l3_format_attr[] = { + &format_attr_event12.attr, /* event8 if F17h+ */ + &format_attr_umask.attr, + NULL, /* slicemask if F17h, coreid if F19h */ + NULL, /* threadmask8 if F17h, enallslices if F19h */ + NULL, /* enallcores if F19h */ + NULL, /* sliceid if F19h */ + NULL, /* threadmask2 if F19h */ + NULL, +}; + +static struct attribute_group amd_uncore_df_format_group = { + .name = "format", + .attrs = amd_uncore_df_format_attr, }; -AMD_FORMAT_ATTR(event, , "config:0-7,32-35"); -AMD_FORMAT_ATTR(umask, , "config:8-15"); -AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60"); -AMD_FORMAT_ATTR(event, _l3, "config:0-7"); -AMD_ATTRIBUTE(df); -AMD_ATTRIBUTE(l3); +static struct attribute_group amd_uncore_l3_format_group = { + .name = "format", + .attrs = amd_uncore_l3_format_attr, +}; + +static const struct attribute_group *amd_uncore_df_attr_groups[] = { + &amd_uncore_attr_group, + 
&amd_uncore_df_format_group, + NULL, +}; + +static const struct attribute_group *amd_uncore_l3_attr_groups[] = { + &amd_uncore_attr_group, + &amd_uncore_l3_format_group, + NULL, +}; static struct pmu amd_nb_pmu = { .task_ctx_nr = perf_invalid_context, + .attr_groups = amd_uncore_df_attr_groups, + .name = "amd_nb", .event_init = amd_uncore_event_init, .add = amd_uncore_add, .del = amd_uncore_del, @@ -326,6 +351,8 @@ static struct pmu amd_nb_pmu = { static struct pmu amd_llc_pmu = { .task_ctx_nr = perf_invalid_context, + .attr_groups = amd_uncore_l3_attr_groups, + .name = "amd_l2", .event_init = amd_uncore_event_init, .add = amd_uncore_add, .del = amd_uncore_del, @@ -529,6 +556,8 @@ static int amd_uncore_cpu_dead(unsigned int cpu) static int __init amd_uncore_init(void) { + struct attribute **df_attr = amd_uncore_df_format_attr; + struct attribute **l3_attr = amd_uncore_l3_format_attr; int ret = -ENODEV; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && @@ -538,6 +567,8 @@ static int __init amd_uncore_init(void) if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) return -ENODEV; + num_counters_nb = NUM_COUNTERS_NB; + num_counters_llc = NUM_COUNTERS_L2; if (boot_cpu_data.x86 >= 0x17) { /* * For F17h and above, the Northbridge counters are @@ -545,27 +576,16 @@ static int __init amd_uncore_init(void) * counters are supported too. The PMUs are exported * based on family as either L2 or L3 and NB or DF. */ - num_counters_nb = NUM_COUNTERS_NB; num_counters_llc = NUM_COUNTERS_L3; amd_nb_pmu.name = "amd_df"; amd_llc_pmu.name = "amd_l3"; - format_attr_event_df.show = &event_show_df; - format_attr_event_l3.show = &event_show_l3; l3_mask = true; - } else { - num_counters_nb = NUM_COUNTERS_NB; - num_counters_llc = NUM_COUNTERS_L2; - amd_nb_pmu.name = "amd_nb"; - amd_llc_pmu.name = "amd_l2"; - format_attr_event_df = format_attr_event; - format_attr_event_l3 = format_attr_event; - l3_mask = false; } - amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df; - amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3; - if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) { + if (boot_cpu_data.x86 >= 0x17) + *df_attr = &format_attr_event14.attr; + amd_uncore_nb = alloc_percpu(struct amd_uncore *); if (!amd_uncore_nb) { ret = -ENOMEM; @@ -575,13 +595,29 @@ static int __init amd_uncore_init(void) if (ret) goto fail_nb; - pr_info("%s NB counters detected\n", - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? - "HYGON" : "AMD"); + pr_info("%d %s %s counters detected\n", num_counters_nb, + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "", + amd_nb_pmu.name); + ret = 0; } if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) { + if (boot_cpu_data.x86 >= 0x19) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask.attr; + *l3_attr++ = &format_attr_coreid.attr; + *l3_attr++ = &format_attr_enallslices.attr; + *l3_attr++ = &format_attr_enallcores.attr; + *l3_attr++ = &format_attr_sliceid.attr; + *l3_attr++ = &format_attr_threadmask2.attr; + } else if (boot_cpu_data.x86 >= 0x17) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask.attr; + *l3_attr++ = &format_attr_slicemask.attr; + *l3_attr++ = &format_attr_threadmask8.attr; + } + amd_uncore_llc = alloc_percpu(struct amd_uncore *); if (!amd_uncore_llc) { ret = -ENOMEM; @@ -591,9 +627,9 @@ static int __init amd_uncore_init(void) if (ret) goto fail_llc; - pr_info("%s LLC counters detected\n", - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
- "HYGON" : "AMD"); + pr_info("%d %s %s counters detected\n", num_counters_llc, + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "HYGON" : "", + amd_llc_pmu.name); ret = 0; } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 1cbf57dc2ac8..a88c94d65693 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -28,6 +28,7 @@ #include <linux/bitops.h> #include <linux/device.h> #include <linux/nospec.h> +#include <linux/static_call.h> #include <asm/apic.h> #include <asm/stacktrace.h> @@ -52,6 +53,34 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key); DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key); +/* + * This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined + * from just a typename, as opposed to an actual function. + */ +DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq); +DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all); +DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all); +DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable); +DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable); + +DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add); +DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del); +DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read); + +DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events); +DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints); +DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints); + +DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling, *x86_pmu.start_scheduling); +DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling); +DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling); + +DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task); +DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx); + +DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases); + u64 __read_mostly hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -76,6 +105,9 @@ u64 x86_perf_event_update(struct perf_event *event) if (unlikely(!hwc->event_base)) return 0; + if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event) + return x86_pmu.update_topdown_event(event); + /* * Careful: an NMI might modify the previous event value. * @@ -660,7 +692,7 @@ static void x86_pmu_disable(struct pmu *pmu) cpuc->enabled = 0; barrier(); - x86_pmu.disable_all(); + static_call(x86_pmu_disable_all)(); } void x86_pmu_enable_all(int added) @@ -907,8 +939,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (cpuc->txn_flags & PERF_PMU_TXN_ADD) n0 -= cpuc->n_txn; - if (x86_pmu.start_scheduling) - x86_pmu.start_scheduling(cpuc); + static_call_cond(x86_pmu_start_scheduling)(cpuc); for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { c = cpuc->event_constraint[i]; @@ -925,7 +956,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) * change due to external factors (sibling state, allow_tfa). 
*/ if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) { - c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); + c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]); cpuc->event_constraint[i] = c; } @@ -1008,8 +1039,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (!unsched && assign) { for (i = 0; i < n; i++) { e = cpuc->event_list[i]; - if (x86_pmu.commit_scheduling) - x86_pmu.commit_scheduling(cpuc, i, assign[i]); + static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]); } } else { for (i = n0; i < n; i++) { @@ -1018,19 +1048,56 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) /* * release events that failed scheduling */ - if (x86_pmu.put_event_constraints) - x86_pmu.put_event_constraints(cpuc, e); + static_call_cond(x86_pmu_put_event_constraints)(cpuc, e); cpuc->event_constraint[i] = NULL; } } - if (x86_pmu.stop_scheduling) - x86_pmu.stop_scheduling(cpuc); + static_call_cond(x86_pmu_stop_scheduling)(cpuc); return unsched ? -EINVAL : 0; } +static int add_nr_metric_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_metric_event(event)) { + if (cpuc->n_metric == INTEL_TD_METRIC_NUM) + return -EINVAL; + cpuc->n_metric++; + cpuc->n_txn_metric++; + } + + return 0; +} + +static void del_nr_metric_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_metric_event(event)) + cpuc->n_metric--; +} + +static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, + int max_count, int n) +{ + + if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) + return -EINVAL; + + if (n >= max_count + cpuc->n_metric) + return -EINVAL; + + cpuc->event_list[n] = event; + if (is_counter_pair(&event->hw)) { + cpuc->n_pair++; + cpuc->n_txn_pair++; + } + + return 0; +} + /* * dogrp: true if must collect siblings events (group) * returns total number of events and error code @@ -1067,28 +1134,22 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, } if (is_x86_event(leader)) { - if (n >= max_count) + if (collect_event(cpuc, leader, max_count, n)) return -EINVAL; - cpuc->event_list[n] = leader; n++; - if (is_counter_pair(&leader->hw)) - cpuc->n_pair++; } + if (!dogrp) return n; for_each_sibling_event(event, leader) { - if (!is_x86_event(event) || - event->state <= PERF_EVENT_STATE_OFF) + if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) continue; - if (n >= max_count) + if (collect_event(cpuc, event, max_count, n)) return -EINVAL; - cpuc->event_list[n] = event; n++; - if (is_counter_pair(&event->hw)) - cpuc->n_pair++; } return n; } @@ -1110,11 +1171,16 @@ static inline void x86_assign_hw_event(struct perf_event *event, hwc->event_base = 0; break; + case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: + /* All the metric events are mapped onto the fixed counter 3. */ + idx = INTEL_PMC_IDX_FIXED_SLOTS; + /* fall through */ case INTEL_PMC_IDX_FIXED ... 
INTEL_PMC_IDX_FIXED_BTS-1: hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (idx - INTEL_PMC_IDX_FIXED); - hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | 1<<30; + hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | + INTEL_PMC_FIXED_RDPMC_BASE; break; default: @@ -1226,7 +1292,7 @@ static void x86_pmu_enable(struct pmu *pmu) cpuc->enabled = 1; barrier(); - x86_pmu.enable_all(added); + static_call(x86_pmu_enable_all)(added); } static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); @@ -1245,6 +1311,10 @@ int x86_perf_event_set_period(struct perf_event *event) if (unlikely(!hwc->event_base)) return 0; + if (unlikely(is_topdown_count(event)) && + x86_pmu.set_topdown_event_period) + return x86_pmu.set_topdown_event_period(event); + /* * If we are way outside a reasonable range then just skip forward: */ @@ -1284,11 +1354,11 @@ int x86_perf_event_set_period(struct perf_event *event) wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); /* - * Clear the Merge event counter's upper 16 bits since + * Sign extend the Merge event counter's upper 16 bits since * we currently declare a 48-bit counter width */ if (is_counter_pair(hwc)) - wrmsrl(x86_pmu_event_addr(idx + 1), 0); + wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff); /* * Due to erratum on certan cpu we need @@ -1347,7 +1417,7 @@ static int x86_pmu_add(struct perf_event *event, int flags) if (cpuc->txn_flags & PERF_PMU_TXN_ADD) goto done_collect; - ret = x86_pmu.schedule_events(cpuc, n, assign); + ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign); if (ret) goto out; /* @@ -1365,13 +1435,11 @@ done_collect: cpuc->n_added += n - n0; cpuc->n_txn += n - n0; - if (x86_pmu.add) { - /* - * This is before x86_pmu_enable() will call x86_pmu_start(), - * so we enable LBRs before an event needs them etc.. - */ - x86_pmu.add(event); - } + /* + * This is before x86_pmu_enable() will call x86_pmu_start(), + * so we enable LBRs before an event needs them etc.. + */ + static_call_cond(x86_pmu_add)(event); ret = 0; out: @@ -1399,7 +1467,7 @@ static void x86_pmu_start(struct perf_event *event, int flags) cpuc->events[idx] = event; __set_bit(idx, cpuc->active_mask); __set_bit(idx, cpuc->running); - x86_pmu.enable(event); + static_call(x86_pmu_enable)(event); perf_event_update_userpage(event); } @@ -1469,7 +1537,7 @@ void x86_pmu_stop(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; if (test_bit(hwc->idx, cpuc->active_mask)) { - x86_pmu.disable(event); + static_call(x86_pmu_disable)(event); __clear_bit(hwc->idx, cpuc->active_mask); cpuc->events[hwc->idx] = NULL; WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); @@ -1519,8 +1587,7 @@ static void x86_pmu_del(struct perf_event *event, int flags) if (i >= cpuc->n_events - cpuc->n_added) --cpuc->n_added; - if (x86_pmu.put_event_constraints) - x86_pmu.put_event_constraints(cpuc, event); + static_call_cond(x86_pmu_put_event_constraints)(cpuc, event); /* Delete the array entry. */ while (++i < cpuc->n_events) { @@ -1529,17 +1596,18 @@ static void x86_pmu_del(struct perf_event *event, int flags) } cpuc->event_constraint[i-1] = NULL; --cpuc->n_events; + if (x86_pmu.intel_cap.perf_metrics) + del_nr_metric_event(cpuc, event); perf_event_update_userpage(event); do_del: - if (x86_pmu.del) { - /* - * This is after x86_pmu_stop(); so we disable LBRs after any - * event can need them etc.. - */ - x86_pmu.del(event); - } + + /* + * This is after x86_pmu_stop(); so we disable LBRs after any + * event can need them etc.. 
+ */ + static_call_cond(x86_pmu_del)(event); } int x86_pmu_handle_irq(struct pt_regs *regs) @@ -1617,7 +1685,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) return NMI_DONE; start_clock = sched_clock(); - ret = x86_pmu.handle_irq(regs); + ret = static_call(x86_pmu_handle_irq)(regs); finish_clock = sched_clock(); perf_sample_event_took(finish_clock - start_clock); @@ -1830,6 +1898,38 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) static struct attribute_group x86_pmu_attr_group; static struct attribute_group x86_pmu_caps_group; +static void x86_pmu_static_call_update(void) +{ + static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq); + static_call_update(x86_pmu_disable_all, x86_pmu.disable_all); + static_call_update(x86_pmu_enable_all, x86_pmu.enable_all); + static_call_update(x86_pmu_enable, x86_pmu.enable); + static_call_update(x86_pmu_disable, x86_pmu.disable); + + static_call_update(x86_pmu_add, x86_pmu.add); + static_call_update(x86_pmu_del, x86_pmu.del); + static_call_update(x86_pmu_read, x86_pmu.read); + + static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events); + static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints); + static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints); + + static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling); + static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling); + static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling); + + static_call_update(x86_pmu_sched_task, x86_pmu.sched_task); + static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx); + + static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs); + static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases); +} + +static void _x86_pmu_read(struct perf_event *event) +{ + x86_perf_event_update(event); +} + static int __init init_hw_perf_events(void) { struct x86_pmu_quirk *quirk; @@ -1898,6 +1998,11 @@ static int __init init_hw_perf_events(void) pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); + if (!x86_pmu.read) + x86_pmu.read = _x86_pmu_read; + + x86_pmu_static_call_update(); + /* * Install callbacks. Core will call them for each online * cpu. 
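The block above converts every x86_pmu.* indirect call into a static call: DEFINE_STATIC_CALL_NULL() creates the call site from the member's type, x86_pmu_static_call_update() binds the sites once the vendor-specific struct x86_pmu is known, and the hot paths then go through static_call() for mandatory ops or static_call_cond() for optional ops that may be NULL. A minimal, self-contained sketch of that pattern follows; the demo_* names are invented for illustration, only the static_call API itself is the kernel's:

#include <linux/static_call.h>

struct demo_ops {
	int  (*handle)(int irq);	/* mandatory op */
	void (*add)(void *event);	/* optional op, may stay NULL */
};

static struct demo_ops demo;

/* Call sites are defined from the member's type and initially point nowhere. */
DEFINE_STATIC_CALL_NULL(demo_handle, *demo.handle);
DEFINE_STATIC_CALL_NULL(demo_add,    *demo.add);

/* Once the concrete ops are known (e.g. at init time), patch the call sites. */
static void demo_bind(void)
{
	static_call_update(demo_handle, demo.handle);
	static_call_update(demo_add,    demo.add);
}

static int demo_hot_path(int irq)
{
	/* Patched direct call: no function-pointer load, no retpoline thunk. */
	return static_call(demo_handle)(irq);
}

static void demo_optional_path(void *event)
{
	/* static_call_cond() degrades to a NOP while the target is still NULL. */
	static_call_cond(demo_add)(event);
}

The payoff is that hot paths such as the perf NMI handler execute a patched direct call instead of loading a function pointer (and, with retpolines, an indirect-branch thunk) on every invocation.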
@@ -1934,11 +2039,9 @@ out: } early_initcall(init_hw_perf_events); -static inline void x86_pmu_read(struct perf_event *event) +static void x86_pmu_read(struct perf_event *event) { - if (x86_pmu.read) - return x86_pmu.read(event); - x86_perf_event_update(event); + static_call(x86_pmu_read)(event); } /* @@ -1962,6 +2065,8 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) perf_pmu_disable(pmu); __this_cpu_write(cpu_hw_events.n_txn, 0); + __this_cpu_write(cpu_hw_events.n_txn_pair, 0); + __this_cpu_write(cpu_hw_events.n_txn_metric, 0); } /* @@ -1987,6 +2092,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu) */ __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); + __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair)); + __this_cpu_sub(cpu_hw_events.n_metric, __this_cpu_read(cpu_hw_events.n_txn_metric)); perf_pmu_enable(pmu); } @@ -2015,7 +2122,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu) if (!x86_pmu_initialized()) return -EAGAIN; - ret = x86_pmu.schedule_events(cpuc, n, assign); + ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign); if (ret) return ret; @@ -2208,17 +2315,15 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m static int x86_pmu_event_idx(struct perf_event *event) { - int idx = event->hw.idx; + struct hw_perf_event *hwc = &event->hw; - if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) + if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return 0; - if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { - idx -= INTEL_PMC_IDX_FIXED; - idx |= 1 << 30; - } - - return idx + 1; + if (is_metric_idx(hwc->idx)) + return INTEL_PMC_FIXED_RDPMC_METRICS + 1; + else + return hwc->event_base_rdpmc + 1; } static ssize_t get_attr_rdpmc(struct device *cdev, @@ -2308,15 +2413,13 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) { - if (x86_pmu.sched_task) - x86_pmu.sched_task(ctx, sched_in); + static_call_cond(x86_pmu_sched_task)(ctx, sched_in); } static void x86_pmu_swap_task_ctx(struct perf_event_context *prev, struct perf_event_context *next) { - if (x86_pmu.swap_task_ctx) - x86_pmu.swap_task_ctx(prev, next); + static_call_cond(x86_pmu_swap_task_ctx)(prev, next); } void perf_check_microcode(void) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 31e6887d24f1..f1926e9f2143 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -243,10 +243,14 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { static struct event_constraint intel_icl_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ - INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */ + FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), + METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ @@ -309,6 +313,12 @@ 
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles, EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale, "4", "2"); +EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4"); +EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80"); +EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81"); +EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82"); +EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83"); + static struct attribute *snb_events_attrs[] = { EVENT_PTR(td_slots_issued), EVENT_PTR(td_slots_retired), @@ -2165,11 +2175,24 @@ static inline void intel_clear_masks(struct perf_event *event, int idx) static void intel_pmu_disable_fixed(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; u64 ctrl_val, mask; + int idx = hwc->idx; - mask = 0xfULL << (idx * 4); + if (is_topdown_idx(idx)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * When there are other active TopDown events, + * don't disable the fixed counter 3. + */ + if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) + return; + idx = INTEL_PMC_IDX_FIXED_SLOTS; + } + intel_clear_masks(event, idx); + + mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4); rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; wrmsrl(hwc->config_base, ctrl_val); @@ -2180,17 +2203,28 @@ static void intel_pmu_disable_event(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - if (idx < INTEL_PMC_IDX_FIXED) { + switch (idx) { + case 0 ... INTEL_PMC_IDX_FIXED - 1: intel_clear_masks(event, idx); x86_pmu_disable_event(event); - } else if (idx < INTEL_PMC_IDX_FIXED_BTS) { - intel_clear_masks(event, idx); + break; + case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: intel_pmu_disable_fixed(event); - } else if (idx == INTEL_PMC_IDX_FIXED_BTS) { + break; + case INTEL_PMC_IDX_FIXED_BTS: intel_pmu_disable_bts(); intel_pmu_drain_bts_buffer(); - } else if (idx == INTEL_PMC_IDX_FIXED_VLBR) + return; + case INTEL_PMC_IDX_FIXED_VLBR: intel_clear_masks(event, idx); + break; + default: + intel_clear_masks(event, idx); + pr_warn("Failed to disable the event with invalid index %d\n", + idx); + return; + } /* * Needs to be called after x86_pmu_disable_event, @@ -2208,10 +2242,189 @@ static void intel_pmu_del_event(struct perf_event *event) intel_pmu_pebs_del(event); } +static int icl_set_topdown_event_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + + /* + * The values in PERF_METRICS MSR are derived from fixed counter 3. + * Software should start both registers, PERF_METRICS and fixed + * counter 3, from zero. + * Clear PERF_METRICS and Fixed counter 3 in initialization. + * After that, both MSRs will be cleared for each read. + * Don't need to clear them again. 
+ */ + if (left == x86_pmu.max_period) { + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrl(MSR_PERF_METRICS, 0); + hwc->saved_slots = 0; + hwc->saved_metric = 0; + } + + if ((hwc->saved_slots) && is_slots_event(event)) { + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); + wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); + } + + perf_event_update_userpage(event); + + return 0; +} + +static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) +{ + u32 val; + + /* + * The metric is reported as an 8bit integer fraction + * suming up to 0xff. + * slots-in-metric = (Metric / 0xff) * slots + */ + val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; + return mul_u64_u32_div(slots, val, 0xff); +} + +static u64 icl_get_topdown_value(struct perf_event *event, + u64 slots, u64 metrics) +{ + int idx = event->hw.idx; + u64 delta; + + if (is_metric_idx(idx)) + delta = icl_get_metrics_event_value(metrics, slots, idx); + else + delta = slots; + + return delta; +} + +static void __icl_update_topdown_event(struct perf_event *event, + u64 slots, u64 metrics, + u64 last_slots, u64 last_metrics) +{ + u64 delta, last = 0; + + delta = icl_get_topdown_value(event, slots, metrics); + if (last_slots) + last = icl_get_topdown_value(event, last_slots, last_metrics); + + /* + * The 8bit integer fraction of metric may be not accurate, + * especially when the changes is very small. + * For example, if only a few bad_spec happens, the fraction + * may be reduced from 1 to 0. If so, the bad_spec event value + * will be 0 which is definitely less than the last value. + * Avoid update event->count for this case. + */ + if (delta > last) { + delta -= last; + local64_add(delta, &event->count); + } +} + +static void update_saved_topdown_regs(struct perf_event *event, + u64 slots, u64 metrics) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_event *other; + int idx; + + event->hw.saved_slots = slots; + event->hw.saved_metric = metrics; + + for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) { + if (!is_topdown_idx(idx)) + continue; + other = cpuc->events[idx]; + other->hw.saved_slots = slots; + other->hw.saved_metric = metrics; + } +} + +/* + * Update all active Topdown events. + * + * The PERF_METRICS and Fixed counter 3 are read separately. The values may be + * modify by a NMI. PMU has to be disabled before calling this function. + */ +static u64 icl_update_topdown_event(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_event *other; + u64 slots, metrics; + bool reset = true; + int idx; + + /* read Fixed counter 3 */ + rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots); + if (!slots) + return 0; + + /* read PERF_METRICS */ + rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics); + + for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) { + if (!is_topdown_idx(idx)) + continue; + other = cpuc->events[idx]; + __icl_update_topdown_event(other, slots, metrics, + event ? event->hw.saved_slots : 0, + event ? event->hw.saved_metric : 0); + } + + /* + * Check and update this event, which may have been cleared + * in active_mask e.g. x86_pmu_stop() + */ + if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { + __icl_update_topdown_event(event, slots, metrics, + event->hw.saved_slots, + event->hw.saved_metric); + + /* + * In x86_pmu_stop(), the event is cleared in active_mask first, + * then drain the delta, which indicates context switch for + * counting. 
+ * Save metric and slots for context switch. + * Don't need to reset the PERF_METRICS and Fixed counter 3. + * Because the values will be restored in next schedule in. + */ + update_saved_topdown_regs(event, slots, metrics); + reset = false; + } + + if (reset) { + /* The fixed counter 3 has to be written before the PERF_METRICS. */ + wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrl(MSR_PERF_METRICS, 0); + if (event) + update_saved_topdown_regs(event, 0, 0); + } + + return slots; +} + +static void intel_pmu_read_topdown_event(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* Only need to call update_topdown_event() once for group read. */ + if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && + !is_slots_event(event)) + return; + + perf_pmu_disable(event->pmu); + x86_pmu.update_topdown_event(event); + perf_pmu_enable(event->pmu); +} + static void intel_pmu_read_event(struct perf_event *event) { if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_auto_reload_read(event); + else if (is_topdown_count(event) && x86_pmu.update_topdown_event) + intel_pmu_read_topdown_event(event); else x86_perf_event_update(event); } @@ -2219,8 +2432,22 @@ static void intel_pmu_read_event(struct perf_event *event) static void intel_pmu_enable_fixed(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; u64 ctrl_val, mask, bits = 0; + int idx = hwc->idx; + + if (is_topdown_idx(idx)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + /* + * When there are other active TopDown events, + * don't enable the fixed counter 3 again. + */ + if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) + return; + + idx = INTEL_PMC_IDX_FIXED_SLOTS; + } + + intel_set_masks(event, idx); /* * Enable IRQ generation (0x8), if not PEBS, @@ -2240,6 +2467,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event) if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) bits |= 0x4; + idx -= INTEL_PMC_IDX_FIXED; bits <<= (idx * 4); mask = 0xfULL << (idx * 4); @@ -2262,18 +2490,27 @@ static void intel_pmu_enable_event(struct perf_event *event) if (unlikely(event->attr.precise_ip)) intel_pmu_pebs_enable(event); - if (idx < INTEL_PMC_IDX_FIXED) { + switch (idx) { + case 0 ... INTEL_PMC_IDX_FIXED - 1: intel_set_masks(event, idx); __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); - } else if (idx < INTEL_PMC_IDX_FIXED_BTS) { - intel_set_masks(event, idx); + break; + case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + case INTEL_PMC_IDX_METRIC_BASE ... 
INTEL_PMC_IDX_METRIC_END: intel_pmu_enable_fixed(event); - } else if (idx == INTEL_PMC_IDX_FIXED_BTS) { + break; + case INTEL_PMC_IDX_FIXED_BTS: if (!__this_cpu_read(cpu_hw_events.enabled)) return; intel_pmu_enable_bts(hwc->config); - } else if (idx == INTEL_PMC_IDX_FIXED_VLBR) + break; + case INTEL_PMC_IDX_FIXED_VLBR: intel_set_masks(event, idx); + break; + default: + pr_warn("Failed to enable the event with invalid index %d\n", + idx); + } } static void intel_pmu_add_event(struct perf_event *event) @@ -2389,7 +2626,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) /* * PEBS overflow sets bit 62 in the global status register */ - if (__test_and_clear_bit(62, (unsigned long *)&status)) { + if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) { u64 pebs_enabled = cpuc->pebs_enabled; handled++; @@ -2410,7 +2647,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) /* * Intel PT */ - if (__test_and_clear_bit(55, (unsigned long *)&status)) { + if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { handled++; if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() && perf_guest_cbs->handle_intel_pt_intr)) @@ -2420,6 +2657,15 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) } /* + * Intel Perf mertrics + */ + if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { + handled++; + if (x86_pmu.update_topdown_event) + x86_pmu.update_topdown_event(NULL); + } + + /* * Checkpointed counters can lead to 'spurious' PMIs because the * rollback caused by the PMI will have cleared the overflow status * bit. Therefore always force probe these counters. @@ -3355,6 +3601,56 @@ static int intel_pmu_hw_config(struct perf_event *event) if (event->attr.type != PERF_TYPE_RAW) return 0; + /* + * Config Topdown slots and metric events + * + * The slots event on Fixed Counter 3 can support sampling, + * which will be handled normally in x86_perf_event_update(). + * + * Metric events don't support sampling and require being paired + * with a slots event as group leader. When the slots event + * is used in a metrics group, it too cannot support sampling. + */ + if (x86_pmu.intel_cap.perf_metrics && is_topdown_event(event)) { + if (event->attr.config1 || event->attr.config2) + return -EINVAL; + + /* + * The TopDown metrics events and slots event don't + * support any filters. + */ + if (event->attr.config & X86_ALL_EVENT_FLAGS) + return -EINVAL; + + if (is_metric_event(event)) { + struct perf_event *leader = event->group_leader; + + /* The metric events don't support sampling. */ + if (is_sampling_event(event)) + return -EINVAL; + + /* The metric events require a slots group leader. */ + if (!is_slots_event(leader)) + return -EINVAL; + + /* + * The leader/SLOTS must not be a sampling event for + * metric use; hardware requires it starts at 0 when used + * in conjunction with MSR_PERF_METRICS. + */ + if (is_sampling_event(leader)) + return -EINVAL; + + event->event_caps |= PERF_EV_CAP_SIBLING; + /* + * Only once we have a METRICs sibling do we + * need TopDown magic. + */ + leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; + event->hw.flags |= PERF_X86_EVENT_TOPDOWN; + } + } + if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) return 0; @@ -3787,6 +4083,17 @@ static void intel_pmu_cpu_starting(int cpu) if (x86_pmu.counter_freezing) enable_counter_freeze(); + /* Disable perf metrics if any added CPU doesn't support it. 
*/ + if (x86_pmu.intel_cap.perf_metrics) { + union perf_capabilities perf_cap; + + rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); + if (!perf_cap.perf_metrics) { + x86_pmu.intel_cap.perf_metrics = 0; + x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); + } + } + if (!cpuc->shared_regs) return; @@ -4355,6 +4662,15 @@ static struct attribute *icl_events_attrs[] = { NULL, }; +static struct attribute *icl_td_events_attrs[] = { + EVENT_PTR(slots), + EVENT_PTR(td_retiring), + EVENT_PTR(td_bad_spec), + EVENT_PTR(td_fe_bound), + EVENT_PTR(td_be_bound), + NULL, +}; + static struct attribute *icl_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_abort), @@ -4830,6 +5146,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ATOM_TREMONT_D: case INTEL_FAM6_ATOM_TREMONT: + case INTEL_FAM6_ATOM_TREMONT_L: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -5139,10 +5456,13 @@ __init int intel_pmu_init(void) hsw_format_attr : nhm_format_attr; extra_skl_attr = skl_format_attr; mem_attr = icl_events_attrs; + td_attr = icl_td_events_attrs; tsx_attr = icl_tsx_events_attrs; x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02); x86_pmu.lbr_pt_coexist = true; intel_pmu_pebs_data_source_skl(pmem); + x86_pmu.update_topdown_event = icl_update_topdown_event; + x86_pmu.set_topdown_event_period = icl_set_topdown_event_period; pr_cont("Icelake events, "); name = "icelake"; break; @@ -5198,6 +5518,15 @@ __init int intel_pmu_init(void) * counter, so do not extend mask to generic counters */ for_each_event_constraint(c, x86_pmu.event_constraints) { + /* + * Don't extend the topdown slots and metrics + * events to the generic counters. + */ + if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { + c->weight = hweight64(c->idxmsk64); + continue; + } + if (c->cmask == FIXED_EVENT_FLAGS && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) { c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; @@ -5253,6 +5582,9 @@ __init int intel_pmu_init(void) if (x86_pmu.counter_freezing) x86_pmu.handle_irq = intel_pmu_handle_irq_v4; + if (x86_pmu.intel_cap.perf_metrics) + x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + return 0; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 86848c57b55e..404315df1e16 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -670,9 +670,7 @@ unlock: static inline void intel_pmu_drain_pebs_buffer(void) { - struct pt_regs regs; - - x86_pmu.drain_pebs(®s); + x86_pmu.drain_pebs(NULL); } /* @@ -1737,6 +1735,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, struct x86_perf_regs perf_regs; struct pt_regs *regs = &perf_regs.regs; void *at = get_next_pebs_record_by_bit(base, top, bit); + struct pt_regs dummy_iregs; if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { /* @@ -1749,6 +1748,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event, } else if (!intel_pmu_save_and_restart(event)) return; + if (!iregs) + iregs = &dummy_iregs; + while (count > 1) { setup_sample(event, iregs, at, &data, regs); perf_event_output(event, &data, regs); @@ -1758,16 +1760,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event, } setup_sample(event, iregs, at, &data, regs); - - /* - * All but the last records are processed. - * The last one is left to be able to call the overflow handler. 
- */ - if (perf_event_overflow(event, &data, regs)) { - x86_pmu_stop(event, 0); - return; + if (iregs == &dummy_iregs) { + /* + * The PEBS records may be drained in the non-overflow context, + * e.g., large PEBS + context switch. Perf should treat the + * last record the same as other PEBS records, and doesn't + * invoke the generic overflow handler. + */ + perf_event_output(event, &data, regs); + } else { + /* + * All but the last records are processed. + * The last one is left to be able to call the overflow handler. + */ + if (perf_event_overflow(event, &data, regs)) + x86_pmu_stop(event, 0); } - } static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index d5c6d3b340c5..86d012b3e0b4 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -12,6 +12,8 @@ struct intel_uncore_type **uncore_mmio_uncores = empty_uncore; static bool pcidrv_registered; struct pci_driver *uncore_pci_driver; +/* The PCI driver for the device which the uncore doesn't own. */ +struct pci_driver *uncore_pci_sub_driver; /* pci bus to socket mapping */ DEFINE_RAW_SPINLOCK(pci2phy_map_lock); struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head); @@ -989,65 +991,71 @@ uncore_types_init(struct intel_uncore_type **types, bool setid) } /* - * add a pci uncore device + * Get the die information of a PCI device. + * @pdev: The PCI device. + * @phys_id: The physical socket id which the device maps to. + * @die: The die id which the device maps to. */ -static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, + int *phys_id, int *die) { - struct intel_uncore_type *type; - struct intel_uncore_pmu *pmu = NULL; - struct intel_uncore_box *box; - int phys_id, die, ret; - - phys_id = uncore_pcibus_to_physid(pdev->bus); - if (phys_id < 0) + *phys_id = uncore_pcibus_to_physid(pdev->bus); + if (*phys_id < 0) return -ENODEV; - die = (topology_max_die_per_package() > 1) ? phys_id : - topology_phys_to_logical_pkg(phys_id); - if (die < 0) + *die = (topology_max_die_per_package() > 1) ? *phys_id : + topology_phys_to_logical_pkg(*phys_id); + if (*die < 0) return -EINVAL; - if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { - int idx = UNCORE_PCI_DEV_IDX(id->driver_data); - - uncore_extra_pci_dev[die].dev[idx] = pdev; - pci_set_drvdata(pdev, NULL); - return 0; - } - - type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + return 0; +} - /* - * Some platforms, e.g. Knights Landing, use a common PCI device ID - * for multiple instances of an uncore PMU device type. We should check - * PCI slot and func to indicate the uncore box. - */ - if (id->driver_data & ~0xffff) { - struct pci_driver *pci_drv = pdev->driver; - const struct pci_device_id *ids = pci_drv->id_table; - unsigned int devfn; - - while (ids && ids->vendor) { - if ((ids->vendor == pdev->vendor) && - (ids->device == pdev->device)) { - devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data), - UNCORE_PCI_DEV_FUNC(ids->driver_data)); - if (devfn == pdev->devfn) { - pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)]; - break; - } +/* + * Find the PMU of a PCI device. + * @pdev: The PCI device. + * @ids: The ID table of the available PCI devices with a PMU. 
+ */ +static struct intel_uncore_pmu * +uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids) +{ + struct intel_uncore_pmu *pmu = NULL; + struct intel_uncore_type *type; + kernel_ulong_t data; + unsigned int devfn; + + while (ids && ids->vendor) { + if ((ids->vendor == pdev->vendor) && + (ids->device == pdev->device)) { + data = ids->driver_data; + devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data), + UNCORE_PCI_DEV_FUNC(data)); + if (devfn == pdev->devfn) { + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)]; + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)]; + break; } - ids++; } - if (pmu == NULL) - return -ENODEV; - } else { - /* - * for performance monitoring unit with multiple boxes, - * each box has a different function id. - */ - pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + ids++; } + return pmu; +} + +/* + * Register the PMU for a PCI device + * @pdev: The PCI device. + * @type: The corresponding PMU type of the device. + * @pmu: The corresponding PMU of the device. + * @phys_id: The physical socket id which the device maps to. + * @die: The die id which the device maps to. + */ +static int uncore_pci_pmu_register(struct pci_dev *pdev, + struct intel_uncore_type *type, + struct intel_uncore_pmu *pmu, + int phys_id, int die) +{ + struct intel_uncore_box *box; + int ret; if (WARN_ON_ONCE(pmu->boxes[die] != NULL)) return -EINVAL; @@ -1067,7 +1075,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id box->pci_dev = pdev; box->pmu = pmu; uncore_box_init(box); - pci_set_drvdata(pdev, box); pmu->boxes[die] = box; if (atomic_inc_return(&pmu->activeboxes) > 1) @@ -1076,7 +1083,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id /* First active box registers the pmu */ ret = uncore_pmu_register(pmu); if (ret) { - pci_set_drvdata(pdev, NULL); pmu->boxes[die] = NULL; uncore_box_exit(box); kfree(box); @@ -1084,18 +1090,87 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id return ret; } +/* + * add a pci uncore device + */ +static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct intel_uncore_type *type; + struct intel_uncore_pmu *pmu = NULL; + int phys_id, die, ret; + + ret = uncore_pci_get_dev_die_info(pdev, &phys_id, &die); + if (ret) + return ret; + + if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { + int idx = UNCORE_PCI_DEV_IDX(id->driver_data); + + uncore_extra_pci_dev[die].dev[idx] = pdev; + pci_set_drvdata(pdev, NULL); + return 0; + } + + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + + /* + * Some platforms, e.g. Knights Landing, use a common PCI device ID + * for multiple instances of an uncore PMU device type. We should check + * PCI slot and func to indicate the uncore box. + */ + if (id->driver_data & ~0xffff) { + struct pci_driver *pci_drv = pdev->driver; + + pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table); + if (pmu == NULL) + return -ENODEV; + } else { + /* + * for performance monitoring unit with multiple boxes, + * each box has a different function id. + */ + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + } + + ret = uncore_pci_pmu_register(pdev, type, pmu, phys_id, die); + + pci_set_drvdata(pdev, pmu->boxes[die]); + + return ret; +} + +/* + * Unregister the PMU of a PCI device + * @pmu: The corresponding PMU is unregistered. + * @phys_id: The physical socket id which the device maps to. + * @die: The die id which the device maps to. 
+ */ +static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu, + int phys_id, int die) +{ + struct intel_uncore_box *box = pmu->boxes[die]; + + if (WARN_ON_ONCE(phys_id != box->pci_phys_id)) + return; + + pmu->boxes[die] = NULL; + if (atomic_dec_return(&pmu->activeboxes) == 0) + uncore_pmu_unregister(pmu); + uncore_box_exit(box); + kfree(box); +} + static void uncore_pci_remove(struct pci_dev *pdev) { struct intel_uncore_box *box; struct intel_uncore_pmu *pmu; int i, phys_id, die; - phys_id = uncore_pcibus_to_physid(pdev->bus); + if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die)) + return; box = pci_get_drvdata(pdev); if (!box) { - die = (topology_max_die_per_package() > 1) ? phys_id : - topology_phys_to_logical_pkg(phys_id); for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { if (uncore_extra_pci_dev[die].dev[i] == pdev) { uncore_extra_pci_dev[die].dev[i] = NULL; @@ -1107,15 +1182,84 @@ static void uncore_pci_remove(struct pci_dev *pdev) } pmu = box->pmu; - if (WARN_ON_ONCE(phys_id != box->pci_phys_id)) - return; pci_set_drvdata(pdev, NULL); - pmu->boxes[box->dieid] = NULL; - if (atomic_dec_return(&pmu->activeboxes) == 0) - uncore_pmu_unregister(pmu); - uncore_box_exit(box); - kfree(box); + + uncore_pci_pmu_unregister(pmu, phys_id, die); +} + +static int uncore_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct intel_uncore_pmu *pmu; + int phys_id, die; + + /* Unregister the PMU when the device is going to be deleted. */ + if (action != BUS_NOTIFY_DEL_DEVICE) + return NOTIFY_DONE; + + pmu = uncore_pci_find_dev_pmu(pdev, uncore_pci_sub_driver->id_table); + if (!pmu) + return NOTIFY_DONE; + + if (uncore_pci_get_dev_die_info(pdev, &phys_id, &die)) + return NOTIFY_DONE; + + uncore_pci_pmu_unregister(pmu, phys_id, die); + + return NOTIFY_OK; +} + +static struct notifier_block uncore_notifier = { + .notifier_call = uncore_bus_notify, +}; + +static void uncore_pci_sub_driver_init(void) +{ + const struct pci_device_id *ids = uncore_pci_sub_driver->id_table; + struct intel_uncore_type *type; + struct intel_uncore_pmu *pmu; + struct pci_dev *pci_sub_dev; + bool notify = false; + unsigned int devfn; + int phys_id, die; + + while (ids && ids->vendor) { + pci_sub_dev = NULL; + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)]; + /* + * Search the available device, and register the + * corresponding PMU. 
+ */ + while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL, + ids->device, pci_sub_dev))) { + devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data), + UNCORE_PCI_DEV_FUNC(ids->driver_data)); + if (devfn != pci_sub_dev->devfn) + continue; + + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)]; + if (!pmu) + continue; + + if (uncore_pci_get_dev_die_info(pci_sub_dev, + &phys_id, &die)) + continue; + + if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu, + phys_id, die)) + notify = true; + } + ids++; + } + + if (notify && bus_register_notifier(&pci_bus_type, &uncore_notifier)) + notify = false; + + if (!notify) + uncore_pci_sub_driver = NULL; } static int __init uncore_pci_init(void) @@ -1141,6 +1285,9 @@ static int __init uncore_pci_init(void) if (ret) goto errtype; + if (uncore_pci_sub_driver) + uncore_pci_sub_driver_init(); + pcidrv_registered = true; return 0; @@ -1158,6 +1305,8 @@ static void uncore_pci_exit(void) { if (pcidrv_registered) { pcidrv_registered = false; + if (uncore_pci_sub_driver) + bus_unregister_notifier(&pci_bus_type, &uncore_notifier); pci_unregister_driver(uncore_pci_driver); uncore_types_exit(uncore_pci_uncores); kfree(uncore_extra_pci_dev); @@ -1478,12 +1627,12 @@ static const struct intel_uncore_init_fun icl_uncore_init __initconst = { }; static const struct intel_uncore_init_fun tgl_uncore_init __initconst = { - .cpu_init = icl_uncore_cpu_init, + .cpu_init = tgl_uncore_cpu_init, .mmio_init = tgl_uncore_mmio_init, }; static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = { - .cpu_init = icl_uncore_cpu_init, + .cpu_init = tgl_uncore_cpu_init, .mmio_init = tgl_l_uncore_mmio_init, }; diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 105fdc69825e..83d2a7d490e0 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -552,6 +552,7 @@ extern struct intel_uncore_type **uncore_msr_uncores; extern struct intel_uncore_type **uncore_pci_uncores; extern struct intel_uncore_type **uncore_mmio_uncores; extern struct pci_driver *uncore_pci_driver; +extern struct pci_driver *uncore_pci_sub_driver; extern raw_spinlock_t pci2phy_map_lock; extern struct list_head pci2phy_map_head; extern struct pci_extra_dev *uncore_extra_pci_dev; @@ -567,6 +568,7 @@ void snb_uncore_cpu_init(void); void nhm_uncore_cpu_init(void); void skl_uncore_cpu_init(void); void icl_uncore_cpu_init(void); +void tgl_uncore_cpu_init(void); void tgl_uncore_mmio_init(void); void tgl_l_uncore_mmio_init(void); int snb_pci2phy_map_init(int devid); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 6a4ca27b2c9e..39e632ed6ca9 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -126,6 +126,10 @@ #define ICL_UNC_CBO_0_PER_CTR0 0x702 #define ICL_UNC_CBO_MSR_OFFSET 0x8 +/* ICL ARB register */ +#define ICL_UNC_ARB_PER_CTR 0x3b1 +#define ICL_UNC_ARB_PERFEVTSEL 0x3b3 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); @@ -313,15 +317,21 @@ void skl_uncore_cpu_init(void) snb_uncore_arb.ops = &skl_uncore_msr_ops; } +static struct intel_uncore_ops icl_uncore_msr_ops = { + .disable_event = snb_uncore_msr_disable_event, + .enable_event = snb_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + static struct intel_uncore_type icl_uncore_cbox = { .name = "cbox", - .num_counters = 4, + .num_counters = 2, .perf_ctr_bits = 44, .perf_ctr = 
ICL_UNC_CBO_0_PER_CTR0, .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, .event_mask = SNB_UNC_RAW_EVENT_MASK, .msr_offset = ICL_UNC_CBO_MSR_OFFSET, - .ops = &skl_uncore_msr_ops, + .ops = &icl_uncore_msr_ops, .format_group = &snb_uncore_format_group, }; @@ -350,13 +360,25 @@ static struct intel_uncore_type icl_uncore_clockbox = { .single_fixed = 1, .event_mask = SNB_UNC_CTL_EV_SEL_MASK, .format_group = &icl_uncore_clock_format_group, - .ops = &skl_uncore_msr_ops, + .ops = &icl_uncore_msr_ops, .event_descs = icl_uncore_events, }; +static struct intel_uncore_type icl_uncore_arb = { + .name = "arb", + .num_counters = 1, + .num_boxes = 1, + .perf_ctr_bits = 44, + .perf_ctr = ICL_UNC_ARB_PER_CTR, + .event_ctl = ICL_UNC_ARB_PERFEVTSEL, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .ops = &icl_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + static struct intel_uncore_type *icl_msr_uncores[] = { &icl_uncore_cbox, - &snb_uncore_arb, + &icl_uncore_arb, &icl_uncore_clockbox, NULL, }; @@ -374,6 +396,21 @@ void icl_uncore_cpu_init(void) { uncore_msr_uncores = icl_msr_uncores; icl_uncore_cbox.num_boxes = icl_get_cbox_num(); +} + +static struct intel_uncore_type *tgl_msr_uncores[] = { + &icl_uncore_cbox, + &snb_uncore_arb, + &icl_uncore_clockbox, + NULL, +}; + +void tgl_uncore_cpu_init(void) +{ + uncore_msr_uncores = tgl_msr_uncores; + icl_uncore_cbox.num_boxes = icl_get_cbox_num(); + icl_uncore_cbox.ops = &skl_uncore_msr_ops; + icl_uncore_clockbox.ops = &skl_uncore_msr_ops; snb_uncore_arb.ops = &skl_uncore_msr_ops; } diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 62e88ad919ff..7bdb1821215d 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -393,6 +393,11 @@ #define SNR_M2M_PCI_PMON_BOX_CTL 0x438 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff +/* SNR PCIE3 */ +#define SNR_PCIE3_PCI_PMON_CTL0 0x508 +#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8 +#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0 + /* SNR IMC */ #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38 @@ -3749,7 +3754,9 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type) ret = skx_iio_get_topology(type); if (ret) - return ret; + goto clear_attr_update; + + ret = -ENOMEM; /* One more for NULL. 
*/ attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL); @@ -3781,8 +3788,9 @@ err: kfree(eas); kfree(attrs); kfree(type->topology); +clear_attr_update: type->attr_update = NULL; - return -ENOMEM; + return ret; } static void skx_iio_cleanup_mapping(struct intel_uncore_type *type) @@ -4551,12 +4559,46 @@ static struct intel_uncore_type snr_uncore_m2m = { .format_group = &snr_m2m_uncore_format_group, }; +static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN)); + pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32)); +} + +static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = { + .init_box = snr_m2m_uncore_pci_init_box, + .disable_box = snbep_uncore_pci_disable_box, + .enable_box = snbep_uncore_pci_enable_box, + .disable_event = snbep_uncore_pci_disable_event, + .enable_event = snr_uncore_pci_enable_event, + .read_counter = snbep_uncore_pci_read_counter, +}; + +static struct intel_uncore_type snr_uncore_pcie3 = { + .name = "pcie3", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, + .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, + .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, + .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, + .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, + .ops = &snr_pcie3_uncore_pci_ops, + .format_group = &skx_uncore_iio_format_group, +}; + enum { SNR_PCI_UNCORE_M2M, + SNR_PCI_UNCORE_PCIE3, }; static struct intel_uncore_type *snr_pci_uncores[] = { [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, + [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, NULL, }; @@ -4573,6 +4615,19 @@ static struct pci_driver snr_uncore_pci_driver = { .id_table = snr_uncore_pci_ids, }; +static const struct pci_device_id snr_uncore_pci_sub_ids[] = { + { /* PCIe3 RP */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a), + .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0), + }, + { /* end: all zeroes */ } +}; + +static struct pci_driver snr_uncore_pci_sub_driver = { + .name = "snr_uncore_sub", + .id_table = snr_uncore_pci_sub_ids, +}; + int snr_uncore_pci_init(void) { /* SNR UBOX DID */ @@ -4584,6 +4639,7 @@ int snr_uncore_pci_init(void) uncore_pci_uncores = snr_pci_uncores; uncore_pci_driver = &snr_uncore_pci_driver; + uncore_pci_sub_driver = &snr_uncore_pci_sub_driver; return 0; } @@ -4751,10 +4807,10 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"), INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), - INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"), INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), { /* end: all zeroes */ }, }; @@ -5212,17 +5268,17 @@ static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = { INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(read.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"), INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), - 
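The read/write scale corrections in the IMC free-running event lists here and below amount to a units fix: 6.103515625e-5 MiB is exactly 64 B / 2^20, one 64-byte cache line per counter increment, whereas the old 3.814697266e-6 is 4 B / 2^20 and therefore under-reported bandwidth by a factor of 16 (assuming, as the new value implies, that these counters tick once per 64-byte transaction).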
INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"), INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"), - INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"), INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"), - INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"), INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"), { /* end: all zeroes */ }, }; diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index a949f6f55991..4be8f9cabd07 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -78,6 +78,7 @@ static bool test_intel(int idx, void *data) case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_ATOM_TREMONT_D: case INTEL_FAM6_ATOM_TREMONT: + case INTEL_FAM6_ATOM_TREMONT_L: case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNM: diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 7b68ab5f19e7..ee2b9b9fc2a5 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -79,6 +79,31 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode) #define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */ #define PERF_X86_EVENT_PAIR 0x1000 /* Large Increment per Cycle */ #define PERF_X86_EVENT_LBR_SELECT 0x2000 /* Save/Restore MSR_LBR_SELECT */ +#define PERF_X86_EVENT_TOPDOWN 0x4000 /* Count Topdown slots/metrics events */ + +static inline bool is_topdown_count(struct perf_event *event) +{ + return event->hw.flags & PERF_X86_EVENT_TOPDOWN; +} + +static inline bool is_metric_event(struct perf_event *event) +{ + u64 config = event->attr.config; + + return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) && + ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) && + ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX); +} + +static inline bool is_slots_event(struct perf_event *event) +{ + return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS; +} + +static inline bool is_topdown_event(struct perf_event *event) +{ + return is_metric_event(event) || is_slots_event(event); +} struct amd_nb { int nb_id; /* NorthBridge id */ @@ -210,6 +235,8 @@ struct cpu_hw_events { they've never been enabled yet */ int n_txn; /* the # last events in the below arrays; added in the current transaction */ + int n_txn_pair; + int n_txn_metric; int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ u64 tags[X86_PMC_IDX_MAX]; @@ -285,6 +312,12 @@ struct cpu_hw_events { u64 tfa_shadow; /* + * Perf Metrics + */ + /* number of accepted metrics events */ + int n_metric; + + /* * AMD specific bits */ struct amd_nb *amd_nb; @@ -376,6 +409,19 @@ struct cpu_hw_events { EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS) /* + * The special metric counters do not actually exist. They are calculated from + * the combination of the FxCtr3 + MSR_PERF_METRICS. + * + * The special metric counters are mapped to a dummy offset for the scheduler. + * The sharing between multiple users of the same metric without multiplexing + * is not allowed, even though the hardware supports that in principle. 
+ */ + +#define METRIC_EVENT_CONSTRAINT(c, n) \ + EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)), \ + INTEL_ARCH_EVENT_MASK) + +/* * Constraint on the Event code + UMask */ #define INTEL_UEVENT_CONSTRAINT(c, n) \ @@ -537,7 +583,7 @@ union perf_capabilities { */ u64 full_width_write:1; u64 pebs_baseline:1; - u64 pebs_metrics_available:1; + u64 perf_metrics:1; u64 pebs_output_pt_available:1; }; u64 capabilities; @@ -727,6 +773,12 @@ struct x86_pmu { atomic_t lbr_exclusive[x86_lbr_exclusive_max]; /* + * Intel perf metrics + */ + u64 (*update_topdown_event)(struct perf_event *event); + int (*set_topdown_event_period)(struct perf_event *event); + + /* * perf task context (i.e. struct perf_event_context::task_ctx_data) * switch helper to bridge calls from perf/core to perf/x86. * See struct pmu::swap_task_ctx() usage for examples; diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index 67b411f7e8c4..7c0120e2e957 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -815,6 +815,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr), X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h), X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h), + X86_MATCH_VENDOR_FAM(AMD, 0x19, &model_amd_fam17h), {}, }; MODULE_DEVICE_TABLE(x86cpu, rapl_model_match); diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 6035df1b49e1..e04d90af4c27 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -148,9 +148,9 @@ static inline bool hv_reenlightenment_available(void) * Check for required features and priviliges to make TSC frequency * change notifications work. */ - return ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && + return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS && ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE && - ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT; + ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT; } DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment) @@ -330,8 +330,8 @@ void __init hyperv_init(void) return; /* Absolutely required MSRs */ - required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE | - HV_X64_MSR_VP_INDEX_AVAILABLE; + required_msrs = HV_MSR_HYPERCALL_AVAILABLE | + HV_MSR_VP_INDEX_AVAILABLE; if ((ms_hyperv.features & required_msrs) != required_msrs) return; diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c index 07f21a06392f..f3270c1fc48c 100644 --- a/arch/x86/hyperv/hv_spinlock.c +++ b/arch/x86/hyperv/hv_spinlock.c @@ -66,7 +66,7 @@ void __init hv_init_spinlocks(void) { if (!hv_pvspin || !apic || !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) || - !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) { + !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) { pr_info("PV spinlocks disabled\n"); return; } diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 2cc44e957c31..1c129abb7f09 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -519,6 +519,14 @@ static inline bool apic_id_is_primary_thread(unsigned int id) { return false; } static inline void apic_smt_update(void) { } #endif +struct msi_msg; + +#ifdef CONFIG_PCI_MSI +void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg); +#else +# define x86_vector_msi_compose_msg NULL +#endif + extern void ioapic_zap_locks(void); #endif /* _ASM_X86_APIC_H */ diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 
5a42f9206138..51e2bf27cc9b 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -5,6 +5,7 @@ #include <asm/string.h> #include <asm/page.h> #include <asm/checksum.h> +#include <asm/mce.h> #include <asm-generic/asm-prototypes.h> diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 5c15f95b1ba7..0603c7423aca 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -135,14 +135,21 @@ # define _ASM_EXTABLE_UA(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) +# define _ASM_EXTABLE_CPY(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy) + # define _ASM_EXTABLE_FAULT(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) -# define _ASM_NOKPROBE(entry) \ +# ifdef CONFIG_KPROBES +# define _ASM_NOKPROBE(entry) \ .pushsection "_kprobe_blacklist","aw" ; \ _ASM_ALIGN ; \ _ASM_PTR (entry); \ .popsection +# else +# define _ASM_NOKPROBE(entry) +# endif #else /* ! __ASSEMBLY__ */ # define _EXPAND_EXTABLE_HANDLE(x) #x @@ -160,6 +167,9 @@ # define _ASM_EXTABLE_UA(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) +# define _ASM_EXTABLE_CPY(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy) + # define _ASM_EXTABLE_FAULT(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h index 0ada98d5d09f..bca625a60186 100644 --- a/arch/x86/include/asm/checksum.h +++ b/arch/x86/include/asm/checksum.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 #define HAVE_CSUM_COPY_USER +#define _HAVE_ARCH_CSUM_AND_COPY #ifdef CONFIG_X86_32 # include <asm/checksum_32.h> #else diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index 11624c8a9d8d..17da95387997 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -27,9 +27,7 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); * better 64-bit) boundary */ -asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len); /* * Note: when you get a NULL pointer exception here this means someone @@ -38,26 +36,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, * If you use these functions directly please don't forget the * access_ok(). 
*/ -static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) +static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { - return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len); } static inline __wsum csum_and_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum, int *err_ptr) + void *dst, int len) { __wsum ret; might_sleep(); - if (!user_access_begin(src, len)) { - if (len) - *err_ptr = -EFAULT; - return sum; - } - ret = csum_partial_copy_generic((__force void *)src, dst, - len, sum, err_ptr, NULL); + if (!user_access_begin(src, len)) + return 0; + ret = csum_partial_copy_generic((__force void *)src, dst, len); user_access_end(); return ret; @@ -178,23 +170,17 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, */ static inline __wsum csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum sum, - int *err_ptr) + int len) { __wsum ret; might_sleep(); - if (user_access_begin(dst, len)) { - ret = csum_partial_copy_generic(src, (__force void *)dst, - len, sum, NULL, err_ptr); - user_access_end(); - return ret; - } + if (!user_access_begin(dst, len)) + return 0; - if (len) - *err_ptr = -EFAULT; - - return (__force __wsum)-1; /* invalid checksum */ + ret = csum_partial_copy_generic(src, (__force void *)dst, len); + user_access_end(); + return ret; } #endif /* _ASM_X86_CHECKSUM_32_H */ diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h index 0a289b87e872..407beebadaf4 100644 --- a/arch/x86/include/asm/checksum_64.h +++ b/arch/x86/include/asm/checksum_64.h @@ -130,17 +130,11 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, extern __wsum csum_partial(const void *buff, int len, __wsum sum); /* Do not call this directly. Use the wrappers below */ -extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst, - int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); +extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len); - -extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum isum, int *errp); -extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum isum, int *errp); -extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum); +extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); +extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len); +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); /** * ip_compute_csum - Compute an 16bit IP checksum. 
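Across these checksum headers the copy-and-checksum helpers lose both the incoming-sum argument and the error pointer: a fault is now signalled by returning 0 (the implementations are expected to seed the sum so that a successful copy never yields 0; treat that as the convention rather than something visible in this hunk). A caller that used to inspect *err_ptr would now follow roughly the pattern below; the function name is illustrative.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/checksum.h>

/* Illustrative only: copy a user buffer and fold its checksum. */
static int copy_and_fold_csum(void *dst, const void __user *src, int len,
			      __sum16 *folded)
{
	__wsum sum = csum_and_copy_from_user(src, dst, len);

	if (!sum)			/* fault during the copy */
		return -EFAULT;

	*folded = csum_fold(sum);
	return 0;
}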
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index d4edf281fff4..0e327a01f50f 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -27,8 +27,6 @@ typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s64 __attribute__((aligned(4))) compat_s64; -typedef u64 __attribute__((aligned(4))) compat_u64; struct compat_stat { compat_dev_t st_dev; @@ -211,6 +209,7 @@ static inline bool in_compat_syscall(void) return in_32bit_syscall(); } #define in_compat_syscall in_compat_syscall /* override the generic impl */ +#define compat_need_64bit_alignment_fixup in_ia32_syscall #endif struct compat_siginfo; diff --git a/arch/x86/include/asm/copy_mc_test.h b/arch/x86/include/asm/copy_mc_test.h new file mode 100644 index 000000000000..e4991ba96726 --- /dev/null +++ b/arch/x86/include/asm/copy_mc_test.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _COPY_MC_TEST_H_ +#define _COPY_MC_TEST_H_ + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_COPY_MC_TEST +extern unsigned long copy_mc_test_src; +extern unsigned long copy_mc_test_dst; + +static inline void copy_mc_inject_src(void *addr) +{ + if (addr) + copy_mc_test_src = (unsigned long) addr; + else + copy_mc_test_src = ~0UL; +} + +static inline void copy_mc_inject_dst(void *addr) +{ + if (addr) + copy_mc_test_dst = (unsigned long) addr; + else + copy_mc_test_dst = ~0UL; +} +#else /* CONFIG_COPY_MC_TEST */ +static inline void copy_mc_inject_src(void *addr) +{ +} + +static inline void copy_mc_inject_dst(void *addr) +{ +} +#endif /* CONFIG_COPY_MC_TEST */ + +#else /* __ASSEMBLY__ */ +#include <asm/export.h> + +#ifdef CONFIG_COPY_MC_TEST +.macro COPY_MC_TEST_CTL + .pushsection .data + .align 8 + .globl copy_mc_test_src + copy_mc_test_src: + .quad 0 + EXPORT_SYMBOL_GPL(copy_mc_test_src) + .globl copy_mc_test_dst + copy_mc_test_dst: + .quad 0 + EXPORT_SYMBOL_GPL(copy_mc_test_dst) + .popsection +.endm + +.macro COPY_MC_TEST_SRC reg count target + leaq \count(\reg), %r9 + cmp copy_mc_test_src, %r9 + ja \target +.endm + +.macro COPY_MC_TEST_DST reg count target + leaq \count(\reg), %r9 + cmp copy_mc_test_dst, %r9 + ja \target +.endm +#else +.macro COPY_MC_TEST_CTL +.endm + +.macro COPY_MC_TEST_SRC reg count target +.endm + +.macro COPY_MC_TEST_DST reg count target +.endm +#endif /* CONFIG_COPY_MC_TEST */ +#endif /* __ASSEMBLY__ */ +#endif /* _COPY_MC_TEST_H_ */ diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h index 8902fdb7de13..3d52b094850a 100644 --- a/arch/x86/include/asm/cpu_entry_area.h +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -11,25 +11,29 @@ #ifdef CONFIG_X86_64 /* Macro to enforce the same ordering and stack sizes */ -#define ESTACKS_MEMBERS(guardsize) \ - char DF_stack_guard[guardsize]; \ - char DF_stack[EXCEPTION_STKSZ]; \ - char NMI_stack_guard[guardsize]; \ - char NMI_stack[EXCEPTION_STKSZ]; \ - char DB_stack_guard[guardsize]; \ - char DB_stack[EXCEPTION_STKSZ]; \ - char MCE_stack_guard[guardsize]; \ - char MCE_stack[EXCEPTION_STKSZ]; \ - char IST_top_guard[guardsize]; \ +#define ESTACKS_MEMBERS(guardsize, optional_stack_size) \ + char DF_stack_guard[guardsize]; \ + char DF_stack[EXCEPTION_STKSZ]; \ + char NMI_stack_guard[guardsize]; \ + char NMI_stack[EXCEPTION_STKSZ]; \ + char DB_stack_guard[guardsize]; \ + char DB_stack[EXCEPTION_STKSZ]; \ + char MCE_stack_guard[guardsize]; \ + char MCE_stack[EXCEPTION_STKSZ]; \ + char VC_stack_guard[guardsize]; \ + char 
VC_stack[optional_stack_size]; \ + char VC2_stack_guard[guardsize]; \ + char VC2_stack[optional_stack_size]; \ + char IST_top_guard[guardsize]; \ /* The exception stacks' physical storage. No guard pages required */ struct exception_stacks { - ESTACKS_MEMBERS(0) + ESTACKS_MEMBERS(0, 0) }; /* The effective cpu entry area mapping with guard pages. */ struct cea_exception_stacks { - ESTACKS_MEMBERS(PAGE_SIZE) + ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ) }; /* @@ -40,6 +44,8 @@ enum exception_stack_ordering { ESTACK_NMI, ESTACK_DB, ESTACK_MCE, + ESTACK_VC, + ESTACK_VC2, N_EXCEPTION_STACKS }; @@ -139,4 +145,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu) #define __this_cpu_ist_top_va(name) \ CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name) +#define __this_cpu_ist_bottom_va(name) \ + CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name) + #endif diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 2901d5df4366..dad350d42ecf 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -96,7 +96,7 @@ #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ -/* free ( 3*32+17) */ +#define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */ #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ @@ -236,6 +236,7 @@ #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ +#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ @@ -288,6 +289,7 @@ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ +#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -353,6 +355,7 @@ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ #define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ +#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ @@ -368,6 +371,7 @@ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */ +#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */ #define 
X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index e89558a3fe4a..cfdf307ddc01 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -90,8 +90,6 @@ static __always_inline bool hw_breakpoint_active(void) return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; } -extern void aout_dump_debugregs(struct user *dump); - extern void hw_breakpoint_restore(void); static __always_inline unsigned long local_db_save(void) diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 1ced11d31932..476082a83d1c 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -383,6 +383,33 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) void alloc_intr_gate(unsigned int n, const void *addr); +static inline void init_idt_data(struct idt_data *data, unsigned int n, + const void *addr) +{ + BUG_ON(n > 0xFF); + + memset(data, 0, sizeof(*data)); + data->vector = n; + data->addr = addr; + data->segment = __KERNEL_CS; + data->bits.type = GATE_INTERRUPT; + data->bits.p = 1; +} + +static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) +{ + unsigned long addr = (unsigned long) d->addr; + + gate->offset_low = (u16) addr; + gate->segment = (u16) d->segment; + gate->bits = d->bits; + gate->offset_middle = (u16) (addr >> 16); +#ifdef CONFIG_X86_64 + gate->offset_high = (u32) (addr >> 32); + gate->reserved = 0; +#endif +} + extern unsigned long system_vectors[]; extern void load_current_idt(void); diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index a91f3b6e4f2a..f7e7099af595 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h @@ -74,6 +74,13 @@ struct idt_bits { p : 1; } __attribute__((packed)); +struct idt_data { + unsigned int vector; + unsigned int segment; + struct idt_bits bits; + const void *addr; +}; + struct gate_struct { u16 offset_low; u16 segment; @@ -109,6 +116,9 @@ struct desc_ptr { #endif /* !__ASSEMBLY__ */ +/* Boot IDT definitions */ +#define BOOT_IDT_ENTRIES 32 + /* Access rights as returned by LAR */ #define AR_TYPE_RODATA (0 * (1 << 9)) #define AR_TYPE_RWDATA (1 * (1 << 9)) diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 4ea8584682f9..5861d34f9771 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -56,6 +56,12 @@ # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) #endif +#ifdef CONFIG_IOMMU_SUPPORT +# define DISABLE_ENQCMD 0 +#else +# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -75,7 +81,8 @@ #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 #define DISABLED_MASK15 0 -#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP) +#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \ + DISABLE_ENQCMD) #define DISABLED_MASK17 0 #define DISABLED_MASK18 0 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index fed67eafcacc..bb1654fe0ce7 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -8,10 +8,8 @@ */ #include <linux/scatterlist.h> 
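The init_idt_data()/idt_init_desc() pair added to desc.h above splits IDT entry construction into two steps: describe the entry in an idt_data (vector, __KERNEL_CS, interrupt-gate type, present bit), then pack it into a gate descriptor, scattering the handler address across offset_low/middle/high. A minimal sketch of how the two compose, sized with the new BOOT_IDT_ENTRIES constant; the table and handler names are placeholders, not code from this patch.

#include <asm/desc.h>

static gate_desc example_boot_idt[BOOT_IDT_ENTRIES];

/* Install one vector: fill the idt_data, then pack the gate. */
static void example_set_gate(unsigned int vector, const void *handler)
{
	struct idt_data d;

	init_idt_data(&d, vector, handler);	/* __KERNEL_CS, GATE_INTERRUPT, P=1 */
	idt_init_desc(&example_boot_idt[vector], &d);
}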
-#include <linux/dma-debug.h> #include <asm/io.h> #include <asm/swiotlb.h> -#include <linux/dma-contiguous.h> extern int iommu_merge; extern int panic_on_overflow; diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h index d8c2198d543b..1f0cbc52937c 100644 --- a/arch/x86/include/asm/extable.h +++ b/arch/x86/include/asm/extable.h @@ -29,10 +29,17 @@ struct pt_regs; (b)->handler = (tmp).handler - (delta); \ } while (0) +enum handler_type { + EX_HANDLER_NONE, + EX_HANDLER_FAULT, + EX_HANDLER_UACCESS, + EX_HANDLER_OTHER +}; + extern int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, unsigned long fault_addr); extern int fixup_bug(struct pt_regs *regs, int trapnr); -extern bool ex_has_fault_handler(unsigned long ip); +extern enum handler_type ex_get_fault_handler_type(unsigned long ip); extern void early_fixup_exception(struct pt_regs *regs, int trapnr); #endif diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 0f0dd645b594..77217bd292bd 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -99,7 +99,7 @@ enum fixed_addresses { FIX_PCIE_MCFG, #endif #endif -#ifdef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT_XXL FIX_PARAVIRT_BOOTMAP, #endif #ifdef CONFIG_X86_INTEL_MID diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index b774c52e5411..dcd9503b1098 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -62,4 +62,16 @@ extern void switch_fpu_return(void); */ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name); +/* + * Tasks that are not using SVA have mm->pasid set to zero to note that they + * will not have the valid bit set in MSR_IA32_PASID while they are running. + */ +#define PASID_DISABLED 0 + +#ifdef CONFIG_IOMMU_SUPPORT +/* Update current's PASID MSR/state by mm's PASID. */ +void update_pasid(void); +#else +static inline void update_pasid(void) { } +#endif #endif /* _ASM_X86_FPU_API_H */ diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 0a460f2a3f90..8d33ad80704f 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -19,6 +19,7 @@ #include <asm/user.h> #include <asm/fpu/api.h> #include <asm/fpu/xstate.h> +#include <asm/fpu/xcr.h> #include <asm/cpufeature.h> #include <asm/trace/fpu.h> @@ -583,38 +584,13 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) pkru_val = pk->pkru; } __write_pkru(pkru_val); -} - -/* - * MXCSR and XCR definitions: - */ - -static inline void ldmxcsr(u32 mxcsr) -{ - asm volatile("ldmxcsr %0" :: "m" (mxcsr)); -} - -extern unsigned int mxcsr_feature_mask; - -#define XCR_XFEATURE_ENABLED_MASK 0x00000000 -static inline u64 xgetbv(u32 index) -{ - u32 eax, edx; - - asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */ - : "=a" (eax), "=d" (edx) - : "c" (index)); - return eax + ((u64)edx << 32); -} - -static inline void xsetbv(u32 index, u64 value) -{ - u32 eax = value; - u32 edx = value >> 32; - - asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */ - : : "a" (eax), "d" (edx), "c" (index)); + /* + * Expensive PASID MSR write will be avoided in update_pasid() because + * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated + * unless it's different from mm->pasid to reduce overhead. 
+ */ + update_pasid(); } #endif /* _ASM_X86_FPU_INTERNAL_H */ diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index c87364ea6446..f5a38a5f3ae1 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -114,7 +114,7 @@ enum xfeature { XFEATURE_Hi16_ZMM, XFEATURE_PT_UNIMPLEMENTED_SO_FAR, XFEATURE_PKRU, - XFEATURE_RSRVD_COMP_10, + XFEATURE_PASID, XFEATURE_RSRVD_COMP_11, XFEATURE_RSRVD_COMP_12, XFEATURE_RSRVD_COMP_13, @@ -134,6 +134,7 @@ enum xfeature { #define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM) #define XFEATURE_MASK_PT (1 << XFEATURE_PT_UNIMPLEMENTED_SO_FAR) #define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU) +#define XFEATURE_MASK_PASID (1 << XFEATURE_PASID) #define XFEATURE_MASK_LBR (1 << XFEATURE_LBR) #define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE) @@ -256,6 +257,14 @@ struct arch_lbr_state { struct lbr_entry entries[]; } __packed; +/* + * State component 10 is supervisor state used for context-switching the + * PASID state. + */ +struct ia32_pasid_state { + u64 pasid; +} __packed; + struct xstate_header { u64 xfeatures; u64 xcomp_bv; diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h new file mode 100644 index 000000000000..1c7ab8d95da5 --- /dev/null +++ b/arch/x86/include/asm/fpu/xcr.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_FPU_XCR_H +#define _ASM_X86_FPU_XCR_H + +/* + * MXCSR and XCR definitions: + */ + +static inline void ldmxcsr(u32 mxcsr) +{ + asm volatile("ldmxcsr %0" :: "m" (mxcsr)); +} + +extern unsigned int mxcsr_feature_mask; + +#define XCR_XFEATURE_ENABLED_MASK 0x00000000 + +static inline u64 xgetbv(u32 index) +{ + u32 eax, edx; + + asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index)); + return eax + ((u64)edx << 32); +} + +static inline void xsetbv(u32 index, u64 value) +{ + u32 eax = value; + u32 edx = value >> 32; + + asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); +} + +#endif /* _ASM_X86_FPU_XCR_H */ diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index 14ab815132d4..47a92232d595 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -35,7 +35,7 @@ XFEATURE_MASK_BNDCSR) /* All currently supported supervisor features */ -#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (0) +#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID) /* * A supervisor state component may not always contain valuable information, diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h index d552646411a9..35cff5f2becf 100644 --- a/arch/x86/include/asm/fsgsbase.h +++ b/arch/x86/include/asm/fsgsbase.h @@ -57,7 +57,7 @@ static inline unsigned long x86_fsbase_read_cpu(void) { unsigned long fsbase; - if (static_cpu_has(X86_FEATURE_FSGSBASE)) + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) fsbase = rdfsbase(); else rdmsrl(MSR_FS_BASE, fsbase); @@ -67,7 +67,7 @@ static inline unsigned long x86_fsbase_read_cpu(void) static inline void x86_fsbase_write_cpu(unsigned long fsbase) { - if (static_cpu_has(X86_FEATURE_FSGSBASE)) + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) wrfsbase(fsbase); else wrmsrl(MSR_FS_BASE, fsbase); diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 74c12437401e..a4aeeaace040 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -36,61 +36,56 @@ struct msi_desc; enum irq_alloc_type { X86_IRQ_ALLOC_TYPE_IOAPIC = 1, X86_IRQ_ALLOC_TYPE_HPET, - X86_IRQ_ALLOC_TYPE_MSI, - 
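With the MXCSR/XCR helpers moved into the new asm/fpu/xcr.h above (and rewritten using the xgetbv/xsetbv mnemonics instead of raw .byte sequences), reading the user xfeature mask is a one-liner. A small illustration, not taken from this patch, that checks whether AVX state is XSAVE-managed:

#include <linux/printk.h>
#include <asm/fpu/types.h>
#include <asm/fpu/xcr.h>

static void example_report_avx_xstate(void)
{
	/* Index 0 selects XCR0, the user xfeature enable mask. */
	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	if (xcr0 & XFEATURE_MASK_YMM)
		pr_info("AVX (YMM) state is managed by XSAVE\n");
}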
X86_IRQ_ALLOC_TYPE_MSIX, + X86_IRQ_ALLOC_TYPE_PCI_MSI, + X86_IRQ_ALLOC_TYPE_PCI_MSIX, X86_IRQ_ALLOC_TYPE_DMAR, X86_IRQ_ALLOC_TYPE_UV, + X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT, + X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT, }; +struct ioapic_alloc_info { + int pin; + int node; + u32 trigger : 1; + u32 polarity : 1; + u32 valid : 1; + struct IO_APIC_route_entry *entry; +}; + +struct uv_alloc_info { + int limit; + int blade; + unsigned long offset; + char *name; + +}; + +/** + * irq_alloc_info - X86 specific interrupt allocation info + * @type: X86 specific allocation type + * @flags: Flags for allocation tweaks + * @devid: Device ID for allocations + * @hwirq: Associated hw interrupt number in the domain + * @mask: CPU mask for vector allocation + * @desc: Pointer to msi descriptor + * @data: Allocation specific data + * + * @ioapic: IOAPIC specific allocation data + * @uv: UV specific allocation data +*/ struct irq_alloc_info { enum irq_alloc_type type; u32 flags; - const struct cpumask *mask; /* CPU mask for vector allocation */ + u32 devid; + irq_hw_number_t hwirq; + const struct cpumask *mask; + struct msi_desc *desc; + void *data; + union { - int unused; -#ifdef CONFIG_HPET_TIMER - struct { - int hpet_id; - int hpet_index; - void *hpet_data; - }; -#endif -#ifdef CONFIG_PCI_MSI - struct { - struct pci_dev *msi_dev; - irq_hw_number_t msi_hwirq; - }; -#endif -#ifdef CONFIG_X86_IO_APIC - struct { - int ioapic_id; - int ioapic_pin; - int ioapic_node; - u32 ioapic_trigger : 1; - u32 ioapic_polarity : 1; - u32 ioapic_valid : 1; - struct IO_APIC_route_entry *ioapic_entry; - }; -#endif -#ifdef CONFIG_DMAR_TABLE - struct { - int dmar_id; - void *dmar_data; - }; -#endif -#ifdef CONFIG_X86_UV - struct { - int uv_limit; - int uv_blade; - unsigned long uv_offset; - char *uv_name; - }; -#endif -#if IS_ENABLED(CONFIG_VMD) - struct { - struct msi_desc *desc; - }; -#endif + struct ioapic_alloc_info ioapic; + struct uv_alloc_info uv; }; }; diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 7a4d2062385c..0ed20e8bba9e 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -28,39 +28,6 @@ #define HYPERV_CPUID_MAX 0x4000ffff /* - * Aliases for Group A features that have X64 in the name. - * On x86/x64 these are HYPERV_CPUID_FEATURES.EAX bits. - */ - -#define HV_X64_MSR_VP_RUNTIME_AVAILABLE \ - HV_MSR_VP_RUNTIME_AVAILABLE -#define HV_X64_MSR_SYNIC_AVAILABLE \ - HV_MSR_SYNIC_AVAILABLE -#define HV_X64_MSR_APIC_ACCESS_AVAILABLE \ - HV_MSR_APIC_ACCESS_AVAILABLE -#define HV_X64_MSR_HYPERCALL_AVAILABLE \ - HV_MSR_HYPERCALL_AVAILABLE -#define HV_X64_MSR_VP_INDEX_AVAILABLE \ - HV_MSR_VP_INDEX_AVAILABLE -#define HV_X64_MSR_RESET_AVAILABLE \ - HV_MSR_RESET_AVAILABLE -#define HV_X64_MSR_GUEST_IDLE_AVAILABLE \ - HV_MSR_GUEST_IDLE_AVAILABLE -#define HV_X64_ACCESS_FREQUENCY_MSRS \ - HV_ACCESS_FREQUENCY_MSRS -#define HV_X64_ACCESS_REENLIGHTENMENT \ - HV_ACCESS_REENLIGHTENMENT -#define HV_X64_ACCESS_TSC_INVARIANT \ - HV_ACCESS_TSC_INVARIANT - -/* - * Aliases for Group B features that have X64 in the name. - * On x86/x64 these are HYPERV_CPUID_FEATURES.EBX bits. - */ -#define HV_X64_POST_MESSAGES HV_POST_MESSAGES -#define HV_X64_SIGNAL_EVENTS HV_SIGNAL_EVENTS - -/* * Group D Features. The bit assignments are custom to each architecture. * On x86/x64 these are HYPERV_CPUID_FEATURES.EDX bits. 
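The irq_alloc_info rework above replaces the per-subsystem anonymous-struct fields with a shared devid/hwirq pair plus named ioapic and uv members in the union. Code that used to fill ioapic_id/ioapic_pin would now look roughly like the sketch below; which value belongs in devid is an assumption drawn from the new layout, not something this hunk spells out.

#include <linux/string.h>
#include <linux/types.h>
#include <asm/hw_irq.h>

/* Sketch only: parameters mirror the old ioapic_* field names. */
static void example_fill_ioapic_info(struct irq_alloc_info *info,
				     int ioapic_id, int pin, int node,
				     bool trigger, bool polarity)
{
	memset(info, 0, sizeof(*info));
	info->type            = X86_IRQ_ALLOC_TYPE_IOAPIC;
	info->devid           = ioapic_id;	/* was info->ioapic_id */
	info->ioapic.pin      = pin;		/* was info->ioapic_pin */
	info->ioapic.node     = node;
	info->ioapic.trigger  = trigger;
	info->ioapic.polarity = polarity;
}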
*/ diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index a0638640f1ed..b2442eb0ac2f 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -309,6 +309,19 @@ static __always_inline void __##func(struct pt_regs *regs) __visible void noist_##func(struct pt_regs *regs) /** + * DECLARE_IDTENTRY_VC - Declare functions for the VC entry point + * @vector: Vector number (ignored for C) + * @func: Function name of the entry point + * + * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE, but declares also the + * safe_stack C handler. + */ +#define DECLARE_IDTENTRY_VC(vector, func) \ + DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \ + __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \ + __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code) + +/** * DEFINE_IDTENTRY_IST - Emit code for IST entry points * @func: Function name of the entry point * @@ -347,6 +360,35 @@ static __always_inline void __##func(struct pt_regs *regs) #define DEFINE_IDTENTRY_DF(func) \ DEFINE_IDTENTRY_RAW_ERRORCODE(func) +/** + * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler + which runs on a safe stack. + * @func: Function name of the entry point + * + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE + */ +#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \ + DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func) + +/** + * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler + which runs on the VC fall-back stack + * @func: Function name of the entry point + * + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE + */ +#define DEFINE_IDTENTRY_VC_IST(func) \ + DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func) + +/** + * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler + * @func: Function name of the entry point + * + * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE + */ +#define DEFINE_IDTENTRY_VC(func) \ + DEFINE_IDTENTRY_RAW_ERRORCODE(func) + #else /* CONFIG_X86_64 */ /** @@ -433,6 +475,9 @@ __visible noinstr void func(struct pt_regs *regs, \ # define DECLARE_IDTENTRY_XENCB(vector, func) \ DECLARE_IDTENTRY(vector, func) +# define DECLARE_IDTENTRY_VC(vector, func) \ + idtentry_vc vector asm_##func func + #else # define DECLARE_IDTENTRY_MCE(vector, func) \ DECLARE_IDTENTRY(vector, func) @@ -547,7 +592,7 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check); /* NMI */ DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi); -#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64) +#ifdef CONFIG_XEN_PV DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi); #endif @@ -557,13 +602,18 @@ DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug); #else DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug); #endif -#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64) +#ifdef CONFIG_XEN_PV DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug); #endif /* #DF */ DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault); +/* #VC */ +#ifdef CONFIG_AMD_MEM_ENCRYPT +DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication); +#endif + #ifdef CONFIG_XEN_PV DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback); #endif @@ -591,10 +641,6 @@ DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function); #endif #ifdef CONFIG_X86_LOCAL_APIC -# ifdef CONFIG_X86_UV -DECLARE_IDTENTRY_SYSVEC(UV_BAU_MESSAGE, sysvec_uv_bau_message); -# endif - # ifdef CONFIG_X86_MCE_THRESHOLD DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold); # endif diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index 2b6ccf2c49f1..a0f839aa144d 
100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -15,9 +15,15 @@ #define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf) #define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4)) +bool insn_has_rep_prefix(struct insn *insn); void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); +int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs); unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx); int insn_get_code_seg_params(struct pt_regs *regs); +int insn_fetch_from_user(struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE]); +bool insn_decode(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size); #endif /* _ASM_X86_INSN_EVAL_H */ diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index e1aa17a468a8..d726459d08e5 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -401,7 +401,7 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr, /** * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units - * @__dst: destination, in MMIO space (must be 512-bit aligned) + * @dst: destination, in MMIO space (must be 512-bit aligned) * @src: source * @count: number of 512 bits quantities to submit * @@ -412,25 +412,14 @@ extern bool phys_mem_access_encrypted(unsigned long phys_addr, * Warning: Do not use this helper unless your driver has checked that the CPU * instruction is supported on the platform. */ -static inline void iosubmit_cmds512(void __iomem *__dst, const void *src, +static inline void iosubmit_cmds512(void __iomem *dst, const void *src, size_t count) { - /* - * Note that this isn't an "on-stack copy", just definition of "dst" - * as a pointer to 64-bytes of stuff that is going to be overwritten. - * In the MOVDIR64B case that may be needed as you can use the - * MOVDIR64B instruction to copy arbitrary memory around. This trick - * lets the compiler know how much gets clobbered. - */ - volatile struct { char _[64]; } *dst = __dst; const u8 *from = src; const u8 *end = from + count * 64; while (from < end) { - /* MOVDIR64B [rdx], rax */ - asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02" - : "=m" (dst) - : "d" (from), "a" (dst)); + movdir64b(dst, from); from += 64; } } diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 4bc985f1e2e4..af4a151d70b3 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -45,8 +45,6 @@ extern int irq_remap_enable_fault_handling(void); extern void panic_if_irq_remap(const char *msg); extern struct irq_domain * -irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info); -extern struct irq_domain * irq_remapping_get_irq_domain(struct irq_alloc_info *info); /* Create PCI MSI/MSIx irqdomain, use @parent as the parent irqdomain. 
*/ @@ -74,12 +72,6 @@ static inline void panic_if_irq_remap(const char *msg) } static inline struct irq_domain * -irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info) -{ - return NULL; -} - -static inline struct irq_domain * irq_remapping_get_irq_domain(struct irq_alloc_info *info) { return NULL; diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h index c066ffae222b..cd684d45cb5f 100644 --- a/arch/x86/include/asm/irqdomain.h +++ b/arch/x86/include/asm/irqdomain.h @@ -51,9 +51,13 @@ extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain); #endif /* CONFIG_X86_IO_APIC */ #ifdef CONFIG_PCI_MSI -extern void arch_init_msi_domain(struct irq_domain *domain); +void x86_create_pci_msi_domain(void); +struct irq_domain *native_create_pci_msi_domain(void); +extern struct irq_domain *x86_pci_msi_default_domain; #else -static inline void arch_init_msi_domain(struct irq_domain *domain) { } +static inline void x86_create_pci_msi_domain(void) { } +#define native_create_pci_msi_domain NULL +#define x86_pci_msi_default_domain NULL #endif #endif diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 143bc9abe99c..991a7ad540c7 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -106,5 +106,9 @@ extern int kprobe_exceptions_notify(struct notifier_block *self, extern int kprobe_int3_handler(struct pt_regs *regs); extern int kprobe_debug_handler(struct pt_regs *regs); +#else + +static inline int kprobe_debug_handler(struct pt_regs *regs) { return 0; } + #endif /* CONFIG_KPROBES */ #endif /* _ASM_X86_KPROBES_H */ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index cf503824529c..a0f147893a04 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -136,9 +136,24 @@ #define MCE_HANDLED_NFIT BIT_ULL(3) #define MCE_HANDLED_EDAC BIT_ULL(4) #define MCE_HANDLED_MCELOG BIT_ULL(5) + +/* + * Indicates an MCE which has happened in kernel space but from + * which the kernel can recover simply by executing fixup_exception() + * so that an error is returned to the caller of the function that + * hit the machine check. + */ #define MCE_IN_KERNEL_RECOV BIT_ULL(6) /* + * Indicates an MCE that happened in kernel space while copying data + * from user. In this case fixup_exception() gets the kernel to the + * error exit for the copy function. Machine check handler can then + * treat it like a fault taken in user mode. + */ +#define MCE_IN_KERNEL_COPYIN BIT_ULL(7) + +/* * This structure contains all data related to the MCE log. Also * carries a signature to make it easier to find from external * debugging tools. Each entry is only valid when its finished flag @@ -174,6 +189,15 @@ extern void mce_unregister_decode_chain(struct notifier_block *nb); extern int mce_p5_enabled; +#ifdef CONFIG_ARCH_HAS_COPY_MC +extern void enable_copy_mc_fragile(void); +unsigned long __must_check copy_mc_fragile(void *dst, const void *src, unsigned cnt); +#else +static inline void enable_copy_mc_fragile(void) +{ +} +#endif + #ifdef CONFIG_X86_MCE int mcheck_init(void); void mcheck_cpu_init(struct cpuinfo_x86 *c); @@ -200,12 +224,8 @@ void mce_setup(struct mce *m); void mce_log(struct mce *m); DECLARE_PER_CPU(struct device *, mce_device); -/* - * Maximum banks number. - * This is the limit of the current register layout on - * Intel CPUs. - */ -#define MAX_NR_BANKS 32 +/* Maximum number of MCA banks per CPU. 
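[Editor's illustration] The mce.h hunk above exports copy_mc_fragile(), a copy routine that survives a machine check in the source and reports how much data was left uncopied instead of panicking. A minimal sketch of a caller is below; the function name is made up, and in practice callers usually go through the generic copy_mc_to_kernel() wrapper rather than calling the fragile variant directly.

    #include <linux/types.h>
    #include <asm/mce.h>

    /*
     * Illustrative only (requires CONFIG_ARCH_HAS_COPY_MC): copy from
     * possibly-poisoned memory, e.g. pmem. copy_mc_fragile() returns the
     * number of bytes that were NOT copied; 0 means the copy completed.
     */
    static ssize_t example_read_pmem(void *dst, const void *pmem_src, size_t len)
    {
            unsigned long rem;

            rem = copy_mc_fragile(dst, pmem_src, len);
            if (rem)
                    return len - rem;       /* short read; caller turns it into -EIO */

            return len;
    }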
*/ +#define MAX_NR_BANKS 64 #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); @@ -328,7 +348,6 @@ enum smca_bank_types { struct smca_hwid { unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */ u32 hwid_mcatype; /* (hwid,mcatype) tuple */ - u32 xec_bitmap; /* Bitmap of valid ExtErrorCodes; current max is 21. */ u8 count; /* Number of instances. */ }; diff --git a/arch/x86/include/asm/mcsafe_test.h b/arch/x86/include/asm/mcsafe_test.h deleted file mode 100644 index eb59804b6201..000000000000 --- a/arch/x86/include/asm/mcsafe_test.h +++ /dev/null @@ -1,75 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _MCSAFE_TEST_H_ -#define _MCSAFE_TEST_H_ - -#ifndef __ASSEMBLY__ -#ifdef CONFIG_MCSAFE_TEST -extern unsigned long mcsafe_test_src; -extern unsigned long mcsafe_test_dst; - -static inline void mcsafe_inject_src(void *addr) -{ - if (addr) - mcsafe_test_src = (unsigned long) addr; - else - mcsafe_test_src = ~0UL; -} - -static inline void mcsafe_inject_dst(void *addr) -{ - if (addr) - mcsafe_test_dst = (unsigned long) addr; - else - mcsafe_test_dst = ~0UL; -} -#else /* CONFIG_MCSAFE_TEST */ -static inline void mcsafe_inject_src(void *addr) -{ -} - -static inline void mcsafe_inject_dst(void *addr) -{ -} -#endif /* CONFIG_MCSAFE_TEST */ - -#else /* __ASSEMBLY__ */ -#include <asm/export.h> - -#ifdef CONFIG_MCSAFE_TEST -.macro MCSAFE_TEST_CTL - .pushsection .data - .align 8 - .globl mcsafe_test_src - mcsafe_test_src: - .quad 0 - EXPORT_SYMBOL_GPL(mcsafe_test_src) - .globl mcsafe_test_dst - mcsafe_test_dst: - .quad 0 - EXPORT_SYMBOL_GPL(mcsafe_test_dst) - .popsection -.endm - -.macro MCSAFE_TEST_SRC reg count target - leaq \count(\reg), %r9 - cmp mcsafe_test_src, %r9 - ja \target -.endm - -.macro MCSAFE_TEST_DST reg count target - leaq \count(\reg), %r9 - cmp mcsafe_test_dst, %r9 - ja \target -.endm -#else -.macro MCSAFE_TEST_CTL -.endm - -.macro MCSAFE_TEST_SRC reg count target -.endm - -.macro MCSAFE_TEST_DST reg count target -.endm -#endif /* CONFIG_MCSAFE_TEST */ -#endif /* __ASSEMBLY__ */ -#endif /* _MCSAFE_TEST_H_ */ diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 5049f6c22683..c9f5df0a1c10 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -19,6 +19,7 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT extern u64 sme_me_mask; +extern u64 sev_status; extern bool sev_enabled; void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr, @@ -48,8 +49,10 @@ void __init mem_encrypt_free_decrypted_mem(void); /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); +void __init sev_es_init_vc_handling(void); bool sme_active(void); bool sev_active(void); +bool sev_es_active(void); #define __bss_decrypted __attribute__((__section__(".bss..decrypted"))) @@ -70,8 +73,10 @@ static inline void __init sme_early_init(void) { } static inline void __init sme_encrypt_kernel(struct boot_params *bp) { } static inline void __init sme_enable(struct boot_params *bp) { } +static inline void sev_es_init_vc_handling(void) { } static inline bool sme_active(void) { return false; } static inline bool sev_active(void) { return false; } +static inline bool sev_es_active(void) { return false; } static inline int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; } diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 606cbaebd336..e90ac7e9ae2c 100644 --- a/arch/x86/include/asm/mpspec.h +++ 
b/arch/x86/include/asm/mpspec.h @@ -67,21 +67,11 @@ static inline void find_smp_config(void) #ifdef CONFIG_X86_MPPARSE extern void e820__memblock_alloc_reserved_mpc_new(void); extern int enable_update_mptable; -extern int default_mpc_apic_id(struct mpc_cpu *m); -extern void default_smp_read_mpc_oem(struct mpc_table *mpc); -# ifdef CONFIG_X86_IO_APIC -extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str); -# else -# define default_mpc_oem_bus_info NULL -# endif extern void default_find_smp_config(void); extern void default_get_smp_config(unsigned int early); #else static inline void e820__memblock_alloc_reserved_mpc_new(void) { } #define enable_update_mptable 0 -#define default_mpc_apic_id NULL -#define default_smp_read_mpc_oem NULL -#define default_mpc_oem_bus_info NULL #define default_find_smp_config x86_init_noop #define default_get_smp_config x86_init_uint_noop #endif diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 4f77b8f22e54..ffc289992d1b 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -54,6 +54,7 @@ typedef int (*hyperv_fill_flush_list_func)( #define hv_enable_vdso_clocksource() \ vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK); #define hv_get_raw_timer() rdtsc_ordered() +#define hv_get_vector() HYPERVISOR_CALLBACK_VECTOR /* * Reference to pv_ops must be inline so objtool diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h index 25ddd0916bb2..cd30013d15d3 100644 --- a/arch/x86/include/asm/msi.h +++ b/arch/x86/include/asm/msi.h @@ -9,6 +9,4 @@ typedef struct irq_alloc_info msi_alloc_info_t; int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg); -void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc); - #endif /* _ASM_X86_MSI_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 2859ee4f39a8..972a34d93505 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -257,6 +257,9 @@ #define MSR_IA32_LASTINTFROMIP 0x000001dd #define MSR_IA32_LASTINTTOIP 0x000001de +#define MSR_IA32_PASID 0x00000d93 +#define MSR_IA32_PASID_VALID BIT_ULL(31) + /* DEBUGCTLMSR bits (others vary by model): */ #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ #define DEBUGCTLMSR_BTF_SHIFT 1 @@ -464,11 +467,15 @@ #define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) #define MSR_AMD64_IBSCTL 0xc001103a #define MSR_AMD64_IBSBRTARGET 0xc001103b +#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c #define MSR_AMD64_IBSOPDATA4 0xc001103d #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ +#define MSR_AMD64_SEV_ES_GHCB 0xc0010130 #define MSR_AMD64_SEV 0xc0010131 #define MSR_AMD64_SEV_ENABLED_BIT 0 +#define MSR_AMD64_SEV_ES_ENABLED_BIT 1 #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) +#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT) #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f @@ -857,11 +864,14 @@ #define MSR_CORE_PERF_FIXED_CTR0 0x00000309 #define MSR_CORE_PERF_FIXED_CTR1 0x0000030a #define MSR_CORE_PERF_FIXED_CTR2 0x0000030b +#define MSR_CORE_PERF_FIXED_CTR3 0x0000030c #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d #define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e #define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 +#define MSR_PERF_METRICS 0x00000329 + /* PERF_GLOBAL_OVF_CTL bits */ #define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55 #define 
MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT) diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 86f20d520a07..0b4920a7238e 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -60,22 +60,20 @@ struct saved_msrs { #define EAX_EDX_RET(val, low, high) "=A" (val) #endif -#ifdef CONFIG_TRACEPOINTS /* * Be very careful with includes. This header is prone to include loops. */ #include <asm/atomic.h> #include <linux/tracepoint-defs.h> -extern struct tracepoint __tracepoint_read_msr; -extern struct tracepoint __tracepoint_write_msr; -extern struct tracepoint __tracepoint_rdpmc; -#define msr_tracepoint_active(t) static_key_false(&(t).key) +#ifdef CONFIG_TRACEPOINTS +DECLARE_TRACEPOINT(read_msr); +DECLARE_TRACEPOINT(write_msr); +DECLARE_TRACEPOINT(rdpmc); extern void do_trace_write_msr(unsigned int msr, u64 val, int failed); extern void do_trace_read_msr(unsigned int msr, u64 val, int failed); extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed); #else -#define msr_tracepoint_active(t) false static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {} static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {} static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} @@ -128,7 +126,7 @@ static inline unsigned long long native_read_msr(unsigned int msr) val = __rdmsr(msr); - if (msr_tracepoint_active(__tracepoint_read_msr)) + if (tracepoint_enabled(read_msr)) do_trace_read_msr(msr, val, 0); return val; @@ -150,7 +148,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, _ASM_EXTABLE(2b, 3b) : [err] "=r" (*err), EAX_EDX_RET(val, low, high) : "c" (msr), [fault] "i" (-EIO)); - if (msr_tracepoint_active(__tracepoint_read_msr)) + if (tracepoint_enabled(read_msr)) do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err); return EAX_EDX_VAL(val, low, high); } @@ -161,7 +159,7 @@ native_write_msr(unsigned int msr, u32 low, u32 high) { __wrmsr(msr, low, high); - if (msr_tracepoint_active(__tracepoint_write_msr)) + if (tracepoint_enabled(write_msr)) do_trace_write_msr(msr, ((u64)high << 32 | low), 0); } @@ -181,7 +179,7 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high) : "c" (msr), "0" (low), "d" (high), [fault] "i" (-EIO) : "memory"); - if (msr_tracepoint_active(__tracepoint_write_msr)) + if (tracepoint_enabled(write_msr)) do_trace_write_msr(msr, ((u64)high << 32 | low), err); return err; } @@ -248,7 +246,7 @@ static inline unsigned long long native_read_pmc(int counter) DECLARE_ARGS(val, low, high); asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); - if (msr_tracepoint_active(__tracepoint_rdpmc)) + if (tracepoint_enabled(rdpmc)) do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0); return EAX_EDX_VAL(val, low, high); } diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index e7752b4038ff..cb9ad6b73973 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -4,7 +4,7 @@ #define _ASM_X86_NOSPEC_BRANCH_H_ #include <linux/static_key.h> -#include <linux/frame.h> +#include <linux/objtool.h> #include <asm/alternative.h> #include <asm/alternative-asm.h> @@ -314,19 +314,19 @@ static inline void mds_idle_clear_cpu_buffers(void) * lfence * jmp spec_trap * do_rop: - * mov %rax,(%rsp) for x86_64 + * mov %rcx,(%rsp) for x86_64 * mov %edx,(%esp) for x86_32 * retq * * Without retpolines configured: * - * jmp *%rax for 
x86_64 + * jmp *%rcx for x86_64 * jmp *%edx for x86_32 */ #ifdef CONFIG_RETPOLINE # ifdef CONFIG_X86_64 -# define RETPOLINE_RAX_BPF_JIT_SIZE 17 -# define RETPOLINE_RAX_BPF_JIT() \ +# define RETPOLINE_RCX_BPF_JIT_SIZE 17 +# define RETPOLINE_RCX_BPF_JIT() \ do { \ EMIT1_off32(0xE8, 7); /* callq do_rop */ \ /* spec_trap: */ \ @@ -334,7 +334,7 @@ do { \ EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ /* do_rop: */ \ - EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \ + EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */ \ EMIT1(0xC3); /* retq */ \ } while (0) # else /* !CONFIG_X86_64 */ @@ -352,9 +352,9 @@ do { \ # endif #else /* !CONFIG_RETPOLINE */ # ifdef CONFIG_X86_64 -# define RETPOLINE_RAX_BPF_JIT_SIZE 2 -# define RETPOLINE_RAX_BPF_JIT() \ - EMIT2(0xFF, 0xE0); /* jmp *%rax */ +# define RETPOLINE_RCX_BPF_JIT_SIZE 2 +# define RETPOLINE_RCX_BPF_JIT() \ + EMIT2(0xFF, 0xE1); /* jmp *%rcx */ # else /* !CONFIG_X86_64 */ # define RETPOLINE_EDX_BPF_JIT() \ EMIT2(0xFF, 0xE2) /* jmp *%edx */ diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index bbfde3d2662f..e3bae2b60a0d 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -3,6 +3,7 @@ #define _ASM_X86_NUMA_H #include <linux/nodemask.h> +#include <linux/errno.h> #include <asm/topology.h> #include <asm/apicdef.h> @@ -62,12 +63,14 @@ extern void numa_clear_node(int cpu); extern void __init init_cpu_to_node(void); extern void numa_add_cpu(int cpu); extern void numa_remove_cpu(int cpu); +extern void init_gi_nodes(void); #else /* CONFIG_NUMA */ static inline void numa_set_node(int cpu, int node) { } static inline void numa_clear_node(int cpu) { } static inline void init_cpu_to_node(void) { } static inline void numa_add_cpu(int cpu) { } static inline void numa_remove_cpu(int cpu) { } +static inline void init_gi_nodes(void) { } #endif /* CONFIG_NUMA */ #ifdef CONFIG_DEBUG_PER_CPU_MAPS @@ -77,7 +80,12 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable); #ifdef CONFIG_NUMA_EMU #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) -void numa_emu_cmdline(char *); +int numa_emu_cmdline(char *str); +#else /* CONFIG_NUMA_EMU */ +static inline int numa_emu_cmdline(char *str) +{ + return -EINVAL; +} #endif /* CONFIG_NUMA_EMU */ #endif /* _ASM_X86_NUMA_H */ diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h index d25534940bde..fdbffec4cfde 100644 --- a/arch/x86/include/asm/orc_types.h +++ b/arch/x86/include/asm/orc_types.h @@ -39,27 +39,6 @@ #define ORC_REG_SP_INDIRECT 9 #define ORC_REG_MAX 15 -/* - * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the - * caller's SP right before it made the call). Used for all callable - * functions, i.e. all C code and all callable asm functions. - * - * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points - * to a fully populated pt_regs from a syscall, interrupt, or exception. - * - * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset - * points to the iret return frame. - * - * The UNWIND_HINT macros are used only for the unwind_hint struct. They - * aren't used in struct orc_entry due to size and complexity constraints. - * Objtool converts them to real types when it converts the hints to orc - * entries. 
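[Editor's illustration] Referring back to the msr.h hunk above: the open-coded msr_tracepoint_active(__tracepoint_read_msr) checks are replaced with the generic DECLARE_TRACEPOINT()/tracepoint_enabled() helpers from <linux/tracepoint-defs.h>, which avoids exporting the tracepoint structs from the header. A minimal sketch of the same pattern applied to a hypothetical tracepoint (the names example_event and do_trace_example_event are illustrative only):

    #include <linux/tracepoint-defs.h>

    /* Hypothetical tracepoint, assumed to be defined elsewhere. */
    DECLARE_TRACEPOINT(example_event);

    extern void do_trace_example_event(unsigned int arg);

    static inline void example_hot_path(unsigned int arg)
    {
            /*
             * tracepoint_enabled() compiles down to a static-key test, so the
             * out-of-line trace call is skipped at near-zero cost while the
             * tracepoint is disabled.
             */
            if (tracepoint_enabled(example_event))
                    do_trace_example_event(arg);
    }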
- */ -#define ORC_TYPE_CALL 0 -#define ORC_TYPE_REGS 1 -#define ORC_TYPE_REGS_IRET 2 -#define UNWIND_HINT_TYPE_RET_OFFSET 3 - #ifndef __ASSEMBLY__ /* * This struct is more or less a vastly simplified version of the DWARF Call @@ -78,19 +57,6 @@ struct orc_entry { unsigned end:1; } __packed; -/* - * This struct is used by asm and inline asm code to manually annotate the - * location of registers on the stack for the ORC unwinder. - * - * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*. - */ -struct unwind_hint { - u32 ip; - s16 sp_offset; - u8 sp_reg; - u8 type; - u8 end; -}; #endif /* __ASSEMBLY__ */ #endif /* _ORC_TYPES_H */ diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 288b065955b7..d0c6c10c18a0 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -28,6 +28,7 @@ #define IST_INDEX_NMI 1 #define IST_INDEX_DB 2 #define IST_INDEX_MCE 3 +#define IST_INDEX_VC 4 /* * Set __PAGE_OFFSET to the most negative possible address + diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 3d2afecde50c..d25cc6830e89 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -160,8 +160,6 @@ static inline void wbinvd(void) PVOP_VCALL0(cpu.wbinvd); } -#define get_kernel_rpl() (pv_info.kernel_rpl) - static inline u64 paravirt_read_msr(unsigned msr) { return PVOP_CALL1(u64, cpu.read_msr, msr); @@ -277,12 +275,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu) PVOP_VCALL2(cpu.load_tls, t, cpu); } -#ifdef CONFIG_X86_64 static inline void load_gs_index(unsigned int gs) { PVOP_VCALL1(cpu.load_gs_index, gs); } -#endif static inline void write_ldt_entry(struct desc_struct *dt, int entry, const void *desc) @@ -375,52 +371,22 @@ static inline void paravirt_release_p4d(unsigned long pfn) static inline pte_t __pte(pteval_t val) { - pteval_t ret; - - if (sizeof(pteval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32); - else - ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val); - - return (pte_t) { .pte = ret }; + return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) }; } static inline pteval_t pte_val(pte_t pte) { - pteval_t ret; - - if (sizeof(pteval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pteval_t, mmu.pte_val, - pte.pte, (u64)pte.pte >> 32); - else - ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte); - - return ret; + return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte); } static inline pgd_t __pgd(pgdval_t val) { - pgdval_t ret; - - if (sizeof(pgdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32); - else - ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val); - - return (pgd_t) { ret }; + return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) }; } static inline pgdval_t pgd_val(pgd_t pgd) { - pgdval_t ret; - - if (sizeof(pgdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val, - pgd.pgd, (u64)pgd.pgd >> 32); - else - ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd); - - return ret; + return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd); } #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION @@ -438,78 +404,34 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned pte_t *ptep, pte_t old_pte, pte_t pte) { - if (sizeof(pteval_t) > sizeof(long)) - /* 5 arg words */ - pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte); - else - PVOP_VCALL4(mmu.ptep_modify_prot_commit, - vma, addr, ptep, pte.pte); + 
PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte); } static inline void set_pte(pte_t *ptep, pte_t pte) { - if (sizeof(pteval_t) > sizeof(long)) - PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32); - else - PVOP_VCALL2(mmu.set_pte, ptep, pte.pte); -} - -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - if (sizeof(pteval_t) > sizeof(long)) - /* 5 arg words */ - pv_ops.mmu.set_pte_at(mm, addr, ptep, pte); - else - PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte); + PVOP_VCALL2(mmu.set_pte, ptep, pte.pte); } static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { - pmdval_t val = native_pmd_val(pmd); - - if (sizeof(pmdval_t) > sizeof(long)) - PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32); - else - PVOP_VCALL2(mmu.set_pmd, pmdp, val); + PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd)); } -#if CONFIG_PGTABLE_LEVELS >= 3 static inline pmd_t __pmd(pmdval_t val) { - pmdval_t ret; - - if (sizeof(pmdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32); - else - ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val); - - return (pmd_t) { ret }; + return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) }; } static inline pmdval_t pmd_val(pmd_t pmd) { - pmdval_t ret; - - if (sizeof(pmdval_t) > sizeof(long)) - ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val, - pmd.pmd, (u64)pmd.pmd >> 32); - else - ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); - - return ret; + return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); } static inline void set_pud(pud_t *pudp, pud_t pud) { - pudval_t val = native_pud_val(pud); - - if (sizeof(pudval_t) > sizeof(long)) - PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32); - else - PVOP_VCALL2(mmu.set_pud, pudp, val); + PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud)); } -#if CONFIG_PGTABLE_LEVELS >= 4 + static inline pud_t __pud(pudval_t val) { pudval_t ret; @@ -526,7 +448,7 @@ static inline pudval_t pud_val(pud_t pud) static inline void pud_clear(pud_t *pudp) { - set_pud(pudp, __pud(0)); + set_pud(pudp, native_make_pud(0)); } static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) @@ -563,40 +485,17 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd) } while (0) #define pgd_clear(pgdp) do { \ - if (pgtable_l5_enabled()) \ - set_pgd(pgdp, __pgd(0)); \ + if (pgtable_l5_enabled()) \ + set_pgd(pgdp, native_make_pgd(0)); \ } while (0) #endif /* CONFIG_PGTABLE_LEVELS == 5 */ static inline void p4d_clear(p4d_t *p4dp) { - set_p4d(p4dp, __p4d(0)); + set_p4d(p4dp, native_make_p4d(0)); } -#endif /* CONFIG_PGTABLE_LEVELS == 4 */ - -#endif /* CONFIG_PGTABLE_LEVELS >= 3 */ - -#ifdef CONFIG_X86_PAE -/* Special-case pte-setting operations for PAE, which can't update a - 64-bit pte atomically */ -static inline void set_pte_atomic(pte_t *ptep, pte_t pte) -{ - PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32); -} - -static inline void pte_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) -{ - PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep); -} - -static inline void pmd_clear(pmd_t *pmdp) -{ - PVOP_VCALL1(mmu.pmd_clear, pmdp); -} -#else /* !CONFIG_X86_PAE */ static inline void set_pte_atomic(pte_t *ptep, pte_t pte) { set_pte(ptep, pte); @@ -605,14 +504,13 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - set_pte_at(mm, addr, ptep, __pte(0)); + set_pte(ptep, native_make_pte(0)); } static inline void pmd_clear(pmd_t *pmdp) { - set_pmd(pmdp, __pmd(0)); + 
set_pmd(pmdp, native_make_pmd(0)); } -#endif /* CONFIG_X86_PAE */ #define __HAVE_ARCH_START_CONTEXT_SWITCH static inline void arch_start_context_switch(struct task_struct *prev) @@ -682,16 +580,9 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu); #endif /* SMP && PARAVIRT_SPINLOCKS */ #ifdef CONFIG_X86_32 -#define PV_SAVE_REGS "pushl %ecx; pushl %edx;" -#define PV_RESTORE_REGS "popl %edx; popl %ecx;" - /* save and restore all caller-save registers, except return value */ #define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;" #define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;" - -#define PV_FLAGS_ARG "0" -#define PV_EXTRA_CLOBBERS -#define PV_VEXTRA_CLOBBERS #else /* save and restore all caller-save registers, except return value */ #define PV_SAVE_ALL_CALLER_REGS \ @@ -712,14 +603,6 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu); "pop %rsi;" \ "pop %rdx;" \ "pop %rcx;" - -/* We save some registers, but all of them, that's too much. We clobber all - * caller saved registers but the argument parameter */ -#define PV_SAVE_REGS "pushq %%rdi;" -#define PV_RESTORE_REGS "popq %%rdi;" -#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi" -#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi" -#define PV_FLAGS_ARG "D" #endif /* diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 8dfcb2508e6d..0fad9f61c76a 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -68,13 +68,8 @@ struct paravirt_callee_save { /* general info */ struct pv_info { #ifdef CONFIG_PARAVIRT_XXL - unsigned int kernel_rpl; - int shared_kernel_pmd; - -#ifdef CONFIG_X86_64 u16 extra_user_64bit_cs; /* __USER_CS if none */ #endif -#endif const char *name; }; @@ -126,9 +121,7 @@ struct pv_cpu_ops { void (*set_ldt)(const void *desc, unsigned entries); unsigned long (*store_tr)(void); void (*load_tls)(struct thread_struct *t, unsigned int cpu); -#ifdef CONFIG_X86_64 void (*load_gs_index)(unsigned int idx); -#endif void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, const void *desc); void (*write_gdt_entry)(struct desc_struct *, @@ -249,8 +242,6 @@ struct pv_mmu_ops { /* Pagetable manipulation functions */ void (*set_pte)(pte_t *ptep, pte_t pteval); - void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval); void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr, @@ -264,21 +255,11 @@ struct pv_mmu_ops { struct paravirt_callee_save pgd_val; struct paravirt_callee_save make_pgd; -#if CONFIG_PGTABLE_LEVELS >= 3 -#ifdef CONFIG_X86_PAE - void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); - void (*pte_clear)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); - void (*pmd_clear)(pmd_t *pmdp); - -#endif /* CONFIG_X86_PAE */ - void (*set_pud)(pud_t *pudp, pud_t pudval); struct paravirt_callee_save pmd_val; struct paravirt_callee_save make_pmd; -#if CONFIG_PGTABLE_LEVELS >= 4 struct paravirt_callee_save pud_val; struct paravirt_callee_save make_pud; @@ -291,10 +272,6 @@ struct pv_mmu_ops { void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval); #endif /* CONFIG_PGTABLE_LEVELS >= 5 */ -#endif /* CONFIG_PGTABLE_LEVELS >= 4 */ - -#endif /* CONFIG_PGTABLE_LEVELS >= 3 */ - struct pv_lazy_ops lazy_mode; /* dom0 ops */ diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 7ccb338507e3..d2c76c8d8cfd 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -105,17 +105,6 @@ static 
inline void early_quirks(void) { } extern void pci_iommu_alloc(void); -#ifdef CONFIG_PCI_MSI -/* implemented in arch/x86/kernel/apic/io_apic. */ -struct msi_desc; -int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); -void native_teardown_msi_irq(unsigned int irq); -void native_restore_msi_irqs(struct pci_dev *dev); -#else -#define native_setup_msi_irqs NULL -#define native_teardown_msi_irq NULL -#endif - /* generic pci stuff */ #include <asm-generic/pci.h> diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 73bb404f4d2a..490411dba438 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -114,9 +114,20 @@ extern const struct pci_raw_ops pci_direct_conf1; extern bool port_cf9_safe; /* arch_initcall level */ +#ifdef CONFIG_PCI_DIRECT extern int pci_direct_probe(void); extern void pci_direct_init(int type); +#else +static inline int pci_direct_probe(void) { return -1; } +static inline void pci_direct_init(int type) { } +#endif + +#ifdef CONFIG_PCI_BIOS extern void pci_pcbios_init(void); +#else +static inline void pci_pcbios_init(void) { } +#endif + extern void __init dmi_check_pciprobe(void); extern void __init dmi_check_skip_isa_align(void); diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 0c1b13720525..6960cd6d1f23 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -196,13 +196,29 @@ struct x86_pmu_capability { * Fixed-purpose performance events: */ +/* RDPMC offset for Fixed PMCs */ +#define INTEL_PMC_FIXED_RDPMC_BASE (1 << 30) +#define INTEL_PMC_FIXED_RDPMC_METRICS (1 << 29) + /* - * All 3 fixed-mode PMCs are configured via this single MSR: + * All the fixed-mode PMCs are configured via this single MSR: */ #define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d /* - * The counts are available in three separate MSRs: + * There is no event-code assigned to the fixed-mode PMCs. + * + * For a fixed-mode PMC, which has an equivalent event on a general-purpose + * PMC, the event-code of the equivalent event is used for the fixed-mode PMC, + * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core. + * + * For a fixed-mode PMC, which doesn't have an equivalent event, a + * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS. + * The pseudo event-code for a fixed-mode PMC must be 0x00. + * The pseudo umask-code is 0xX. The X equals the index of the fixed + * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300. + * + * The counts are available in separate MSRs: */ /* Instr_Retired.Any: */ @@ -213,29 +229,84 @@ struct x86_pmu_capability { #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a #define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1) -/* CPU_CLK_Unhalted.Ref: */ +/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */ #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b #define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2) #define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES) +/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */ +#define MSR_ARCH_PERFMON_FIXED_CTR3 0x30c +#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3) +#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS) + /* * We model BTS tracing as another fixed-mode PMC. 
* - * We choose a value in the middle of the fixed event range, since lower + * We choose the value 47 for the fixed index of BTS, since lower * values are used by actual fixed events and higher values are used * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. */ -#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16) +#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15) + +/* + * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for + * each TopDown metric event. + * + * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS). + */ +#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16) +#define INTEL_PMC_IDX_TD_RETIRING (INTEL_PMC_IDX_METRIC_BASE + 0) +#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1) +#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2) +#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3) +#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_BE_BOUND +#define INTEL_PMC_MSK_TOPDOWN ((0xfull << INTEL_PMC_IDX_METRIC_BASE) | \ + INTEL_PMC_MSK_FIXED_SLOTS) -#define GLOBAL_STATUS_COND_CHG BIT_ULL(63) -#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62) -#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61) -#define GLOBAL_STATUS_ASIF BIT_ULL(60) -#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59) -#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58 -#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT) -#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55) +/* + * There is no event-code assigned to the TopDown events. + * + * For the slots event, use the pseudo code of the fixed counter 3. + * + * For the metric events, the pseudo event-code is 0x00. + * The pseudo umask-code starts from the middle of the pseudo event + * space, 0x80. + */ +#define INTEL_TD_SLOTS 0x0400 /* TOPDOWN.SLOTS */ +/* Level 1 metrics */ +#define INTEL_TD_METRIC_RETIRING 0x8000 /* Retiring metric */ +#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */ +#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */ +#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */ +#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_BE_BOUND +#define INTEL_TD_METRIC_NUM 4 + +static inline bool is_metric_idx(int idx) +{ + return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM; +} + +static inline bool is_topdown_idx(int idx) +{ + return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS; +} +#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit) \ + (~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN) + +#define GLOBAL_STATUS_COND_CHG BIT_ULL(63) +#define GLOBAL_STATUS_BUFFER_OVF_BIT 62 +#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT) +#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61) +#define GLOBAL_STATUS_ASIF BIT_ULL(60) +#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59) +#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58 +#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT) +#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55 +#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT) +#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48 + +#define GLOBAL_CTRL_EN_PERF_METRICS 48 /* * We model guest LBR event tracing as another fixed-mode PMC like BTS. 
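[Editor's illustration] The perf_event.h comments above describe the pseudo-encoding scheme: a fixed counter with no general-purpose equivalent uses event code 0x00 and a umask equal to its index plus one, while the TopDown metric events use umasks starting at 0x80. A small sketch of helpers built on those rules follows; the helper names are illustrative and only restate what the defines above already encode.

    #include <linux/types.h>
    #include <asm/perf_event.h>

    /*
     * Illustrative only. Per the comment above, a fixed counter without a
     * real event uses event=0x00 and umask=(index + 1):
     *   fixed counter 2 (CPU_CLK_Unhalted.Ref) -> 0x0300
     *   fixed counter 3 (TOPDOWN.SLOTS)        -> 0x0400 (INTEL_TD_SLOTS)
     */
    static inline unsigned int example_fixed_pseudo_encoding(unsigned int fixed_idx)
    {
            return (fixed_idx + 1) << 8;    /* event code stays 0x00 */
    }

    static inline bool example_is_td_metric_encoding(unsigned int config)
    {
            /* Level 1 metric events: event 0x00, umask 0x80..0x83. */
            return config >= INTEL_TD_METRIC_RETIRING &&
                   config <= INTEL_TD_METRIC_MAX &&
                   !(config & 0xff);
    }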
* @@ -334,6 +405,7 @@ struct pebs_xmm { #define IBS_OP_ENABLE (1ULL<<17) #define IBS_OP_MAX_CNT 0x0000FFFFULL #define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ +#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */ #define IBS_RIP_INVALID (1ULL<<38) #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h index 80fbb4a9ed87..56baf43befb4 100644 --- a/arch/x86/include/asm/pgtable-3level_types.h +++ b/arch/x86/include/asm/pgtable-3level_types.h @@ -20,12 +20,7 @@ typedef union { } pte_t; #endif /* !__ASSEMBLY__ */ -#ifdef CONFIG_PARAVIRT_XXL -#define SHARED_KERNEL_PMD ((!static_cpu_has(X86_FEATURE_PTI) && \ - (pv_info.shared_kernel_pmd))) -#else #define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI)) -#endif #define ARCH_PAGE_TABLE_SYNC_MASK (SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index b836138ce852..a02c67291cfc 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,7 +28,7 @@ #include <asm-generic/pgtable_uffd.h> extern pgd_t early_top_pgt[PTRS_PER_PGD]; -int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); +bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd); void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm); void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm, @@ -63,7 +63,6 @@ extern pmdval_t early_pmd_flags; #include <asm/paravirt.h> #else /* !CONFIG_PARAVIRT_XXL */ #define set_pte(ptep, pte) native_set_pte(ptep, pte) -#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) #define set_pte_atomic(ptep, pte) \ native_set_pte_atomic(ptep, pte) @@ -1033,10 +1032,10 @@ static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp) return res; } -static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep , pte_t pte) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) { - native_set_pte(ptep, pte); + set_pte(ptep, pte); } static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 8f63efb2a2cc..52e5f5f2240d 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -159,6 +159,4 @@ extern unsigned int ptrs_per_p4d; #define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t)) -#define ARCH_PAGE_TABLE_SYNC_MASK (pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED) - #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 97143d87994c..5ac507586769 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -517,7 +517,7 @@ struct thread_struct { /* Save middle states of ptrace breakpoints */ struct perf_event *ptrace_bps[HBP_NUM]; /* Debug status used for traps, single steps, etc... 
*/ - unsigned long debugreg6; + unsigned long virtual_dr6; /* Keep track of the exact dr7 value set by the user */ unsigned long ptrace_dr7; /* Fault info: */ @@ -696,6 +696,7 @@ extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); extern void load_percpu_segment(int); extern void cpu_init(void); +extern void cpu_init_exception_handling(void); extern void cr4_init(void); static inline unsigned long get_debugctlmsr(void) diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 28996fe19301..2c35f1c01a2d 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -10,6 +10,7 @@ void syscall_init(void); #ifdef CONFIG_X86_64 void entry_SYSCALL_64(void); +void entry_SYSCALL_64_safe_stack(void); long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2); #endif diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index b35030eeec36..5db5d083c873 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -21,6 +21,9 @@ struct real_mode_header { /* SMP trampoline */ u32 trampoline_start; u32 trampoline_header; +#ifdef CONFIG_AMD_MEM_ENCRYPT + u32 sev_es_trampoline_start; +#endif #ifdef CONFIG_X86_64 u32 trampoline_pgd; #endif @@ -57,6 +60,9 @@ extern unsigned char real_mode_blob_end[]; extern unsigned long initial_code; extern unsigned long initial_gs; extern unsigned long initial_stack; +#ifdef CONFIG_AMD_MEM_ENCRYPT +extern unsigned long initial_vc_handler; +#endif extern unsigned char real_mode_blob[]; extern unsigned char real_mode_relocs[]; @@ -66,6 +72,7 @@ extern unsigned char startup_32_smp[]; extern unsigned char boot_gdt[]; #else extern unsigned char secondary_startup_64[]; +extern unsigned char secondary_startup_64_no_verify[]; #endif static inline size_t real_mode_size_needed(void) diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 6847d85400a8..3ff0d48469f2 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -54,7 +54,7 @@ #endif #ifdef CONFIG_X86_64 -#ifdef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT_XXL /* Paravirtualized systems may not have PSE or PGE available */ #define NEED_PSE 0 #define NEED_PGE 0 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 9646c300f128..7fdd4facfce7 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -222,15 +222,11 @@ #endif -#ifndef CONFIG_PARAVIRT_XXL -# define get_kernel_rpl() 0 -#endif - #define IDT_ENTRIES 256 #define NUM_EXCEPTION_VECTORS 32 /* Bitmask of exception vectors which push an error code on the stack: */ -#define EXCEPTION_ERRCODE_MASK 0x00027d00 +#define EXCEPTION_ERRCODE_MASK 0x20027d00 #define GDT_SIZE (GDT_ENTRIES*8) #define GDT_ENTRY_TLS_ENTRIES 3 diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 84b645cc8bc9..7d7a064af6ff 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -39,6 +39,8 @@ void vsmp_init(void); static inline void vsmp_init(void) { } #endif +struct pt_regs; + void setup_bios_corruption_check(void); void early_platform_quirks(void); @@ -48,7 +50,9 @@ extern void reserve_standard_io_resources(void); extern void i386_reserve_resources(void); extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp); extern unsigned long __startup_secondary_64(void); -extern int early_make_pgtable(unsigned long address); +extern void startup_64_setup_env(unsigned long 
physbase); +extern void early_setup_idt(void); +extern void __init do_early_exception(struct pt_regs *regs, int trapnr); #ifdef CONFIG_X86_INTEL_MID extern void x86_intel_mid_early_setup(void); diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h new file mode 100644 index 000000000000..cf1d957c7091 --- /dev/null +++ b/arch/x86/include/asm/sev-es.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AMD Encrypted Register State Support + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#ifndef __ASM_ENCRYPTED_STATE_H +#define __ASM_ENCRYPTED_STATE_H + +#include <linux/types.h> +#include <asm/insn.h> + +#define GHCB_SEV_INFO 0x001UL +#define GHCB_SEV_INFO_REQ 0x002UL +#define GHCB_INFO(v) ((v) & 0xfffUL) +#define GHCB_PROTO_MAX(v) (((v) >> 48) & 0xffffUL) +#define GHCB_PROTO_MIN(v) (((v) >> 32) & 0xffffUL) +#define GHCB_PROTO_OUR 0x0001UL +#define GHCB_SEV_CPUID_REQ 0x004UL +#define GHCB_CPUID_REQ_EAX 0 +#define GHCB_CPUID_REQ_EBX 1 +#define GHCB_CPUID_REQ_ECX 2 +#define GHCB_CPUID_REQ_EDX 3 +#define GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \ + (((unsigned long)reg & 3) << 30) | \ + (((unsigned long)fn) << 32)) + +#define GHCB_PROTOCOL_MAX 0x0001UL +#define GHCB_DEFAULT_USAGE 0x0000UL + +#define GHCB_SEV_CPUID_RESP 0x005UL +#define GHCB_SEV_TERMINATE 0x100UL +#define GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \ + (((((u64)reason_set) & 0x7) << 12) | \ + ((((u64)reason_val) & 0xff) << 16)) +#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0 +#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1 + +#define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff) +#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); } + +enum es_result { + ES_OK, /* All good */ + ES_UNSUPPORTED, /* Requested operation not supported */ + ES_VMM_ERROR, /* Unexpected state from the VMM */ + ES_DECODE_FAILED, /* Instruction decoding failed */ + ES_EXCEPTION, /* Instruction caused exception */ + ES_RETRY, /* Retry instruction emulation */ +}; + +struct es_fault_info { + unsigned long vector; + unsigned long error_code; + unsigned long cr2; +}; + +struct pt_regs; + +/* ES instruction emulation context */ +struct es_em_ctxt { + struct pt_regs *regs; + struct insn insn; + struct es_fault_info fi; +}; + +void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code); + +static inline u64 lower_bits(u64 val, unsigned int bits) +{ + u64 mask = (1ULL << bits) - 1; + + return (val & mask); +} + +struct real_mode_header; +enum stack_type; + +/* Early IDT entry points for #VC handler */ +extern void vc_no_ghcb(void); +extern void vc_boot_ghcb(void); +extern bool handle_vc_boot_ghcb(struct pt_regs *regs); + +#ifdef CONFIG_AMD_MEM_ENCRYPT +extern struct static_key_false sev_es_enable_key; +extern void __sev_es_ist_enter(struct pt_regs *regs); +extern void __sev_es_ist_exit(void); +static __always_inline void sev_es_ist_enter(struct pt_regs *regs) +{ + if (static_branch_unlikely(&sev_es_enable_key)) + __sev_es_ist_enter(regs); +} +static __always_inline void sev_es_ist_exit(void) +{ + if (static_branch_unlikely(&sev_es_enable_key)) + __sev_es_ist_exit(); +} +extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh); +extern void __sev_es_nmi_complete(void); +static __always_inline void sev_es_nmi_complete(void) +{ + if (static_branch_unlikely(&sev_es_enable_key)) + __sev_es_nmi_complete(); +} +extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd); +#else +static inline void sev_es_ist_enter(struct pt_regs *regs) { } +static inline void sev_es_ist_exit(void) { } +static inline int 
sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; } +static inline void sev_es_nmi_complete(void) { } +static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; } +#endif + +#endif diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 59a3e13204c3..cc177b4431ae 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -11,45 +11,47 @@ #include <linux/jump_label.h> /* - * Volatile isn't enough to prevent the compiler from reordering the - * read/write functions for the control registers and messing everything up. - * A memory clobber would solve the problem, but would prevent reordering of - * all loads stores around it, which can hurt performance. Solution is to - * use a variable and mimic reads and writes to it to enforce serialization + * The compiler should not reorder volatile asm statements with respect to each + * other: they should execute in program order. However GCC 4.9.x and 5.x have + * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder + * volatile asm. The write functions are not affected since they have memory + * clobbers preventing reordering. To prevent reads from being reordered with + * respect to writes, use a dummy memory operand. */ -extern unsigned long __force_order; + +#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL) void native_write_cr0(unsigned long val); static inline unsigned long native_read_cr0(void) { unsigned long val; - asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER); return val; } static __always_inline unsigned long native_read_cr2(void) { unsigned long val; - asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER); return val; } static __always_inline void native_write_cr2(unsigned long val) { - asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr2": : "r" (val) : "memory"); } static inline unsigned long __native_read_cr3(void) { unsigned long val; - asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER); return val; } static inline void native_write_cr3(unsigned long val) { - asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); + asm volatile("mov %0,%%cr3": : "r" (val) : "memory"); } static inline unsigned long native_read_cr4(void) @@ -64,10 +66,10 @@ static inline unsigned long native_read_cr4(void) asm volatile("1: mov %%cr4, %0\n" "2:\n" _ASM_EXTABLE(1b, 2b) - : "=r" (val), "=m" (__force_order) : "0" (0)); + : "=r" (val) : "0" (0), __FORCE_ORDER); #else /* CR4 always exists on x86_64. */ - asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); + asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER); #endif return val; } @@ -234,6 +236,76 @@ static inline void clwb(volatile void *__p) #define nop() asm volatile ("nop") +static inline void serialize(void) +{ + /* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */ + asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory"); +} + +/* The dst parameter must be 64-bytes aligned */ +static inline void movdir64b(void *dst, const void *src) +{ + const struct { char _[64]; } *__src = src; + struct { char _[64]; } *__dst = dst; + + /* + * MOVDIR64B %(rdx), rax. 
+ * + * Both __src and __dst must be memory constraints in order to tell the + * compiler that no other memory accesses should be reordered around + * this one. + * + * Also, both must be supplied as lvalues because this tells + * the compiler what the object is (its size) the instruction accesses. + * I.e., not the pointers but what they point to, thus the deref'ing '*'. + */ + asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02" + : "+m" (*__dst) + : "m" (*__src), "a" (__dst), "d" (__src)); +} + +/** + * enqcmds - Enqueue a command in supervisor (CPL0) mode + * @dst: destination, in MMIO space (must be 512-bit aligned) + * @src: 512 bits memory operand + * + * The ENQCMDS instruction allows software to write a 512-bit command to + * a 512-bit-aligned special MMIO region that supports the instruction. + * A return status is loaded into the ZF flag in the RFLAGS register. + * ZF = 0 equates to success, and ZF = 1 indicates retry or error. + * + * This function issues the ENQCMDS instruction to submit data from + * kernel space to MMIO space, in a unit of 512 bits. Order of data access + * is not guaranteed, nor is a memory barrier performed afterwards. It + * returns 0 on success and -EAGAIN on failure. + * + * Warning: Do not use this helper unless your driver has checked that the + * ENQCMDS instruction is supported on the platform and the device accepts + * ENQCMDS. + */ +static inline int enqcmds(void __iomem *dst, const void *src) +{ + const struct { char _[64]; } *__src = src; + struct { char _[64]; } *__dst = dst; + int zf; + + /* + * ENQCMDS %(rdx), rax + * + * See movdir64b()'s comment on operand specification. + */ + asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90" + CC_SET(z) + : CC_OUT(z) (zf), "+m" (*__dst) + : "m" (*__src), "a" (__dst), "d" (__src)); + + /* Submission failure is indicated via EFLAGS.ZF=1 */ + if (zf) + return -EAGAIN; + + return 0; +} + #endif /* __KERNEL__ */ #endif /* _ASM_X86_SPECIAL_INSNS_H */ diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 5ae5a68e469d..49600643faba 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -35,6 +35,8 @@ bool in_entry_stack(unsigned long *stack, struct stack_info *info); int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask); +bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, + struct stack_info *info); const char *stack_type_name(enum stack_type type); diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h new file mode 100644 index 000000000000..c37f11999d0c --- /dev/null +++ b/arch/x86/include/asm/static_call.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_STATIC_CALL_H +#define _ASM_STATIC_CALL_H + +#include <asm/text-patching.h> + +/* + * For CONFIG_HAVE_STATIC_CALL_INLINE, this is a temporary trampoline which + * uses the current value of the key->func pointer to do an indirect jump to + * the function. This trampoline is only used during boot, before the call + * sites get patched by static_call_update(). The name of this trampoline has + * a magical aspect: objtool uses it to find static call sites so it can create + * the .static_call_sites section. + * + * For CONFIG_HAVE_STATIC_CALL, this is a permanent trampoline which + * does a direct jump to the function. The direct jump gets patched by + * static_call_update(). 
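[Editor's illustration] The new asm/static_call.h above only supplies the trampoline plumbing; generic code reaches it through the static_call() API from <linux/static_call.h> introduced alongside it. A minimal usage sketch under that assumption is below (the key and function names are made up for illustration):

    #include <linux/static_call.h>

    static int default_process(int x)
    {
            return x;
    }

    static int optimized_process(int x)
    {
            return x * 2;
    }

    /* Emits the arch trampoline via ARCH_DEFINE_STATIC_CALL_TRAMP() behind the scenes. */
    DEFINE_STATIC_CALL(example_process, default_process);

    static int example_caller(int x)
    {
            /* Compiles to a direct call (or a call through the trampoline). */
            return static_call(example_process)(x);
    }

    static void example_switch_impl(void)
    {
            /* Re-patches the trampoline, or every call site with HAVE_STATIC_CALL_INLINE. */
            static_call_update(example_process, &optimized_process);
    }

Compared with an indirect function pointer, the patched direct call avoids retpoline overhead on the fast path, which is the motivation for the HAVE_STATIC_CALL plumbing added in this series.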
+ * + * Having the trampoline in a special section forces GCC to emit a JMP.d32 when + * it does tail-call optimization on the call; since you cannot compute the + * relative displacement across sections. + */ + +#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns) \ + asm(".pushsection .static_call.text, \"ax\" \n" \ + ".align 4 \n" \ + ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \ + STATIC_CALL_TRAMP_STR(name) ": \n" \ + insns " \n" \ + ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \ + ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \ + ".popsection \n") + +#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \ + __ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)") + +#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \ + __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop") + +#endif /* _ASM_STATIC_CALL_H */ diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 75314c3dbe47..6e450827f677 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -82,38 +82,6 @@ int strcmp(const char *cs, const char *ct); #endif -#define __HAVE_ARCH_MEMCPY_MCSAFE 1 -__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src, - size_t cnt); -DECLARE_STATIC_KEY_FALSE(mcsafe_key); - -/** - * memcpy_mcsafe - copy memory with indication if a machine check happened - * - * @dst: destination address - * @src: source address - * @cnt: number of bytes to copy - * - * Low level memory copy function that catches machine checks - * We only call into the "safe" function on systems that can - * actually do machine check recovery. Everyone else can just - * use memcpy(). - * - * Return 0 for success, or number of bytes not copied if there was an - * exception. - */ -static __always_inline __must_check unsigned long -memcpy_mcsafe(void *dst, const void *src, size_t cnt) -{ -#ifdef CONFIG_X86_MCE - if (static_branch_unlikely(&mcsafe_key)) - return __memcpy_mcsafe(dst, src, cnt); - else -#endif - memcpy(dst, src, cnt); - return 0; -} - #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE #define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1 void __memcpy_flushcache(void *dst, const void *src, size_t cnt); diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 8a1f5382a4ea..cf13f9e78585 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -150,14 +150,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define SVM_NESTED_CTL_NP_ENABLE BIT(0) #define SVM_NESTED_CTL_SEV_ENABLE BIT(1) -struct __attribute__ ((__packed__)) vmcb_seg { +struct vmcb_seg { u16 selector; u16 attrib; u32 limit; u64 base; -}; +} __packed; -struct __attribute__ ((__packed__)) vmcb_save_area { +struct vmcb_save_area { struct vmcb_seg es; struct vmcb_seg cs; struct vmcb_seg ss; @@ -200,20 +200,67 @@ struct __attribute__ ((__packed__)) vmcb_save_area { u64 br_to; u64 last_excp_from; u64 last_excp_to; -}; + /* + * The following part of the save area is valid only for + * SEV-ES guests when referenced through the GHCB. 
+ */ + u8 reserved_7[104]; + u64 reserved_8; /* rax already available at 0x01f8 */ + u64 rcx; + u64 rdx; + u64 rbx; + u64 reserved_9; /* rsp already available at 0x01d8 */ + u64 rbp; + u64 rsi; + u64 rdi; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u8 reserved_10[16]; + u64 sw_exit_code; + u64 sw_exit_info_1; + u64 sw_exit_info_2; + u64 sw_scratch; + u8 reserved_11[56]; + u64 xcr0; + u8 valid_bitmap[16]; + u64 x87_state_gpa; +} __packed; + +struct ghcb { + struct vmcb_save_area save; + u8 reserved_save[2048 - sizeof(struct vmcb_save_area)]; + + u8 shared_buffer[2032]; + + u8 reserved_1[10]; + u16 protocol_version; /* negotiated SEV-ES/GHCB protocol version */ + u32 ghcb_usage; +} __packed; + + +#define EXPECTED_VMCB_SAVE_AREA_SIZE 1032 +#define EXPECTED_VMCB_CONTROL_AREA_SIZE 256 +#define EXPECTED_GHCB_SIZE PAGE_SIZE static inline void __unused_size_checks(void) { - BUILD_BUG_ON(sizeof(struct vmcb_save_area) != 0x298); - BUILD_BUG_ON(sizeof(struct vmcb_control_area) != 256); + BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE); + BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE); + BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE); } -struct __attribute__ ((__packed__)) vmcb { +struct vmcb { struct vmcb_control_area control; u8 reserved_control[1024 - sizeof(struct vmcb_control_area)]; struct vmcb_save_area save; -}; +} __packed; #define SVM_CPUID_FUNC 0x8000000a @@ -298,4 +345,47 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) +/* GHCB Accessor functions */ + +#define GHCB_BITMAP_IDX(field) \ + (offsetof(struct vmcb_save_area, field) / sizeof(u64)) + +#define DEFINE_GHCB_ACCESSORS(field) \ + static inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \ + { \ + return test_bit(GHCB_BITMAP_IDX(field), \ + (unsigned long *)&ghcb->save.valid_bitmap); \ + } \ + \ + static inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \ + { \ + __set_bit(GHCB_BITMAP_IDX(field), \ + (unsigned long *)&ghcb->save.valid_bitmap); \ + ghcb->save.field = value; \ + } + +DEFINE_GHCB_ACCESSORS(cpl) +DEFINE_GHCB_ACCESSORS(rip) +DEFINE_GHCB_ACCESSORS(rsp) +DEFINE_GHCB_ACCESSORS(rax) +DEFINE_GHCB_ACCESSORS(rcx) +DEFINE_GHCB_ACCESSORS(rdx) +DEFINE_GHCB_ACCESSORS(rbx) +DEFINE_GHCB_ACCESSORS(rbp) +DEFINE_GHCB_ACCESSORS(rsi) +DEFINE_GHCB_ACCESSORS(rdi) +DEFINE_GHCB_ACCESSORS(r8) +DEFINE_GHCB_ACCESSORS(r9) +DEFINE_GHCB_ACCESSORS(r10) +DEFINE_GHCB_ACCESSORS(r11) +DEFINE_GHCB_ACCESSORS(r12) +DEFINE_GHCB_ACCESSORS(r13) +DEFINE_GHCB_ACCESSORS(r14) +DEFINE_GHCB_ACCESSORS(r15) +DEFINE_GHCB_ACCESSORS(sw_exit_code) +DEFINE_GHCB_ACCESSORS(sw_exit_info_1) +DEFINE_GHCB_ACCESSORS(sw_exit_info_2) +DEFINE_GHCB_ACCESSORS(sw_scratch) +DEFINE_GHCB_ACCESSORS(xcr0) + #endif diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h index fdb5b356e59b..0fd4a9dfb29c 100644 --- a/arch/x86/include/asm/sync_core.h +++ b/arch/x86/include/asm/sync_core.h @@ -5,6 +5,7 @@ #include <linux/preempt.h> #include <asm/processor.h> #include <asm/cpufeature.h> +#include <asm/special_insns.h> #ifdef CONFIG_X86_32 static inline void iret_to_self(void) @@ -46,22 +47,34 @@ static inline void iret_to_self(void) * * b) Text was modified on a different CPU, may subsequently be * executed on this CPU, and you want to make sure the new version - * gets executed. This generally means you're calling this in a IPI. + * gets executed. 
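[Editor's illustration] The DEFINE_GHCB_ACCESSORS() block above generates ghcb_set_<field>() and ghcb_<field>_is_valid() helpers that update the GHCB's valid_bitmap alongside each register write. A hedged sketch of how a #VC handler might use them when preparing and checking a VMGEXIT request follows; the function names and the surrounding exit-code plumbing are simplified and illustrative, not the patch's actual handler.

    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/svm.h>

    /*
     * Illustrative only: fill the GHCB for a hypothetical VMGEXIT request.
     * Only fields written through ghcb_set_*() are marked valid, so the
     * hypervisor knows which parts of the save area it may consume.
     */
    static void example_fill_ghcb(struct ghcb *ghcb, u64 exit_code,
                                  u64 exit_info_1, u64 exit_info_2)
    {
            /* Start from a clean valid_bitmap so stale fields are not exposed. */
            memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));

            ghcb_set_sw_exit_code(ghcb, exit_code);
            ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
            ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
    }

    static bool example_result_usable(struct ghcb *ghcb)
    {
            /* Trust RAX only if the VMM marked it valid on the way back. */
            return ghcb_rax_is_valid(ghcb);
    }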
This generally means you're calling this in an IPI. * * If you're calling this for a different reason, you're probably doing * it wrong. + * + * Like all of Linux's memory ordering operations, this is a + * compiler barrier as well. */ static inline void sync_core(void) { /* - * There are quite a few ways to do this. IRET-to-self is nice - * because it works on every CPU, at any CPL (so it's compatible - * with paravirtualization), and it never exits to a hypervisor. - * The only down sides are that it's a bit slow (it seems to be - * a bit more than 2x slower than the fastest options) and that - * it unmasks NMIs. The "push %cs" is needed because, in - * paravirtual environments, __KERNEL_CS may not be a valid CS - * value when we do IRET directly. + * The SERIALIZE instruction is the most straightforward way to + * do this, but it is not universally available. + */ + if (static_cpu_has(X86_FEATURE_SERIALIZE)) { + serialize(); + return; + } + + /* + * For all other processors, there are quite a few ways to do this. + * IRET-to-self is nice because it works on every CPU, at any CPL + * (so it's compatible with paravirtualization), and it never exits + * to a hypervisor. The only downsides are that it's a bit slow + * (it seems to be a bit more than 2x slower than the fastest + * options) and that it unmasks NMIs. The "push %cs" is needed, + * because in paravirtual environments __KERNEL_CS may not be a + * valid CS value when we do IRET directly. * * In case NMI unmasking or performance ever becomes a problem, * the next best option appears to be MOV-to-CR2 and an @@ -71,9 +84,6 @@ static inline void sync_core(void) * CPUID is the conventional way, but it's nasty: it doesn't * exist on some 486-like CPUs, and it usually exits to a * hypervisor. - * - * Like all of Linux's memory ordering operations, this is a - * compiler barrier as well. 
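A short sketch of case (b) described above, forcing remote CPUs to serialize after cross-modifying code; this mirrors the usual text-poking pattern but is illustrative rather than the actual implementation:

    #include <linux/smp.h>
    #include <asm/sync_core.h>

    static void do_sync_core(void *info)
    {
            sync_core();    /* runs in IPI context on each CPU */
    }

    static void after_text_patch_example(void)
    {
            /*
             * Once the instruction bytes have been rewritten, make sure no
             * CPU keeps executing a stale decode of the old instruction.
             */
            on_each_cpu(do_sync_core, NULL, 1);
    }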
*/ iret_to_self(); } diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index 6593b42cb379..b7421780e4e9 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -53,6 +53,9 @@ extern void text_poke_finish(void); #define INT3_INSN_SIZE 1 #define INT3_INSN_OPCODE 0xCC +#define RET_INSN_SIZE 1 +#define RET_INSN_OPCODE 0xC3 + #define CALL_INSN_SIZE 5 #define CALL_INSN_OPCODE 0xE8 @@ -73,6 +76,7 @@ static __always_inline int text_opcode_size(u8 opcode) switch(opcode) { __CASE(INT3); + __CASE(RET); __CASE(CALL); __CASE(JMP32); __CASE(JMP8); @@ -141,11 +145,26 @@ void int3_emulate_push(struct pt_regs *regs, unsigned long val) } static __always_inline +unsigned long int3_emulate_pop(struct pt_regs *regs) +{ + unsigned long val = *(unsigned long *)regs->sp; + regs->sp += sizeof(unsigned long); + return val; +} + +static __always_inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) { int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); int3_emulate_jmp(regs, func); } + +static __always_inline +void int3_emulate_ret(struct pt_regs *regs) +{ + unsigned long ip = int3_emulate_pop(regs); + int3_emulate_jmp(regs, ip); +} #endif /* !CONFIG_UML_X86 */ #endif /* _ASM_X86_TEXT_PATCHING_H */ diff --git a/arch/x86/include/asm/trap_pf.h b/arch/x86/include/asm/trap_pf.h new file mode 100644 index 000000000000..305bc1214aef --- /dev/null +++ b/arch/x86/include/asm/trap_pf.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_TRAP_PF_H +#define _ASM_X86_TRAP_PF_H + +/* + * Page fault error code bits: + * + * bit 0 == 0: no page found 1: protection fault + * bit 1 == 0: read access 1: write access + * bit 2 == 0: kernel-mode access 1: user-mode access + * bit 3 == 1: use of reserved bit detected + * bit 4 == 1: fault was an instruction fetch + * bit 5 == 1: protection keys block access + */ +enum x86_pf_error_code { + X86_PF_PROT = 1 << 0, + X86_PF_WRITE = 1 << 1, + X86_PF_USER = 1 << 2, + X86_PF_RSVD = 1 << 3, + X86_PF_INSTR = 1 << 4, + X86_PF_PK = 1 << 5, +}; + +#endif /* _ASM_X86_TRAP_PF_H */ diff --git a/arch/x86/include/asm/trapnr.h b/arch/x86/include/asm/trapnr.h index 082f45631fa9..f5d2325aa0b7 100644 --- a/arch/x86/include/asm/trapnr.h +++ b/arch/x86/include/asm/trapnr.h @@ -26,6 +26,7 @@ #define X86_TRAP_XF 19 /* SIMD Floating-Point Exception */ #define X86_TRAP_VE 20 /* Virtualization Exception */ #define X86_TRAP_CP 21 /* Control Protection Exception */ +#define X86_TRAP_VC 29 /* VMM Communication Exception */ #define X86_TRAP_IRET 32 /* IRET Exception */ #endif diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 714b1a30e7b0..7f7200021bd1 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -8,12 +8,14 @@ #include <asm/debugreg.h> #include <asm/idtentry.h> #include <asm/siginfo.h> /* TRAP_TRACE, ... 
*/ +#include <asm/trap_pf.h> #ifdef CONFIG_X86_64 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs); asmlinkage __visible notrace struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s); void __init trap_init(void); +asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs); #endif #ifdef CONFIG_X86_F00F_BUG @@ -35,28 +37,12 @@ extern int panic_on_unrecovered_nmi; void math_emulate(struct math_emu_info *); +bool fault_in_kernel_space(unsigned long address); + #ifdef CONFIG_VMAP_STACK void __noreturn handle_stack_overflow(const char *message, struct pt_regs *regs, unsigned long fault_address); #endif -/* - * Page fault error code bits: - * - * bit 0 == 0: no page found 1: protection fault - * bit 1 == 0: read access 1: write access - * bit 2 == 0: kernel-mode access 1: user-mode access - * bit 3 == 1: use of reserved bit detected - * bit 4 == 1: fault was an instruction fetch - * bit 5 == 1: protection keys block access - */ -enum x86_pf_error_code { - X86_PF_PROT = 1 << 0, - X86_PF_WRITE = 1 << 1, - X86_PF_USER = 1 << 2, - X86_PF_RSVD = 1 << 3, - X86_PF_INSTR = 1 << 4, - X86_PF_PK = 1 << 5, -}; #endif /* _ASM_X86_TRAPS_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index ecefaffd15d4..477c503f2753 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -96,25 +96,14 @@ static inline bool pagefault_disabled(void); likely(!__range_not_ok(addr, size, user_addr_max())); \ }) -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - * - * This gets kind of ugly. We want to return _two_ values in "get_user()" - * and yet we don't want to do any pointers, because that is too much - * of a performance impact. Thus we have a few rather ugly macros here, - * and hide all the ugliness from the user. - * - * The "__xxx" versions of the user access functions are versions that - * do not verify the address space, that must have been done previously - * with a separate "access_ok()" call (this is used when we do multiple - * accesses to the same area of user memory). - */ - extern int __get_user_1(void); extern int __get_user_2(void); extern int __get_user_4(void); extern int __get_user_8(void); +extern int __get_user_nocheck_1(void); +extern int __get_user_nocheck_2(void); +extern int __get_user_nocheck_4(void); +extern int __get_user_nocheck_8(void); extern int __get_user_bad(void); #define __uaccess_begin() stac() @@ -138,25 +127,12 @@ extern int __get_user_bad(void); #define __typefits(x,type,not) \ __builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not) -/** - * get_user - Get a simple variable from user space. - * @x: Variable to store result. - * @ptr: Source address, in user space. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * This macro copies a single simple variable from user space to kernel - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and the result of - * dereferencing @ptr must be assignable to @x without a cast. - * - * Return: zero on success, or -EFAULT on error. - * On error, the variable @x is set to zero. 
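For the page-fault error-code bits now shared via <asm/trap_pf.h>, a trivial decode helper (the helper name is hypothetical) that a fault path could use:

    #include <linux/types.h>
    #include <asm/trap_pf.h>

    /* True for a user-mode write that hit a present but protected page */
    static inline bool pf_is_user_write_prot(unsigned long error_code)
    {
            return (error_code & X86_PF_USER) &&
                   (error_code & X86_PF_WRITE) &&
                   (error_code & X86_PF_PROT);
    }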
- */ /* + * This is used for both get_user() and __get_user() to expand to + * the proper special function call that has odd calling conventions + * due to returning both a value and an error, and that depends on + * the size of the pointer passed in. + * * Careful: we have to cast the result to the type of the pointer * for sign reasons. * @@ -169,13 +145,12 @@ extern int __get_user_bad(void); * Clang/LLVM cares about the size of the register, but still wants * the base register for something that ends up being a pair. */ -#define get_user(x, ptr) \ +#define do_get_user_call(fn,x,ptr) \ ({ \ int __ret_gu; \ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ __chk_user_ptr(ptr); \ - might_fault(); \ - asm volatile("call __get_user_%P4" \ + asm volatile("call __" #fn "_%P4" \ : "=a" (__ret_gu), "=r" (__val_gu), \ ASM_CALL_CONSTRAINT \ : "0" (ptr), "i" (sizeof(*(ptr)))); \ @@ -183,10 +158,48 @@ extern int __get_user_bad(void); __builtin_expect(__ret_gu, 0); \ }) -#define __put_user_x(size, x, ptr, __ret_pu) \ - asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ - : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") +/** + * get_user - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Return: zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); }) +/** + * __get_user - Get a simple variable from user space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Return: zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr) #ifdef CONFIG_X86_32 @@ -199,25 +212,41 @@ extern int __get_user_bad(void); : : "A" (x), "r" (addr) \ : : label) -#define __put_user_x8(x, ptr, __ret_pu) \ - asm volatile("call __put_user_8" : "=a" (__ret_pu) \ - : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") #else #define __put_user_goto_u64(x, ptr, label) \ __put_user_goto(x, ptr, "q", "er", label) -#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) #endif extern void __put_user_bad(void); /* * Strange magic calling convention: pointer in %ecx, - * value in %eax(:%edx), return value in %eax. clobbers %rbx + * value in %eax(:%edx), return value in %ecx. 
clobbers %rbx */ extern void __put_user_1(void); extern void __put_user_2(void); extern void __put_user_4(void); extern void __put_user_8(void); +extern void __put_user_nocheck_1(void); +extern void __put_user_nocheck_2(void); +extern void __put_user_nocheck_4(void); +extern void __put_user_nocheck_8(void); + +#define do_put_user_call(fn,x,ptr) \ +({ \ + int __ret_pu; \ + register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX); \ + __chk_user_ptr(ptr); \ + __val_pu = (x); \ + asm volatile("call __" #fn "_%P[size]" \ + : "=c" (__ret_pu), \ + ASM_CALL_CONSTRAINT \ + : "0" (ptr), \ + "r" (__val_pu), \ + [size] "i" (sizeof(*(ptr))) \ + :"ebx"); \ + __builtin_expect(__ret_pu, 0); \ +}) /** * put_user - Write a simple value into user space. @@ -236,32 +265,29 @@ extern void __put_user_8(void); * * Return: zero on success, or -EFAULT on error. */ -#define put_user(x, ptr) \ -({ \ - int __ret_pu; \ - __typeof__(*(ptr)) __pu_val; \ - __chk_user_ptr(ptr); \ - might_fault(); \ - __pu_val = x; \ - switch (sizeof(*(ptr))) { \ - case 1: \ - __put_user_x(1, __pu_val, ptr, __ret_pu); \ - break; \ - case 2: \ - __put_user_x(2, __pu_val, ptr, __ret_pu); \ - break; \ - case 4: \ - __put_user_x(4, __pu_val, ptr, __ret_pu); \ - break; \ - case 8: \ - __put_user_x8(__pu_val, ptr, __ret_pu); \ - break; \ - default: \ - __put_user_x(X, __pu_val, ptr, __ret_pu); \ - break; \ - } \ - __builtin_expect(__ret_pu, 0); \ -}) +#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); }) + +/** + * __put_user - Write a simple value into user space, with less checking. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Return: zero on success, or -EFAULT on error. 
+ */ +#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr) #define __put_user_size(x, ptr, size, label) \ do { \ @@ -284,6 +310,55 @@ do { \ } \ } while (0) +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT + +#ifdef CONFIG_X86_32 +#define __get_user_asm_u64(x, ptr, label) do { \ + unsigned int __gu_low, __gu_high; \ + const unsigned int __user *__gu_ptr; \ + __gu_ptr = (const void __user *)(ptr); \ + __get_user_asm(__gu_low, ptr, "l", "=r", label); \ + __get_user_asm(__gu_high, ptr+1, "l", "=r", label); \ + (x) = ((unsigned long long)__gu_high << 32) | __gu_low; \ +} while (0) +#else +#define __get_user_asm_u64(x, ptr, label) \ + __get_user_asm(x, ptr, "q", "=r", label) +#endif + +#define __get_user_size(x, ptr, size, label) \ +do { \ + __chk_user_ptr(ptr); \ + switch (size) { \ + unsigned char x_u8__; \ + case 1: \ + __get_user_asm(x_u8__, ptr, "b", "=q", label); \ + (x) = x_u8__; \ + break; \ + case 2: \ + __get_user_asm(x, ptr, "w", "=r", label); \ + break; \ + case 4: \ + __get_user_asm(x, ptr, "l", "=r", label); \ + break; \ + case 8: \ + __get_user_asm_u64(x, ptr, label); \ + break; \ + default: \ + (x) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_asm(x, addr, itype, ltype, label) \ + asm_volatile_goto("\n" \ + "1: mov"itype" %[umem],%[output]\n" \ + _ASM_EXTABLE_UA(1b, %l2) \ + : [output] ltype(x) \ + : [umem] "m" (__m(addr)) \ + : : label) + +#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT + #ifdef CONFIG_X86_32 #define __get_user_asm_u64(x, ptr, retval) \ ({ \ @@ -343,7 +418,7 @@ do { \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: mov %[efault],%[errout]\n" \ - " xor"itype" %[output],%[output]\n" \ + " xorl %k[output],%k[output]\n" \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE_UA(1b, 3b) \ @@ -352,33 +427,7 @@ do { \ : [umem] "m" (__m(addr)), \ [efault] "i" (-EFAULT), "0" (err)) -#define __put_user_nocheck(x, ptr, size) \ -({ \ - __label__ __pu_label; \ - int __pu_err = -EFAULT; \ - __typeof__(*(ptr)) __pu_val = (x); \ - __typeof__(ptr) __pu_ptr = (ptr); \ - __typeof__(size) __pu_size = (size); \ - __uaccess_begin(); \ - __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \ - __pu_err = 0; \ -__pu_label: \ - __uaccess_end(); \ - __builtin_expect(__pu_err, 0); \ -}) - -#define __get_user_nocheck(x, ptr, size) \ -({ \ - int __gu_err; \ - __inttype(*(ptr)) __gu_val; \ - __typeof__(ptr) __gu_ptr = (ptr); \ - __typeof__(size) __gu_size = (size); \ - __uaccess_begin_nospec(); \ - __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err); \ - __uaccess_end(); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ - __builtin_expect(__gu_err, 0); \ -}) +#endif // CONFIG_CC_ASM_GOTO_OUTPUT /* FIXME: this hack is definitely wrong -AK */ struct __large_struct { unsigned long buf[100]; }; @@ -396,55 +445,6 @@ struct __large_struct { unsigned long buf[100]; }; : : ltype(x), "m" (__m(addr)) \ : : label) -/** - * __get_user - Get a simple variable from user space, with less checking. - * @x: Variable to store result. - * @ptr: Source address, in user space. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * This macro copies a single simple variable from user space to kernel - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and the result of - * dereferencing @ptr must be assignable to @x without a cast. - * - * Caller must check the pointer with access_ok() before calling this - * function. 
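A usage sketch contrasting the checked and unchecked variants defined above; function and variable names are illustrative:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static int bump_user_counter(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))                /* validates the pointer itself */
                    return -EFAULT;

            if (!access_ok(uptr, sizeof(*uptr)))    /* explicit check ... */
                    return -EFAULT;
            return __put_user(val + 1, uptr);       /* ... so the cheaper variant is fine */
    }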
- * - * Return: zero on success, or -EFAULT on error. - * On error, the variable @x is set to zero. - */ - -#define __get_user(x, ptr) \ - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) - -/** - * __put_user - Write a simple value into user space, with less checking. - * @x: Value to copy to user space. - * @ptr: Destination address, in user space. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * This macro copies a single simple value from kernel space to user - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and @x must be assignable - * to the result of dereferencing @ptr. - * - * Caller must check the pointer with access_ok() before calling this - * function. - * - * Return: zero on success, or -EFAULT on error. - */ - -#define __put_user(x, ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) - extern unsigned long copy_from_user_nmi(void *to, const void __user *from, unsigned long n); extern __must_check long @@ -455,6 +455,15 @@ extern __must_check long strnlen_user(const char __user *str, long n); unsigned long __must_check clear_user(void __user *mem, unsigned long len); unsigned long __must_check __clear_user(void __user *mem, unsigned long len); +#ifdef CONFIG_ARCH_HAS_COPY_MC +unsigned long __must_check +copy_mc_to_kernel(void *to, const void *from, unsigned len); +#define copy_mc_to_kernel copy_mc_to_kernel + +unsigned long __must_check +copy_mc_to_user(void *to, const void *from, unsigned len); +#endif + /* * movsl can be slow when source and dest are not both 8-byte aligned */ @@ -494,6 +503,14 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt #define unsafe_put_user(x, ptr, label) \ __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label) +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT +#define unsafe_get_user(x, ptr, err_label) \ +do { \ + __inttype(*(ptr)) __gu_val; \ + __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ +} while (0) +#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT #define unsafe_get_user(x, ptr, err_label) \ do { \ int __gu_err; \ @@ -502,6 +519,7 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ if (unlikely(__gu_err)) goto err_label; \ } while (0) +#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT /* * We want the unsafe accessors to always be inlined and use @@ -528,6 +546,11 @@ do { \ #define HAVE_GET_KERNEL_NOFAULT +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT +#define __get_kernel_nofault(dst, src, type, err_label) \ + __get_user_size(*((type *)(dst)), (__force type __user *)(src), \ + sizeof(type), err_label) +#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ int __kr_err; \ @@ -537,6 +560,7 @@ do { \ if (unlikely(__kr_err)) \ goto err_label; \ } while (0) +#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT #define __put_kernel_nofault(dst, src, type, err_label) \ __put_user_size(*((type *)(src)), (__force type __user *)(dst), \ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index bc10e3dc64fe..e7265a552f4f 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -47,22 +47,6 @@ copy_user_generic(void *to, const void *from, unsigned len) } static __always_inline __must_check unsigned long -copy_to_user_mcsafe(void *to, const void *from, unsigned len) -{ - unsigned long ret; 
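A hedged sketch of the bracketing pattern the unsafe_*_user() accessors above are meant for; user_access_begin()/user_access_end() must surround the accesses, and the fault label handles the asm-goto exit (names are illustrative):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static int add_one_in_place(int __user *uptr)
    {
            int val;

            if (!user_access_begin(uptr, sizeof(*uptr)))
                    return -EFAULT;
            unsafe_get_user(val, uptr, Efault);     /* jumps to Efault on fault */
            unsafe_put_user(val + 1, uptr, Efault);
            user_access_end();
            return 0;

    Efault:
            user_access_end();
            return -EFAULT;
    }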
- - __uaccess_begin(); - /* - * Note, __memcpy_mcsafe() is explicitly used since it can - * handle exceptions / faults. memcpy_mcsafe() may fall back to - * memcpy() which lacks this handling. - */ - ret = __memcpy_mcsafe(to, from, len); - __uaccess_end(); - return ret; -} - -static __always_inline __must_check unsigned long raw_copy_from_user(void *dst, const void __user *src, unsigned long size) { return copy_user_generic(dst, (__force void *)src, size); @@ -102,8 +86,4 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) kasan_check_write(dst, size); return __copy_user_flushcache(dst, src, size); } - -unsigned long -mcsafe_handle_tail(char *to, char *from, unsigned len); - #endif /* _ASM_X86_UACCESS_64_H */ diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h index 7d903fdb3f43..664d4610d700 100644 --- a/arch/x86/include/asm/unwind_hints.h +++ b/arch/x86/include/asm/unwind_hints.h @@ -1,51 +1,17 @@ #ifndef _ASM_X86_UNWIND_HINTS_H #define _ASM_X86_UNWIND_HINTS_H +#include <linux/objtool.h> + #include "orc_types.h" #ifdef __ASSEMBLY__ -/* - * In asm, there are two kinds of code: normal C-type callable functions and - * the rest. The normal callable functions can be called by other code, and - * don't do anything unusual with the stack. Such normal callable functions - * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this - * category. In this case, no special debugging annotations are needed because - * objtool can automatically generate the ORC data for the ORC unwinder to read - * at runtime. - * - * Anything which doesn't fall into the above category, such as syscall and - * interrupt handlers, tends to not be called directly by other functions, and - * often does unusual non-C-function-type things with the stack pointer. Such - * code needs to be annotated such that objtool can understand it. The - * following CFI hint macros are for this type of code. - * - * These macros provide hints to objtool about the state of the stack at each - * instruction. Objtool starts from the hints and follows the code flow, - * making automatic CFI adjustments when it sees pushes and pops, filling out - * the debuginfo as necessary. It will also warn if it sees any - * inconsistencies. - */ -.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL end=0 -#ifdef CONFIG_STACK_VALIDATION -.Lunwind_hint_ip_\@: - .pushsection .discard.unwind_hints - /* struct unwind_hint */ - .long .Lunwind_hint_ip_\@ - . 
- .short \sp_offset - .byte \sp_reg - .byte \type - .byte \end - .balign 4 - .popsection -#endif -.endm - .macro UNWIND_HINT_EMPTY - UNWIND_HINT sp_reg=ORC_REG_UNDEFINED end=1 + UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1 .endm -.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0 +.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 .if \base == %rsp .if \indirect .set sp_reg, ORC_REG_SP_INDIRECT @@ -66,24 +32,24 @@ .set sp_offset, \offset - .if \iret - .set type, ORC_TYPE_REGS_IRET + .if \partial + .set type, UNWIND_HINT_TYPE_REGS_PARTIAL .elseif \extra == 0 - .set type, ORC_TYPE_REGS_IRET + .set type, UNWIND_HINT_TYPE_REGS_PARTIAL .set sp_offset, \offset + (16*8) .else - .set type, ORC_TYPE_REGS + .set type, UNWIND_HINT_TYPE_REGS .endif UNWIND_HINT sp_reg=sp_reg sp_offset=sp_offset type=type .endm .macro UNWIND_HINT_IRET_REGS base=%rsp offset=0 - UNWIND_HINT_REGS base=\base offset=\offset iret=1 + UNWIND_HINT_REGS base=\base offset=\offset partial=1 .endm .macro UNWIND_HINT_FUNC sp_offset=8 - UNWIND_HINT sp_offset=\sp_offset + UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=\sp_offset type=UNWIND_HINT_TYPE_CALL .endm /* @@ -92,7 +58,7 @@ * initial_func_cfi. */ .macro UNWIND_HINT_RET_OFFSET sp_offset=8 - UNWIND_HINT type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset + UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset .endm #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index 70050d0136c3..08b3d810dfba 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h @@ -5,8 +5,9 @@ /* * UV BIOS layer definitions. * - * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. - * Copyright (c) Russ Anderson <rja@sgi.com> + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved. 
+ * Copyright (c) Russ Anderson <rja@sgi.com> */ #include <linux/rtc.h> @@ -71,6 +72,11 @@ struct uv_gam_range_entry { u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */ }; +#define UV_AT_SIZE 8 /* 7 character arch type + NULL char */ +struct uv_arch_type_entry { + char archtype[UV_AT_SIZE]; +}; + #define UV_SYSTAB_SIG "UVST" #define UV_SYSTAB_VERSION_1 1 /* UV2/3 BIOS version */ #define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */ @@ -79,10 +85,14 @@ struct uv_gam_range_entry { #define UV_SYSTAB_VERSION_UV4_3 0x403 /* - GAM Range PXM Value */ #define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_3 +#define UV_SYSTAB_VERSION_UV5 0x500 /* UV5 GAM base version */ +#define UV_SYSTAB_VERSION_UV5_LATEST UV_SYSTAB_VERSION_UV5 + #define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */ #define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */ #define UV_SYSTAB_TYPE_GAM_RNG_TBL 2 /* GAM entry table */ -#define UV_SYSTAB_TYPE_MAX 3 +#define UV_SYSTAB_TYPE_ARCH_TYPE 3 /* UV arch type */ +#define UV_SYSTAB_TYPE_MAX 4 /* * The UV system table describes specific firmware @@ -133,6 +143,7 @@ extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *); extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus); extern int uv_bios_init(void); +extern unsigned long get_uv_systab_phys(bool msg); extern unsigned long sn_rtc_cycles_per_second; extern int uv_type; diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index e48aea9ba47d..172d3e4a9e4b 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h @@ -35,10 +35,8 @@ extern int is_uv_hubbed(int uvtype); extern void uv_cpu_init(void); extern void uv_nmi_init(void); extern void uv_system_init(void); -extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, - const struct flush_tlb_info *info); -#else /* X86_UV */ +#else /* !X86_UV */ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } static inline bool is_early_uv_system(void) { return 0; } diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h deleted file mode 100644 index cd24804955d7..000000000000 --- a/arch/x86/include/asm/uv/uv_bau.h +++ /dev/null @@ -1,755 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * SGI UV Broadcast Assist Unit definitions - * - * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved. - */ - -#ifndef _ASM_X86_UV_UV_BAU_H -#define _ASM_X86_UV_UV_BAU_H - -#include <linux/bitmap.h> -#include <asm/idtentry.h> - -#define BITSPERBYTE 8 - -/* - * Broadcast Assist Unit messaging structures - * - * Selective Broadcast activations are induced by software action - * specifying a particular 8-descriptor "set" via a 6-bit index written - * to an MMR. - * Thus there are 64 unique 512-byte sets of SB descriptors - one set for - * each 6-bit index value. These descriptor sets are mapped in sequence - * starting with set 0 located at the address specified in the - * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512, - * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on. - * - * We will use one set for sending BAU messages from each of the - * cpu's on the uvhub. - * - * TLB shootdown will use the first of the 8 descriptors of each set. - * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set). 
- */ - -#define MAX_CPUS_PER_UVHUB 128 -#define MAX_CPUS_PER_SOCKET 64 -#define ADP_SZ 64 /* hardware-provided max. */ -#define UV_CPUS_PER_AS 32 /* hardware-provided max. */ -#define ITEMS_PER_DESC 8 -/* the 'throttle' to prevent the hardware stay-busy bug */ -#define MAX_BAU_CONCURRENT 3 -#define UV_ACT_STATUS_MASK 0x3 -#define UV_ACT_STATUS_SIZE 2 -#define UV_DISTRIBUTION_SIZE 256 -#define UV_SW_ACK_NPENDING 8 -#define UV_NET_ENDPOINT_INTD 0x28 -#define UV_PAYLOADQ_GNODE_SHIFT 49 -#define UV_PTC_BASENAME "sgi_uv/ptc_statistics" -#define UV_BAU_BASENAME "sgi_uv/bau_tunables" -#define UV_BAU_TUNABLES_DIR "sgi_uv" -#define UV_BAU_TUNABLES_FILE "bau_tunables" -#define WHITESPACE " \t\n" -#define cpubit_isset(cpu, bau_local_cpumask) \ - test_bit((cpu), (bau_local_cpumask).bits) - -/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */ -/* - * UV2: Bit 19 selects between - * (0): 10 microsecond timebase and - * (1): 80 microseconds - * we're using 560us - */ -#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL) -/* assuming UV3 is the same */ - -#define BAU_MISC_CONTROL_MULT_MASK 3 - -#define UVH_AGING_PRESCALE_SEL 0x000000b000UL -/* [30:28] URGENCY_7 an index into a table of times */ -#define BAU_URGENCY_7_SHIFT 28 -#define BAU_URGENCY_7_MASK 7 - -#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL -/* [45:40] BAU - BAU transaction timeout select - a multiplier */ -#define BAU_TRANS_SHIFT 40 -#define BAU_TRANS_MASK 0x3f - -/* - * shorten some awkward names - */ -#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT -#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT -#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT -#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD -#define PREFETCH_HINT_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT -#define SB_STATUS_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT -#define write_gmmr uv_write_global_mmr64 -#define write_lmmr uv_write_local_mmr -#define read_lmmr uv_read_local_mmr -#define read_gmmr uv_read_global_mmr64 - -/* - * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 - */ -#define DS_IDLE 0 -#define DS_ACTIVE 1 -#define DS_DESTINATION_TIMEOUT 2 -#define DS_SOURCE_TIMEOUT 3 -/* - * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2 - * values 1 and 3 will not occur - * Decoded meaning ERROR BUSY AUX ERR - * ------------------------------- ---- ----- ------- - * IDLE 0 0 0 - * BUSY (active) 0 1 0 - * SW Ack Timeout (destination) 1 0 0 - * SW Ack INTD rejected (strong NACK) 1 0 1 - * Source Side Time Out Detected 1 1 0 - * Destination Side PUT Failed 1 1 1 - */ -#define UV2H_DESC_IDLE 0 -#define UV2H_DESC_BUSY 2 -#define UV2H_DESC_DEST_TIMEOUT 4 -#define UV2H_DESC_DEST_STRONG_NACK 5 -#define UV2H_DESC_SOURCE_TIMEOUT 6 -#define UV2H_DESC_DEST_PUT_ERR 7 - -/* - * delay for 'plugged' timeout retries, in microseconds - */ -#define PLUGGED_DELAY 10 - -/* - * threshholds at which to use IPI to free resources - */ -/* after this # consecutive 'plugged' timeouts, use IPI to release resources */ -#define PLUGSB4RESET 100 -/* after this many consecutive timeouts, use IPI to release resources */ -#define TIMEOUTSB4RESET 1 -/* at this number uses of IPI to release resources, giveup the request */ -#define IPI_RESET_LIMIT 1 -/* after this # consecutive successes, bump up the throttle if it was lowered */ -#define COMPLETE_THRESHOLD 5 -/* after this # of giveups (fall back to kernel IPI's) disable the use of - the BAU for a period of 
time */ -#define GIVEUP_LIMIT 100 - -#define UV_LB_SUBNODEID 0x10 - -#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT -#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK -/* 4 bits of software ack period */ -#define UV2_ACK_MASK 0x7UL -#define UV2_ACK_UNITS_SHFT 3 -#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT - -/* - * number of entries in the destination side payload queue - */ -#define DEST_Q_SIZE 20 -/* - * number of destination side software ack resources - */ -#define DEST_NUM_RESOURCES 8 -/* - * completion statuses for sending a TLB flush message - */ -#define FLUSH_RETRY_PLUGGED 1 -#define FLUSH_RETRY_TIMEOUT 2 -#define FLUSH_GIVEUP 3 -#define FLUSH_COMPLETE 4 - -/* - * tuning the action when the numalink network is extremely delayed - */ -#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in - microseconds */ -#define CONGESTED_REPS 10 /* long delays averaged over - this many broadcasts */ -#define DISABLED_PERIOD 10 /* time for the bau to be - disabled, in seconds */ -/* see msg_type: */ -#define MSG_NOOP 0 -#define MSG_REGULAR 1 -#define MSG_RETRY 2 - -#define BAU_DESC_QUALIFIER 0x534749 - -enum uv_bau_version { - UV_BAU_V2 = 2, - UV_BAU_V3, - UV_BAU_V4, -}; - -/* - * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor) - * If the 'multilevel' flag in the header portion of the descriptor - * has been set to 0, then endpoint multi-unicast mode is selected. - * The distribution specification (32 bytes) is interpreted as a 256-bit - * distribution vector. Adjacent bits correspond to consecutive even numbered - * nodeIDs. The result of adding the index of a given bit to the 15-bit - * 'base_dest_nasid' field of the header corresponds to the - * destination nodeID associated with that specified bit. - */ -struct pnmask { - unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)]; -}; - -/* - * mask of cpu's on a uvhub - * (during initialization we need to check that unsigned long has - * enough bits for max. cpu's per uvhub) - */ -struct bau_local_cpumask { - unsigned long bits; -}; - -/* - * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor) - * only 12 bytes (96 bits) of the payload area are usable. - * An additional 3 bytes (bits 27:4) of the header address are carried - * to the next bytes of the destination payload queue. - * And an additional 2 bytes of the header Suppl_A field are also - * carried to the destination payload queue. - * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte) - * of the destination payload queue, which is written by the hardware - * with the s/w ack resource bit vector. 
- * [ effective message contents (16 bytes (128 bits) maximum), not counting - * the s/w ack bit vector ] - */ - -/** - * struct uv2_3_bau_msg_payload - defines payload for INTD transactions - * @address: Signifies a page or all TLB's of the cpu - * @sending_cpu: CPU from which the message originates - * @acknowledge_count: CPUs on the destination Hub that received the interrupt - */ -struct uv2_3_bau_msg_payload { - u64 address; - u16 sending_cpu; - u16 acknowledge_count; -}; - -/** - * struct uv4_bau_msg_payload - defines payload for INTD transactions - * @address: Signifies a page or all TLB's of the cpu - * @sending_cpu: CPU from which the message originates - * @acknowledge_count: CPUs on the destination Hub that received the interrupt - * @qualifier: Set by source to verify origin of INTD broadcast - */ -struct uv4_bau_msg_payload { - u64 address; - u16 sending_cpu; - u16 acknowledge_count; - u32 reserved:8; - u32 qualifier:24; -}; - -/* - * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor) - * see figure 9-2 of harp_sys.pdf - * assuming UV3 is the same - */ -struct uv2_3_bau_msg_header { - unsigned int base_dest_nasid:15; /* nasid of the first bit */ - /* bits 14:0 */ /* in uvhub map */ - unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */ - /* bits 19:15 */ - unsigned int rsvd_1:1; /* must be zero */ - /* bit 20 */ - /* Address bits 59:21 */ - /* bits 25:2 of address (44:21) are payload */ - /* these next 24 bits become bytes 12-14 of msg */ - /* bits 28:21 land in byte 12 */ - unsigned int replied_to:1; /* sent as 0 by the source to - byte 12 */ - /* bit 21 */ - unsigned int msg_type:3; /* software type of the - message */ - /* bits 24:22 */ - unsigned int canceled:1; /* message canceled, resource - is to be freed*/ - /* bit 25 */ - unsigned int payload_1:3; /* not currently used */ - /* bits 28:26 */ - - /* bits 36:29 land in byte 13 */ - unsigned int payload_2a:3; /* not currently used */ - unsigned int payload_2b:5; /* not currently used */ - /* bits 36:29 */ - - /* bits 44:37 land in byte 14 */ - unsigned int payload_3:8; /* not currently used */ - /* bits 44:37 */ - - unsigned int rsvd_2:7; /* reserved */ - /* bits 51:45 */ - unsigned int swack_flag:1; /* software acknowledge flag */ - /* bit 52 */ - unsigned int rsvd_3a:3; /* must be zero */ - unsigned int rsvd_3b:8; /* must be zero */ - unsigned int rsvd_3c:8; /* must be zero */ - unsigned int rsvd_3d:3; /* must be zero */ - /* bits 74:53 */ - unsigned int fairness:3; /* usually zero */ - /* bits 77:75 */ - - unsigned int sequence:16; /* message sequence number */ - /* bits 93:78 Suppl_A */ - unsigned int chaining:1; /* next descriptor is part of - this activation*/ - /* bit 94 */ - unsigned int multilevel:1; /* multi-level multicast - format */ - /* bit 95 */ - unsigned int rsvd_4:24; /* ordered / source node / - source subnode / aging - must be zero */ - /* bits 119:96 */ - unsigned int command:8; /* message type */ - /* bits 127:120 */ -}; - -/* - * The activation descriptor: - * The format of the message to send, plus all accompanying control - * Should be 64 bytes - */ -struct bau_desc { - struct pnmask distribution; - /* - * message template, consisting of header and payload: - */ - union bau_msg_header { - struct uv2_3_bau_msg_header uv2_3_hdr; - } header; - - union bau_payload_header { - struct uv2_3_bau_msg_payload uv2_3; - struct uv4_bau_msg_payload uv4; - } payload; -}; -/* UV2: - * -payload-- ---------header------ - * bytes 0-11 bits 70-78 bits 21-44 - * A B (2) C (3) - * - * 
A/B/C are moved to: - * A C B - * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector) - * ------------payload queue----------- - */ - -/* - * The payload queue on the destination side is an array of these. - * With BAU_MISC_CONTROL set for software acknowledge mode, the messages - * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17 - * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120) - * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from - * swack_vec and payload_2) - * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software - * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload - * operation." - */ -struct bau_pq_entry { - unsigned long address; /* signifies a page or all TLB's - of the cpu */ - /* 64 bits, bytes 0-7 */ - unsigned short sending_cpu; /* cpu that sent the message */ - /* 16 bits, bytes 8-9 */ - unsigned short acknowledge_count; /* filled in by destination */ - /* 16 bits, bytes 10-11 */ - /* these next 3 bytes come from bits 58-81 of the message header */ - unsigned short replied_to:1; /* sent as 0 by the source */ - unsigned short msg_type:3; /* software message type */ - unsigned short canceled:1; /* sent as 0 by the source */ - unsigned short unused1:3; /* not currently using */ - /* byte 12 */ - unsigned char unused2a; /* not currently using */ - /* byte 13 */ - unsigned char unused2; /* not currently using */ - /* byte 14 */ - unsigned char swack_vec; /* filled in by the hardware */ - /* byte 15 (bits 127:120) */ - unsigned short sequence; /* message sequence number */ - /* bytes 16-17 */ - unsigned char unused4[2]; /* not currently using bytes 18-19 */ - /* bytes 18-19 */ - int number_of_cpus; /* filled in at destination */ - /* 32 bits, bytes 20-23 (aligned) */ - unsigned char unused5[8]; /* not using */ - /* bytes 24-31 */ -}; - -struct msg_desc { - struct bau_pq_entry *msg; - int msg_slot; - struct bau_pq_entry *queue_first; - struct bau_pq_entry *queue_last; -}; - -struct reset_args { - int sender; -}; - -/* - * This structure is allocated per_cpu for UV TLB shootdown statistics. 
- */ -struct ptc_stats { - /* sender statistics */ - unsigned long s_giveup; /* number of fall backs to - IPI-style flushes */ - unsigned long s_requestor; /* number of shootdown - requests */ - unsigned long s_stimeout; /* source side timeouts */ - unsigned long s_dtimeout; /* destination side timeouts */ - unsigned long s_strongnacks; /* number of strong nack's */ - unsigned long s_time; /* time spent in sending side */ - unsigned long s_retriesok; /* successful retries */ - unsigned long s_ntargcpu; /* total number of cpu's - targeted */ - unsigned long s_ntargself; /* times the sending cpu was - targeted */ - unsigned long s_ntarglocals; /* targets of cpus on the local - blade */ - unsigned long s_ntargremotes; /* targets of cpus on remote - blades */ - unsigned long s_ntarglocaluvhub; /* targets of the local hub */ - unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */ - unsigned long s_ntarguvhub; /* total number of uvhubs - targeted */ - unsigned long s_ntarguvhub16; /* number of times target - hubs >= 16*/ - unsigned long s_ntarguvhub8; /* number of times target - hubs >= 8 */ - unsigned long s_ntarguvhub4; /* number of times target - hubs >= 4 */ - unsigned long s_ntarguvhub2; /* number of times target - hubs >= 2 */ - unsigned long s_ntarguvhub1; /* number of times target - hubs == 1 */ - unsigned long s_resets_plug; /* ipi-style resets from plug - state */ - unsigned long s_resets_timeout; /* ipi-style resets from - timeouts */ - unsigned long s_busy; /* status stayed busy past - s/w timer */ - unsigned long s_throttles; /* waits in throttle */ - unsigned long s_retry_messages; /* retry broadcasts */ - unsigned long s_bau_reenabled; /* for bau enable/disable */ - unsigned long s_bau_disabled; /* for bau enable/disable */ - unsigned long s_uv2_wars; /* uv2 workaround, perm. 
busy */ - unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */ - unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */ - unsigned long s_overipilimit; /* over the ipi reset limit */ - unsigned long s_giveuplimit; /* disables, over giveup limit*/ - unsigned long s_enters; /* entries to the driver */ - unsigned long s_ipifordisabled; /* fall back to IPI; disabled */ - unsigned long s_plugged; /* plugged by h/w bug*/ - unsigned long s_congested; /* giveup on long wait */ - /* destination statistics */ - unsigned long d_alltlb; /* times all tlb's on this - cpu were flushed */ - unsigned long d_onetlb; /* times just one tlb on this - cpu was flushed */ - unsigned long d_multmsg; /* interrupts with multiple - messages */ - unsigned long d_nomsg; /* interrupts with no message */ - unsigned long d_time; /* time spent on destination - side */ - unsigned long d_requestee; /* number of messages - processed */ - unsigned long d_retries; /* number of retry messages - processed */ - unsigned long d_canceled; /* number of messages canceled - by retries */ - unsigned long d_nocanceled; /* retries that found nothing - to cancel */ - unsigned long d_resets; /* number of ipi-style requests - processed */ - unsigned long d_rcanceled; /* number of messages canceled - by resets */ -}; - -struct tunables { - int *tunp; - int deflt; -}; - -struct hub_and_pnode { - short uvhub; - short pnode; -}; - -struct socket_desc { - short num_cpus; - short cpu_number[MAX_CPUS_PER_SOCKET]; -}; - -struct uvhub_desc { - unsigned short socket_mask; - short num_cpus; - short uvhub; - short pnode; - struct socket_desc socket[2]; -}; - -/** - * struct bau_control - * @status_mmr: location of status mmr, determined by uvhub_cpu - * @status_index: index of ERR|BUSY bits in status mmr, determined by uvhub_cpu - * - * Per-cpu control struct containing CPU topology information and BAU tuneables. 
- */ -struct bau_control { - struct bau_desc *descriptor_base; - struct bau_pq_entry *queue_first; - struct bau_pq_entry *queue_last; - struct bau_pq_entry *bau_msg_head; - struct bau_control *uvhub_master; - struct bau_control *socket_master; - struct ptc_stats *statp; - cpumask_t *cpumask; - unsigned long timeout_interval; - unsigned long set_bau_on_time; - atomic_t active_descriptor_count; - int plugged_tries; - int timeout_tries; - int ipi_attempts; - int conseccompletes; - u64 status_mmr; - int status_index; - bool nobau; - short baudisabled; - short cpu; - short osnode; - short uvhub_cpu; - short uvhub; - short uvhub_version; - short cpus_in_socket; - short cpus_in_uvhub; - short partition_base_pnode; - short busy; /* all were busy (war) */ - unsigned short message_number; - unsigned short uvhub_quiesce; - short socket_acknowledge_count[DEST_Q_SIZE]; - cycles_t send_message; - cycles_t period_end; - cycles_t period_time; - spinlock_t uvhub_lock; - spinlock_t queue_lock; - spinlock_t disable_lock; - /* tunables */ - int max_concurr; - int max_concurr_const; - int plugged_delay; - int plugsb4reset; - int timeoutsb4reset; - int ipi_reset_limit; - int complete_threshold; - int cong_response_us; - int cong_reps; - cycles_t disabled_period; - int period_giveups; - int giveup_limit; - long period_requests; - struct hub_and_pnode *thp; -}; - -/* Abstracted BAU functions */ -struct bau_operations { - unsigned long (*read_l_sw_ack)(void); - unsigned long (*read_g_sw_ack)(int pnode); - unsigned long (*bau_gpa_to_offset)(unsigned long vaddr); - void (*write_l_sw_ack)(unsigned long mmr); - void (*write_g_sw_ack)(int pnode, unsigned long mmr); - void (*write_payload_first)(int pnode, unsigned long mmr); - void (*write_payload_last)(int pnode, unsigned long mmr); - int (*wait_completion)(struct bau_desc*, - struct bau_control*, long try); -}; - -static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image); -} - -static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image); -} - -static inline void write_mmr_activation(unsigned long index) -{ - write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); -} - -static inline void write_gmmr_activation(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image); -} - -static inline void write_mmr_proc_payload_first(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_FIRST, mmr_image); -} - -static inline void write_mmr_proc_payload_last(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_LAST, mmr_image); -} - -static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image); -} - -static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image); -} - -static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image); -} - -static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image) -{ - write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); -} - -static inline unsigned long read_mmr_misc_control(int pnode) -{ - return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL); -} - -static inline void write_mmr_sw_ack(unsigned long mr) -{ - 
uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); -} - -static inline void write_gmmr_sw_ack(int pnode, unsigned long mr) -{ - write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); -} - -static inline unsigned long read_mmr_sw_ack(void) -{ - return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); -} - -static inline unsigned long read_gmmr_sw_ack(int pnode) -{ - return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); -} - -static inline void write_mmr_proc_sw_ack(unsigned long mr) -{ - uv_write_local_mmr(UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr); -} - -static inline void write_gmmr_proc_sw_ack(int pnode, unsigned long mr) -{ - write_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr); -} - -static inline unsigned long read_mmr_proc_sw_ack(void) -{ - return read_lmmr(UV4H_LB_PROC_INTD_SOFT_ACK_PENDING); -} - -static inline unsigned long read_gmmr_proc_sw_ack(int pnode) -{ - return read_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_PENDING); -} - -static inline void write_mmr_data_config(int pnode, unsigned long mr) -{ - uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr); -} - -static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp) -{ - return constant_test_bit(uvhub, &dstp->bits[0]); -} -static inline void bau_uvhub_set(int pnode, struct pnmask *dstp) -{ - __set_bit(pnode, &dstp->bits[0]); -} -static inline void bau_uvhubs_clear(struct pnmask *dstp, - int nbits) -{ - bitmap_zero(&dstp->bits[0], nbits); -} -static inline int bau_uvhub_weight(struct pnmask *dstp) -{ - return bitmap_weight((unsigned long *)&dstp->bits[0], - UV_DISTRIBUTION_SIZE); -} - -static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits) -{ - bitmap_zero(&dstp->bits, nbits); -} - -struct atomic_short { - short counter; -}; - -/* - * atomic_read_short - read a short atomic variable - * @v: pointer of type atomic_short - * - * Atomically reads the value of @v. - */ -static inline int atomic_read_short(const struct atomic_short *v) -{ - return v->counter; -} - -/* - * atom_asr - add and return a short int - * @i: short value to add - * @v: pointer of type atomic_short - * - * Atomically adds @i to @v and returns @i + @v - */ -static inline int atom_asr(short i, struct atomic_short *v) -{ - short __i = i; - asm volatile(LOCK_PREFIX "xaddw %0, %1" - : "+r" (i), "+m" (v->counter) - : : "memory"); - return i + __i; -} - -/* - * conditionally add 1 to *v, unless *v is >= u - * return 0 if we cannot add 1 to *v because it is >= u - * return 1 if we can add 1 to *v because it is < u - * the add is atomic - * - * This is close to atomic_add_unless(), but this allows the 'u' value - * to be lowered below the current 'v'. atomic_add_unless can only stop - * on equal. - */ -static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) -{ - spin_lock(lock); - if (atomic_read(v) >= u) { - spin_unlock(lock); - return 0; - } - atomic_inc(v); - spin_unlock(lock); - return 1; -} - -void uv_bau_message_interrupt(struct pt_regs *regs); - -#endif /* _ASM_X86_UV_UV_BAU_H */ diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 100d66806503..5002f52be332 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -5,6 +5,7 @@ * * SGI UV architectural definitions * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved. 
*/ @@ -129,17 +130,6 @@ */ #define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2) -/* System Controller Interface Reg info */ -struct uv_scir_s { - struct timer_list timer; - unsigned long offset; - unsigned long last; - unsigned long idle_on; - unsigned long idle_off; - unsigned char state; - unsigned char enabled; -}; - /* GAM (globally addressed memory) range table */ struct uv_gam_range_s { u32 limit; /* PA bits 56:26 (GAM_RANGE_SHFT) */ @@ -155,6 +145,8 @@ struct uv_gam_range_s { * available in the L3 cache on the cpu socket for the node. */ struct uv_hub_info_s { + unsigned int hub_type; + unsigned char hub_revision; unsigned long global_mmr_base; unsigned long global_mmr_shift; unsigned long gpa_mask; @@ -167,9 +159,9 @@ struct uv_hub_info_s { unsigned char m_val; unsigned char n_val; unsigned char gr_table_len; - unsigned char hub_revision; unsigned char apic_pnode_shift; unsigned char gpa_shift; + unsigned char nasid_shift; unsigned char m_shift; unsigned char n_lshift; unsigned int gnode_extra; @@ -191,16 +183,13 @@ struct uv_hub_info_s { struct uv_cpu_info_s { void *p_uv_hub_info; unsigned char blade_cpu_id; - struct uv_scir_s scir; + void *reserved; }; DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info); #define uv_cpu_info this_cpu_ptr(&__uv_cpu_info) #define uv_cpu_info_per(cpu) (&per_cpu(__uv_cpu_info, cpu)) -#define uv_scir_info (&uv_cpu_info->scir) -#define uv_cpu_scir_info(cpu) (&uv_cpu_info_per(cpu)->scir) - /* Node specific hub common info struct */ extern void **__uv_hub_info_list; static inline struct uv_hub_info_s *uv_hub_info_list(int node) @@ -219,6 +208,17 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu) return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info; } +static inline int uv_hub_type(void) +{ + return uv_hub_info->hub_type; +} + +static inline __init void uv_hub_type_set(int uvmask) +{ + uv_hub_info->hub_type = uvmask; +} + + /* * HUB revision ranges for each UV HUB architecture. * This is a software convention - NOT the hardware revision numbers in @@ -228,39 +228,31 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu) #define UV3_HUB_REVISION_BASE 5 #define UV4_HUB_REVISION_BASE 7 #define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */ +#define UV5_HUB_REVISION_BASE 9 -static inline int is_uv2_hub(void) -{ - return is_uv_hubbed(uv(2)); -} - -static inline int is_uv3_hub(void) -{ - return is_uv_hubbed(uv(3)); -} +static inline int is_uv(int uvmask) { return uv_hub_type() & uvmask; } +static inline int is_uv1_hub(void) { return 0; } +static inline int is_uv2_hub(void) { return is_uv(UV2); } +static inline int is_uv3_hub(void) { return is_uv(UV3); } +static inline int is_uv4a_hub(void) { return is_uv(UV4A); } +static inline int is_uv4_hub(void) { return is_uv(UV4); } +static inline int is_uv5_hub(void) { return is_uv(UV5); } -/* First test "is UV4A", then "is UV4" */ -static inline int is_uv4a_hub(void) -{ - if (is_uv_hubbed(uv(4))) - return (uv_hub_info->hub_revision == UV4A_HUB_REVISION_BASE); - return 0; -} +/* + * UV4A is a revision of UV4. So on UV4A, both is_uv4_hub() and + * is_uv4a_hub() return true, While on UV4, only is_uv4_hub() + * returns true. So to get true results, first test if is UV4A, + * then test if is UV4. + */ -static inline int is_uv4_hub(void) -{ - return is_uv_hubbed(uv(4)); -} +/* UVX class: UV2,3,4 */ +static inline int is_uvx_hub(void) { return is_uv(UVX); } -static inline int is_uvx_hub(void) -{ - return (is_uv_hubbed(-2) >= uv(2)); -} +/* UVY class: UV5,..? 
*/ +static inline int is_uvy_hub(void) { return is_uv(UVY); } -static inline int is_uv_hub(void) -{ - return is_uvx_hub(); -} +/* Any UV Hubbed System */ +static inline int is_uv_hub(void) { return is_uv(UV_ANY); } union uvh_apicid { unsigned long v; @@ -282,9 +274,11 @@ union uvh_apicid { * g - GNODE (full 15-bit global nasid, right shifted 1) * p - PNODE (local part of nsids, right shifted 1) */ -#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) +#define UV_NASID_TO_PNODE(n) \ + (((n) >> uv_hub_info->nasid_shift) & uv_hub_info->pnode_mask) #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra) -#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1) +#define UV_PNODE_TO_NASID(p) \ + (UV_PNODE_TO_GNODE(p) << uv_hub_info->nasid_shift) #define UV2_LOCAL_MMR_BASE 0xfa000000UL #define UV2_GLOBAL_MMR32_BASE 0xfc000000UL @@ -297,29 +291,42 @@ union uvh_apicid { #define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024) #define UV4_LOCAL_MMR_BASE 0xfa000000UL -#define UV4_GLOBAL_MMR32_BASE 0xfc000000UL +#define UV4_GLOBAL_MMR32_BASE 0 #define UV4_LOCAL_MMR_SIZE (32UL * 1024 * 1024) -#define UV4_GLOBAL_MMR32_SIZE (16UL * 1024 * 1024) +#define UV4_GLOBAL_MMR32_SIZE 0 + +#define UV5_LOCAL_MMR_BASE 0xfa000000UL +#define UV5_GLOBAL_MMR32_BASE 0 +#define UV5_LOCAL_MMR_SIZE (32UL * 1024 * 1024) +#define UV5_GLOBAL_MMR32_SIZE 0 #define UV_LOCAL_MMR_BASE ( \ - is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \ - is_uv3_hub() ? UV3_LOCAL_MMR_BASE : \ - /*is_uv4_hub*/ UV4_LOCAL_MMR_BASE) + is_uv(UV2) ? UV2_LOCAL_MMR_BASE : \ + is_uv(UV3) ? UV3_LOCAL_MMR_BASE : \ + is_uv(UV4) ? UV4_LOCAL_MMR_BASE : \ + is_uv(UV5) ? UV5_LOCAL_MMR_BASE : \ + 0) #define UV_GLOBAL_MMR32_BASE ( \ - is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE : \ - is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE : \ - /*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE) + is_uv(UV2) ? UV2_GLOBAL_MMR32_BASE : \ + is_uv(UV3) ? UV3_GLOBAL_MMR32_BASE : \ + is_uv(UV4) ? UV4_GLOBAL_MMR32_BASE : \ + is_uv(UV5) ? UV5_GLOBAL_MMR32_BASE : \ + 0) #define UV_LOCAL_MMR_SIZE ( \ - is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \ - is_uv3_hub() ? UV3_LOCAL_MMR_SIZE : \ - /*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE) + is_uv(UV2) ? UV2_LOCAL_MMR_SIZE : \ + is_uv(UV3) ? UV3_LOCAL_MMR_SIZE : \ + is_uv(UV4) ? UV4_LOCAL_MMR_SIZE : \ + is_uv(UV5) ? UV5_LOCAL_MMR_SIZE : \ + 0) #define UV_GLOBAL_MMR32_SIZE ( \ - is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE : \ - is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE : \ - /*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE) + is_uv(UV2) ? UV2_GLOBAL_MMR32_SIZE : \ + is_uv(UV3) ? UV3_GLOBAL_MMR32_SIZE : \ + is_uv(UV4) ? UV4_GLOBAL_MMR32_SIZE : \ + is_uv(UV5) ? 
UV5_GLOBAL_MMR32_SIZE : \ + 0) #define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) @@ -720,7 +727,7 @@ extern void uv_nmi_setup_hubless(void); #define UVH_TSC_SYNC_SHIFT_UV2K 16 /* UV2/3k have different bits */ #define UVH_TSC_SYNC_MASK 3 /* 0011 */ #define UVH_TSC_SYNC_VALID 3 /* 0011 */ -#define UVH_TSC_SYNC_INVALID 2 /* 0010 */ +#define UVH_TSC_SYNC_UNKNOWN 0 /* 0000 */ /* BMC sets a bit this MMR non-zero before sending an NMI */ #define UVH_NMI_MMR UVH_BIOS_KERNEL_MMR @@ -728,19 +735,6 @@ extern void uv_nmi_setup_hubless(void); #define UVH_NMI_MMR_SHIFT 63 #define UVH_NMI_MMR_TYPE "SCRATCH5" -/* Newer SMM NMI handler, not present in all systems */ -#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0 -#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS -#define UVH_NMI_MMRX_SHIFT UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT -#define UVH_NMI_MMRX_TYPE "EXTIO_INT0" - -/* Non-zero indicates newer SMM NMI handler present */ -#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST - -/* Indicates to BIOS that we want to use the newer SMM NMI handler */ -#define UVH_NMI_MMRX_REQ UVH_BIOS_KERNEL_MMR_ALIAS_2 -#define UVH_NMI_MMRX_REQ_SHIFT 62 - struct uv_hub_nmi_s { raw_spinlock_t nmi_lock; atomic_t in_nmi; /* flag this node in UV NMI IRQ */ @@ -772,29 +766,6 @@ DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi); #define UV_NMI_STATE_DUMP 2 #define UV_NMI_STATE_DUMP_DONE 3 -/* Update SCIR state */ -static inline void uv_set_scir_bits(unsigned char value) -{ - if (uv_scir_info->state != value) { - uv_scir_info->state = value; - uv_write_local_mmr8(uv_scir_info->offset, value); - } -} - -static inline unsigned long uv_scir_offset(int apicid) -{ - return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f); -} - -static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) -{ - if (uv_cpu_scir_info(cpu)->state != value) { - uv_write_global_mmr8(uv_cpu_to_pnode(cpu), - uv_cpu_scir_info(cpu)->offset, value); - uv_cpu_scir_info(cpu)->state = value; - } -} - /* * Get the minimum revision number of the hub chips within the partition. * (See UVx_HUB_REVISION_BASE above for specific values.) diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 775bf143a072..57fa67373262 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h @@ -3,8 +3,9 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * SGI UV MMR definitions + * HPE UV MMR definitions * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP * Copyright (C) 2007-2016 Silicon Graphics, Inc. All rights reserved. */ @@ -18,42 +19,43 @@ * grouped by architecture types. * * UVH - definitions common to all UV hub types. - * UVXH - definitions common to all UV eXtended hub types (currently 2, 3, 4). - * UV2H - definitions specific to UV type 2 hub. - * UV3H - definitions specific to UV type 3 hub. + * UVXH - definitions common to UVX class (2, 3, 4). + * UVYH - definitions common to UVY class (5). + * UV5H - definitions specific to UV type 5 hub. + * UV4AH - definitions specific to UV type 4A hub. * UV4H - definitions specific to UV type 4 hub. - * - * So in general, MMR addresses and structures are identical on all hubs types. - * These MMRs are identified as: - * #define UVH_xxx <address> - * union uvh_xxx { - * unsigned long v; - * struct uvh_int_cmpd_s { - * } s; - * }; + * UV3H - definitions specific to UV type 3 hub. + * UV2H - definitions specific to UV type 2 hub. 
* * If the MMR exists on all hub types but have different addresses, - * use a conditional operator to define the value at runtime. - * #define UV2Hxxx b - * #define UV3Hxxx c - * #define UV4Hxxx d - * #define UV4AHxxx e - * #define UVHxxx (is_uv2_hub() ? UV2Hxxx : - * (is_uv3_hub() ? UV3Hxxx : - * (is_uv4a_hub() ? UV4AHxxx : - * UV4Hxxx)) + * use a conditional operator to define the value at runtime. Any + * that are not defined are blank. + * (UV4A variations only generated if different from uv4) + * #define UVHxxx ( + * is_uv(UV5) ? UV5Hxxx value : + * is_uv(UV4A) ? UV4AHxxx value : + * is_uv(UV4) ? UV4Hxxx value : + * is_uv(UV3) ? UV3Hxxx value : + * is_uv(UV2) ? UV2Hxxx value : + * <ucv> or <undef value>) + * + * Class UVX has UVs (2|3|4|4A). + * Class UVY has UVs (5). * * union uvh_xxx { * unsigned long v; * struct uvh_xxx_s { # Common fields only * } s; - * struct uv2h_xxx_s { # Full UV2 definition (*) - * } s2; - * struct uv3h_xxx_s { # Full UV3 definition (*) - * } s3; - * (NOTE: No struct uv4ah_xxx_s members exist) + * struct uv5h_xxx_s { # Full UV5 definition (*) + * } s5; + * struct uv4ah_xxx_s { # Full UV4A definition (*) + * } s4a; * struct uv4h_xxx_s { # Full UV4 definition (*) * } s4; + * struct uv3h_xxx_s { # Full UV3 definition (*) + * } s3; + * struct uv2h_xxx_s { # Full UV2 definition (*) + * } s2; * }; * (* - if present and different than the common struct) * @@ -62,429 +64,499 @@ * if the contents is the same for all hubs, only the "s" structure is * generated. * - * If the MMR exists on ONLY 1 type of hub, no generic definition is - * generated: - * #define UVnH_xxx <uvn address> - * union uvnh_xxx { - * unsigned long v; - * struct uvh_int_cmpd_s { - * } sn; - * }; - * - * (GEN Flags: mflags_opt= undefs=function UV234=UVXH) + * (GEN Flags: undefs=function) */ + /* UV bit masks */ +#define UV2 (1 << 0) +#define UV3 (1 << 1) +#define UV4 (1 << 2) +#define UV4A (1 << 3) +#define UV5 (1 << 4) +#define UVX (UV2|UV3|UV4) +#define UVY (UV5) +#define UV_ANY (~0) + + + + #define UV_MMR_ENABLE (1UL << 63) +#define UV1_HUB_PART_NUMBER 0x88a5 #define UV2_HUB_PART_NUMBER 0x8eb8 #define UV2_HUB_PART_NUMBER_X 0x1111 #define UV3_HUB_PART_NUMBER 0x9578 #define UV3_HUB_PART_NUMBER_X 0x4321 #define UV4_HUB_PART_NUMBER 0x99a1 +#define UV5_HUB_PART_NUMBER 0xa171 /* Error function to catch undefined references */ extern unsigned long uv_undefined(char *str); /* ========================================================================= */ -/* UVH_BAU_DATA_BROADCAST */ -/* ========================================================================= */ -#define UVH_BAU_DATA_BROADCAST 0x61688UL - -#define UV2H_BAU_DATA_BROADCAST_32 0x440 -#define UV3H_BAU_DATA_BROADCAST_32 0x440 -#define UV4H_BAU_DATA_BROADCAST_32 0x360 -#define UVH_BAU_DATA_BROADCAST_32 ( \ - is_uv2_hub() ? UV2H_BAU_DATA_BROADCAST_32 : \ - is_uv3_hub() ? 
UV3H_BAU_DATA_BROADCAST_32 : \ - /*is_uv4_hub*/ UV4H_BAU_DATA_BROADCAST_32) - -#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 -#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL - - -union uvh_bau_data_broadcast_u { - unsigned long v; - struct uvh_bau_data_broadcast_s { - unsigned long enable:1; /* RW */ - unsigned long rsvd_1_63:63; - } s; -}; - -/* ========================================================================= */ -/* UVH_BAU_DATA_CONFIG */ -/* ========================================================================= */ -#define UVH_BAU_DATA_CONFIG 0x61680UL - -#define UV2H_BAU_DATA_CONFIG_32 0x438 -#define UV3H_BAU_DATA_CONFIG_32 0x438 -#define UV4H_BAU_DATA_CONFIG_32 0x358 -#define UVH_BAU_DATA_CONFIG_32 ( \ - is_uv2_hub() ? UV2H_BAU_DATA_CONFIG_32 : \ - is_uv3_hub() ? UV3H_BAU_DATA_CONFIG_32 : \ - /*is_uv4_hub*/ UV4H_BAU_DATA_CONFIG_32) - -#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 -#define UVH_BAU_DATA_CONFIG_DM_SHFT 8 -#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11 -#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12 -#define UVH_BAU_DATA_CONFIG_P_SHFT 13 -#define UVH_BAU_DATA_CONFIG_T_SHFT 15 -#define UVH_BAU_DATA_CONFIG_M_SHFT 16 -#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32 -#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL -#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL - - -union uvh_bau_data_config_u { - unsigned long v; - struct uvh_bau_data_config_s { - unsigned long vector_:8; /* RW */ - unsigned long dm:3; /* RW */ - unsigned long destmode:1; /* RW */ - unsigned long status:1; /* RO */ - unsigned long p:1; /* RO */ - unsigned long rsvd_14:1; - unsigned long t:1; /* RO */ - unsigned long m:1; /* RW */ - unsigned long rsvd_17_31:15; - unsigned long apic_id:32; /* RW */ - } s; -}; - -/* ========================================================================= */ /* UVH_EVENT_OCCURRED0 */ /* ========================================================================= */ #define UVH_EVENT_OCCURRED0 0x70000UL -#define UVH_EVENT_OCCURRED0_32 0x5e8 +/* UVH common defines*/ #define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0 -#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 #define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL -#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL +/* UVXH common defines */ #define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2 -#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3 -#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4 -#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5 -#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6 -#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7 -#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8 -#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9 -#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12 -#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13 -#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14 -#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15 -#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16 #define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL +#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3 #define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL +#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4 #define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL 
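(Illustrative sketch, not part of the patch: the reorganized per-class EVENT_OCCURRED0 defines are intended to be tested by masking the MMR value with the *_MASK constants. The snippet below assumes uv_read_local_mmr() from <asm/uv/uv_hub.h>; handle_rh_hcerr() is a hypothetical placeholder handler.)

/* Minimal usage sketch, assuming uv_read_local_mmr() and a placeholder handler */
static void check_event_occurred0(void)
{
	/* Read the per-node event register defined above */
	unsigned long ev = uv_read_local_mmr(UVH_EVENT_OCCURRED0);

	/* Test a UVX-class bit using its mask define */
	if (ev & UVXH_EVENT_OCCURRED0_RH_HCERR_MASK)
		handle_rh_hcerr();
}
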
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5 #define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL +#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6 #define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL +#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7 #define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL +#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8 #define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL +#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9 #define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL +#define UVXH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 +#define UVXH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL +#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12 #define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL +#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13 #define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL +#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14 #define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL +#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15 #define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL +#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16 #define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL -#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 -#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 -#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 -#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 -#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 -#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 -#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 -#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 -#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 -#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 -#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 -#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 -#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 -#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 -#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 -#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 -#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 -#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 -#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 -#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 -#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 -#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 -#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53 -#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 -#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 -#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 -#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 -#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 -#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL -#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 
0x0000000000000400UL -#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL -#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL -#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL -#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL -#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL -#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL -#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL -#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL -#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL -#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL -#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL -#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL -#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL -#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL -#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL -#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL -#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL -#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL -#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL -#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL -#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL -#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL -#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL -#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL -#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL -#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL -#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL - -#define UV3H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 -#define UV3H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 -#define UV3H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 -#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 -#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 -#define UV3H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 -#define UV3H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 -#define UV3H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 -#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 -#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 -#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 -#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 -#define UV3H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 -#define UV3H_EVENT_OCCURRED0_RT_AOERR1_SHFT 
28 -#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 -#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 -#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 -#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 -#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 -#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 -#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 -#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 -#define UV3H_EVENT_OCCURRED0_IPI_INT_SHFT 53 -#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 -#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 -#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 -#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 -#define UV3H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 -#define UV3H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL -#define UV3H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL -#define UV3H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL -#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL -#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL -#define UV3H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL -#define UV3H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL -#define UV3H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL -#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL -#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL -#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL -#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL -#define UV3H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL -#define UV3H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL -#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL -#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL -#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 
0x0000100000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL -#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL -#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL -#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL -#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL -#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL -#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL -#define UV3H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL -#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL -#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL -#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL -#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL -#define UV3H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL - +/* UVYH common defines */ +#define UVYH_EVENT_OCCURRED0_KT_HCERR_SHFT 1 +#define UVYH_EVENT_OCCURRED0_KT_HCERR_MASK 0x0000000000000002UL +#define UVYH_EVENT_OCCURRED0_RH0_HCERR_SHFT 2 +#define UVYH_EVENT_OCCURRED0_RH0_HCERR_MASK 0x0000000000000004UL +#define UVYH_EVENT_OCCURRED0_RH1_HCERR_SHFT 3 +#define UVYH_EVENT_OCCURRED0_RH1_HCERR_MASK 0x0000000000000008UL +#define UVYH_EVENT_OCCURRED0_LH0_HCERR_SHFT 4 +#define UVYH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000010UL +#define UVYH_EVENT_OCCURRED0_LH1_HCERR_SHFT 5 +#define UVYH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000020UL +#define UVYH_EVENT_OCCURRED0_LH2_HCERR_SHFT 6 +#define UVYH_EVENT_OCCURRED0_LH2_HCERR_MASK 0x0000000000000040UL +#define UVYH_EVENT_OCCURRED0_LH3_HCERR_SHFT 7 +#define UVYH_EVENT_OCCURRED0_LH3_HCERR_MASK 0x0000000000000080UL +#define UVYH_EVENT_OCCURRED0_XB_HCERR_SHFT 8 +#define UVYH_EVENT_OCCURRED0_XB_HCERR_MASK 0x0000000000000100UL +#define UVYH_EVENT_OCCURRED0_RDM_HCERR_SHFT 9 +#define UVYH_EVENT_OCCURRED0_RDM_HCERR_MASK 0x0000000000000200UL +#define UVYH_EVENT_OCCURRED0_NI0_HCERR_SHFT 10 +#define UVYH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000400UL +#define UVYH_EVENT_OCCURRED0_NI1_HCERR_SHFT 11 +#define UVYH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000800UL +#define UVYH_EVENT_OCCURRED0_LB_AOERR0_SHFT 12 +#define UVYH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000001000UL +#define UVYH_EVENT_OCCURRED0_KT_AOERR0_SHFT 13 +#define UVYH_EVENT_OCCURRED0_KT_AOERR0_MASK 0x0000000000002000UL +#define UVYH_EVENT_OCCURRED0_RH0_AOERR0_SHFT 14 +#define UVYH_EVENT_OCCURRED0_RH0_AOERR0_MASK 0x0000000000004000UL +#define UVYH_EVENT_OCCURRED0_RH1_AOERR0_SHFT 15 +#define UVYH_EVENT_OCCURRED0_RH1_AOERR0_MASK 0x0000000000008000UL +#define UVYH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 16 +#define UVYH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000010000UL +#define UVYH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 17 +#define UVYH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000020000UL +#define UVYH_EVENT_OCCURRED0_LH2_AOERR0_SHFT 18 +#define UVYH_EVENT_OCCURRED0_LH2_AOERR0_MASK 0x0000000000040000UL +#define UVYH_EVENT_OCCURRED0_LH3_AOERR0_SHFT 19 +#define UVYH_EVENT_OCCURRED0_LH3_AOERR0_MASK 0x0000000000080000UL +#define UVYH_EVENT_OCCURRED0_XB_AOERR0_SHFT 20 +#define UVYH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000100000UL +#define UVYH_EVENT_OCCURRED0_RDM_AOERR0_SHFT 21 +#define UVYH_EVENT_OCCURRED0_RDM_AOERR0_MASK 0x0000000000200000UL +#define UVYH_EVENT_OCCURRED0_RT0_AOERR0_SHFT 22 +#define UVYH_EVENT_OCCURRED0_RT0_AOERR0_MASK 0x0000000000400000UL +#define 
UVYH_EVENT_OCCURRED0_RT1_AOERR0_SHFT 23 +#define UVYH_EVENT_OCCURRED0_RT1_AOERR0_MASK 0x0000000000800000UL +#define UVYH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 24 +#define UVYH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000001000000UL +#define UVYH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 25 +#define UVYH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000002000000UL +#define UVYH_EVENT_OCCURRED0_LB_AOERR1_SHFT 26 +#define UVYH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000004000000UL +#define UVYH_EVENT_OCCURRED0_KT_AOERR1_SHFT 27 +#define UVYH_EVENT_OCCURRED0_KT_AOERR1_MASK 0x0000000008000000UL +#define UVYH_EVENT_OCCURRED0_RH0_AOERR1_SHFT 28 +#define UVYH_EVENT_OCCURRED0_RH0_AOERR1_MASK 0x0000000010000000UL +#define UVYH_EVENT_OCCURRED0_RH1_AOERR1_SHFT 29 +#define UVYH_EVENT_OCCURRED0_RH1_AOERR1_MASK 0x0000000020000000UL +#define UVYH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 30 +#define UVYH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000040000000UL +#define UVYH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 31 +#define UVYH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000080000000UL +#define UVYH_EVENT_OCCURRED0_LH2_AOERR1_SHFT 32 +#define UVYH_EVENT_OCCURRED0_LH2_AOERR1_MASK 0x0000000100000000UL +#define UVYH_EVENT_OCCURRED0_LH3_AOERR1_SHFT 33 +#define UVYH_EVENT_OCCURRED0_LH3_AOERR1_MASK 0x0000000200000000UL +#define UVYH_EVENT_OCCURRED0_XB_AOERR1_SHFT 34 +#define UVYH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000400000000UL +#define UVYH_EVENT_OCCURRED0_RDM_AOERR1_SHFT 35 +#define UVYH_EVENT_OCCURRED0_RDM_AOERR1_MASK 0x0000000800000000UL +#define UVYH_EVENT_OCCURRED0_RT0_AOERR1_SHFT 36 +#define UVYH_EVENT_OCCURRED0_RT0_AOERR1_MASK 0x0000001000000000UL +#define UVYH_EVENT_OCCURRED0_RT1_AOERR1_SHFT 37 +#define UVYH_EVENT_OCCURRED0_RT1_AOERR1_MASK 0x0000002000000000UL +#define UVYH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 38 +#define UVYH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000004000000000UL +#define UVYH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 39 +#define UVYH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000008000000000UL +#define UVYH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 40 +#define UVYH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000010000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 41 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000020000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 42 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000040000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 43 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000080000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 44 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000100000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 45 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000200000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 46 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000400000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 47 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000800000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 48 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0001000000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 49 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0002000000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 50 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0004000000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 51 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0008000000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 52 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0010000000000000UL +#define 
UVYH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 53 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0020000000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 54 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0040000000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 55 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0080000000000000UL +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 56 +#define UVYH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0100000000000000UL +#define UVYH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 57 +#define UVYH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0200000000000000UL +#define UVYH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 58 +#define UVYH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0400000000000000UL +#define UVYH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 59 +#define UVYH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0800000000000000UL +#define UVYH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 60 +#define UVYH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x1000000000000000UL +#define UVYH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 61 +#define UVYH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x2000000000000000UL + +/* UV4 unique defines */ #define UV4H_EVENT_OCCURRED0_KT_HCERR_SHFT 1 -#define UV4H_EVENT_OCCURRED0_KT_AOERR0_SHFT 10 -#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_SHFT 17 -#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_SHFT 18 -#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_SHFT 19 -#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_SHFT 20 -#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 21 -#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 22 -#define UV4H_EVENT_OCCURRED0_LB_AOERR1_SHFT 23 -#define UV4H_EVENT_OCCURRED0_KT_AOERR1_SHFT 24 -#define UV4H_EVENT_OCCURRED0_RH_AOERR1_SHFT 25 -#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 26 -#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 27 -#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 28 -#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 29 -#define UV4H_EVENT_OCCURRED0_XB_AOERR1_SHFT 30 -#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_SHFT 31 -#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_SHFT 32 -#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_SHFT 33 -#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_SHFT 34 -#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 35 -#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 36 -#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 37 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 38 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 39 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 40 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 41 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 42 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 43 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 44 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 45 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 46 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 47 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 48 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 49 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 50 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 51 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 52 -#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 53 -#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 54 -#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 55 -#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 56 -#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 57 -#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 58 -#define UV4H_EVENT_OCCURRED0_IPI_INT_SHFT 59 -#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 60 -#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 61 -#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 62 -#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 63 #define 
UV4H_EVENT_OCCURRED0_KT_HCERR_MASK 0x0000000000000002UL +#define UV4H_EVENT_OCCURRED0_KT_AOERR0_SHFT 10 #define UV4H_EVENT_OCCURRED0_KT_AOERR0_MASK 0x0000000000000400UL +#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_SHFT 17 #define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_MASK 0x0000000000020000UL +#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_SHFT 18 #define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_MASK 0x0000000000040000UL +#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_SHFT 19 #define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_MASK 0x0000000000080000UL +#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_SHFT 20 #define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_MASK 0x0000000000100000UL +#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 21 #define UV4H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000200000UL +#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 22 #define UV4H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000400000UL +#define UV4H_EVENT_OCCURRED0_LB_AOERR1_SHFT 23 #define UV4H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000800000UL +#define UV4H_EVENT_OCCURRED0_KT_AOERR1_SHFT 24 #define UV4H_EVENT_OCCURRED0_KT_AOERR1_MASK 0x0000000001000000UL +#define UV4H_EVENT_OCCURRED0_RH_AOERR1_SHFT 25 #define UV4H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000002000000UL +#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 26 #define UV4H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000004000000UL +#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 27 #define UV4H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000008000000UL +#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 28 #define UV4H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000010000000UL +#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 29 #define UV4H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000020000000UL +#define UV4H_EVENT_OCCURRED0_XB_AOERR1_SHFT 30 #define UV4H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000040000000UL +#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_SHFT 31 #define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_MASK 0x0000000080000000UL +#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_SHFT 32 #define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_MASK 0x0000000100000000UL +#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_SHFT 33 #define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_MASK 0x0000000200000000UL +#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_SHFT 34 #define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_MASK 0x0000000400000000UL +#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 35 #define UV4H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000800000000UL +#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 36 #define UV4H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000001000000000UL +#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 37 #define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000002000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 38 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000004000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 39 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000008000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 40 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000010000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 41 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000020000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 42 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000040000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 43 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000080000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 44 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000100000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 45 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 
0x0000200000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 46 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000400000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 47 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000800000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 48 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0001000000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 49 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0002000000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 50 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0004000000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 51 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0008000000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 52 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0010000000000000UL +#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 53 #define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0020000000000000UL +#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 54 #define UV4H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0040000000000000UL +#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 55 #define UV4H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0080000000000000UL +#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 56 #define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0100000000000000UL +#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 57 #define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0200000000000000UL +#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 58 #define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0400000000000000UL +#define UV4H_EVENT_OCCURRED0_IPI_INT_SHFT 59 #define UV4H_EVENT_OCCURRED0_IPI_INT_MASK 0x0800000000000000UL +#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 60 #define UV4H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x1000000000000000UL +#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 61 #define UV4H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x2000000000000000UL +#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 62 #define UV4H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x4000000000000000UL +#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 63 #define UV4H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x8000000000000000UL +/* UV3 unique defines */ +#define UV3H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 +#define UV3H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL +#define UV3H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 +#define UV3H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL +#define UV3H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 +#define UV3H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL +#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 +#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL +#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 +#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL +#define UV3H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 +#define UV3H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL +#define UV3H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 +#define UV3H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL +#define UV3H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 +#define UV3H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL +#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 +#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL +#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 +#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL +#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 +#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL +#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 +#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL +#define 
UV3H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 +#define UV3H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL +#define UV3H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 +#define UV3H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL +#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 +#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL +#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 +#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL +#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 +#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 +#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL +#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 +#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL +#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 +#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL +#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 +#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL +#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 +#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL +#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 +#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL +#define UV3H_EVENT_OCCURRED0_IPI_INT_SHFT 53 +#define UV3H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL +#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 +#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL +#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 +#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL +#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 +#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_MASK 
0x0100000000000000UL +#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 +#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL +#define UV3H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 +#define UV3H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL + +/* UV2 unique defines */ +#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 +#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL +#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 +#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL +#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 +#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL +#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 +#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL +#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 +#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL +#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 +#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL +#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 +#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL +#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 +#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL +#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 +#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL +#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 +#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL +#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 +#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL +#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 +#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL +#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 +#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL +#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 +#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL +#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 +#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL +#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 +#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL +#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 +#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 
0x0000040000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 +#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL +#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 +#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL +#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 +#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL +#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 +#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL +#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 +#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL +#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 +#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL +#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53 +#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL +#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 +#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL +#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 +#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL +#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 +#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL +#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 +#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL +#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 +#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL + +#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK ( \ + is_uv(UV4) ? 0x1000000000000000UL : \ + is_uv(UV3) ? 0x0040000000000000UL : \ + is_uv(UV2) ? 0x0040000000000000UL : \ + 0) #define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT ( \ - is_uv2_hub() ? UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \ - is_uv3_hub() ? UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT : \ - /*is_uv4_hub*/ UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT) + is_uv(UV4) ? 60 : \ + is_uv(UV3) ? 54 : \ + is_uv(UV2) ? 
54 : \ + -1) union uvh_event_occurred0_u { unsigned long v; + + /* UVH common struct */ struct uvh_event_occurred0_s { - unsigned long lb_hcerr:1; /* RW, W1C */ - unsigned long rsvd_1_10:10; - unsigned long rh_aoerr0:1; /* RW, W1C */ - unsigned long rsvd_12_63:52; + unsigned long lb_hcerr:1; /* RW */ + unsigned long rsvd_1_63:63; } s; + + /* UVXH common struct */ struct uvxh_event_occurred0_s { unsigned long lb_hcerr:1; /* RW */ unsigned long rsvd_1:1; @@ -505,6 +577,142 @@ union uvh_event_occurred0_u { unsigned long xb_aoerr0:1; /* RW */ unsigned long rsvd_17_63:47; } sx; + + /* UVYH common struct */ + struct uvyh_event_occurred0_s { + unsigned long lb_hcerr:1; /* RW */ + unsigned long kt_hcerr:1; /* RW */ + unsigned long rh0_hcerr:1; /* RW */ + unsigned long rh1_hcerr:1; /* RW */ + unsigned long lh0_hcerr:1; /* RW */ + unsigned long lh1_hcerr:1; /* RW */ + unsigned long lh2_hcerr:1; /* RW */ + unsigned long lh3_hcerr:1; /* RW */ + unsigned long xb_hcerr:1; /* RW */ + unsigned long rdm_hcerr:1; /* RW */ + unsigned long ni0_hcerr:1; /* RW */ + unsigned long ni1_hcerr:1; /* RW */ + unsigned long lb_aoerr0:1; /* RW */ + unsigned long kt_aoerr0:1; /* RW */ + unsigned long rh0_aoerr0:1; /* RW */ + unsigned long rh1_aoerr0:1; /* RW */ + unsigned long lh0_aoerr0:1; /* RW */ + unsigned long lh1_aoerr0:1; /* RW */ + unsigned long lh2_aoerr0:1; /* RW */ + unsigned long lh3_aoerr0:1; /* RW */ + unsigned long xb_aoerr0:1; /* RW */ + unsigned long rdm_aoerr0:1; /* RW */ + unsigned long rt0_aoerr0:1; /* RW */ + unsigned long rt1_aoerr0:1; /* RW */ + unsigned long ni0_aoerr0:1; /* RW */ + unsigned long ni1_aoerr0:1; /* RW */ + unsigned long lb_aoerr1:1; /* RW */ + unsigned long kt_aoerr1:1; /* RW */ + unsigned long rh0_aoerr1:1; /* RW */ + unsigned long rh1_aoerr1:1; /* RW */ + unsigned long lh0_aoerr1:1; /* RW */ + unsigned long lh1_aoerr1:1; /* RW */ + unsigned long lh2_aoerr1:1; /* RW */ + unsigned long lh3_aoerr1:1; /* RW */ + unsigned long xb_aoerr1:1; /* RW */ + unsigned long rdm_aoerr1:1; /* RW */ + unsigned long rt0_aoerr1:1; /* RW */ + unsigned long rt1_aoerr1:1; /* RW */ + unsigned long ni0_aoerr1:1; /* RW */ + unsigned long ni1_aoerr1:1; /* RW */ + unsigned long system_shutdown_int:1; /* RW */ + unsigned long lb_irq_int_0:1; /* RW */ + unsigned long lb_irq_int_1:1; /* RW */ + unsigned long lb_irq_int_2:1; /* RW */ + unsigned long lb_irq_int_3:1; /* RW */ + unsigned long lb_irq_int_4:1; /* RW */ + unsigned long lb_irq_int_5:1; /* RW */ + unsigned long lb_irq_int_6:1; /* RW */ + unsigned long lb_irq_int_7:1; /* RW */ + unsigned long lb_irq_int_8:1; /* RW */ + unsigned long lb_irq_int_9:1; /* RW */ + unsigned long lb_irq_int_10:1; /* RW */ + unsigned long lb_irq_int_11:1; /* RW */ + unsigned long lb_irq_int_12:1; /* RW */ + unsigned long lb_irq_int_13:1; /* RW */ + unsigned long lb_irq_int_14:1; /* RW */ + unsigned long lb_irq_int_15:1; /* RW */ + unsigned long l1_nmi_int:1; /* RW */ + unsigned long stop_clock:1; /* RW */ + unsigned long asic_to_l1:1; /* RW */ + unsigned long l1_to_asic:1; /* RW */ + unsigned long la_seq_trigger:1; /* RW */ + unsigned long rsvd_62_63:2; + } sy; + + /* UV5 unique struct */ + struct uv5h_event_occurred0_s { + unsigned long lb_hcerr:1; /* RW */ + unsigned long kt_hcerr:1; /* RW */ + unsigned long rh0_hcerr:1; /* RW */ + unsigned long rh1_hcerr:1; /* RW */ + unsigned long lh0_hcerr:1; /* RW */ + unsigned long lh1_hcerr:1; /* RW */ + unsigned long lh2_hcerr:1; /* RW */ + unsigned long lh3_hcerr:1; /* RW */ + unsigned long xb_hcerr:1; /* RW */ + unsigned long 
rdm_hcerr:1; /* RW */ + unsigned long ni0_hcerr:1; /* RW */ + unsigned long ni1_hcerr:1; /* RW */ + unsigned long lb_aoerr0:1; /* RW */ + unsigned long kt_aoerr0:1; /* RW */ + unsigned long rh0_aoerr0:1; /* RW */ + unsigned long rh1_aoerr0:1; /* RW */ + unsigned long lh0_aoerr0:1; /* RW */ + unsigned long lh1_aoerr0:1; /* RW */ + unsigned long lh2_aoerr0:1; /* RW */ + unsigned long lh3_aoerr0:1; /* RW */ + unsigned long xb_aoerr0:1; /* RW */ + unsigned long rdm_aoerr0:1; /* RW */ + unsigned long rt0_aoerr0:1; /* RW */ + unsigned long rt1_aoerr0:1; /* RW */ + unsigned long ni0_aoerr0:1; /* RW */ + unsigned long ni1_aoerr0:1; /* RW */ + unsigned long lb_aoerr1:1; /* RW */ + unsigned long kt_aoerr1:1; /* RW */ + unsigned long rh0_aoerr1:1; /* RW */ + unsigned long rh1_aoerr1:1; /* RW */ + unsigned long lh0_aoerr1:1; /* RW */ + unsigned long lh1_aoerr1:1; /* RW */ + unsigned long lh2_aoerr1:1; /* RW */ + unsigned long lh3_aoerr1:1; /* RW */ + unsigned long xb_aoerr1:1; /* RW */ + unsigned long rdm_aoerr1:1; /* RW */ + unsigned long rt0_aoerr1:1; /* RW */ + unsigned long rt1_aoerr1:1; /* RW */ + unsigned long ni0_aoerr1:1; /* RW */ + unsigned long ni1_aoerr1:1; /* RW */ + unsigned long system_shutdown_int:1; /* RW */ + unsigned long lb_irq_int_0:1; /* RW */ + unsigned long lb_irq_int_1:1; /* RW */ + unsigned long lb_irq_int_2:1; /* RW */ + unsigned long lb_irq_int_3:1; /* RW */ + unsigned long lb_irq_int_4:1; /* RW */ + unsigned long lb_irq_int_5:1; /* RW */ + unsigned long lb_irq_int_6:1; /* RW */ + unsigned long lb_irq_int_7:1; /* RW */ + unsigned long lb_irq_int_8:1; /* RW */ + unsigned long lb_irq_int_9:1; /* RW */ + unsigned long lb_irq_int_10:1; /* RW */ + unsigned long lb_irq_int_11:1; /* RW */ + unsigned long lb_irq_int_12:1; /* RW */ + unsigned long lb_irq_int_13:1; /* RW */ + unsigned long lb_irq_int_14:1; /* RW */ + unsigned long lb_irq_int_15:1; /* RW */ + unsigned long l1_nmi_int:1; /* RW */ + unsigned long stop_clock:1; /* RW */ + unsigned long asic_to_l1:1; /* RW */ + unsigned long l1_to_asic:1; /* RW */ + unsigned long la_seq_trigger:1; /* RW */ + unsigned long rsvd_62_63:2; + } s5; + + /* UV4 unique struct */ struct uv4h_event_occurred0_s { unsigned long lb_hcerr:1; /* RW */ unsigned long kt_hcerr:1; /* RW */ @@ -571,13 +779,1355 @@ union uvh_event_occurred0_u { unsigned long extio_int2:1; /* RW */ unsigned long extio_int3:1; /* RW */ } s4; + + /* UV3 unique struct */ + struct uv3h_event_occurred0_s { + unsigned long lb_hcerr:1; /* RW */ + unsigned long qp_hcerr:1; /* RW */ + unsigned long rh_hcerr:1; /* RW */ + unsigned long lh0_hcerr:1; /* RW */ + unsigned long lh1_hcerr:1; /* RW */ + unsigned long gr0_hcerr:1; /* RW */ + unsigned long gr1_hcerr:1; /* RW */ + unsigned long ni0_hcerr:1; /* RW */ + unsigned long ni1_hcerr:1; /* RW */ + unsigned long lb_aoerr0:1; /* RW */ + unsigned long qp_aoerr0:1; /* RW */ + unsigned long rh_aoerr0:1; /* RW */ + unsigned long lh0_aoerr0:1; /* RW */ + unsigned long lh1_aoerr0:1; /* RW */ + unsigned long gr0_aoerr0:1; /* RW */ + unsigned long gr1_aoerr0:1; /* RW */ + unsigned long xb_aoerr0:1; /* RW */ + unsigned long rt_aoerr0:1; /* RW */ + unsigned long ni0_aoerr0:1; /* RW */ + unsigned long ni1_aoerr0:1; /* RW */ + unsigned long lb_aoerr1:1; /* RW */ + unsigned long qp_aoerr1:1; /* RW */ + unsigned long rh_aoerr1:1; /* RW */ + unsigned long lh0_aoerr1:1; /* RW */ + unsigned long lh1_aoerr1:1; /* RW */ + unsigned long gr0_aoerr1:1; /* RW */ + unsigned long gr1_aoerr1:1; /* RW */ + unsigned long xb_aoerr1:1; /* RW */ + unsigned long 
rt_aoerr1:1; /* RW */ + unsigned long ni0_aoerr1:1; /* RW */ + unsigned long ni1_aoerr1:1; /* RW */ + unsigned long system_shutdown_int:1; /* RW */ + unsigned long lb_irq_int_0:1; /* RW */ + unsigned long lb_irq_int_1:1; /* RW */ + unsigned long lb_irq_int_2:1; /* RW */ + unsigned long lb_irq_int_3:1; /* RW */ + unsigned long lb_irq_int_4:1; /* RW */ + unsigned long lb_irq_int_5:1; /* RW */ + unsigned long lb_irq_int_6:1; /* RW */ + unsigned long lb_irq_int_7:1; /* RW */ + unsigned long lb_irq_int_8:1; /* RW */ + unsigned long lb_irq_int_9:1; /* RW */ + unsigned long lb_irq_int_10:1; /* RW */ + unsigned long lb_irq_int_11:1; /* RW */ + unsigned long lb_irq_int_12:1; /* RW */ + unsigned long lb_irq_int_13:1; /* RW */ + unsigned long lb_irq_int_14:1; /* RW */ + unsigned long lb_irq_int_15:1; /* RW */ + unsigned long l1_nmi_int:1; /* RW */ + unsigned long stop_clock:1; /* RW */ + unsigned long asic_to_l1:1; /* RW */ + unsigned long l1_to_asic:1; /* RW */ + unsigned long la_seq_trigger:1; /* RW */ + unsigned long ipi_int:1; /* RW */ + unsigned long extio_int0:1; /* RW */ + unsigned long extio_int1:1; /* RW */ + unsigned long extio_int2:1; /* RW */ + unsigned long extio_int3:1; /* RW */ + unsigned long profile_int:1; /* RW */ + unsigned long rsvd_59_63:5; + } s3; + + /* UV2 unique struct */ + struct uv2h_event_occurred0_s { + unsigned long lb_hcerr:1; /* RW */ + unsigned long qp_hcerr:1; /* RW */ + unsigned long rh_hcerr:1; /* RW */ + unsigned long lh0_hcerr:1; /* RW */ + unsigned long lh1_hcerr:1; /* RW */ + unsigned long gr0_hcerr:1; /* RW */ + unsigned long gr1_hcerr:1; /* RW */ + unsigned long ni0_hcerr:1; /* RW */ + unsigned long ni1_hcerr:1; /* RW */ + unsigned long lb_aoerr0:1; /* RW */ + unsigned long qp_aoerr0:1; /* RW */ + unsigned long rh_aoerr0:1; /* RW */ + unsigned long lh0_aoerr0:1; /* RW */ + unsigned long lh1_aoerr0:1; /* RW */ + unsigned long gr0_aoerr0:1; /* RW */ + unsigned long gr1_aoerr0:1; /* RW */ + unsigned long xb_aoerr0:1; /* RW */ + unsigned long rt_aoerr0:1; /* RW */ + unsigned long ni0_aoerr0:1; /* RW */ + unsigned long ni1_aoerr0:1; /* RW */ + unsigned long lb_aoerr1:1; /* RW */ + unsigned long qp_aoerr1:1; /* RW */ + unsigned long rh_aoerr1:1; /* RW */ + unsigned long lh0_aoerr1:1; /* RW */ + unsigned long lh1_aoerr1:1; /* RW */ + unsigned long gr0_aoerr1:1; /* RW */ + unsigned long gr1_aoerr1:1; /* RW */ + unsigned long xb_aoerr1:1; /* RW */ + unsigned long rt_aoerr1:1; /* RW */ + unsigned long ni0_aoerr1:1; /* RW */ + unsigned long ni1_aoerr1:1; /* RW */ + unsigned long system_shutdown_int:1; /* RW */ + unsigned long lb_irq_int_0:1; /* RW */ + unsigned long lb_irq_int_1:1; /* RW */ + unsigned long lb_irq_int_2:1; /* RW */ + unsigned long lb_irq_int_3:1; /* RW */ + unsigned long lb_irq_int_4:1; /* RW */ + unsigned long lb_irq_int_5:1; /* RW */ + unsigned long lb_irq_int_6:1; /* RW */ + unsigned long lb_irq_int_7:1; /* RW */ + unsigned long lb_irq_int_8:1; /* RW */ + unsigned long lb_irq_int_9:1; /* RW */ + unsigned long lb_irq_int_10:1; /* RW */ + unsigned long lb_irq_int_11:1; /* RW */ + unsigned long lb_irq_int_12:1; /* RW */ + unsigned long lb_irq_int_13:1; /* RW */ + unsigned long lb_irq_int_14:1; /* RW */ + unsigned long lb_irq_int_15:1; /* RW */ + unsigned long l1_nmi_int:1; /* RW */ + unsigned long stop_clock:1; /* RW */ + unsigned long asic_to_l1:1; /* RW */ + unsigned long l1_to_asic:1; /* RW */ + unsigned long la_seq_trigger:1; /* RW */ + unsigned long ipi_int:1; /* RW */ + unsigned long extio_int0:1; /* RW */ + unsigned long extio_int1:1; /* RW */ + 
unsigned long extio_int2:1; /* RW */ + unsigned long extio_int3:1; /* RW */ + unsigned long profile_int:1; /* RW */ + unsigned long rsvd_59_63:5; + } s2; }; /* ========================================================================= */ /* UVH_EVENT_OCCURRED0_ALIAS */ /* ========================================================================= */ #define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL -#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0 + + +/* ========================================================================= */ +/* UVH_EVENT_OCCURRED1 */ +/* ========================================================================= */ +#define UVH_EVENT_OCCURRED1 0x70080UL + + + +/* UVYH common defines */ +#define UVYH_EVENT_OCCURRED1_IPI_INT_SHFT 0 +#define UVYH_EVENT_OCCURRED1_IPI_INT_MASK 0x0000000000000001UL +#define UVYH_EVENT_OCCURRED1_EXTIO_INT0_SHFT 1 +#define UVYH_EVENT_OCCURRED1_EXTIO_INT0_MASK 0x0000000000000002UL +#define UVYH_EVENT_OCCURRED1_EXTIO_INT1_SHFT 2 +#define UVYH_EVENT_OCCURRED1_EXTIO_INT1_MASK 0x0000000000000004UL +#define UVYH_EVENT_OCCURRED1_EXTIO_INT2_SHFT 3 +#define UVYH_EVENT_OCCURRED1_EXTIO_INT2_MASK 0x0000000000000008UL +#define UVYH_EVENT_OCCURRED1_EXTIO_INT3_SHFT 4 +#define UVYH_EVENT_OCCURRED1_EXTIO_INT3_MASK 0x0000000000000010UL +#define UVYH_EVENT_OCCURRED1_PROFILE_INT_SHFT 5 +#define UVYH_EVENT_OCCURRED1_PROFILE_INT_MASK 0x0000000000000020UL +#define UVYH_EVENT_OCCURRED1_BAU_DATA_SHFT 6 +#define UVYH_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000040UL +#define UVYH_EVENT_OCCURRED1_PROC_GENERAL_SHFT 7 +#define UVYH_EVENT_OCCURRED1_PROC_GENERAL_MASK 0x0000000000000080UL +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT0_SHFT 8 +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT0_MASK 0x0000000000000100UL +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT1_SHFT 9 +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT1_MASK 0x0000000000000200UL +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT2_SHFT 10 +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT2_MASK 0x0000000000000400UL +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT3_SHFT 11 +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT3_MASK 0x0000000000000800UL +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT4_SHFT 12 +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT4_MASK 0x0000000000001000UL +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT5_SHFT 13 +#define UVYH_EVENT_OCCURRED1_XH_TLB_INT5_MASK 0x0000000000002000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT0_SHFT 14 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT0_MASK 0x0000000000004000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT1_SHFT 15 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT1_MASK 0x0000000000008000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT2_SHFT 16 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT2_MASK 0x0000000000010000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT3_SHFT 17 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT3_MASK 0x0000000000020000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT4_SHFT 18 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT4_MASK 0x0000000000040000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT5_SHFT 19 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT5_MASK 0x0000000000080000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT6_SHFT 20 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT6_MASK 0x0000000000100000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT7_SHFT 21 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT7_MASK 0x0000000000200000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT8_SHFT 22 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT8_MASK 0x0000000000400000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT9_SHFT 23 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT9_MASK 0x0000000000800000UL +#define 
UVYH_EVENT_OCCURRED1_RDM_TLB_INT10_SHFT 24 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT10_MASK 0x0000000001000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT11_SHFT 25 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT11_MASK 0x0000000002000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT12_SHFT 26 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT12_MASK 0x0000000004000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT13_SHFT 27 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT13_MASK 0x0000000008000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT14_SHFT 28 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT14_MASK 0x0000000010000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT15_SHFT 29 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT15_MASK 0x0000000020000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT16_SHFT 30 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT16_MASK 0x0000000040000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT17_SHFT 31 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT17_MASK 0x0000000080000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT18_SHFT 32 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT18_MASK 0x0000000100000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT19_SHFT 33 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT19_MASK 0x0000000200000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT20_SHFT 34 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT20_MASK 0x0000000400000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT21_SHFT 35 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT21_MASK 0x0000000800000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT22_SHFT 36 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT22_MASK 0x0000001000000000UL +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT23_SHFT 37 +#define UVYH_EVENT_OCCURRED1_RDM_TLB_INT23_MASK 0x0000002000000000UL + +/* UV4 unique defines */ +#define UV4H_EVENT_OCCURRED1_PROFILE_INT_SHFT 0 +#define UV4H_EVENT_OCCURRED1_PROFILE_INT_MASK 0x0000000000000001UL +#define UV4H_EVENT_OCCURRED1_BAU_DATA_SHFT 1 +#define UV4H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000002UL +#define UV4H_EVENT_OCCURRED1_PROC_GENERAL_SHFT 2 +#define UV4H_EVENT_OCCURRED1_PROC_GENERAL_MASK 0x0000000000000004UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 3 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000000008UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 4 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000000010UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 5 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000000020UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 6 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000000040UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 7 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000000080UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 8 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000000100UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 9 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000000000200UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 10 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000000000400UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 11 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000000000800UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 12 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000000001000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 13 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000000002000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 14 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000000004000UL +#define 
UV4H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 15 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000000008000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 16 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000000010000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 17 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000000020000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 18 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000000040000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT16_SHFT 19 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT16_MASK 0x0000000000080000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT17_SHFT 20 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT17_MASK 0x0000000000100000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT18_SHFT 21 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT18_MASK 0x0000000000200000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT19_SHFT 22 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT19_MASK 0x0000000000400000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT20_SHFT 23 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT20_MASK 0x0000000000800000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT21_SHFT 24 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT21_MASK 0x0000000001000000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT22_SHFT 25 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT22_MASK 0x0000000002000000UL +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT23_SHFT 26 +#define UV4H_EVENT_OCCURRED1_GR0_TLB_INT23_MASK 0x0000000004000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 27 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000008000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 28 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000010000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 29 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000000020000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 30 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000000040000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 31 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000000080000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 32 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000000100000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 33 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000000200000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 34 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000000400000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 35 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000000800000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 36 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000001000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 37 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000002000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 38 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000004000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 39 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000008000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 40 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000010000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 41 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0000020000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 42 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0000040000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT16_SHFT 43 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT16_MASK 0x0000080000000000UL +#define 
UV4H_EVENT_OCCURRED1_GR1_TLB_INT17_SHFT 44 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT17_MASK 0x0000100000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT18_SHFT 45 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT18_MASK 0x0000200000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT19_SHFT 46 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT19_MASK 0x0000400000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT20_SHFT 47 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT20_MASK 0x0000800000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT21_SHFT 48 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT21_MASK 0x0001000000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT22_SHFT 49 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT22_MASK 0x0002000000000000UL +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT23_SHFT 50 +#define UV4H_EVENT_OCCURRED1_GR1_TLB_INT23_MASK 0x0004000000000000UL + +/* UV3 unique defines */ +#define UV3H_EVENT_OCCURRED1_BAU_DATA_SHFT 0 +#define UV3H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000001UL +#define UV3H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_SHFT 1 +#define UV3H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_MASK 0x0000000000000002UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_SHFT 2 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000004UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_SHFT 3 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000008UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_SHFT 4 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000010UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_SHFT 5 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000020UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_SHFT 6 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000040UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_SHFT 7 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000080UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_SHFT 8 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000100UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_SHFT 9 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000200UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_SHFT 10 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000400UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_SHFT 11 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000800UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_SHFT 12 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000001000UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_SHFT 13 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000002000UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_SHFT 14 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000004000UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_SHFT 15 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000008000UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_SHFT 16 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000010000UL +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_SHFT 17 +#define UV3H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000020000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 18 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000040000UL 
+#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 19 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000080000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 20 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000100000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 21 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000200000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 22 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000400000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 23 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000800000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 24 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000001000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 25 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000002000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 26 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000004000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 27 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000008000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 28 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000010000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 29 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000020000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 30 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000040000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 31 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000080000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 32 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000100000000UL +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 33 +#define UV3H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000200000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 34 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000400000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 35 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000800000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 36 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000001000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 37 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000002000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 38 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000004000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 39 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000008000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 40 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000010000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 41 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000020000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 42 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000040000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 43 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000080000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 44 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000100000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 45 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000200000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 46 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000400000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 47 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000800000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 48 
+#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0001000000000000UL +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 49 +#define UV3H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0002000000000000UL +#define UV3H_EVENT_OCCURRED1_RTC_INTERVAL_INT_SHFT 50 +#define UV3H_EVENT_OCCURRED1_RTC_INTERVAL_INT_MASK 0x0004000000000000UL +#define UV3H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_SHFT 51 +#define UV3H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_MASK 0x0008000000000000UL + +/* UV2 unique defines */ +#define UV2H_EVENT_OCCURRED1_BAU_DATA_SHFT 0 +#define UV2H_EVENT_OCCURRED1_BAU_DATA_MASK 0x0000000000000001UL +#define UV2H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_SHFT 1 +#define UV2H_EVENT_OCCURRED1_POWER_MANAGEMENT_REQ_MASK 0x0000000000000002UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_SHFT 2 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000004UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_SHFT 3 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000008UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_SHFT 4 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000010UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_SHFT 5 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000020UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_SHFT 6 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000040UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_SHFT 7 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000080UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_SHFT 8 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000100UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_SHFT 9 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000200UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_SHFT 10 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000400UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_SHFT 11 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000800UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_SHFT 12 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000001000UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_SHFT 13 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000002000UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_SHFT 14 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000004000UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_SHFT 15 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000008000UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_SHFT 16 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000010000UL +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_SHFT 17 +#define UV2H_EVENT_OCCURRED1_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000020000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT0_SHFT 18 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT0_MASK 0x0000000000040000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT1_SHFT 19 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT1_MASK 0x0000000000080000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT2_SHFT 20 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT2_MASK 0x0000000000100000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT3_SHFT 21 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT3_MASK 0x0000000000200000UL +#define 
UV2H_EVENT_OCCURRED1_GR0_TLB_INT4_SHFT 22 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT4_MASK 0x0000000000400000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT5_SHFT 23 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT5_MASK 0x0000000000800000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT6_SHFT 24 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT6_MASK 0x0000000001000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT7_SHFT 25 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT7_MASK 0x0000000002000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT8_SHFT 26 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT8_MASK 0x0000000004000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT9_SHFT 27 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT9_MASK 0x0000000008000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT10_SHFT 28 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT10_MASK 0x0000000010000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT11_SHFT 29 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT11_MASK 0x0000000020000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT12_SHFT 30 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT12_MASK 0x0000000040000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT13_SHFT 31 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT13_MASK 0x0000000080000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT14_SHFT 32 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT14_MASK 0x0000000100000000UL +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT15_SHFT 33 +#define UV2H_EVENT_OCCURRED1_GR0_TLB_INT15_MASK 0x0000000200000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT0_SHFT 34 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT0_MASK 0x0000000400000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT1_SHFT 35 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT1_MASK 0x0000000800000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT2_SHFT 36 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT2_MASK 0x0000001000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT3_SHFT 37 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT3_MASK 0x0000002000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT4_SHFT 38 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT4_MASK 0x0000004000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT5_SHFT 39 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT5_MASK 0x0000008000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT6_SHFT 40 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT6_MASK 0x0000010000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT7_SHFT 41 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT7_MASK 0x0000020000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT8_SHFT 42 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT8_MASK 0x0000040000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT9_SHFT 43 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT9_MASK 0x0000080000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT10_SHFT 44 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT10_MASK 0x0000100000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT11_SHFT 45 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT11_MASK 0x0000200000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT12_SHFT 46 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT12_MASK 0x0000400000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT13_SHFT 47 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT13_MASK 0x0000800000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT14_SHFT 48 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT14_MASK 0x0001000000000000UL +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT15_SHFT 49 +#define UV2H_EVENT_OCCURRED1_GR1_TLB_INT15_MASK 0x0002000000000000UL +#define UV2H_EVENT_OCCURRED1_RTC_INTERVAL_INT_SHFT 50 +#define UV2H_EVENT_OCCURRED1_RTC_INTERVAL_INT_MASK 0x0004000000000000UL +#define 
UV2H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_SHFT 51 +#define UV2H_EVENT_OCCURRED1_BAU_DASHBOARD_INT_MASK 0x0008000000000000UL + +#define UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK ( \ + is_uv(UV5) ? 0x0000000000000002UL : \ + 0) +#define UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT ( \ + is_uv(UV5) ? 1 : \ + -1) + +union uvyh_event_occurred1_u { + unsigned long v; + + /* UVYH common struct */ + struct uvyh_event_occurred1_s { + unsigned long ipi_int:1; /* RW */ + unsigned long extio_int0:1; /* RW */ + unsigned long extio_int1:1; /* RW */ + unsigned long extio_int2:1; /* RW */ + unsigned long extio_int3:1; /* RW */ + unsigned long profile_int:1; /* RW */ + unsigned long bau_data:1; /* RW */ + unsigned long proc_general:1; /* RW */ + unsigned long xh_tlb_int0:1; /* RW */ + unsigned long xh_tlb_int1:1; /* RW */ + unsigned long xh_tlb_int2:1; /* RW */ + unsigned long xh_tlb_int3:1; /* RW */ + unsigned long xh_tlb_int4:1; /* RW */ + unsigned long xh_tlb_int5:1; /* RW */ + unsigned long rdm_tlb_int0:1; /* RW */ + unsigned long rdm_tlb_int1:1; /* RW */ + unsigned long rdm_tlb_int2:1; /* RW */ + unsigned long rdm_tlb_int3:1; /* RW */ + unsigned long rdm_tlb_int4:1; /* RW */ + unsigned long rdm_tlb_int5:1; /* RW */ + unsigned long rdm_tlb_int6:1; /* RW */ + unsigned long rdm_tlb_int7:1; /* RW */ + unsigned long rdm_tlb_int8:1; /* RW */ + unsigned long rdm_tlb_int9:1; /* RW */ + unsigned long rdm_tlb_int10:1; /* RW */ + unsigned long rdm_tlb_int11:1; /* RW */ + unsigned long rdm_tlb_int12:1; /* RW */ + unsigned long rdm_tlb_int13:1; /* RW */ + unsigned long rdm_tlb_int14:1; /* RW */ + unsigned long rdm_tlb_int15:1; /* RW */ + unsigned long rdm_tlb_int16:1; /* RW */ + unsigned long rdm_tlb_int17:1; /* RW */ + unsigned long rdm_tlb_int18:1; /* RW */ + unsigned long rdm_tlb_int19:1; /* RW */ + unsigned long rdm_tlb_int20:1; /* RW */ + unsigned long rdm_tlb_int21:1; /* RW */ + unsigned long rdm_tlb_int22:1; /* RW */ + unsigned long rdm_tlb_int23:1; /* RW */ + unsigned long rsvd_38_63:26; + } sy; + + /* UV5 unique struct */ + struct uv5h_event_occurred1_s { + unsigned long ipi_int:1; /* RW */ + unsigned long extio_int0:1; /* RW */ + unsigned long extio_int1:1; /* RW */ + unsigned long extio_int2:1; /* RW */ + unsigned long extio_int3:1; /* RW */ + unsigned long profile_int:1; /* RW */ + unsigned long bau_data:1; /* RW */ + unsigned long proc_general:1; /* RW */ + unsigned long xh_tlb_int0:1; /* RW */ + unsigned long xh_tlb_int1:1; /* RW */ + unsigned long xh_tlb_int2:1; /* RW */ + unsigned long xh_tlb_int3:1; /* RW */ + unsigned long xh_tlb_int4:1; /* RW */ + unsigned long xh_tlb_int5:1; /* RW */ + unsigned long rdm_tlb_int0:1; /* RW */ + unsigned long rdm_tlb_int1:1; /* RW */ + unsigned long rdm_tlb_int2:1; /* RW */ + unsigned long rdm_tlb_int3:1; /* RW */ + unsigned long rdm_tlb_int4:1; /* RW */ + unsigned long rdm_tlb_int5:1; /* RW */ + unsigned long rdm_tlb_int6:1; /* RW */ + unsigned long rdm_tlb_int7:1; /* RW */ + unsigned long rdm_tlb_int8:1; /* RW */ + unsigned long rdm_tlb_int9:1; /* RW */ + unsigned long rdm_tlb_int10:1; /* RW */ + unsigned long rdm_tlb_int11:1; /* RW */ + unsigned long rdm_tlb_int12:1; /* RW */ + unsigned long rdm_tlb_int13:1; /* RW */ + unsigned long rdm_tlb_int14:1; /* RW */ + unsigned long rdm_tlb_int15:1; /* RW */ + unsigned long rdm_tlb_int16:1; /* RW */ + unsigned long rdm_tlb_int17:1; /* RW */ + unsigned long rdm_tlb_int18:1; /* RW */ + unsigned long rdm_tlb_int19:1; /* RW */ + unsigned long rdm_tlb_int20:1; /* RW */ + unsigned long rdm_tlb_int21:1; /* RW */ + unsigned long rdm_tlb_int22:1; 
/* RW */ + unsigned long rdm_tlb_int23:1; /* RW */ + unsigned long rsvd_38_63:26; + } s5; + + /* UV4 unique struct */ + struct uv4h_event_occurred1_s { + unsigned long profile_int:1; /* RW */ + unsigned long bau_data:1; /* RW */ + unsigned long proc_general:1; /* RW */ + unsigned long gr0_tlb_int0:1; /* RW */ + unsigned long gr0_tlb_int1:1; /* RW */ + unsigned long gr0_tlb_int2:1; /* RW */ + unsigned long gr0_tlb_int3:1; /* RW */ + unsigned long gr0_tlb_int4:1; /* RW */ + unsigned long gr0_tlb_int5:1; /* RW */ + unsigned long gr0_tlb_int6:1; /* RW */ + unsigned long gr0_tlb_int7:1; /* RW */ + unsigned long gr0_tlb_int8:1; /* RW */ + unsigned long gr0_tlb_int9:1; /* RW */ + unsigned long gr0_tlb_int10:1; /* RW */ + unsigned long gr0_tlb_int11:1; /* RW */ + unsigned long gr0_tlb_int12:1; /* RW */ + unsigned long gr0_tlb_int13:1; /* RW */ + unsigned long gr0_tlb_int14:1; /* RW */ + unsigned long gr0_tlb_int15:1; /* RW */ + unsigned long gr0_tlb_int16:1; /* RW */ + unsigned long gr0_tlb_int17:1; /* RW */ + unsigned long gr0_tlb_int18:1; /* RW */ + unsigned long gr0_tlb_int19:1; /* RW */ + unsigned long gr0_tlb_int20:1; /* RW */ + unsigned long gr0_tlb_int21:1; /* RW */ + unsigned long gr0_tlb_int22:1; /* RW */ + unsigned long gr0_tlb_int23:1; /* RW */ + unsigned long gr1_tlb_int0:1; /* RW */ + unsigned long gr1_tlb_int1:1; /* RW */ + unsigned long gr1_tlb_int2:1; /* RW */ + unsigned long gr1_tlb_int3:1; /* RW */ + unsigned long gr1_tlb_int4:1; /* RW */ + unsigned long gr1_tlb_int5:1; /* RW */ + unsigned long gr1_tlb_int6:1; /* RW */ + unsigned long gr1_tlb_int7:1; /* RW */ + unsigned long gr1_tlb_int8:1; /* RW */ + unsigned long gr1_tlb_int9:1; /* RW */ + unsigned long gr1_tlb_int10:1; /* RW */ + unsigned long gr1_tlb_int11:1; /* RW */ + unsigned long gr1_tlb_int12:1; /* RW */ + unsigned long gr1_tlb_int13:1; /* RW */ + unsigned long gr1_tlb_int14:1; /* RW */ + unsigned long gr1_tlb_int15:1; /* RW */ + unsigned long gr1_tlb_int16:1; /* RW */ + unsigned long gr1_tlb_int17:1; /* RW */ + unsigned long gr1_tlb_int18:1; /* RW */ + unsigned long gr1_tlb_int19:1; /* RW */ + unsigned long gr1_tlb_int20:1; /* RW */ + unsigned long gr1_tlb_int21:1; /* RW */ + unsigned long gr1_tlb_int22:1; /* RW */ + unsigned long gr1_tlb_int23:1; /* RW */ + unsigned long rsvd_51_63:13; + } s4; + + /* UV3 unique struct */ + struct uv3h_event_occurred1_s { + unsigned long bau_data:1; /* RW */ + unsigned long power_management_req:1; /* RW */ + unsigned long message_accelerator_int0:1; /* RW */ + unsigned long message_accelerator_int1:1; /* RW */ + unsigned long message_accelerator_int2:1; /* RW */ + unsigned long message_accelerator_int3:1; /* RW */ + unsigned long message_accelerator_int4:1; /* RW */ + unsigned long message_accelerator_int5:1; /* RW */ + unsigned long message_accelerator_int6:1; /* RW */ + unsigned long message_accelerator_int7:1; /* RW */ + unsigned long message_accelerator_int8:1; /* RW */ + unsigned long message_accelerator_int9:1; /* RW */ + unsigned long message_accelerator_int10:1; /* RW */ + unsigned long message_accelerator_int11:1; /* RW */ + unsigned long message_accelerator_int12:1; /* RW */ + unsigned long message_accelerator_int13:1; /* RW */ + unsigned long message_accelerator_int14:1; /* RW */ + unsigned long message_accelerator_int15:1; /* RW */ + unsigned long gr0_tlb_int0:1; /* RW */ + unsigned long gr0_tlb_int1:1; /* RW */ + unsigned long gr0_tlb_int2:1; /* RW */ + unsigned long gr0_tlb_int3:1; /* RW */ + unsigned long gr0_tlb_int4:1; /* RW */ + unsigned long gr0_tlb_int5:1; /* RW */ + 
unsigned long gr0_tlb_int6:1; /* RW */ + unsigned long gr0_tlb_int7:1; /* RW */ + unsigned long gr0_tlb_int8:1; /* RW */ + unsigned long gr0_tlb_int9:1; /* RW */ + unsigned long gr0_tlb_int10:1; /* RW */ + unsigned long gr0_tlb_int11:1; /* RW */ + unsigned long gr0_tlb_int12:1; /* RW */ + unsigned long gr0_tlb_int13:1; /* RW */ + unsigned long gr0_tlb_int14:1; /* RW */ + unsigned long gr0_tlb_int15:1; /* RW */ + unsigned long gr1_tlb_int0:1; /* RW */ + unsigned long gr1_tlb_int1:1; /* RW */ + unsigned long gr1_tlb_int2:1; /* RW */ + unsigned long gr1_tlb_int3:1; /* RW */ + unsigned long gr1_tlb_int4:1; /* RW */ + unsigned long gr1_tlb_int5:1; /* RW */ + unsigned long gr1_tlb_int6:1; /* RW */ + unsigned long gr1_tlb_int7:1; /* RW */ + unsigned long gr1_tlb_int8:1; /* RW */ + unsigned long gr1_tlb_int9:1; /* RW */ + unsigned long gr1_tlb_int10:1; /* RW */ + unsigned long gr1_tlb_int11:1; /* RW */ + unsigned long gr1_tlb_int12:1; /* RW */ + unsigned long gr1_tlb_int13:1; /* RW */ + unsigned long gr1_tlb_int14:1; /* RW */ + unsigned long gr1_tlb_int15:1; /* RW */ + unsigned long rtc_interval_int:1; /* RW */ + unsigned long bau_dashboard_int:1; /* RW */ + unsigned long rsvd_52_63:12; + } s3; + + /* UV2 unique struct */ + struct uv2h_event_occurred1_s { + unsigned long bau_data:1; /* RW */ + unsigned long power_management_req:1; /* RW */ + unsigned long message_accelerator_int0:1; /* RW */ + unsigned long message_accelerator_int1:1; /* RW */ + unsigned long message_accelerator_int2:1; /* RW */ + unsigned long message_accelerator_int3:1; /* RW */ + unsigned long message_accelerator_int4:1; /* RW */ + unsigned long message_accelerator_int5:1; /* RW */ + unsigned long message_accelerator_int6:1; /* RW */ + unsigned long message_accelerator_int7:1; /* RW */ + unsigned long message_accelerator_int8:1; /* RW */ + unsigned long message_accelerator_int9:1; /* RW */ + unsigned long message_accelerator_int10:1; /* RW */ + unsigned long message_accelerator_int11:1; /* RW */ + unsigned long message_accelerator_int12:1; /* RW */ + unsigned long message_accelerator_int13:1; /* RW */ + unsigned long message_accelerator_int14:1; /* RW */ + unsigned long message_accelerator_int15:1; /* RW */ + unsigned long gr0_tlb_int0:1; /* RW */ + unsigned long gr0_tlb_int1:1; /* RW */ + unsigned long gr0_tlb_int2:1; /* RW */ + unsigned long gr0_tlb_int3:1; /* RW */ + unsigned long gr0_tlb_int4:1; /* RW */ + unsigned long gr0_tlb_int5:1; /* RW */ + unsigned long gr0_tlb_int6:1; /* RW */ + unsigned long gr0_tlb_int7:1; /* RW */ + unsigned long gr0_tlb_int8:1; /* RW */ + unsigned long gr0_tlb_int9:1; /* RW */ + unsigned long gr0_tlb_int10:1; /* RW */ + unsigned long gr0_tlb_int11:1; /* RW */ + unsigned long gr0_tlb_int12:1; /* RW */ + unsigned long gr0_tlb_int13:1; /* RW */ + unsigned long gr0_tlb_int14:1; /* RW */ + unsigned long gr0_tlb_int15:1; /* RW */ + unsigned long gr1_tlb_int0:1; /* RW */ + unsigned long gr1_tlb_int1:1; /* RW */ + unsigned long gr1_tlb_int2:1; /* RW */ + unsigned long gr1_tlb_int3:1; /* RW */ + unsigned long gr1_tlb_int4:1; /* RW */ + unsigned long gr1_tlb_int5:1; /* RW */ + unsigned long gr1_tlb_int6:1; /* RW */ + unsigned long gr1_tlb_int7:1; /* RW */ + unsigned long gr1_tlb_int8:1; /* RW */ + unsigned long gr1_tlb_int9:1; /* RW */ + unsigned long gr1_tlb_int10:1; /* RW */ + unsigned long gr1_tlb_int11:1; /* RW */ + unsigned long gr1_tlb_int12:1; /* RW */ + unsigned long gr1_tlb_int13:1; /* RW */ + unsigned long gr1_tlb_int14:1; /* RW */ + unsigned long gr1_tlb_int15:1; /* RW */ + unsigned long 
rtc_interval_int:1; /* RW */ + unsigned long bau_dashboard_int:1; /* RW */ + unsigned long rsvd_52_63:12; + } s2; +}; + +/* ========================================================================= */ +/* UVH_EVENT_OCCURRED1_ALIAS */ +/* ========================================================================= */ +#define UVH_EVENT_OCCURRED1_ALIAS 0x70088UL + + +/* ========================================================================= */ +/* UVH_EVENT_OCCURRED2 */ +/* ========================================================================= */ +#define UVH_EVENT_OCCURRED2 0x70100UL + + + +/* UVYH common defines */ +#define UVYH_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 0 +#define UVYH_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000000001UL +#define UVYH_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 1 +#define UVYH_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000000002UL +#define UVYH_EVENT_OCCURRED2_RTC_0_SHFT 2 +#define UVYH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000004UL +#define UVYH_EVENT_OCCURRED2_RTC_1_SHFT 3 +#define UVYH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000008UL +#define UVYH_EVENT_OCCURRED2_RTC_2_SHFT 4 +#define UVYH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000010UL +#define UVYH_EVENT_OCCURRED2_RTC_3_SHFT 5 +#define UVYH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000020UL +#define UVYH_EVENT_OCCURRED2_RTC_4_SHFT 6 +#define UVYH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000040UL +#define UVYH_EVENT_OCCURRED2_RTC_5_SHFT 7 +#define UVYH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000080UL +#define UVYH_EVENT_OCCURRED2_RTC_6_SHFT 8 +#define UVYH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000100UL +#define UVYH_EVENT_OCCURRED2_RTC_7_SHFT 9 +#define UVYH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000200UL +#define UVYH_EVENT_OCCURRED2_RTC_8_SHFT 10 +#define UVYH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000400UL +#define UVYH_EVENT_OCCURRED2_RTC_9_SHFT 11 +#define UVYH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000800UL +#define UVYH_EVENT_OCCURRED2_RTC_10_SHFT 12 +#define UVYH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000001000UL +#define UVYH_EVENT_OCCURRED2_RTC_11_SHFT 13 +#define UVYH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000002000UL +#define UVYH_EVENT_OCCURRED2_RTC_12_SHFT 14 +#define UVYH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000004000UL +#define UVYH_EVENT_OCCURRED2_RTC_13_SHFT 15 +#define UVYH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000008000UL +#define UVYH_EVENT_OCCURRED2_RTC_14_SHFT 16 +#define UVYH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000010000UL +#define UVYH_EVENT_OCCURRED2_RTC_15_SHFT 17 +#define UVYH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000020000UL +#define UVYH_EVENT_OCCURRED2_RTC_16_SHFT 18 +#define UVYH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000040000UL +#define UVYH_EVENT_OCCURRED2_RTC_17_SHFT 19 +#define UVYH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000080000UL +#define UVYH_EVENT_OCCURRED2_RTC_18_SHFT 20 +#define UVYH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000100000UL +#define UVYH_EVENT_OCCURRED2_RTC_19_SHFT 21 +#define UVYH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000200000UL +#define UVYH_EVENT_OCCURRED2_RTC_20_SHFT 22 +#define UVYH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000400000UL +#define UVYH_EVENT_OCCURRED2_RTC_21_SHFT 23 +#define UVYH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000800000UL +#define UVYH_EVENT_OCCURRED2_RTC_22_SHFT 24 +#define UVYH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000001000000UL +#define UVYH_EVENT_OCCURRED2_RTC_23_SHFT 25 +#define UVYH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000002000000UL +#define UVYH_EVENT_OCCURRED2_RTC_24_SHFT 26 +#define UVYH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000004000000UL 
+#define UVYH_EVENT_OCCURRED2_RTC_25_SHFT 27 +#define UVYH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000008000000UL +#define UVYH_EVENT_OCCURRED2_RTC_26_SHFT 28 +#define UVYH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000010000000UL +#define UVYH_EVENT_OCCURRED2_RTC_27_SHFT 29 +#define UVYH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000020000000UL +#define UVYH_EVENT_OCCURRED2_RTC_28_SHFT 30 +#define UVYH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000040000000UL +#define UVYH_EVENT_OCCURRED2_RTC_29_SHFT 31 +#define UVYH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000080000000UL +#define UVYH_EVENT_OCCURRED2_RTC_30_SHFT 32 +#define UVYH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000100000000UL +#define UVYH_EVENT_OCCURRED2_RTC_31_SHFT 33 +#define UVYH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000200000000UL + +/* UV4 unique defines */ +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_SHFT 0 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000001UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_SHFT 1 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000002UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_SHFT 2 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000004UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_SHFT 3 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000008UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_SHFT 4 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000010UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_SHFT 5 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000020UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_SHFT 6 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000040UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_SHFT 7 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000080UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_SHFT 8 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000100UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_SHFT 9 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000200UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_SHFT 10 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000000400UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_SHFT 11 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000000800UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_SHFT 12 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000001000UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_SHFT 13 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000002000UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_SHFT 14 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000004000UL +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_SHFT 15 +#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000008000UL +#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 16 +#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000010000UL +#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 17 +#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000020000UL +#define UV4H_EVENT_OCCURRED2_RTC_0_SHFT 18 +#define UV4H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000040000UL +#define UV4H_EVENT_OCCURRED2_RTC_1_SHFT 19 +#define UV4H_EVENT_OCCURRED2_RTC_1_MASK 
0x0000000000080000UL +#define UV4H_EVENT_OCCURRED2_RTC_2_SHFT 20 +#define UV4H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000100000UL +#define UV4H_EVENT_OCCURRED2_RTC_3_SHFT 21 +#define UV4H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000200000UL +#define UV4H_EVENT_OCCURRED2_RTC_4_SHFT 22 +#define UV4H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000400000UL +#define UV4H_EVENT_OCCURRED2_RTC_5_SHFT 23 +#define UV4H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000800000UL +#define UV4H_EVENT_OCCURRED2_RTC_6_SHFT 24 +#define UV4H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000001000000UL +#define UV4H_EVENT_OCCURRED2_RTC_7_SHFT 25 +#define UV4H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000002000000UL +#define UV4H_EVENT_OCCURRED2_RTC_8_SHFT 26 +#define UV4H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000004000000UL +#define UV4H_EVENT_OCCURRED2_RTC_9_SHFT 27 +#define UV4H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000008000000UL +#define UV4H_EVENT_OCCURRED2_RTC_10_SHFT 28 +#define UV4H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000010000000UL +#define UV4H_EVENT_OCCURRED2_RTC_11_SHFT 29 +#define UV4H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000020000000UL +#define UV4H_EVENT_OCCURRED2_RTC_12_SHFT 30 +#define UV4H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000040000000UL +#define UV4H_EVENT_OCCURRED2_RTC_13_SHFT 31 +#define UV4H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000080000000UL +#define UV4H_EVENT_OCCURRED2_RTC_14_SHFT 32 +#define UV4H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000100000000UL +#define UV4H_EVENT_OCCURRED2_RTC_15_SHFT 33 +#define UV4H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000200000000UL +#define UV4H_EVENT_OCCURRED2_RTC_16_SHFT 34 +#define UV4H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000400000000UL +#define UV4H_EVENT_OCCURRED2_RTC_17_SHFT 35 +#define UV4H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000800000000UL +#define UV4H_EVENT_OCCURRED2_RTC_18_SHFT 36 +#define UV4H_EVENT_OCCURRED2_RTC_18_MASK 0x0000001000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_19_SHFT 37 +#define UV4H_EVENT_OCCURRED2_RTC_19_MASK 0x0000002000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_20_SHFT 38 +#define UV4H_EVENT_OCCURRED2_RTC_20_MASK 0x0000004000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_21_SHFT 39 +#define UV4H_EVENT_OCCURRED2_RTC_21_MASK 0x0000008000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_22_SHFT 40 +#define UV4H_EVENT_OCCURRED2_RTC_22_MASK 0x0000010000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_23_SHFT 41 +#define UV4H_EVENT_OCCURRED2_RTC_23_MASK 0x0000020000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_24_SHFT 42 +#define UV4H_EVENT_OCCURRED2_RTC_24_MASK 0x0000040000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_25_SHFT 43 +#define UV4H_EVENT_OCCURRED2_RTC_25_MASK 0x0000080000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_26_SHFT 44 +#define UV4H_EVENT_OCCURRED2_RTC_26_MASK 0x0000100000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_27_SHFT 45 +#define UV4H_EVENT_OCCURRED2_RTC_27_MASK 0x0000200000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_28_SHFT 46 +#define UV4H_EVENT_OCCURRED2_RTC_28_MASK 0x0000400000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_29_SHFT 47 +#define UV4H_EVENT_OCCURRED2_RTC_29_MASK 0x0000800000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_30_SHFT 48 +#define UV4H_EVENT_OCCURRED2_RTC_30_MASK 0x0001000000000000UL +#define UV4H_EVENT_OCCURRED2_RTC_31_SHFT 49 +#define UV4H_EVENT_OCCURRED2_RTC_31_MASK 0x0002000000000000UL + +/* UV3 unique defines */ +#define UV3H_EVENT_OCCURRED2_RTC_0_SHFT 0 +#define UV3H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL +#define UV3H_EVENT_OCCURRED2_RTC_1_SHFT 1 +#define UV3H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL +#define UV3H_EVENT_OCCURRED2_RTC_2_SHFT 2 +#define 
UV3H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL +#define UV3H_EVENT_OCCURRED2_RTC_3_SHFT 3 +#define UV3H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL +#define UV3H_EVENT_OCCURRED2_RTC_4_SHFT 4 +#define UV3H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL +#define UV3H_EVENT_OCCURRED2_RTC_5_SHFT 5 +#define UV3H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL +#define UV3H_EVENT_OCCURRED2_RTC_6_SHFT 6 +#define UV3H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL +#define UV3H_EVENT_OCCURRED2_RTC_7_SHFT 7 +#define UV3H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL +#define UV3H_EVENT_OCCURRED2_RTC_8_SHFT 8 +#define UV3H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL +#define UV3H_EVENT_OCCURRED2_RTC_9_SHFT 9 +#define UV3H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL +#define UV3H_EVENT_OCCURRED2_RTC_10_SHFT 10 +#define UV3H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL +#define UV3H_EVENT_OCCURRED2_RTC_11_SHFT 11 +#define UV3H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL +#define UV3H_EVENT_OCCURRED2_RTC_12_SHFT 12 +#define UV3H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL +#define UV3H_EVENT_OCCURRED2_RTC_13_SHFT 13 +#define UV3H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL +#define UV3H_EVENT_OCCURRED2_RTC_14_SHFT 14 +#define UV3H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL +#define UV3H_EVENT_OCCURRED2_RTC_15_SHFT 15 +#define UV3H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL +#define UV3H_EVENT_OCCURRED2_RTC_16_SHFT 16 +#define UV3H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL +#define UV3H_EVENT_OCCURRED2_RTC_17_SHFT 17 +#define UV3H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL +#define UV3H_EVENT_OCCURRED2_RTC_18_SHFT 18 +#define UV3H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL +#define UV3H_EVENT_OCCURRED2_RTC_19_SHFT 19 +#define UV3H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL +#define UV3H_EVENT_OCCURRED2_RTC_20_SHFT 20 +#define UV3H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL +#define UV3H_EVENT_OCCURRED2_RTC_21_SHFT 21 +#define UV3H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL +#define UV3H_EVENT_OCCURRED2_RTC_22_SHFT 22 +#define UV3H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL +#define UV3H_EVENT_OCCURRED2_RTC_23_SHFT 23 +#define UV3H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL +#define UV3H_EVENT_OCCURRED2_RTC_24_SHFT 24 +#define UV3H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL +#define UV3H_EVENT_OCCURRED2_RTC_25_SHFT 25 +#define UV3H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL +#define UV3H_EVENT_OCCURRED2_RTC_26_SHFT 26 +#define UV3H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL +#define UV3H_EVENT_OCCURRED2_RTC_27_SHFT 27 +#define UV3H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL +#define UV3H_EVENT_OCCURRED2_RTC_28_SHFT 28 +#define UV3H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL +#define UV3H_EVENT_OCCURRED2_RTC_29_SHFT 29 +#define UV3H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL +#define UV3H_EVENT_OCCURRED2_RTC_30_SHFT 30 +#define UV3H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL +#define UV3H_EVENT_OCCURRED2_RTC_31_SHFT 31 +#define UV3H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL + +/* UV2 unique defines */ +#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0 +#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL +#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1 +#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL +#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2 +#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL +#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3 
+#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL +#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4 +#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL +#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5 +#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL +#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6 +#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL +#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7 +#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL +#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8 +#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL +#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9 +#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL +#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10 +#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL +#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11 +#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL +#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12 +#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL +#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13 +#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL +#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14 +#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL +#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15 +#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL +#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16 +#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL +#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17 +#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL +#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18 +#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL +#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19 +#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL +#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20 +#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL +#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21 +#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL +#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22 +#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL +#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23 +#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL +#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24 +#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL +#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25 +#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL +#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26 +#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL +#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27 +#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL +#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28 +#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL +#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29 +#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL +#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30 +#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL +#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31 +#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL + +#define UVH_EVENT_OCCURRED2_RTC_1_MASK ( \ + is_uv(UV5) ? 0x0000000000000008UL : \ + is_uv(UV4) ? 0x0000000000080000UL : \ + is_uv(UV3) ? 0x0000000000000002UL : \ + is_uv(UV2) ? 0x0000000000000002UL : \ + 0) +#define UVH_EVENT_OCCURRED2_RTC_1_SHFT ( \ + is_uv(UV5) ? 3 : \ + is_uv(UV4) ? 19 : \ + is_uv(UV3) ? 1 : \ + is_uv(UV2) ? 
1 : \ + -1) + +union uvyh_event_occurred2_u { + unsigned long v; + + /* UVYH common struct */ + struct uvyh_event_occurred2_s { + unsigned long rtc_interval_int:1; /* RW */ + unsigned long bau_dashboard_int:1; /* RW */ + unsigned long rtc_0:1; /* RW */ + unsigned long rtc_1:1; /* RW */ + unsigned long rtc_2:1; /* RW */ + unsigned long rtc_3:1; /* RW */ + unsigned long rtc_4:1; /* RW */ + unsigned long rtc_5:1; /* RW */ + unsigned long rtc_6:1; /* RW */ + unsigned long rtc_7:1; /* RW */ + unsigned long rtc_8:1; /* RW */ + unsigned long rtc_9:1; /* RW */ + unsigned long rtc_10:1; /* RW */ + unsigned long rtc_11:1; /* RW */ + unsigned long rtc_12:1; /* RW */ + unsigned long rtc_13:1; /* RW */ + unsigned long rtc_14:1; /* RW */ + unsigned long rtc_15:1; /* RW */ + unsigned long rtc_16:1; /* RW */ + unsigned long rtc_17:1; /* RW */ + unsigned long rtc_18:1; /* RW */ + unsigned long rtc_19:1; /* RW */ + unsigned long rtc_20:1; /* RW */ + unsigned long rtc_21:1; /* RW */ + unsigned long rtc_22:1; /* RW */ + unsigned long rtc_23:1; /* RW */ + unsigned long rtc_24:1; /* RW */ + unsigned long rtc_25:1; /* RW */ + unsigned long rtc_26:1; /* RW */ + unsigned long rtc_27:1; /* RW */ + unsigned long rtc_28:1; /* RW */ + unsigned long rtc_29:1; /* RW */ + unsigned long rtc_30:1; /* RW */ + unsigned long rtc_31:1; /* RW */ + unsigned long rsvd_34_63:30; + } sy; + + /* UV5 unique struct */ + struct uv5h_event_occurred2_s { + unsigned long rtc_interval_int:1; /* RW */ + unsigned long bau_dashboard_int:1; /* RW */ + unsigned long rtc_0:1; /* RW */ + unsigned long rtc_1:1; /* RW */ + unsigned long rtc_2:1; /* RW */ + unsigned long rtc_3:1; /* RW */ + unsigned long rtc_4:1; /* RW */ + unsigned long rtc_5:1; /* RW */ + unsigned long rtc_6:1; /* RW */ + unsigned long rtc_7:1; /* RW */ + unsigned long rtc_8:1; /* RW */ + unsigned long rtc_9:1; /* RW */ + unsigned long rtc_10:1; /* RW */ + unsigned long rtc_11:1; /* RW */ + unsigned long rtc_12:1; /* RW */ + unsigned long rtc_13:1; /* RW */ + unsigned long rtc_14:1; /* RW */ + unsigned long rtc_15:1; /* RW */ + unsigned long rtc_16:1; /* RW */ + unsigned long rtc_17:1; /* RW */ + unsigned long rtc_18:1; /* RW */ + unsigned long rtc_19:1; /* RW */ + unsigned long rtc_20:1; /* RW */ + unsigned long rtc_21:1; /* RW */ + unsigned long rtc_22:1; /* RW */ + unsigned long rtc_23:1; /* RW */ + unsigned long rtc_24:1; /* RW */ + unsigned long rtc_25:1; /* RW */ + unsigned long rtc_26:1; /* RW */ + unsigned long rtc_27:1; /* RW */ + unsigned long rtc_28:1; /* RW */ + unsigned long rtc_29:1; /* RW */ + unsigned long rtc_30:1; /* RW */ + unsigned long rtc_31:1; /* RW */ + unsigned long rsvd_34_63:30; + } s5; + + /* UV4 unique struct */ + struct uv4h_event_occurred2_s { + unsigned long message_accelerator_int0:1; /* RW */ + unsigned long message_accelerator_int1:1; /* RW */ + unsigned long message_accelerator_int2:1; /* RW */ + unsigned long message_accelerator_int3:1; /* RW */ + unsigned long message_accelerator_int4:1; /* RW */ + unsigned long message_accelerator_int5:1; /* RW */ + unsigned long message_accelerator_int6:1; /* RW */ + unsigned long message_accelerator_int7:1; /* RW */ + unsigned long message_accelerator_int8:1; /* RW */ + unsigned long message_accelerator_int9:1; /* RW */ + unsigned long message_accelerator_int10:1; /* RW */ + unsigned long message_accelerator_int11:1; /* RW */ + unsigned long message_accelerator_int12:1; /* RW */ + unsigned long message_accelerator_int13:1; /* RW */ + unsigned long message_accelerator_int14:1; /* RW */ + unsigned long 
message_accelerator_int15:1; /* RW */ + unsigned long rtc_interval_int:1; /* RW */ + unsigned long bau_dashboard_int:1; /* RW */ + unsigned long rtc_0:1; /* RW */ + unsigned long rtc_1:1; /* RW */ + unsigned long rtc_2:1; /* RW */ + unsigned long rtc_3:1; /* RW */ + unsigned long rtc_4:1; /* RW */ + unsigned long rtc_5:1; /* RW */ + unsigned long rtc_6:1; /* RW */ + unsigned long rtc_7:1; /* RW */ + unsigned long rtc_8:1; /* RW */ + unsigned long rtc_9:1; /* RW */ + unsigned long rtc_10:1; /* RW */ + unsigned long rtc_11:1; /* RW */ + unsigned long rtc_12:1; /* RW */ + unsigned long rtc_13:1; /* RW */ + unsigned long rtc_14:1; /* RW */ + unsigned long rtc_15:1; /* RW */ + unsigned long rtc_16:1; /* RW */ + unsigned long rtc_17:1; /* RW */ + unsigned long rtc_18:1; /* RW */ + unsigned long rtc_19:1; /* RW */ + unsigned long rtc_20:1; /* RW */ + unsigned long rtc_21:1; /* RW */ + unsigned long rtc_22:1; /* RW */ + unsigned long rtc_23:1; /* RW */ + unsigned long rtc_24:1; /* RW */ + unsigned long rtc_25:1; /* RW */ + unsigned long rtc_26:1; /* RW */ + unsigned long rtc_27:1; /* RW */ + unsigned long rtc_28:1; /* RW */ + unsigned long rtc_29:1; /* RW */ + unsigned long rtc_30:1; /* RW */ + unsigned long rtc_31:1; /* RW */ + unsigned long rsvd_50_63:14; + } s4; + + /* UV3 unique struct */ + struct uv3h_event_occurred2_s { + unsigned long rtc_0:1; /* RW */ + unsigned long rtc_1:1; /* RW */ + unsigned long rtc_2:1; /* RW */ + unsigned long rtc_3:1; /* RW */ + unsigned long rtc_4:1; /* RW */ + unsigned long rtc_5:1; /* RW */ + unsigned long rtc_6:1; /* RW */ + unsigned long rtc_7:1; /* RW */ + unsigned long rtc_8:1; /* RW */ + unsigned long rtc_9:1; /* RW */ + unsigned long rtc_10:1; /* RW */ + unsigned long rtc_11:1; /* RW */ + unsigned long rtc_12:1; /* RW */ + unsigned long rtc_13:1; /* RW */ + unsigned long rtc_14:1; /* RW */ + unsigned long rtc_15:1; /* RW */ + unsigned long rtc_16:1; /* RW */ + unsigned long rtc_17:1; /* RW */ + unsigned long rtc_18:1; /* RW */ + unsigned long rtc_19:1; /* RW */ + unsigned long rtc_20:1; /* RW */ + unsigned long rtc_21:1; /* RW */ + unsigned long rtc_22:1; /* RW */ + unsigned long rtc_23:1; /* RW */ + unsigned long rtc_24:1; /* RW */ + unsigned long rtc_25:1; /* RW */ + unsigned long rtc_26:1; /* RW */ + unsigned long rtc_27:1; /* RW */ + unsigned long rtc_28:1; /* RW */ + unsigned long rtc_29:1; /* RW */ + unsigned long rtc_30:1; /* RW */ + unsigned long rtc_31:1; /* RW */ + unsigned long rsvd_32_63:32; + } s3; + + /* UV2 unique struct */ + struct uv2h_event_occurred2_s { + unsigned long rtc_0:1; /* RW */ + unsigned long rtc_1:1; /* RW */ + unsigned long rtc_2:1; /* RW */ + unsigned long rtc_3:1; /* RW */ + unsigned long rtc_4:1; /* RW */ + unsigned long rtc_5:1; /* RW */ + unsigned long rtc_6:1; /* RW */ + unsigned long rtc_7:1; /* RW */ + unsigned long rtc_8:1; /* RW */ + unsigned long rtc_9:1; /* RW */ + unsigned long rtc_10:1; /* RW */ + unsigned long rtc_11:1; /* RW */ + unsigned long rtc_12:1; /* RW */ + unsigned long rtc_13:1; /* RW */ + unsigned long rtc_14:1; /* RW */ + unsigned long rtc_15:1; /* RW */ + unsigned long rtc_16:1; /* RW */ + unsigned long rtc_17:1; /* RW */ + unsigned long rtc_18:1; /* RW */ + unsigned long rtc_19:1; /* RW */ + unsigned long rtc_20:1; /* RW */ + unsigned long rtc_21:1; /* RW */ + unsigned long rtc_22:1; /* RW */ + unsigned long rtc_23:1; /* RW */ + unsigned long rtc_24:1; /* RW */ + unsigned long rtc_25:1; /* RW */ + unsigned long rtc_26:1; /* RW */ + unsigned long rtc_27:1; /* RW */ + unsigned long rtc_28:1; /* RW 
*/ + unsigned long rtc_29:1; /* RW */ + unsigned long rtc_30:1; /* RW */ + unsigned long rtc_31:1; /* RW */ + unsigned long rsvd_32_63:32; + } s2; +}; + +/* ========================================================================= */ +/* UVH_EVENT_OCCURRED2_ALIAS */ +/* ========================================================================= */ +#define UVH_EVENT_OCCURRED2_ALIAS 0x70108UL /* ========================================================================= */ @@ -585,51 +2135,148 @@ union uvh_event_occurred0_u { /* ========================================================================= */ #define UVH_EXTIO_INT0_BROADCAST 0x61448UL -#define UV2H_EXTIO_INT0_BROADCAST_32 0x3f0 -#define UV3H_EXTIO_INT0_BROADCAST_32 0x3f0 -#define UV4H_EXTIO_INT0_BROADCAST_32 0x310 -#define UVH_EXTIO_INT0_BROADCAST_32 ( \ - is_uv2_hub() ? UV2H_EXTIO_INT0_BROADCAST_32 : \ - is_uv3_hub() ? UV3H_EXTIO_INT0_BROADCAST_32 : \ - /*is_uv4_hub*/ UV4H_EXTIO_INT0_BROADCAST_32) - +/* UVH common defines*/ #define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT 0 #define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK 0x0000000000000001UL union uvh_extio_int0_broadcast_u { unsigned long v; + + /* UVH common struct */ struct uvh_extio_int0_broadcast_s { unsigned long enable:1; /* RW */ unsigned long rsvd_1_63:63; } s; + + /* UV5 unique struct */ + struct uv5h_extio_int0_broadcast_s { + unsigned long enable:1; /* RW */ + unsigned long rsvd_1_63:63; + } s5; + + /* UV4 unique struct */ + struct uv4h_extio_int0_broadcast_s { + unsigned long enable:1; /* RW */ + unsigned long rsvd_1_63:63; + } s4; + + /* UV3 unique struct */ + struct uv3h_extio_int0_broadcast_s { + unsigned long enable:1; /* RW */ + unsigned long rsvd_1_63:63; + } s3; + + /* UV2 unique struct */ + struct uv2h_extio_int0_broadcast_s { + unsigned long enable:1; /* RW */ + unsigned long rsvd_1_63:63; + } s2; +}; + +/* ========================================================================= */ +/* UVH_GR0_GAM_GR_CONFIG */ +/* ========================================================================= */ +#define UVH_GR0_GAM_GR_CONFIG ( \ + is_uv(UV5) ? 0x600028UL : \ + is_uv(UV4) ? 0x600028UL : \ + is_uv(UV3) ? 0xc00028UL : \ + is_uv(UV2) ? 
0xc00028UL : \ + 0) + + + +/* UVYH common defines */ +#define UVYH_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10 +#define UVYH_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL + +/* UV4 unique defines */ +#define UV4H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10 +#define UV4H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL + +/* UV3 unique defines */ +#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0 +#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL +#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10 +#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL + +/* UV2 unique defines */ +#define UV2H_GR0_GAM_GR_CONFIG_N_GR_SHFT 0 +#define UV2H_GR0_GAM_GR_CONFIG_N_GR_MASK 0x000000000000000fUL + + +union uvyh_gr0_gam_gr_config_u { + unsigned long v; + + /* UVYH common struct */ + struct uvyh_gr0_gam_gr_config_s { + unsigned long rsvd_0_9:10; + unsigned long subspace:1; /* RW */ + unsigned long rsvd_11_63:53; + } sy; + + /* UV5 unique struct */ + struct uv5h_gr0_gam_gr_config_s { + unsigned long rsvd_0_9:10; + unsigned long subspace:1; /* RW */ + unsigned long rsvd_11_63:53; + } s5; + + /* UV4 unique struct */ + struct uv4h_gr0_gam_gr_config_s { + unsigned long rsvd_0_9:10; + unsigned long subspace:1; /* RW */ + unsigned long rsvd_11_63:53; + } s4; + + /* UV3 unique struct */ + struct uv3h_gr0_gam_gr_config_s { + unsigned long m_skt:6; /* RW */ + unsigned long undef_6_9:4; /* Undefined */ + unsigned long subspace:1; /* RW */ + unsigned long reserved:53; + } s3; + + /* UV2 unique struct */ + struct uv2h_gr0_gam_gr_config_s { + unsigned long n_gr:4; /* RW */ + unsigned long reserved:60; + } s2; }; /* ========================================================================= */ /* UVH_GR0_TLB_INT0_CONFIG */ /* ========================================================================= */ -#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL - -#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 -#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 -#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11 -#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12 -#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13 -#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15 -#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16 -#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32 -#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL -#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL +#define UVH_GR0_TLB_INT0_CONFIG ( \ + is_uv(UV4) ? 0x61b00UL : \ + is_uv(UV3) ? 0x61b00UL : \ + is_uv(UV2) ? 
0x61b00UL : \ + uv_undefined("UVH_GR0_TLB_INT0_CONFIG")) + + +/* UVXH common defines */ +#define UVXH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 +#define UVXH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVXH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 +#define UVXH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL +#define UVXH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11 +#define UVXH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVXH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12 +#define UVXH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVXH_GR0_TLB_INT0_CONFIG_P_SHFT 13 +#define UVXH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL +#define UVXH_GR0_TLB_INT0_CONFIG_T_SHFT 15 +#define UVXH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL +#define UVXH_GR0_TLB_INT0_CONFIG_M_SHFT 16 +#define UVXH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL +#define UVXH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32 +#define UVXH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL union uvh_gr0_tlb_int0_config_u { unsigned long v; + + /* UVH common struct */ struct uvh_gr0_tlb_int0_config_s { unsigned long vector_:8; /* RW */ unsigned long dm:3; /* RW */ @@ -642,33 +2289,97 @@ union uvh_gr0_tlb_int0_config_u { unsigned long rsvd_17_31:15; unsigned long apic_id:32; /* RW */ } s; + + /* UVXH common struct */ + struct uvxh_gr0_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } sx; + + /* UV4 unique struct */ + struct uv4h_gr0_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s4; + + /* UV3 unique struct */ + struct uv3h_gr0_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s3; + + /* UV2 unique struct */ + struct uv2h_gr0_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s2; }; /* ========================================================================= */ /* UVH_GR0_TLB_INT1_CONFIG */ /* ========================================================================= */ -#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL - -#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 -#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 -#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11 -#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12 -#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13 -#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15 -#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16 -#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32 -#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL 
-#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL +#define UVH_GR0_TLB_INT1_CONFIG ( \ + is_uv(UV4) ? 0x61b40UL : \ + is_uv(UV3) ? 0x61b40UL : \ + is_uv(UV2) ? 0x61b40UL : \ + uv_undefined("UVH_GR0_TLB_INT1_CONFIG")) + + +/* UVXH common defines */ +#define UVXH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 +#define UVXH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVXH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 +#define UVXH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL +#define UVXH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11 +#define UVXH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVXH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12 +#define UVXH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVXH_GR0_TLB_INT1_CONFIG_P_SHFT 13 +#define UVXH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL +#define UVXH_GR0_TLB_INT1_CONFIG_T_SHFT 15 +#define UVXH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL +#define UVXH_GR0_TLB_INT1_CONFIG_M_SHFT 16 +#define UVXH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL +#define UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32 +#define UVXH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL union uvh_gr0_tlb_int1_config_u { unsigned long v; + + /* UVH common struct */ struct uvh_gr0_tlb_int1_config_s { unsigned long vector_:8; /* RW */ unsigned long dm:3; /* RW */ @@ -681,382 +2392,97 @@ union uvh_gr0_tlb_int1_config_u { unsigned long rsvd_17_31:15; unsigned long apic_id:32; /* RW */ } s; -}; -/* ========================================================================= */ -/* UVH_GR0_TLB_MMR_CONTROL */ -/* ========================================================================= */ -#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL -#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL -#define UV4H_GR0_TLB_MMR_CONTROL 0x601080UL -#define UVH_GR0_TLB_MMR_CONTROL ( \ - is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \ - is_uv3_hub() ? 
UV3H_GR0_TLB_MMR_CONTROL : \ - /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL) - -#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL - -#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL - -#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 -#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48 -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52 -#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL -#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL -#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL -#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL - -#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 -#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21 -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL -#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL -#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL - -#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 13 -#define 
UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21 -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_SHFT 59 -#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000001fffUL -#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000006000UL -#define UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL -#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_MASK 0xf800000000000000UL - -#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK ( \ - is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \ - is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK : \ - /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK) -#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK ( \ - is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \ - is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK : \ - /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK) -#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT ( \ - is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \ - is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT : \ - /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT) - -union uvh_gr0_tlb_mmr_control_u { - unsigned long v; - struct uvh_gr0_tlb_mmr_control_s { - unsigned long rsvd_0_15:16; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long rsvd_21_29:9; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long rsvd_32_48:17; - unsigned long rsvd_49_51:3; - unsigned long rsvd_52_63:12; - } s; - struct uvxh_gr0_tlb_mmr_control_s { - unsigned long rsvd_0_15:16; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long rsvd_21_29:9; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned long rsvd_48:1; - unsigned long rsvd_49_51:3; - unsigned long rsvd_52_63:12; + /* UVXH common struct */ + struct uvxh_gr0_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } sx; - struct uv2h_gr0_tlb_mmr_control_s { - unsigned long index:12; /* RW */ - unsigned long mem_sel:2; /* RW */ - unsigned long rsvd_14_15:2; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long rsvd_21_29:9; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned long mmr_inj_con:1; /* RW */ - unsigned long rsvd_49_51:3; - unsigned long mmr_inj_tlbram:1; /* RW */ - unsigned long rsvd_53_63:11; - } s2; - struct 
uv3h_gr0_tlb_mmr_control_s { - unsigned long index:12; /* RW */ - unsigned long mem_sel:2; /* RW */ - unsigned long rsvd_14_15:2; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long ecc_sel:1; /* RW */ - unsigned long rsvd_22_29:8; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned long undef_48:1; /* Undefined */ - unsigned long rsvd_49_51:3; - unsigned long undef_52:1; /* Undefined */ - unsigned long rsvd_53_63:11; - } s3; - struct uv4h_gr0_tlb_mmr_control_s { - unsigned long index:13; /* RW */ - unsigned long mem_sel:2; /* RW */ - unsigned long rsvd_15:1; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long ecc_sel:1; /* RW */ - unsigned long rsvd_22_29:8; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned long undef_48:1; /* Undefined */ - unsigned long rsvd_49_51:3; - unsigned long rsvd_52_58:7; - unsigned long page_size:5; /* RW */ + + /* UV4 unique struct */ + struct uv4h_gr0_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } s4; -}; -/* ========================================================================= */ -/* UVH_GR0_TLB_MMR_READ_DATA_HI */ -/* ========================================================================= */ -#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL -#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL -#define UV4H_GR0_TLB_MMR_READ_DATA_HI 0x6010a0UL -#define UVH_GR0_TLB_MMR_READ_DATA_HI ( \ - is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \ - is_uv3_hub() ? 
UV3H_GR0_TLB_MMR_READ_DATA_HI : \ - /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_HI) - -#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 - -#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 - -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL -#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL - -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45 -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55 -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL -#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL - -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_SHFT 34 -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 49 -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 51 -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 52 -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 53 -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55 -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x00000003ffffffffUL -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_MASK 0x0001fffc00000000UL -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0006000000000000UL -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0008000000000000UL -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0010000000000000UL -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0020000000000000UL -#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL - - -union uvh_gr0_tlb_mmr_read_data_hi_u { - unsigned long v; - struct uv2h_gr0_tlb_mmr_read_data_hi_s { - unsigned long pfn:41; /* RO */ - unsigned long gaa:2; /* RO */ - unsigned long dirty:1; /* RO */ - unsigned long larger:1; /* RO */ - unsigned long rsvd_45_63:19; - } s2; - struct uv3h_gr0_tlb_mmr_read_data_hi_s { - unsigned long pfn:41; /* RO */ - unsigned long gaa:2; /* RO */ - unsigned long dirty:1; /* RO */ - unsigned long larger:1; /* RO */ - unsigned long aa_ext:1; /* RO */ - unsigned long undef_46_54:9; /* Undefined */ - unsigned long way_ecc:9; /* RO */ + /* UV3 unique struct */ + struct uv3h_gr0_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } s3; - struct uv4h_gr0_tlb_mmr_read_data_hi_s { - unsigned long pfn:34; /* RO */ - unsigned long pnid:15; /* RO */ - unsigned long gaa:2; /* RO */ - unsigned long dirty:1; /* RO */ - unsigned long larger:1; /* RO */ - unsigned long aa_ext:1; /* RO */ - unsigned long undef_54:1; /* Undefined */ - 
unsigned long way_ecc:9; /* RO */ - } s4; -}; -/* ========================================================================= */ -/* UVH_GR0_TLB_MMR_READ_DATA_LO */ -/* ========================================================================= */ -#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL -#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL -#define UV4H_GR0_TLB_MMR_READ_DATA_LO 0x6010a8UL -#define UVH_GR0_TLB_MMR_READ_DATA_LO ( \ - is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \ - is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_LO : \ - /*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_LO) - -#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - - -union uvh_gr0_tlb_mmr_read_data_lo_u { - unsigned long v; - struct uvh_gr0_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } s; - struct uvxh_gr0_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } sx; - struct uv2h_gr0_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ + /* UV2 unique struct */ + struct uv2h_gr0_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } s2; - struct uv3h_gr0_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } s3; - struct uv4h_gr0_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - 
unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } s4; }; /* ========================================================================= */ /* UVH_GR1_TLB_INT0_CONFIG */ /* ========================================================================= */ -#define UV2H_GR1_TLB_INT0_CONFIG 0x61f00UL -#define UV3H_GR1_TLB_INT0_CONFIG 0x61f00UL -#define UV4H_GR1_TLB_INT0_CONFIG 0x62100UL #define UVH_GR1_TLB_INT0_CONFIG ( \ - is_uv2_hub() ? UV2H_GR1_TLB_INT0_CONFIG : \ - is_uv3_hub() ? UV3H_GR1_TLB_INT0_CONFIG : \ - /*is_uv4_hub*/ UV4H_GR1_TLB_INT0_CONFIG) - -#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 -#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 -#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11 -#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12 -#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13 -#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15 -#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16 -#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32 -#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL -#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL + is_uv(UV4) ? 0x62100UL : \ + is_uv(UV3) ? 0x61f00UL : \ + is_uv(UV2) ? 0x61f00UL : \ + uv_undefined("UVH_GR1_TLB_INT0_CONFIG")) + + +/* UVXH common defines */ +#define UVXH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 +#define UVXH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVXH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 +#define UVXH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL +#define UVXH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11 +#define UVXH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVXH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12 +#define UVXH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVXH_GR1_TLB_INT0_CONFIG_P_SHFT 13 +#define UVXH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL +#define UVXH_GR1_TLB_INT0_CONFIG_T_SHFT 15 +#define UVXH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL +#define UVXH_GR1_TLB_INT0_CONFIG_M_SHFT 16 +#define UVXH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL +#define UVXH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32 +#define UVXH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL union uvh_gr1_tlb_int0_config_u { unsigned long v; + + /* UVH common struct */ struct uvh_gr1_tlb_int0_config_s { unsigned long vector_:8; /* RW */ unsigned long dm:3; /* RW */ @@ -1069,39 +2495,97 @@ union uvh_gr1_tlb_int0_config_u { unsigned long rsvd_17_31:15; unsigned long apic_id:32; /* RW */ } s; + + /* UVXH common struct */ + struct uvxh_gr1_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } sx; + + /* UV4 unique struct */ + struct uv4h_gr1_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long 
rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s4; + + /* UV3 unique struct */ + struct uv3h_gr1_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s3; + + /* UV2 unique struct */ + struct uv2h_gr1_tlb_int0_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s2; }; /* ========================================================================= */ /* UVH_GR1_TLB_INT1_CONFIG */ /* ========================================================================= */ -#define UV2H_GR1_TLB_INT1_CONFIG 0x61f40UL -#define UV3H_GR1_TLB_INT1_CONFIG 0x61f40UL -#define UV4H_GR1_TLB_INT1_CONFIG 0x62140UL #define UVH_GR1_TLB_INT1_CONFIG ( \ - is_uv2_hub() ? UV2H_GR1_TLB_INT1_CONFIG : \ - is_uv3_hub() ? UV3H_GR1_TLB_INT1_CONFIG : \ - /*is_uv4_hub*/ UV4H_GR1_TLB_INT1_CONFIG) - -#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 -#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 -#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11 -#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12 -#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13 -#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15 -#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16 -#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32 -#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL -#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL -#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL -#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL -#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL -#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL -#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL -#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL + is_uv(UV4) ? 0x62140UL : \ + is_uv(UV3) ? 0x61f40UL : \ + is_uv(UV2) ? 
0x61f40UL : \ + uv_undefined("UVH_GR1_TLB_INT1_CONFIG")) + + +/* UVXH common defines */ +#define UVXH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 +#define UVXH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVXH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 +#define UVXH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL +#define UVXH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11 +#define UVXH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVXH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12 +#define UVXH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVXH_GR1_TLB_INT1_CONFIG_P_SHFT 13 +#define UVXH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL +#define UVXH_GR1_TLB_INT1_CONFIG_T_SHFT 15 +#define UVXH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL +#define UVXH_GR1_TLB_INT1_CONFIG_M_SHFT 16 +#define UVXH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL +#define UVXH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32 +#define UVXH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL union uvh_gr1_tlb_int1_config_u { unsigned long v; + + /* UVH common struct */ struct uvh_gr1_tlb_int1_config_s { unsigned long vector_:8; /* RW */ unsigned long dm:3; /* RW */ @@ -1114,337 +2598,62 @@ union uvh_gr1_tlb_int1_config_u { unsigned long rsvd_17_31:15; unsigned long apic_id:32; /* RW */ } s; -}; -/* ========================================================================= */ -/* UVH_GR1_TLB_MMR_CONTROL */ -/* ========================================================================= */ -#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL -#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL -#define UV4H_GR1_TLB_MMR_CONTROL 0x701080UL -#define UVH_GR1_TLB_MMR_CONTROL ( \ - is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \ - is_uv3_hub() ? UV3H_GR1_TLB_MMR_CONTROL : \ - /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_CONTROL) - -#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL - -#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL - -#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 -#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48 -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52 -#define 
UV2H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL -#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL -#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL -#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL - -#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 -#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21 -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL -#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL -#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL - -#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 -#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 13 -#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 -#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21 -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31 -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 -#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_SHFT 59 -#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000001fffUL -#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000006000UL -#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL -#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL -#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL -#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_MASK 0xf800000000000000UL - - -union uvh_gr1_tlb_mmr_control_u { - unsigned long v; - struct uvh_gr1_tlb_mmr_control_s { - unsigned long rsvd_0_15:16; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long rsvd_21_29:9; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long rsvd_32_48:17; - unsigned long rsvd_49_51:3; - unsigned long rsvd_52_63:12; - } s; - struct uvxh_gr1_tlb_mmr_control_s { - unsigned long rsvd_0_15:16; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long rsvd_21_29:9; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned 
long rsvd_48:1; - unsigned long rsvd_49_51:3; - unsigned long rsvd_52_63:12; + /* UVXH common struct */ + struct uvxh_gr1_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } sx; - struct uv2h_gr1_tlb_mmr_control_s { - unsigned long index:12; /* RW */ - unsigned long mem_sel:2; /* RW */ - unsigned long rsvd_14_15:2; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long rsvd_21_29:9; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned long mmr_inj_con:1; /* RW */ - unsigned long rsvd_49_51:3; - unsigned long mmr_inj_tlbram:1; /* RW */ - unsigned long rsvd_53_63:11; - } s2; - struct uv3h_gr1_tlb_mmr_control_s { - unsigned long index:12; /* RW */ - unsigned long mem_sel:2; /* RW */ - unsigned long rsvd_14_15:2; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long ecc_sel:1; /* RW */ - unsigned long rsvd_22_29:8; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned long undef_48:1; /* Undefined */ - unsigned long rsvd_49_51:3; - unsigned long undef_52:1; /* Undefined */ - unsigned long rsvd_53_63:11; - } s3; - struct uv4h_gr1_tlb_mmr_control_s { - unsigned long index:13; /* RW */ - unsigned long mem_sel:2; /* RW */ - unsigned long rsvd_15:1; - unsigned long auto_valid_en:1; /* RW */ - unsigned long rsvd_17_19:3; - unsigned long mmr_hash_index_en:1; /* RW */ - unsigned long ecc_sel:1; /* RW */ - unsigned long rsvd_22_29:8; - unsigned long mmr_write:1; /* WP */ - unsigned long mmr_read:1; /* WP */ - unsigned long mmr_op_done:1; /* RW */ - unsigned long rsvd_33_47:15; - unsigned long undef_48:1; /* Undefined */ - unsigned long rsvd_49_51:3; - unsigned long rsvd_52_58:7; - unsigned long page_size:5; /* RW */ + + /* UV4 unique struct */ + struct uv4h_gr1_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } s4; -}; -/* ========================================================================= */ -/* UVH_GR1_TLB_MMR_READ_DATA_HI */ -/* ========================================================================= */ -#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL -#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL -#define UV4H_GR1_TLB_MMR_READ_DATA_HI 0x7010a0UL -#define UVH_GR1_TLB_MMR_READ_DATA_HI ( \ - is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \ - is_uv3_hub() ? 
UV3H_GR1_TLB_MMR_READ_DATA_HI : \ - /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_HI) - -#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 - -#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 - -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL -#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL - -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45 -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55 -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL -#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL - -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_SHFT 34 -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 49 -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 51 -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 52 -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 53 -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55 -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x00000003ffffffffUL -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_MASK 0x0001fffc00000000UL -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0006000000000000UL -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0008000000000000UL -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0010000000000000UL -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0020000000000000UL -#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL - - -union uvh_gr1_tlb_mmr_read_data_hi_u { - unsigned long v; - struct uv2h_gr1_tlb_mmr_read_data_hi_s { - unsigned long pfn:41; /* RO */ - unsigned long gaa:2; /* RO */ - unsigned long dirty:1; /* RO */ - unsigned long larger:1; /* RO */ - unsigned long rsvd_45_63:19; - } s2; - struct uv3h_gr1_tlb_mmr_read_data_hi_s { - unsigned long pfn:41; /* RO */ - unsigned long gaa:2; /* RO */ - unsigned long dirty:1; /* RO */ - unsigned long larger:1; /* RO */ - unsigned long aa_ext:1; /* RO */ - unsigned long undef_46_54:9; /* Undefined */ - unsigned long way_ecc:9; /* RO */ + /* UV3 unique struct */ + struct uv3h_gr1_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } s3; - struct uv4h_gr1_tlb_mmr_read_data_hi_s { - unsigned long pfn:34; /* RO */ - unsigned long pnid:15; /* RO */ - unsigned long gaa:2; /* RO */ - unsigned long dirty:1; /* RO */ - unsigned long larger:1; /* RO */ - unsigned long aa_ext:1; /* RO */ - unsigned long undef_54:1; /* Undefined */ - 
unsigned long way_ecc:9; /* RO */ - } s4; -}; -/* ========================================================================= */ -/* UVH_GR1_TLB_MMR_READ_DATA_LO */ -/* ========================================================================= */ -#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL -#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL -#define UV4H_GR1_TLB_MMR_READ_DATA_LO 0x7010a8UL -#define UVH_GR1_TLB_MMR_READ_DATA_LO ( \ - is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \ - is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_LO : \ - /*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_LO) - -#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - -#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 -#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 -#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 -#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL -#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL -#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL - - -union uvh_gr1_tlb_mmr_read_data_lo_u { - unsigned long v; - struct uvh_gr1_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } s; - struct uvxh_gr1_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } sx; - struct uv2h_gr1_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ + /* UV2 unique struct */ + struct uv2h_gr1_tlb_int1_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } s2; - struct uv3h_gr1_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } s3; - struct uv4h_gr1_tlb_mmr_read_data_lo_s { - unsigned long vpn:39; /* RO */ - 
unsigned long asid:24; /* RO */ - unsigned long valid:1; /* RO */ - } s4; }; /* ========================================================================= */ @@ -1452,52 +2661,43 @@ union uvh_gr1_tlb_mmr_read_data_lo_u { /* ========================================================================= */ #define UVH_INT_CMPB 0x22080UL +/* UVH common defines*/ #define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 #define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL union uvh_int_cmpb_u { unsigned long v; + + /* UVH common struct */ struct uvh_int_cmpb_s { unsigned long real_time_cmpb:56; /* RW */ unsigned long rsvd_56_63:8; } s; -}; - -/* ========================================================================= */ -/* UVH_INT_CMPC */ -/* ========================================================================= */ -#define UVH_INT_CMPC 0x22100UL - - -#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0 -#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL - -union uvh_int_cmpc_u { - unsigned long v; - struct uvh_int_cmpc_s { - unsigned long real_time_cmpc:56; /* RW */ + /* UV5 unique struct */ + struct uv5h_int_cmpb_s { + unsigned long real_time_cmpb:56; /* RW */ unsigned long rsvd_56_63:8; - } s; -}; + } s5; -/* ========================================================================= */ -/* UVH_INT_CMPD */ -/* ========================================================================= */ -#define UVH_INT_CMPD 0x22180UL - - -#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0 -#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL + /* UV4 unique struct */ + struct uv4h_int_cmpb_s { + unsigned long real_time_cmpb:56; /* RW */ + unsigned long rsvd_56_63:8; + } s4; + /* UV3 unique struct */ + struct uv3h_int_cmpb_s { + unsigned long real_time_cmpb:56; /* RW */ + unsigned long rsvd_56_63:8; + } s3; -union uvh_int_cmpd_u { - unsigned long v; - struct uvh_int_cmpd_s { - unsigned long real_time_cmpd:56; /* RW */ + /* UV2 unique struct */ + struct uv2h_int_cmpb_s { + unsigned long real_time_cmpb:56; /* RW */ unsigned long rsvd_56_63:8; - } s; + } s2; }; /* ========================================================================= */ @@ -1505,28 +2705,23 @@ union uvh_int_cmpd_u { /* ========================================================================= */ #define UVH_IPI_INT 0x60500UL -#define UV2H_IPI_INT_32 0x348 -#define UV3H_IPI_INT_32 0x348 -#define UV4H_IPI_INT_32 0x268 -#define UVH_IPI_INT_32 ( \ - is_uv2_hub() ? UV2H_IPI_INT_32 : \ - is_uv3_hub() ? 
UV3H_IPI_INT_32 : \ - /*is_uv4_hub*/ UV4H_IPI_INT_32) - +/* UVH common defines*/ #define UVH_IPI_INT_VECTOR_SHFT 0 -#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 -#define UVH_IPI_INT_DESTMODE_SHFT 11 -#define UVH_IPI_INT_APIC_ID_SHFT 16 -#define UVH_IPI_INT_SEND_SHFT 63 #define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL +#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 #define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL +#define UVH_IPI_INT_DESTMODE_SHFT 11 #define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL +#define UVH_IPI_INT_APIC_ID_SHFT 16 #define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL +#define UVH_IPI_INT_SEND_SHFT 63 #define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL union uvh_ipi_int_u { unsigned long v; + + /* UVH common struct */ struct uvh_ipi_int_s { unsigned long vector_:8; /* RW */ unsigned long delivery_mode:3; /* RW */ @@ -1536,903 +2731,105 @@ union uvh_ipi_int_u { unsigned long rsvd_48_62:15; unsigned long send:1; /* WP */ } s; -}; - -/* ========================================================================= */ -/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ -/* ========================================================================= */ -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL -#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST") -#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST ( \ - is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \ - is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST : \ - /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST) -#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0 - - -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL - -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL - - -union uvh_lb_bau_intd_payload_queue_first_u { - unsigned long v; - struct uv2h_lb_bau_intd_payload_queue_first_s { - unsigned long rsvd_0_3:4; - unsigned long address:39; /* RW */ - unsigned long rsvd_43_48:6; - unsigned long node_id:14; /* RW */ - unsigned long rsvd_63:1; - } s2; - struct uv3h_lb_bau_intd_payload_queue_first_s { - unsigned long rsvd_0_3:4; - unsigned long address:39; /* RW */ - unsigned long rsvd_43_48:6; - unsigned long node_id:14; /* RW */ - unsigned long rsvd_63:1; - } s3; -}; - -/* ========================================================================= */ -/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ -/* ========================================================================= */ -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL -#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST") -#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST ( \ - is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \ - is_uv3_hub() ? 
UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST : \ - /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST) -#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8 - - -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL - -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL - - -union uvh_lb_bau_intd_payload_queue_last_u { - unsigned long v; - struct uv2h_lb_bau_intd_payload_queue_last_s { - unsigned long rsvd_0_3:4; - unsigned long address:39; /* RW */ - unsigned long rsvd_43_63:21; - } s2; - struct uv3h_lb_bau_intd_payload_queue_last_s { - unsigned long rsvd_0_3:4; - unsigned long address:39; /* RW */ - unsigned long rsvd_43_63:21; - } s3; -}; - -/* ========================================================================= */ -/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ -/* ========================================================================= */ -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL -#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL") -#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL ( \ - is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \ - is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL : \ - /*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL) -#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0 - - -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 -#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL - -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 -#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL + /* UV5 unique struct */ + struct uv5h_ipi_int_s { + unsigned long vector_:8; /* RW */ + unsigned long delivery_mode:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long rsvd_12_15:4; + unsigned long apic_id:32; /* RW */ + unsigned long rsvd_48_62:15; + unsigned long send:1; /* WP */ + } s5; -union uvh_lb_bau_intd_payload_queue_tail_u { - unsigned long v; - struct uv2h_lb_bau_intd_payload_queue_tail_s { - unsigned long rsvd_0_3:4; - unsigned long address:39; /* RW */ - unsigned long rsvd_43_63:21; - } s2; - struct uv3h_lb_bau_intd_payload_queue_tail_s { - unsigned long rsvd_0_3:4; - unsigned long address:39; /* RW */ - unsigned long rsvd_43_63:21; - } s3; -}; + /* UV4 unique struct */ + struct uv4h_ipi_int_s { + unsigned long vector_:8; /* RW */ + unsigned long delivery_mode:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long rsvd_12_15:4; + unsigned long apic_id:32; /* RW */ + unsigned long rsvd_48_62:15; + unsigned long send:1; /* WP */ + } s4; -/* ========================================================================= */ -/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ -/* ========================================================================= */ -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL -#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE") -#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE ( \ - is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \ - is_uv3_hub() ? 
UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE : \ - /*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE) -#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68 - - -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL - -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14 -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 
-#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL - - -union uvh_lb_bau_intd_software_acknowledge_u { - unsigned long v; - struct uv2h_lb_bau_intd_software_acknowledge_s { - unsigned long pending_0:1; /* RW */ - unsigned long pending_1:1; /* RW */ - unsigned long pending_2:1; /* RW */ - unsigned long pending_3:1; /* RW */ - unsigned long pending_4:1; /* RW */ - unsigned long pending_5:1; /* RW */ - unsigned long pending_6:1; /* RW */ - unsigned long pending_7:1; /* RW */ - unsigned long timeout_0:1; /* RW */ - unsigned long timeout_1:1; /* RW */ - unsigned long timeout_2:1; /* RW */ - unsigned long timeout_3:1; /* RW */ - unsigned long timeout_4:1; /* RW */ - unsigned long timeout_5:1; /* RW */ - unsigned long timeout_6:1; /* RW */ - unsigned long timeout_7:1; /* RW */ - unsigned long rsvd_16_63:48; - } s2; - struct uv3h_lb_bau_intd_software_acknowledge_s { - unsigned long pending_0:1; /* RW */ - unsigned long pending_1:1; /* RW */ - unsigned long pending_2:1; /* RW */ - unsigned long pending_3:1; /* RW */ - unsigned long pending_4:1; /* RW */ - unsigned long pending_5:1; /* RW */ - unsigned long pending_6:1; /* RW */ - unsigned long pending_7:1; /* RW */ - unsigned long timeout_0:1; /* RW */ - unsigned long timeout_1:1; /* RW */ - unsigned long timeout_2:1; /* RW */ - unsigned long timeout_3:1; /* RW */ - unsigned long timeout_4:1; /* RW */ - unsigned long timeout_5:1; /* RW */ - unsigned long timeout_6:1; /* RW */ - unsigned long timeout_7:1; /* RW */ - unsigned long rsvd_16_63:48; + /* UV3 unique struct */ + struct uv3h_ipi_int_s { + unsigned long vector_:8; /* RW */ + unsigned long delivery_mode:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long rsvd_12_15:4; + unsigned long apic_id:32; /* RW */ + unsigned long rsvd_48_62:15; + unsigned long send:1; /* WP */ } s3; -}; -/* ========================================================================= */ -/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ -/* ========================================================================= */ -#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL -#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL -#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS") 
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS ( \ - is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \ - is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS : \ - /*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS) -#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70 - - -/* ========================================================================= */ -/* UVH_LB_BAU_MISC_CONTROL */ -/* ========================================================================= */ -#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL -#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL -#define UV4H_LB_BAU_MISC_CONTROL 0xc8170UL -#define UVH_LB_BAU_MISC_CONTROL ( \ - is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL : \ - is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL : \ - /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL) - -#define UV2H_LB_BAU_MISC_CONTROL_32 0xa10 -#define UV3H_LB_BAU_MISC_CONTROL_32 0xa10 -#define UV4H_LB_BAU_MISC_CONTROL_32 0xa18 -#define UVH_LB_BAU_MISC_CONTROL_32 ( \ - is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_32 : \ - is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_32 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_32) - -#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 -#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 -#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 -#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 -#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 -#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 -#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 -#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 -#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 -#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 -#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 -#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 -#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 -#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48 -#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL -#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL -#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL -#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL -#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL -#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL -#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL -#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL -#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL -#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL -#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL -#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL -#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL -#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL - -#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 -#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 -#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 -#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 -#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 -#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 -#define 
UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 -#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 -#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 -#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 -#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29 -#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30 -#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31 -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32 -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33 -#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34 -#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35 -#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48 -#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL -#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL -#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL -#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL -#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL -#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL -#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL -#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL -#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL -#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL -#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL -#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL -#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL -#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL -#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL -#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL -#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL - -#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 -#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 -#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 -#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 -#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 -#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 -#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 -#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 -#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 -#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 -#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 -#define 
UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29 -#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30 -#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31 -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32 -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33 -#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34 -#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35 -#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48 -#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL -#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL -#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL -#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL -#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL -#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL -#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL -#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL -#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL -#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL -#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL -#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL -#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL -#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL -#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL -#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL -#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL -#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL - -#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 -#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 -#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 -#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 -#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 -#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 -#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 -#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 -#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 -#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 -#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 -#define 
UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29 -#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30 -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31 -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32 -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33 -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34 -#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35 -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36 -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37 -#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38 -#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48 -#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL -#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL -#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL -#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL -#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL -#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL -#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL -#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL -#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL -#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL -#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL -#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL -#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL -#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL -#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL -#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL -#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL - -#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 -#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 -#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 -#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 -#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 -#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 -#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_SHFT 15 -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 -#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 -#define 
UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 -#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 -#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29 -#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30 -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31 -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32 -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33 -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34 -#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35 -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36 -#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_SHFT 37 -#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38 -#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_SHFT 46 -#define UV4H_LB_BAU_MISC_CONTROL_FUN_SHFT 48 -#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL -#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL -#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL -#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL -#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL -#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL -#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_MASK 0x00000000000f8000UL -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL -#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL -#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL -#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL -#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL -#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL -#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL -#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL -#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_MASK 0x0000002000000000UL -#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL -#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_MASK 0x0000400000000000UL -#define UV4H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL - -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK \ - uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK") -#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK ( \ - is_uv2_hub() ? 
UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \ - is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \ - /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK) -#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT \ - uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT") -#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT ( \ - is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \ - is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \ - /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT) -#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK \ - uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK") -#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK ( \ - is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \ - is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \ - /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK) -#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT \ - uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT") -#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT ( \ - is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \ - is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \ - /*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT) - -union uvh_lb_bau_misc_control_u { - unsigned long v; - struct uvh_lb_bau_misc_control_s { - unsigned long rejection_delay:8; /* RW */ - unsigned long apic_mode:1; /* RW */ - unsigned long force_broadcast:1; /* RW */ - unsigned long force_lock_nop:1; /* RW */ - unsigned long qpi_agent_presence_vector:3; /* RW */ - unsigned long descriptor_fetch_mode:1; /* RW */ - unsigned long rsvd_15_19:5; - unsigned long enable_dual_mapping_mode:1; /* RW */ - unsigned long vga_io_port_decode_enable:1; /* RW */ - unsigned long vga_io_port_16_bit_decode:1; /* RW */ - unsigned long suppress_dest_registration:1; /* RW */ - unsigned long programmed_initial_priority:3; /* RW */ - unsigned long use_incoming_priority:1; /* RW */ - unsigned long enable_programmed_initial_priority:1;/* RW */ - unsigned long rsvd_29_47:19; - unsigned long fun:16; /* RW */ - } s; - struct uvxh_lb_bau_misc_control_s { - unsigned long rejection_delay:8; /* RW */ - unsigned long apic_mode:1; /* RW */ - unsigned long force_broadcast:1; /* RW */ - unsigned long force_lock_nop:1; /* RW */ - unsigned long qpi_agent_presence_vector:3; /* RW */ - unsigned long descriptor_fetch_mode:1; /* RW */ - unsigned long rsvd_15_19:5; - unsigned long enable_dual_mapping_mode:1; /* RW */ - unsigned long vga_io_port_decode_enable:1; /* RW */ - unsigned long vga_io_port_16_bit_decode:1; /* RW */ - unsigned long suppress_dest_registration:1; /* RW */ - unsigned long programmed_initial_priority:3; /* RW */ - unsigned long use_incoming_priority:1; /* RW */ - unsigned long enable_programmed_initial_priority:1;/* RW */ - unsigned long enable_automatic_apic_mode_selection:1;/* RW */ - unsigned long apic_mode_status:1; /* RO */ - unsigned long suppress_interrupts_to_self:1; /* RW */ - unsigned long enable_lock_based_system_flush:1;/* RW */ - unsigned long enable_extended_sb_status:1; /* RW */ - unsigned long suppress_int_prio_udt_to_self:1;/* RW */ - unsigned long use_legacy_descriptor_formats:1;/* RW */ - unsigned long 
rsvd_36_47:12; - unsigned long fun:16; /* RW */ - } sx; - struct uv2h_lb_bau_misc_control_s { - unsigned long rejection_delay:8; /* RW */ - unsigned long apic_mode:1; /* RW */ - unsigned long force_broadcast:1; /* RW */ - unsigned long force_lock_nop:1; /* RW */ - unsigned long qpi_agent_presence_vector:3; /* RW */ - unsigned long descriptor_fetch_mode:1; /* RW */ - unsigned long enable_intd_soft_ack_mode:1; /* RW */ - unsigned long intd_soft_ack_timeout_period:4; /* RW */ - unsigned long enable_dual_mapping_mode:1; /* RW */ - unsigned long vga_io_port_decode_enable:1; /* RW */ - unsigned long vga_io_port_16_bit_decode:1; /* RW */ - unsigned long suppress_dest_registration:1; /* RW */ - unsigned long programmed_initial_priority:3; /* RW */ - unsigned long use_incoming_priority:1; /* RW */ - unsigned long enable_programmed_initial_priority:1;/* RW */ - unsigned long enable_automatic_apic_mode_selection:1;/* RW */ - unsigned long apic_mode_status:1; /* RO */ - unsigned long suppress_interrupts_to_self:1; /* RW */ - unsigned long enable_lock_based_system_flush:1;/* RW */ - unsigned long enable_extended_sb_status:1; /* RW */ - unsigned long suppress_int_prio_udt_to_self:1;/* RW */ - unsigned long use_legacy_descriptor_formats:1;/* RW */ - unsigned long rsvd_36_47:12; - unsigned long fun:16; /* RW */ + /* UV2 unique struct */ + struct uv2h_ipi_int_s { + unsigned long vector_:8; /* RW */ + unsigned long delivery_mode:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long rsvd_12_15:4; + unsigned long apic_id:32; /* RW */ + unsigned long rsvd_48_62:15; + unsigned long send:1; /* WP */ } s2; - struct uv3h_lb_bau_misc_control_s { - unsigned long rejection_delay:8; /* RW */ - unsigned long apic_mode:1; /* RW */ - unsigned long force_broadcast:1; /* RW */ - unsigned long force_lock_nop:1; /* RW */ - unsigned long qpi_agent_presence_vector:3; /* RW */ - unsigned long descriptor_fetch_mode:1; /* RW */ - unsigned long enable_intd_soft_ack_mode:1; /* RW */ - unsigned long intd_soft_ack_timeout_period:4; /* RW */ - unsigned long enable_dual_mapping_mode:1; /* RW */ - unsigned long vga_io_port_decode_enable:1; /* RW */ - unsigned long vga_io_port_16_bit_decode:1; /* RW */ - unsigned long suppress_dest_registration:1; /* RW */ - unsigned long programmed_initial_priority:3; /* RW */ - unsigned long use_incoming_priority:1; /* RW */ - unsigned long enable_programmed_initial_priority:1;/* RW */ - unsigned long enable_automatic_apic_mode_selection:1;/* RW */ - unsigned long apic_mode_status:1; /* RO */ - unsigned long suppress_interrupts_to_self:1; /* RW */ - unsigned long enable_lock_based_system_flush:1;/* RW */ - unsigned long enable_extended_sb_status:1; /* RW */ - unsigned long suppress_int_prio_udt_to_self:1;/* RW */ - unsigned long use_legacy_descriptor_formats:1;/* RW */ - unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */ - unsigned long enable_intd_prefetch_hint:1; /* RW */ - unsigned long thread_kill_timebase:8; /* RW */ - unsigned long rsvd_46_47:2; - unsigned long fun:16; /* RW */ - } s3; - struct uv4h_lb_bau_misc_control_s { - unsigned long rejection_delay:8; /* RW */ - unsigned long apic_mode:1; /* RW */ - unsigned long force_broadcast:1; /* RW */ - unsigned long force_lock_nop:1; /* RW */ - unsigned long qpi_agent_presence_vector:3; /* RW */ - unsigned long descriptor_fetch_mode:1; /* RW */ - unsigned long rsvd_15_19:5; - unsigned long enable_dual_mapping_mode:1; /* RW */ - unsigned long vga_io_port_decode_enable:1; /* RW */ - unsigned long vga_io_port_16_bit_decode:1; /* RW */ - 
unsigned long suppress_dest_registration:1; /* RW */ - unsigned long programmed_initial_priority:3; /* RW */ - unsigned long use_incoming_priority:1; /* RW */ - unsigned long enable_programmed_initial_priority:1;/* RW */ - unsigned long enable_automatic_apic_mode_selection:1;/* RW */ - unsigned long apic_mode_status:1; /* RO */ - unsigned long suppress_interrupts_to_self:1; /* RW */ - unsigned long enable_lock_based_system_flush:1;/* RW */ - unsigned long enable_extended_sb_status:1; /* RW */ - unsigned long suppress_int_prio_udt_to_self:1;/* RW */ - unsigned long use_legacy_descriptor_formats:1;/* RW */ - unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */ - unsigned long rsvd_37:1; - unsigned long thread_kill_timebase:8; /* RW */ - unsigned long address_interleave_select:1; /* RW */ - unsigned long rsvd_47:1; - unsigned long fun:16; /* RW */ - } s4; -}; - -/* ========================================================================= */ -/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ -/* ========================================================================= */ -#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL -#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL -#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL 0xc8020UL -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL) - -#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8 -#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8 -#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9c8 -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32) - -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63 -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL -#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL - - -union uvh_lb_bau_sb_activation_control_u { - unsigned long v; - struct uvh_lb_bau_sb_activation_control_s { - unsigned long index:6; /* RW */ - unsigned long rsvd_6_61:56; - unsigned long push:1; /* WP */ - unsigned long init:1; /* WP */ - } s; -}; - -/* ========================================================================= */ -/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ -/* ========================================================================= */ -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0 0xc8030UL -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0) - -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0 -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0 -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9d0 -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \ - is_uv3_hub() ? 
UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32) - -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL - - -union uvh_lb_bau_sb_activation_status_0_u { - unsigned long v; - struct uvh_lb_bau_sb_activation_status_0_s { - unsigned long status:64; /* RW */ - } s; -}; - -/* ========================================================================= */ -/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ -/* ========================================================================= */ -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1 0xc8040UL -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1) - -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8 -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8 -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9d8 -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32) - -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL - - -union uvh_lb_bau_sb_activation_status_1_u { - unsigned long v; - struct uvh_lb_bau_sb_activation_status_1_s { - unsigned long status:64; /* RW */ - } s; }; /* ========================================================================= */ -/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ -/* ========================================================================= */ -#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL -#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL -#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE 0xc8010UL -#define UVH_LB_BAU_SB_DESCRIPTOR_BASE ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE) - -#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0 -#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0 -#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9c0 -#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32) - -#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 - -#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 -#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL -#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL - -#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 -#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL -#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL - -#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 -#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x00003ffffffff000UL -#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL - -#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 53 -#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000ffffffffff000UL -#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0xffe0000000000000UL - -#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT ( \ - is_uv2_hub() ? 
UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \ - is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT) - -#define UVH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \ - is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK) - -#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \ - is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK) - -/* ========================================================================= */ /* UVH_NODE_ID */ /* ========================================================================= */ #define UVH_NODE_ID 0x0UL -#define UV2H_NODE_ID 0x0UL -#define UV3H_NODE_ID 0x0UL -#define UV4H_NODE_ID 0x0UL +/* UVH common defines*/ #define UVH_NODE_ID_FORCE1_SHFT 0 -#define UVH_NODE_ID_MANUFACTURER_SHFT 1 -#define UVH_NODE_ID_PART_NUMBER_SHFT 12 -#define UVH_NODE_ID_REVISION_SHFT 28 -#define UVH_NODE_ID_NODE_ID_SHFT 32 #define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL +#define UVH_NODE_ID_MANUFACTURER_SHFT 1 #define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL +#define UVH_NODE_ID_PART_NUMBER_SHFT 12 #define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL +#define UVH_NODE_ID_REVISION_SHFT 28 #define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL -#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL +#define UVH_NODE_ID_NODE_ID_SHFT 32 +#define UVH_NODE_ID_NI_PORT_SHFT 57 -#define UVXH_NODE_ID_FORCE1_SHFT 0 -#define UVXH_NODE_ID_MANUFACTURER_SHFT 1 -#define UVXH_NODE_ID_PART_NUMBER_SHFT 12 -#define UVXH_NODE_ID_REVISION_SHFT 28 -#define UVXH_NODE_ID_NODE_ID_SHFT 32 -#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50 -#define UVXH_NODE_ID_NI_PORT_SHFT 57 -#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL -#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL -#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL -#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL +/* UVXH common defines */ #define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL +#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50 #define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL #define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL -#define UV2H_NODE_ID_FORCE1_SHFT 0 -#define UV2H_NODE_ID_MANUFACTURER_SHFT 1 -#define UV2H_NODE_ID_PART_NUMBER_SHFT 12 -#define UV2H_NODE_ID_REVISION_SHFT 28 -#define UV2H_NODE_ID_NODE_ID_SHFT 32 -#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50 -#define UV2H_NODE_ID_NI_PORT_SHFT 57 -#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL -#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL -#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL -#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL -#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL -#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL -#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL - -#define UV3H_NODE_ID_FORCE1_SHFT 0 -#define UV3H_NODE_ID_MANUFACTURER_SHFT 1 -#define UV3H_NODE_ID_PART_NUMBER_SHFT 12 -#define UV3H_NODE_ID_REVISION_SHFT 28 -#define UV3H_NODE_ID_NODE_ID_SHFT 32 -#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48 -#define 
UV3H_NODE_ID_RESERVED_2_SHFT 49 -#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50 -#define UV3H_NODE_ID_NI_PORT_SHFT 57 -#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL -#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL -#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL -#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL -#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL -#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL -#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL -#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL -#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL - -#define UV4H_NODE_ID_FORCE1_SHFT 0 -#define UV4H_NODE_ID_MANUFACTURER_SHFT 1 -#define UV4H_NODE_ID_PART_NUMBER_SHFT 12 -#define UV4H_NODE_ID_REVISION_SHFT 28 -#define UV4H_NODE_ID_NODE_ID_SHFT 32 +/* UVYH common defines */ +#define UVYH_NODE_ID_NODE_ID_MASK 0x0000007f00000000UL +#define UVYH_NODE_ID_NI_PORT_MASK 0x7e00000000000000UL + +/* UV4 unique defines */ #define UV4H_NODE_ID_ROUTER_SELECT_SHFT 48 -#define UV4H_NODE_ID_RESERVED_2_SHFT 49 -#define UV4H_NODE_ID_NODES_PER_BIT_SHFT 50 -#define UV4H_NODE_ID_NI_PORT_SHFT 57 -#define UV4H_NODE_ID_FORCE1_MASK 0x0000000000000001UL -#define UV4H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL -#define UV4H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL -#define UV4H_NODE_ID_REVISION_MASK 0x00000000f0000000UL -#define UV4H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL #define UV4H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL +#define UV4H_NODE_ID_RESERVED_2_SHFT 49 #define UV4H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL -#define UV4H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL -#define UV4H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL + +/* UV3 unique defines */ +#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48 +#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL +#define UV3H_NODE_ID_RESERVED_2_SHFT 49 +#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL union uvh_node_id_u { unsigned long v; + + /* UVH common struct */ struct uvh_node_id_s { unsigned long force1:1; /* RO */ unsigned long manufacturer:11; /* RO */ unsigned long part_number:16; /* RO */ unsigned long revision:4; /* RO */ - unsigned long node_id:15; /* RW */ - unsigned long rsvd_47_63:17; + unsigned long rsvd_32_63:32; } s; + + /* UVXH common struct */ struct uvxh_node_id_s { unsigned long force1:1; /* RO */ unsigned long manufacturer:11; /* RO */ @@ -2444,17 +2841,47 @@ union uvh_node_id_u { unsigned long ni_port:5; /* RO */ unsigned long rsvd_62_63:2; } sx; - struct uv2h_node_id_s { + + /* UVYH common struct */ + struct uvyh_node_id_s { + unsigned long force1:1; /* RO */ + unsigned long manufacturer:11; /* RO */ + unsigned long part_number:16; /* RO */ + unsigned long revision:4; /* RO */ + unsigned long node_id:7; /* RW */ + unsigned long rsvd_39_56:18; + unsigned long ni_port:6; /* RO */ + unsigned long rsvd_63:1; + } sy; + + /* UV5 unique struct */ + struct uv5h_node_id_s { + unsigned long force1:1; /* RO */ + unsigned long manufacturer:11; /* RO */ + unsigned long part_number:16; /* RO */ + unsigned long revision:4; /* RO */ + unsigned long node_id:7; /* RW */ + unsigned long rsvd_39_56:18; + unsigned long ni_port:6; /* RO */ + unsigned long rsvd_63:1; + } s5; + + /* UV4 unique struct */ + struct uv4h_node_id_s { unsigned long force1:1; /* RO */ unsigned long manufacturer:11; /* RO */ unsigned long part_number:16; /* RO */ unsigned long revision:4; /* RO */ unsigned long node_id:15; /* RW */ - unsigned long rsvd_47_49:3; + unsigned 
long rsvd_47:1; + unsigned long router_select:1; /* RO */ + unsigned long rsvd_49:1; unsigned long nodes_per_bit:7; /* RO */ unsigned long ni_port:5; /* RO */ unsigned long rsvd_62_63:2; - } s2; + } s4; + + /* UV3 unique struct */ struct uv3h_node_id_s { unsigned long force1:1; /* RO */ unsigned long manufacturer:11; /* RO */ @@ -2468,186 +2895,569 @@ union uvh_node_id_u { unsigned long ni_port:5; /* RO */ unsigned long rsvd_62_63:2; } s3; - struct uv4h_node_id_s { + + /* UV2 unique struct */ + struct uv2h_node_id_s { unsigned long force1:1; /* RO */ unsigned long manufacturer:11; /* RO */ unsigned long part_number:16; /* RO */ unsigned long revision:4; /* RO */ unsigned long node_id:15; /* RW */ - unsigned long rsvd_47:1; - unsigned long router_select:1; /* RO */ - unsigned long rsvd_49:1; + unsigned long rsvd_47_49:3; unsigned long nodes_per_bit:7; /* RO */ unsigned long ni_port:5; /* RO */ unsigned long rsvd_62_63:2; - } s4; + } s2; +}; + +/* ========================================================================= */ +/* UVH_NODE_PRESENT_0 */ +/* ========================================================================= */ +#define UVH_NODE_PRESENT_0 ( \ + is_uv(UV5) ? 0x1400UL : \ + 0) + + +/* UVYH common defines */ +#define UVYH_NODE_PRESENT_0_NODES_SHFT 0 +#define UVYH_NODE_PRESENT_0_NODES_MASK 0xffffffffffffffffUL + + +union uvh_node_present_0_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_node_present_0_s { + unsigned long nodes:64; /* RW */ + } s; + + /* UVYH common struct */ + struct uvyh_node_present_0_s { + unsigned long nodes:64; /* RW */ + } sy; + + /* UV5 unique struct */ + struct uv5h_node_present_0_s { + unsigned long nodes:64; /* RW */ + } s5; +}; + +/* ========================================================================= */ +/* UVH_NODE_PRESENT_1 */ +/* ========================================================================= */ +#define UVH_NODE_PRESENT_1 ( \ + is_uv(UV5) ? 0x1408UL : \ + 0) + + +/* UVYH common defines */ +#define UVYH_NODE_PRESENT_1_NODES_SHFT 0 +#define UVYH_NODE_PRESENT_1_NODES_MASK 0xffffffffffffffffUL + + +union uvh_node_present_1_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_node_present_1_s { + unsigned long nodes:64; /* RW */ + } s; + + /* UVYH common struct */ + struct uvyh_node_present_1_s { + unsigned long nodes:64; /* RW */ + } sy; + + /* UV5 unique struct */ + struct uv5h_node_present_1_s { + unsigned long nodes:64; /* RW */ + } s5; }; /* ========================================================================= */ /* UVH_NODE_PRESENT_TABLE */ /* ========================================================================= */ -#define UVH_NODE_PRESENT_TABLE 0x1400UL +#define UVH_NODE_PRESENT_TABLE ( \ + is_uv(UV4) ? 0x1400UL : \ + is_uv(UV3) ? 0x1400UL : \ + is_uv(UV2) ? 0x1400UL : \ + 0) -#define UV2H_NODE_PRESENT_TABLE_DEPTH 16 -#define UV3H_NODE_PRESENT_TABLE_DEPTH 16 -#define UV4H_NODE_PRESENT_TABLE_DEPTH 4 #define UVH_NODE_PRESENT_TABLE_DEPTH ( \ - is_uv2_hub() ? UV2H_NODE_PRESENT_TABLE_DEPTH : \ - is_uv3_hub() ? UV3H_NODE_PRESENT_TABLE_DEPTH : \ - /*is_uv4_hub*/ UV4H_NODE_PRESENT_TABLE_DEPTH) + is_uv(UV4) ? 4 : \ + is_uv(UV3) ? 16 : \ + is_uv(UV2) ? 
16 : \ + 0) -#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0 -#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL + +/* UVXH common defines */ +#define UVXH_NODE_PRESENT_TABLE_NODES_SHFT 0 +#define UVXH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL union uvh_node_present_table_u { unsigned long v; + + /* UVH common struct */ struct uvh_node_present_table_s { unsigned long nodes:64; /* RW */ } s; + + /* UVXH common struct */ + struct uvxh_node_present_table_s { + unsigned long nodes:64; /* RW */ + } sx; + + /* UV4 unique struct */ + struct uv4h_node_present_table_s { + unsigned long nodes:64; /* RW */ + } s4; + + /* UV3 unique struct */ + struct uv3h_node_present_table_s { + unsigned long nodes:64; /* RW */ + } s3; + + /* UV2 unique struct */ + struct uv2h_node_present_table_s { + unsigned long nodes:64; /* RW */ + } s2; +}; + +/* ========================================================================= */ +/* UVH_RH10_GAM_ADDR_MAP_CONFIG */ +/* ========================================================================= */ +#define UVH_RH10_GAM_ADDR_MAP_CONFIG ( \ + is_uv(UV5) ? 0x470000UL : \ + 0) + + +/* UVYH common defines */ +#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_SHFT 6 +#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_N_SKT_MASK 0x00000000000001c0UL +#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_LS_ENABLE_SHFT 12 +#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_LS_ENABLE_MASK 0x0000000000001000UL +#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_MK_TME_KEYID_BITS_SHFT 16 +#define UVYH_RH10_GAM_ADDR_MAP_CONFIG_MK_TME_KEYID_BITS_MASK 0x00000000000f0000UL + + +union uvh_rh10_gam_addr_map_config_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh10_gam_addr_map_config_s { + unsigned long undef_0_5:6; /* Undefined */ + unsigned long n_skt:3; /* RW */ + unsigned long undef_9_11:3; /* Undefined */ + unsigned long ls_enable:1; /* RW */ + unsigned long undef_13_15:3; /* Undefined */ + unsigned long mk_tme_keyid_bits:4; /* RW */ + unsigned long rsvd_20_63:44; + } s; + + /* UVYH common struct */ + struct uvyh_rh10_gam_addr_map_config_s { + unsigned long undef_0_5:6; /* Undefined */ + unsigned long n_skt:3; /* RW */ + unsigned long undef_9_11:3; /* Undefined */ + unsigned long ls_enable:1; /* RW */ + unsigned long undef_13_15:3; /* Undefined */ + unsigned long mk_tme_keyid_bits:4; /* RW */ + unsigned long rsvd_20_63:44; + } sy; + + /* UV5 unique struct */ + struct uv5h_rh10_gam_addr_map_config_s { + unsigned long undef_0_5:6; /* Undefined */ + unsigned long n_skt:3; /* RW */ + unsigned long undef_9_11:3; /* Undefined */ + unsigned long ls_enable:1; /* RW */ + unsigned long undef_13_15:3; /* Undefined */ + unsigned long mk_tme_keyid_bits:4; /* RW */ + } s5; }; /* ========================================================================= */ -/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ +/* UVH_RH10_GAM_GRU_OVERLAY_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x4800c8UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \ - is_uv3_hub() ? 
UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR) - -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL - - -union uvh_rh_gam_alias210_overlay_config_0_mmr_u { +#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG ( \ + is_uv(UV5) ? 0x4700b0UL : \ + 0) + + +/* UVYH common defines */ +#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 25 +#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffe000000UL +#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_N_GRU_SHFT 52 +#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_N_GRU_MASK 0x0070000000000000UL +#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UVYH_RH10_GAM_GRU_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL + +#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK ( \ + is_uv(UV5) ? 0x000ffffffe000000UL : \ + 0) +#define UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT ( \ + is_uv(UV5) ? 
25 : \ + -1) + +union uvh_rh10_gam_gru_overlay_config_u { unsigned long v; - struct uvh_rh_gam_alias210_overlay_config_0_mmr_s { - unsigned long rsvd_0_23:24; - unsigned long base:8; /* RW */ - unsigned long rsvd_32_47:16; - unsigned long m_alias:5; /* RW */ - unsigned long rsvd_53_62:10; + + /* UVH common struct */ + struct uvh_rh10_gam_gru_overlay_config_s { + unsigned long undef_0_24:25; /* Undefined */ + unsigned long base:27; /* RW */ + unsigned long n_gru:3; /* RW */ + unsigned long undef_55_62:8; /* Undefined */ unsigned long enable:1; /* RW */ } s; - struct uvxh_rh_gam_alias210_overlay_config_0_mmr_s { - unsigned long rsvd_0_23:24; - unsigned long base:8; /* RW */ - unsigned long rsvd_32_47:16; - unsigned long m_alias:5; /* RW */ - unsigned long rsvd_53_62:10; + + /* UVYH common struct */ + struct uvyh_rh10_gam_gru_overlay_config_s { + unsigned long undef_0_24:25; /* Undefined */ + unsigned long base:27; /* RW */ + unsigned long n_gru:3; /* RW */ + unsigned long undef_55_62:8; /* Undefined */ unsigned long enable:1; /* RW */ - } sx; - struct uv2h_rh_gam_alias210_overlay_config_0_mmr_s { - unsigned long rsvd_0_23:24; - unsigned long base:8; /* RW */ - unsigned long rsvd_32_47:16; - unsigned long m_alias:5; /* RW */ - unsigned long rsvd_53_62:10; + } sy; + + /* UV5 unique struct */ + struct uv5h_rh10_gam_gru_overlay_config_s { + unsigned long undef_0_24:25; /* Undefined */ + unsigned long base:27; /* RW */ + unsigned long n_gru:3; /* RW */ + unsigned long undef_55_62:8; /* Undefined */ unsigned long enable:1; /* RW */ - } s2; - struct uv3h_rh_gam_alias210_overlay_config_0_mmr_s { - unsigned long rsvd_0_23:24; - unsigned long base:8; /* RW */ - unsigned long rsvd_32_47:16; - unsigned long m_alias:5; /* RW */ - unsigned long rsvd_53_62:10; + } s5; +}; + +/* ========================================================================= */ +/* UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0 */ +/* ========================================================================= */ +#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0 ( \ + is_uv(UV5) ? 0x473000UL : \ + 0) + + +/* UVYH common defines */ +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26 +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x000ffffffc000000UL +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 52 +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x03f0000000000000UL +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63 +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL + +#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK ( \ + is_uv(UV5) ? 0x000ffffffc000000UL : \ + 0) +#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT ( \ + is_uv(UV5) ? 
26 : \ + -1) + +union uvh_rh10_gam_mmioh_overlay_config0_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh10_gam_mmioh_overlay_config0_s { + unsigned long rsvd_0_25:26; + unsigned long base:26; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long undef_62:1; /* Undefined */ unsigned long enable:1; /* RW */ - } s3; - struct uv4h_rh_gam_alias210_overlay_config_0_mmr_s { - unsigned long rsvd_0_23:24; - unsigned long base:8; /* RW */ - unsigned long rsvd_32_47:16; - unsigned long m_alias:5; /* RW */ - unsigned long rsvd_53_62:10; + } s; + + /* UVYH common struct */ + struct uvyh_rh10_gam_mmioh_overlay_config0_s { + unsigned long rsvd_0_25:26; + unsigned long base:26; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long undef_62:1; /* Undefined */ unsigned long enable:1; /* RW */ + } sy; + + /* UV5 unique struct */ + struct uv5h_rh10_gam_mmioh_overlay_config0_s { + unsigned long rsvd_0_25:26; + unsigned long base:26; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long undef_62:1; /* Undefined */ + unsigned long enable:1; /* RW */ + } s5; +}; + +/* ========================================================================= */ +/* UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1 */ +/* ========================================================================= */ +#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1 ( \ + is_uv(UV5) ? 0x474000UL : \ + 0) + + +/* UVYH common defines */ +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26 +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x000ffffffc000000UL +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 52 +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x03f0000000000000UL +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63 +#define UVYH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL + +#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK ( \ + is_uv(UV5) ? 0x000ffffffc000000UL : \ + 0) +#define UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT ( \ + is_uv(UV5) ? 26 : \ + -1) + +union uvh_rh10_gam_mmioh_overlay_config1_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh10_gam_mmioh_overlay_config1_s { + unsigned long rsvd_0_25:26; + unsigned long base:26; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long undef_62:1; /* Undefined */ + unsigned long enable:1; /* RW */ + } s; + + /* UVYH common struct */ + struct uvyh_rh10_gam_mmioh_overlay_config1_s { + unsigned long rsvd_0_25:26; + unsigned long base:26; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long undef_62:1; /* Undefined */ + unsigned long enable:1; /* RW */ + } sy; + + /* UV5 unique struct */ + struct uv5h_rh10_gam_mmioh_overlay_config1_s { + unsigned long rsvd_0_25:26; + unsigned long base:26; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long undef_62:1; /* Undefined */ + unsigned long enable:1; /* RW */ + } s5; +}; + +/* ========================================================================= */ +/* UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0 */ +/* ========================================================================= */ +#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0 ( \ + is_uv(UV5) ? 0x473800UL : \ + 0) + +#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH ( \ + is_uv(UV5) ? 
128 : \ + 0) + + +/* UVYH common defines */ +#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0 +#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x000000000000007fUL + + +union uvh_rh10_gam_mmioh_redirect_config0_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh10_gam_mmioh_redirect_config0_s { + unsigned long nasid:7; /* RW */ + unsigned long rsvd_7_63:57; + } s; + + /* UVYH common struct */ + struct uvyh_rh10_gam_mmioh_redirect_config0_s { + unsigned long nasid:7; /* RW */ + unsigned long rsvd_7_63:57; + } sy; + + /* UV5 unique struct */ + struct uv5h_rh10_gam_mmioh_redirect_config0_s { + unsigned long nasid:7; /* RW */ + unsigned long rsvd_7_63:57; + } s5; +}; + +/* ========================================================================= */ +/* UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1 */ +/* ========================================================================= */ +#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1 ( \ + is_uv(UV5) ? 0x474800UL : \ + 0) + +#define UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH ( \ + is_uv(UV5) ? 128 : \ + 0) + + +/* UVYH common defines */ +#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0 +#define UVYH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x000000000000007fUL + + +union uvh_rh10_gam_mmioh_redirect_config1_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh10_gam_mmioh_redirect_config1_s { + unsigned long nasid:7; /* RW */ + unsigned long rsvd_7_63:57; + } s; + + /* UVYH common struct */ + struct uvyh_rh10_gam_mmioh_redirect_config1_s { + unsigned long nasid:7; /* RW */ + unsigned long rsvd_7_63:57; + } sy; + + /* UV5 unique struct */ + struct uv5h_rh10_gam_mmioh_redirect_config1_s { + unsigned long nasid:7; /* RW */ + unsigned long rsvd_7_63:57; + } s5; +}; + +/* ========================================================================= */ +/* UVH_RH10_GAM_MMR_OVERLAY_CONFIG */ +/* ========================================================================= */ +#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG ( \ + is_uv(UV5) ? 0x470090UL : \ + 0) + + +/* UVYH common defines */ +#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT 25 +#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_MASK 0x000ffffffe000000UL +#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UVYH_RH10_GAM_MMR_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL + +#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \ + is_uv(UV5) ? 0x000ffffffe000000UL : \ + 0) +#define UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT ( \ + is_uv(UV5) ? 
25 : \ + -1) + +union uvh_rh10_gam_mmr_overlay_config_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh10_gam_mmr_overlay_config_s { + unsigned long undef_0_24:25; /* Undefined */ + unsigned long base:27; /* RW */ + unsigned long undef_52_62:11; /* Undefined */ + unsigned long enable:1; /* RW */ + } s; + + /* UVYH common struct */ + struct uvyh_rh10_gam_mmr_overlay_config_s { + unsigned long undef_0_24:25; /* Undefined */ + unsigned long base:27; /* RW */ + unsigned long undef_52_62:11; /* Undefined */ + unsigned long enable:1; /* RW */ + } sy; + + /* UV5 unique struct */ + struct uv5h_rh10_gam_mmr_overlay_config_s { + unsigned long undef_0_24:25; /* Undefined */ + unsigned long base:27; /* RW */ + unsigned long undef_52_62:11; /* Undefined */ + unsigned long enable:1; /* RW */ + } s5; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_ADDR_MAP_CONFIG */ +/* ========================================================================= */ +#define UVH_RH_GAM_ADDR_MAP_CONFIG ( \ + is_uv(UV4) ? 0x480000UL : \ + is_uv(UV3) ? 0x1600000UL : \ + is_uv(UV2) ? 0x1600000UL : \ + 0) + + +/* UVXH common defines */ +#define UVXH_RH_GAM_ADDR_MAP_CONFIG_N_SKT_SHFT 6 +#define UVXH_RH_GAM_ADDR_MAP_CONFIG_N_SKT_MASK 0x00000000000003c0UL + +/* UV3 unique defines */ +#define UV3H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_SHFT 0 +#define UV3H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL + +/* UV2 unique defines */ +#define UV2H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_SHFT 0 +#define UV2H_RH_GAM_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL + + +union uvh_rh_gam_addr_map_config_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh_gam_addr_map_config_s { + unsigned long rsvd_0_5:6; + unsigned long n_skt:4; /* RW */ + unsigned long rsvd_10_63:54; + } s; + + /* UVXH common struct */ + struct uvxh_rh_gam_addr_map_config_s { + unsigned long rsvd_0_5:6; + unsigned long n_skt:4; /* RW */ + unsigned long rsvd_10_63:54; + } sx; + + /* UV4 unique struct */ + struct uv4h_rh_gam_addr_map_config_s { + unsigned long rsvd_0_5:6; + unsigned long n_skt:4; /* RW */ + unsigned long rsvd_10_63:54; } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_addr_map_config_s { + unsigned long m_skt:6; /* RW */ + unsigned long n_skt:4; /* RW */ + unsigned long rsvd_10_63:54; + } s3; + + /* UV2 unique struct */ + struct uv2h_rh_gam_addr_map_config_s { + unsigned long m_skt:6; /* RW */ + unsigned long n_skt:4; /* RW */ + unsigned long rsvd_10_63:54; + } s2; }; /* ========================================================================= */ -/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ +/* UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x4800d8UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \ - is_uv3_hub() ? 
UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR) - -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL - - -union uvh_rh_gam_alias210_overlay_config_1_mmr_u { +#define UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG ( \ + is_uv(UV4) ? 0x4800c8UL : \ + is_uv(UV3) ? 0x16000c8UL : \ + is_uv(UV2) ? 
0x16000c8UL : \ + 0) + + +/* UVXH common defines */ +#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_BASE_SHFT 24 +#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL +#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_M_ALIAS_SHFT 48 +#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL +#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UVXH_RH_GAM_ALIAS_0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL + + +union uvh_rh_gam_alias_0_overlay_config_u { unsigned long v; - struct uvh_rh_gam_alias210_overlay_config_1_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_alias_0_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; @@ -2655,7 +3465,9 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u { unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ } s; - struct uvxh_rh_gam_alias210_overlay_config_1_mmr_s { + + /* UVXH common struct */ + struct uvxh_rh_gam_alias_0_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; @@ -2663,15 +3475,19 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u { unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ } sx; - struct uv2h_rh_gam_alias210_overlay_config_1_mmr_s { + + /* UV4 unique struct */ + struct uv4h_rh_gam_alias_0_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; unsigned long m_alias:5; /* RW */ unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ - } s2; - struct uv3h_rh_gam_alias210_overlay_config_1_mmr_s { + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_alias_0_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; @@ -2679,66 +3495,96 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u { unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ } s3; - struct uv4h_rh_gam_alias210_overlay_config_1_mmr_s { + + /* UV2 unique struct */ + struct uv2h_rh_gam_alias_0_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; unsigned long m_alias:5; /* RW */ unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ + } s2; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG */ +/* ========================================================================= */ +#define UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG ( \ + is_uv(UV4) ? 0x4800d0UL : \ + is_uv(UV3) ? 0x16000d0UL : \ + is_uv(UV2) ? 
0x16000d0UL : \ + 0) + + +/* UVXH common defines */ +#define UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT 24 +#define UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL + + +union uvh_rh_gam_alias_0_redirect_config_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh_gam_alias_0_redirect_config_s { + unsigned long rsvd_0_23:24; + unsigned long dest_base:22; /* RW */ + unsigned long rsvd_46_63:18; + } s; + + /* UVXH common struct */ + struct uvxh_rh_gam_alias_0_redirect_config_s { + unsigned long rsvd_0_23:24; + unsigned long dest_base:22; /* RW */ + unsigned long rsvd_46_63:18; + } sx; + + /* UV4 unique struct */ + struct uv4h_rh_gam_alias_0_redirect_config_s { + unsigned long rsvd_0_23:24; + unsigned long dest_base:22; /* RW */ + unsigned long rsvd_46_63:18; } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_alias_0_redirect_config_s { + unsigned long rsvd_0_23:24; + unsigned long dest_base:22; /* RW */ + unsigned long rsvd_46_63:18; + } s3; + + /* UV2 unique struct */ + struct uv2h_rh_gam_alias_0_redirect_config_s { + unsigned long rsvd_0_23:24; + unsigned long dest_base:22; /* RW */ + unsigned long rsvd_46_63:18; + } s2; }; /* ========================================================================= */ -/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ +/* UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x4800e8UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR) - -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL -#define 
UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63 -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL -#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL - - -union uvh_rh_gam_alias210_overlay_config_2_mmr_u { +#define UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG ( \ + is_uv(UV4) ? 0x4800d8UL : \ + is_uv(UV3) ? 0x16000d8UL : \ + is_uv(UV2) ? 0x16000d8UL : \ + 0) + + +/* UVXH common defines */ +#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_BASE_SHFT 24 +#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL +#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_M_ALIAS_SHFT 48 +#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL +#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UVXH_RH_GAM_ALIAS_1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL + + +union uvh_rh_gam_alias_1_overlay_config_u { unsigned long v; - struct uvh_rh_gam_alias210_overlay_config_2_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_alias_1_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; @@ -2746,7 +3592,9 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u { unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ } s; - struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s { + + /* UVXH common struct */ + struct uvxh_rh_gam_alias_1_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; @@ -2754,15 +3602,19 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u { unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ } sx; - struct uv2h_rh_gam_alias210_overlay_config_2_mmr_s { + + /* UV4 unique struct */ + struct uv4h_rh_gam_alias_1_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; unsigned long m_alias:5; /* RW */ unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ - } s2; - struct uv3h_rh_gam_alias210_overlay_config_2_mmr_s { + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_alias_1_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; @@ -2770,321 +3622,289 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u { unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ } s3; - struct uv4h_rh_gam_alias210_overlay_config_2_mmr_s { + + /* UV2 unique struct */ + struct uv2h_rh_gam_alias_1_overlay_config_s { unsigned long rsvd_0_23:24; unsigned long base:8; /* RW */ unsigned long rsvd_32_47:16; unsigned long m_alias:5; /* RW */ unsigned long rsvd_53_62:10; unsigned long enable:1; /* RW */ - } s4; + } s2; }; /* ========================================================================= */ -/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ +/* UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x4800d0UL -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 
( \ - is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR) - -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL - -#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 -#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL - -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL +#define UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG ( \ + is_uv(UV4) ? 0x4800e0UL : \ + is_uv(UV3) ? 0x16000e0UL : \ + is_uv(UV2) ? 0x16000e0UL : \ + 0) -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL +/* UVXH common defines */ +#define UVXH_RH_GAM_ALIAS_1_REDIRECT_CONFIG_DEST_BASE_SHFT 24 +#define UVXH_RH_GAM_ALIAS_1_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL -union uvh_rh_gam_alias210_redirect_config_0_mmr_u { +union uvh_rh_gam_alias_1_redirect_config_u { unsigned long v; - struct uvh_rh_gam_alias210_redirect_config_0_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_alias_1_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; } s; - struct uvxh_rh_gam_alias210_redirect_config_0_mmr_s { + + /* UVXH common struct */ + struct uvxh_rh_gam_alias_1_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; } sx; - struct uv2h_rh_gam_alias210_redirect_config_0_mmr_s { + + /* UV4 unique struct */ + struct uv4h_rh_gam_alias_1_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; - } s2; - struct uv3h_rh_gam_alias210_redirect_config_0_mmr_s { + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_alias_1_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; } s3; - struct uv4h_rh_gam_alias210_redirect_config_0_mmr_s { + + /* UV2 unique struct */ + struct uv2h_rh_gam_alias_1_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; - } s4; + } s2; }; /* ========================================================================= */ -/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */ +/* UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x4800e0UL -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \ - is_uv3_hub() ? 
UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR) - -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL - -#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 -#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL +#define UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG ( \ + is_uv(UV4) ? 0x4800e8UL : \ + is_uv(UV3) ? 0x16000e8UL : \ + is_uv(UV2) ? 0x16000e8UL : \ + 0) -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL +/* UVXH common defines */ +#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_BASE_SHFT 24 +#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL +#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_M_ALIAS_SHFT 48 +#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL +#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UVXH_RH_GAM_ALIAS_2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL - -union uvh_rh_gam_alias210_redirect_config_1_mmr_u { +union uvh_rh_gam_alias_2_overlay_config_u { unsigned long v; - struct uvh_rh_gam_alias210_redirect_config_1_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_alias_2_overlay_config_s { unsigned long rsvd_0_23:24; - unsigned long dest_base:22; /* RW */ - unsigned long rsvd_46_63:18; + unsigned long base:8; /* RW */ + unsigned long rsvd_32_47:16; + unsigned long m_alias:5; /* RW */ + unsigned long rsvd_53_62:10; + unsigned long enable:1; /* RW */ } s; - struct uvxh_rh_gam_alias210_redirect_config_1_mmr_s { + + /* UVXH common struct */ + struct uvxh_rh_gam_alias_2_overlay_config_s { unsigned long rsvd_0_23:24; - unsigned long dest_base:22; /* RW */ - unsigned long rsvd_46_63:18; + unsigned long base:8; /* RW */ + unsigned long rsvd_32_47:16; + unsigned long m_alias:5; /* RW */ + unsigned long rsvd_53_62:10; + unsigned long enable:1; /* RW */ } sx; - struct uv2h_rh_gam_alias210_redirect_config_1_mmr_s { + + /* UV4 unique struct */ + struct uv4h_rh_gam_alias_2_overlay_config_s { unsigned long rsvd_0_23:24; - unsigned long dest_base:22; /* RW */ - unsigned long rsvd_46_63:18; - } s2; - struct uv3h_rh_gam_alias210_redirect_config_1_mmr_s { + unsigned long base:8; /* RW */ + unsigned long rsvd_32_47:16; + unsigned long m_alias:5; /* RW */ + unsigned long rsvd_53_62:10; + unsigned long enable:1; /* RW */ + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_alias_2_overlay_config_s { unsigned long rsvd_0_23:24; - unsigned long dest_base:22; /* RW */ - unsigned long rsvd_46_63:18; + unsigned long base:8; /* RW */ + unsigned long rsvd_32_47:16; + unsigned long m_alias:5; /* RW */ + unsigned long rsvd_53_62:10; + unsigned long enable:1; /* RW */ } s3; - struct uv4h_rh_gam_alias210_redirect_config_1_mmr_s { + + /* UV2 unique struct */ + struct uv2h_rh_gam_alias_2_overlay_config_s { unsigned long rsvd_0_23:24; - unsigned long dest_base:22; /* RW */ - unsigned long rsvd_46_63:18; - } s4; + unsigned long base:8; /* RW */ + unsigned long rsvd_32_47:16; + unsigned long m_alias:5; /* RW */ + 
unsigned long rsvd_53_62:10; + unsigned long enable:1; /* RW */ + } s2; }; /* ========================================================================= */ -/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */ +/* UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x4800f0UL -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR) - -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 -#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL - -#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 -#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL - -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 -#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL +#define UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG ( \ + is_uv(UV4) ? 0x4800f0UL : \ + is_uv(UV3) ? 0x16000f0UL : \ + is_uv(UV2) ? 0x16000f0UL : \ + 0) -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 -#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 -#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL +/* UVXH common defines */ +#define UVXH_RH_GAM_ALIAS_2_REDIRECT_CONFIG_DEST_BASE_SHFT 24 +#define UVXH_RH_GAM_ALIAS_2_REDIRECT_CONFIG_DEST_BASE_MASK 0x00003fffff000000UL -union uvh_rh_gam_alias210_redirect_config_2_mmr_u { +union uvh_rh_gam_alias_2_redirect_config_u { unsigned long v; - struct uvh_rh_gam_alias210_redirect_config_2_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_alias_2_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; } s; - struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s { + + /* UVXH common struct */ + struct uvxh_rh_gam_alias_2_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; } sx; - struct uv2h_rh_gam_alias210_redirect_config_2_mmr_s { + + /* UV4 unique struct */ + struct uv4h_rh_gam_alias_2_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; - } s2; - struct uv3h_rh_gam_alias210_redirect_config_2_mmr_s { + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_alias_2_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; } s3; - struct uv4h_rh_gam_alias210_redirect_config_2_mmr_s { + + /* UV2 unique struct */ + struct uv2h_rh_gam_alias_2_redirect_config_s { unsigned long rsvd_0_23:24; unsigned long dest_base:22; /* RW */ unsigned long rsvd_46_63:18; - } s4; -}; - -/* ========================================================================= */ -/* UVH_RH_GAM_CONFIG_MMR */ -/* ========================================================================= */ -#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL -#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL -#define UV4H_RH_GAM_CONFIG_MMR 0x480000UL -#define UVH_RH_GAM_CONFIG_MMR ( \ - is_uv2_hub() ? 
UV2H_RH_GAM_CONFIG_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_CONFIG_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_CONFIG_MMR) - -#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 -#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL - -#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 -#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL - -#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 -#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 -#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL -#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL - -#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 -#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 -#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL -#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL - -#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 -#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL - - -union uvh_rh_gam_config_mmr_u { - unsigned long v; - struct uvh_rh_gam_config_mmr_s { - unsigned long rsvd_0_5:6; - unsigned long n_skt:4; /* RW */ - unsigned long rsvd_10_63:54; - } s; - struct uvxh_rh_gam_config_mmr_s { - unsigned long rsvd_0_5:6; - unsigned long n_skt:4; /* RW */ - unsigned long rsvd_10_63:54; - } sx; - struct uv2h_rh_gam_config_mmr_s { - unsigned long m_skt:6; /* RW */ - unsigned long n_skt:4; /* RW */ - unsigned long rsvd_10_63:54; } s2; - struct uv3h_rh_gam_config_mmr_s { - unsigned long m_skt:6; /* RW */ - unsigned long n_skt:4; /* RW */ - unsigned long rsvd_10_63:54; - } s3; - struct uv4h_rh_gam_config_mmr_s { - unsigned long rsvd_0_5:6; - unsigned long n_skt:4; /* RW */ - unsigned long rsvd_10_63:54; - } s4; }; /* ========================================================================= */ -/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ +/* UVH_RH_GAM_GRU_OVERLAY_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL -#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x480010UL -#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \ - is_uv3_hub() ? 
UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR) - -#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 -#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL -#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 -#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL -#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 -#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 -#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL -#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL -#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62 -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL -#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 26 -#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 -#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL -#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK ( \ - is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \ - is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK) -#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT ( \ - is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \ - is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT : \ - /*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT) - -union uvh_rh_gam_gru_overlay_config_mmr_u { +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG ( \ + is_uv(UV4) ? 0x480010UL : \ + is_uv(UV3) ? 0x1600010UL : \ + is_uv(UV2) ? 
0x1600010UL : \ + 0) + + +/* UVXH common defines */ +#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_N_GRU_SHFT 52 +#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_N_GRU_MASK 0x00f0000000000000UL +#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL + +/* UV4A unique defines */ +#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26 +#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffc000000UL + +/* UV4 unique defines */ +#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26 +#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffffc000000UL + +/* UV3 unique defines */ +#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 28 +#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffff0000000UL +#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MODE_SHFT 62 +#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MODE_MASK 0x4000000000000000UL + +/* UV2 unique defines */ +#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 28 +#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x00003ffff0000000UL + +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK ( \ + is_uv(UV4A) ? 0x000ffffffc000000UL : \ + is_uv(UV4) ? 0x00003ffffc000000UL : \ + is_uv(UV3) ? 0x00003ffff0000000UL : \ + is_uv(UV2) ? 0x00003ffff0000000UL : \ + 0) +#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT ( \ + is_uv(UV4) ? 26 : \ + is_uv(UV3) ? 28 : \ + is_uv(UV2) ? 28 : \ + -1) + +union uvh_rh_gam_gru_overlay_config_u { unsigned long v; - struct uvh_rh_gam_gru_overlay_config_mmr_s { - unsigned long rsvd_0_51:52; + + /* UVH common struct */ + struct uvh_rh_gam_gru_overlay_config_s { + unsigned long rsvd_0_45:46; + unsigned long rsvd_46_51:6; unsigned long n_gru:4; /* RW */ unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ } s; - struct uvxh_rh_gam_gru_overlay_config_mmr_s { + + /* UVXH common struct */ + struct uvxh_rh_gam_gru_overlay_config_s { unsigned long rsvd_0_45:46; unsigned long rsvd_46_51:6; unsigned long n_gru:4; /* RW */ unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ } sx; - struct uv2h_rh_gam_gru_overlay_config_mmr_s { - unsigned long rsvd_0_27:28; - unsigned long base:18; /* RW */ + + /* UV4A unique struct */ + struct uv4ah_rh_gam_gru_overlay_config_s { + unsigned long rsvd_0_24:25; + unsigned long undef_25:1; /* Undefined */ + unsigned long base:26; /* RW */ + unsigned long n_gru:4; /* RW */ + unsigned long rsvd_56_62:7; + unsigned long enable:1; /* RW */ + } s4a; + + /* UV4 unique struct */ + struct uv4h_rh_gam_gru_overlay_config_s { + unsigned long rsvd_0_24:25; + unsigned long undef_25:1; /* Undefined */ + unsigned long base:20; /* RW */ unsigned long rsvd_46_51:6; unsigned long n_gru:4; /* RW */ unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ - } s2; - struct uv3h_rh_gam_gru_overlay_config_mmr_s { + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_gru_overlay_config_s { unsigned long rsvd_0_27:28; unsigned long base:18; /* RW */ unsigned long rsvd_46_51:6; @@ -3093,86 +3913,141 @@ union uvh_rh_gam_gru_overlay_config_mmr_u { unsigned long mode:1; /* RW */ unsigned long enable:1; /* RW */ } s3; - struct uv4h_rh_gam_gru_overlay_config_mmr_s { - unsigned long rsvd_0_24:25; - unsigned long undef_25:1; /* Undefined */ - unsigned long base:20; /* RW */ + + /* UV2 unique struct */ + struct uv2h_rh_gam_gru_overlay_config_s { + unsigned long rsvd_0_27:28; + unsigned long base:18; /* RW */ unsigned long rsvd_46_51:6; unsigned long n_gru:4; /* RW */ unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ - } s4; + } s2; }; /* 
========================================================================= */ -/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */ +/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR") -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x483000UL -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR) - - -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26 -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46 -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63 -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26 -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46 -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63 -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 52 -#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x000ffffffc000000UL -#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x03f0000000000000UL -#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT) - -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK) - -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK) - -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK) - -union uvh_rh_gam_mmioh_overlay_config0_mmr_u { +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG ( \ + is_uv(UV2) ? 
0x1600030UL : \ + 0) + + + +/* UV2 unique defines */ +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT 27 +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_MASK 0x00003ffff8000000UL +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_M_IO_SHFT 46 +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_M_IO_MASK 0x000fc00000000000UL +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_N_IO_SHFT 52 +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_N_IO_MASK 0x00f0000000000000UL +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL + +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT ( \ + is_uv(UV2) ? 27 : \ + uv_undefined("UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT")) + +union uvh_rh_gam_mmioh_overlay_config_u { unsigned long v; - struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_mmioh_overlay_config_s { + unsigned long rsvd_0_26:27; + unsigned long base:19; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; /* RW */ + unsigned long rsvd_56_62:7; + unsigned long enable:1; /* RW */ + } s; + + /* UVXH common struct */ + struct uvxh_rh_gam_mmioh_overlay_config_s { + unsigned long rsvd_0_26:27; + unsigned long base:19; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; /* RW */ + unsigned long rsvd_56_62:7; + unsigned long enable:1; /* RW */ + } sx; + + /* UV2 unique struct */ + struct uv2h_rh_gam_mmioh_overlay_config_s { + unsigned long rsvd_0_26:27; + unsigned long base:19; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; /* RW */ + unsigned long rsvd_56_62:7; + unsigned long enable:1; /* RW */ + } s2; +}; + +/* ========================================================================= */ +/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0 */ +/* ========================================================================= */ +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0 ( \ + is_uv(UV4) ? 0x483000UL : \ + is_uv(UV3) ? 0x1603000UL : \ + 0) + +/* UV4A unique defines */ +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26 +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x000ffffffc000000UL +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 52 +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x03f0000000000000UL +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63 +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL + +/* UV4 unique defines */ +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26 +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x00003ffffc000000UL +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 46 +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x000fc00000000000UL +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63 +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL + +/* UV3 unique defines */ +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT 26 +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK 0x00003ffffc000000UL +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_SHFT 46 +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_M_IO_MASK 0x000fc00000000000UL +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_SHFT 63 +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_ENABLE_MASK 0x8000000000000000UL + +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK ( \ + is_uv(UV4A) ? 0x000ffffffc000000UL : \ + is_uv(UV4) ? 0x00003ffffc000000UL : \ + is_uv(UV3) ? 0x00003ffffc000000UL : \ + 0) +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT ( \ + is_uv(UV4) ? 26 : \ + is_uv(UV3) ? 
26 : \ + -1) + +union uvh_rh_gam_mmioh_overlay_config0_u { + unsigned long v; + + /* UVH common struct */ + struct uvh_rh_gam_mmioh_overlay_config0_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long m_io:6; /* RW */ unsigned long n_io:4; unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ - } s3; - struct uv4h_rh_gam_mmioh_overlay_config0_mmr_s { + } s; + + /* UVXH common struct */ + struct uvxh_rh_gam_mmioh_overlay_config0_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long m_io:6; /* RW */ unsigned long n_io:4; unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ - } s4; + } sx; + + /* UV4A unique struct */ struct uv4ah_rh_gam_mmioh_overlay_config0_mmr_s { unsigned long rsvd_0_25:26; unsigned long base:26; /* RW */ @@ -3181,71 +4056,94 @@ union uvh_rh_gam_mmioh_overlay_config0_mmr_u { unsigned long undef_62:1; /* Undefined */ unsigned long enable:1; /* RW */ } s4a; + + /* UV4 unique struct */ + struct uv4h_rh_gam_mmioh_overlay_config0_s { + unsigned long rsvd_0_25:26; + unsigned long base:20; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long rsvd_56_62:7; + unsigned long enable:1; /* RW */ + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_mmioh_overlay_config0_s { + unsigned long rsvd_0_25:26; + unsigned long base:20; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long rsvd_56_62:7; + unsigned long enable:1; /* RW */ + } s3; }; /* ========================================================================= */ -/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */ +/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1 */ /* ========================================================================= */ -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR") -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1603000UL -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x484000UL -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR) - - -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26 -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46 -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63 -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26 -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46 -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63 -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 52 -#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x000ffffffc000000UL -#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x03f0000000000000UL - -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \ - is_uv4a_hub() ? 
UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT) - -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK) - -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK) - -union uvh_rh_gam_mmioh_overlay_config1_mmr_u { +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1 ( \ + is_uv(UV4) ? 0x484000UL : \ + is_uv(UV3) ? 0x1604000UL : \ + 0) + +/* UV4A unique defines */ +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26 +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x000ffffffc000000UL +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 52 +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x03f0000000000000UL +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63 +#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL + +/* UV4 unique defines */ +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26 +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x00003ffffc000000UL +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 46 +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x000fc00000000000UL +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63 +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL + +/* UV3 unique defines */ +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT 26 +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK 0x00003ffffc000000UL +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_SHFT 46 +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_M_IO_MASK 0x000fc00000000000UL +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_SHFT 63 +#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_ENABLE_MASK 0x8000000000000000UL + +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK ( \ + is_uv(UV4A) ? 0x000ffffffc000000UL : \ + is_uv(UV4) ? 0x00003ffffc000000UL : \ + is_uv(UV3) ? 0x00003ffffc000000UL : \ + 0) +#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT ( \ + is_uv(UV4) ? 26 : \ + is_uv(UV3) ? 
26 : \ + -1) + +union uvh_rh_gam_mmioh_overlay_config1_u { unsigned long v; - struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_mmioh_overlay_config1_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long m_io:6; /* RW */ unsigned long n_io:4; unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ - } s3; - struct uv4h_rh_gam_mmioh_overlay_config1_mmr_s { + } s; + + /* UVXH common struct */ + struct uvxh_rh_gam_mmioh_overlay_config1_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long m_io:6; /* RW */ unsigned long n_io:4; unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ - } s4; + } sx; + + /* UV4A unique struct */ struct uv4ah_rh_gam_mmioh_overlay_config1_mmr_s { unsigned long rsvd_0_25:26; unsigned long base:26; /* RW */ @@ -3254,232 +4152,275 @@ union uvh_rh_gam_mmioh_overlay_config1_mmr_u { unsigned long undef_62:1; /* Undefined */ unsigned long enable:1; /* RW */ } s4a; -}; -/* ========================================================================= */ -/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */ -/* ========================================================================= */ -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL -#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR") -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR") -#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR) - - -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27 -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46 -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52 -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL -#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - - -union uvh_rh_gam_mmioh_overlay_config_mmr_u { - unsigned long v; - struct uv2h_rh_gam_mmioh_overlay_config_mmr_s { - unsigned long rsvd_0_26:27; - unsigned long base:19; /* RW */ + /* UV4 unique struct */ + struct uv4h_rh_gam_mmioh_overlay_config1_s { + unsigned long rsvd_0_25:26; + unsigned long base:20; /* RW */ unsigned long m_io:6; /* RW */ - unsigned long n_io:4; /* RW */ + unsigned long n_io:4; unsigned long rsvd_56_62:7; unsigned long enable:1; /* RW */ - } s2; + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_mmioh_overlay_config1_s { + unsigned long rsvd_0_25:26; + unsigned long base:20; /* RW */ + unsigned long m_io:6; /* RW */ + unsigned long n_io:4; + unsigned long rsvd_56_62:7; + unsigned long enable:1; /* RW */ + } s3; }; /* ========================================================================= */ -/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */ +/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0 */ /* ========================================================================= */ -#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR") -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x483800UL -#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR ( \ - is_uv2_hub() ? 
UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR) +#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0 ( \ + is_uv(UV4) ? 0x483800UL : \ + is_uv(UV3) ? 0x1603800UL : \ + 0) -#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH") -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128 -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128 -#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH ( \ - is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH) +#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH ( \ + is_uv(UV4) ? 128 : \ + is_uv(UV3) ? 128 : \ + 0) +/* UV4A unique defines */ +#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0 +#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000000fffUL -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0 -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL +/* UV4 unique defines */ +#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0 +#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000007fffUL -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0 -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL +/* UV3 unique defines */ +#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0 +#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000007fffUL -#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000000fffUL -#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK) - -union uvh_rh_gam_mmioh_redirect_config0_mmr_u { +union uvh_rh_gam_mmioh_redirect_config0_u { unsigned long v; - struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_mmioh_redirect_config0_s { unsigned long nasid:15; /* RW */ unsigned long rsvd_15_63:49; - } s3; - struct uv4h_rh_gam_mmioh_redirect_config0_mmr_s { + } s; + + /* UVXH common struct */ + struct uvxh_rh_gam_mmioh_redirect_config0_s { unsigned long nasid:15; /* RW */ unsigned long rsvd_15_63:49; - } s4; - struct uv4ah_rh_gam_mmioh_redirect_config0_mmr_s { + } sx; + + struct uv4ah_rh_gam_mmioh_redirect_config0_s { unsigned long nasid:12; /* RW */ unsigned long rsvd_12_63:52; } s4a; + + /* UV4 unique struct */ + struct uv4h_rh_gam_mmioh_redirect_config0_s { + unsigned long nasid:15; /* RW */ + unsigned long rsvd_15_63:49; + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_mmioh_redirect_config0_s { + unsigned long nasid:15; /* RW */ + unsigned long rsvd_15_63:49; + } s3; }; /* ========================================================================= */ -/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */ +/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1 */ /* ========================================================================= */ -#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR") -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x484800UL -#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR ( \ - is_uv2_hub() ? 
UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR) - -#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH") -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128 -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128 -#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH ( \ - is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH) +#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1 ( \ + is_uv(UV4) ? 0x484800UL : \ + is_uv(UV3) ? 0x1604800UL : \ + 0) +#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH ( \ + is_uv(UV4) ? 128 : \ + is_uv(UV3) ? 128 : \ + 0) -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0 -#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL +/* UV4A unique defines */ +#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_SHFT 0 +#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_NASID_MASK 0x0000000000000fffUL -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0 -#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL +/* UV4 unique defines */ +#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0 +#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000007fffUL -#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000000fffUL +/* UV3 unique defines */ +#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_SHFT 0 +#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_NASID_MASK 0x0000000000007fffUL -#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK ( \ - is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \ - is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK) -union uvh_rh_gam_mmioh_redirect_config1_mmr_u { +union uvh_rh_gam_mmioh_redirect_config1_u { unsigned long v; - struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_mmioh_redirect_config1_s { unsigned long nasid:15; /* RW */ unsigned long rsvd_15_63:49; - } s3; - struct uv4h_rh_gam_mmioh_redirect_config1_mmr_s { + } s; + + /* UVXH common struct */ + struct uvxh_rh_gam_mmioh_redirect_config1_s { unsigned long nasid:15; /* RW */ unsigned long rsvd_15_63:49; - } s4; - struct uv4ah_rh_gam_mmioh_redirect_config1_mmr_s { + } sx; + + struct uv4ah_rh_gam_mmioh_redirect_config1_s { unsigned long nasid:12; /* RW */ unsigned long rsvd_12_63:52; } s4a; + + /* UV4 unique struct */ + struct uv4h_rh_gam_mmioh_redirect_config1_s { + unsigned long nasid:15; /* RW */ + unsigned long rsvd_15_63:49; + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_mmioh_redirect_config1_s { + unsigned long nasid:15; /* RW */ + unsigned long rsvd_15_63:49; + } s3; }; /* ========================================================================= */ -/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ +/* UVH_RH_GAM_MMR_OVERLAY_CONFIG */ /* ========================================================================= */ -#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL -#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL -#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x480028UL -#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR ( \ - is_uv2_hub() ? UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \ - is_uv3_hub() ? 
UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR : \ - /*is_uv4_hub*/ UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR) - -#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 -#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL -#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 -#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL -#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 -#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 -#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - -#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 -#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 -#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL -#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL - - -union uvh_rh_gam_mmr_overlay_config_mmr_u { +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG ( \ + is_uv(UV4) ? 0x480028UL : \ + is_uv(UV3) ? 0x1600028UL : \ + is_uv(UV2) ? 0x1600028UL : \ + 0) + + +/* UVXH common defines */ +#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT 26 +#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \ + is_uv(UV4A) ? 0x000ffffffc000000UL : \ + is_uv(UV4) ? 0x00003ffffc000000UL : \ + is_uv(UV3) ? 0x00003ffffc000000UL : \ + is_uv(UV2) ? 0x00003ffffc000000UL : \ + 0) +#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_ENABLE_SHFT 63 +#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL + +/* UV4A unique defines */ +#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT 26 +#define UV4AH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK 0x000ffffffc000000UL + +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_MASK ( \ + is_uv(UV4A) ? 0x000ffffffc000000UL : \ + is_uv(UV4) ? 0x00003ffffc000000UL : \ + is_uv(UV3) ? 0x00003ffffc000000UL : \ + is_uv(UV2) ? 0x00003ffffc000000UL : \ + 0) + +#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT ( \ + is_uv(UV4) ? 26 : \ + is_uv(UV3) ? 26 : \ + is_uv(UV2) ? 
26 : \ + -1) + +union uvh_rh_gam_mmr_overlay_config_u { unsigned long v; - struct uvh_rh_gam_mmr_overlay_config_mmr_s { + + /* UVH common struct */ + struct uvh_rh_gam_mmr_overlay_config_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long rsvd_46_62:17; unsigned long enable:1; /* RW */ } s; - struct uvxh_rh_gam_mmr_overlay_config_mmr_s { + + /* UVXH common struct */ + struct uvxh_rh_gam_mmr_overlay_config_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long rsvd_46_62:17; unsigned long enable:1; /* RW */ } sx; - struct uv2h_rh_gam_mmr_overlay_config_mmr_s { + + /* UV4 unique struct */ + struct uv4h_rh_gam_mmr_overlay_config_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long rsvd_46_62:17; unsigned long enable:1; /* RW */ - } s2; - struct uv3h_rh_gam_mmr_overlay_config_mmr_s { + } s4; + + /* UV3 unique struct */ + struct uv3h_rh_gam_mmr_overlay_config_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long rsvd_46_62:17; unsigned long enable:1; /* RW */ } s3; - struct uv4h_rh_gam_mmr_overlay_config_mmr_s { + + /* UV2 unique struct */ + struct uv2h_rh_gam_mmr_overlay_config_s { unsigned long rsvd_0_25:26; unsigned long base:20; /* RW */ unsigned long rsvd_46_62:17; unsigned long enable:1; /* RW */ - } s4; + } s2; }; /* ========================================================================= */ /* UVH_RTC */ /* ========================================================================= */ -#define UV2H_RTC 0x340000UL -#define UV3H_RTC 0x340000UL -#define UV4H_RTC 0xe0000UL #define UVH_RTC ( \ - is_uv2_hub() ? UV2H_RTC : \ - is_uv3_hub() ? UV3H_RTC : \ - /*is_uv4_hub*/ UV4H_RTC) + is_uv(UV5) ? 0xe0000UL : \ + is_uv(UV4) ? 0xe0000UL : \ + is_uv(UV3) ? 0x340000UL : \ + is_uv(UV2) ? 
0x340000UL : \ + 0) +/* UVH common defines*/ #define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 #define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL union uvh_rtc_u { unsigned long v; + + /* UVH common struct */ struct uvh_rtc_s { unsigned long real_time_clock:56; /* RW */ unsigned long rsvd_56_63:8; } s; + + /* UV5 unique struct */ + struct uv5h_rtc_s { + unsigned long real_time_clock:56; /* RW */ + unsigned long rsvd_56_63:8; + } s5; + + /* UV4 unique struct */ + struct uv4h_rtc_s { + unsigned long real_time_clock:56; /* RW */ + unsigned long rsvd_56_63:8; + } s4; + + /* UV3 unique struct */ + struct uv3h_rtc_s { + unsigned long real_time_clock:56; /* RW */ + unsigned long rsvd_56_63:8; + } s3; + + /* UV2 unique struct */ + struct uv2h_rtc_s { + unsigned long real_time_clock:56; /* RW */ + unsigned long rsvd_56_63:8; + } s2; }; /* ========================================================================= */ @@ -3487,26 +4428,29 @@ union uvh_rtc_u { /* ========================================================================= */ #define UVH_RTC1_INT_CONFIG 0x615c0UL +/* UVH common defines*/ #define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0 -#define UVH_RTC1_INT_CONFIG_DM_SHFT 8 -#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11 -#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12 -#define UVH_RTC1_INT_CONFIG_P_SHFT 13 -#define UVH_RTC1_INT_CONFIG_T_SHFT 15 -#define UVH_RTC1_INT_CONFIG_M_SHFT 16 -#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32 #define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVH_RTC1_INT_CONFIG_DM_SHFT 8 #define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL +#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11 #define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12 #define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVH_RTC1_INT_CONFIG_P_SHFT 13 #define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL +#define UVH_RTC1_INT_CONFIG_T_SHFT 15 #define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL +#define UVH_RTC1_INT_CONFIG_M_SHFT 16 #define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL +#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32 #define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL union uvh_rtc1_int_config_u { unsigned long v; + + /* UVH common struct */ struct uvh_rtc1_int_config_s { unsigned long vector_:8; /* RW */ unsigned long dm:3; /* RW */ @@ -3519,591 +4463,175 @@ union uvh_rtc1_int_config_u { unsigned long rsvd_17_31:15; unsigned long apic_id:32; /* RW */ } s; -}; - -/* ========================================================================= */ -/* UVH_SCRATCH5 */ -/* ========================================================================= */ -#define UV2H_SCRATCH5 0x2d0200UL -#define UV3H_SCRATCH5 0x2d0200UL -#define UV4H_SCRATCH5 0xb0200UL -#define UVH_SCRATCH5 ( \ - is_uv2_hub() ? UV2H_SCRATCH5 : \ - is_uv3_hub() ? UV3H_SCRATCH5 : \ - /*is_uv4_hub*/ UV4H_SCRATCH5) - -#define UV2H_SCRATCH5_32 0x778 -#define UV3H_SCRATCH5_32 0x778 -#define UV4H_SCRATCH5_32 0x798 -#define UVH_SCRATCH5_32 ( \ - is_uv2_hub() ? UV2H_SCRATCH5_32 : \ - is_uv3_hub() ? 
UV3H_SCRATCH5_32 : \ - /*is_uv4_hub*/ UV4H_SCRATCH5_32) - -#define UVH_SCRATCH5_SCRATCH5_SHFT 0 -#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL - - -union uvh_scratch5_u { - unsigned long v; - struct uvh_scratch5_s { - unsigned long scratch5:64; /* RW, W1CS */ - } s; -}; -/* ========================================================================= */ -/* UVH_SCRATCH5_ALIAS */ -/* ========================================================================= */ -#define UV2H_SCRATCH5_ALIAS 0x2d0208UL -#define UV3H_SCRATCH5_ALIAS 0x2d0208UL -#define UV4H_SCRATCH5_ALIAS 0xb0208UL -#define UVH_SCRATCH5_ALIAS ( \ - is_uv2_hub() ? UV2H_SCRATCH5_ALIAS : \ - is_uv3_hub() ? UV3H_SCRATCH5_ALIAS : \ - /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS) - -#define UV2H_SCRATCH5_ALIAS_32 0x780 -#define UV3H_SCRATCH5_ALIAS_32 0x780 -#define UV4H_SCRATCH5_ALIAS_32 0x7a0 -#define UVH_SCRATCH5_ALIAS_32 ( \ - is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_32 : \ - is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_32 : \ - /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_32) - - -/* ========================================================================= */ -/* UVH_SCRATCH5_ALIAS_2 */ -/* ========================================================================= */ -#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL -#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL -#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL -#define UVH_SCRATCH5_ALIAS_2 ( \ - is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_2 : \ - is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_2 : \ - /*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_2) -#define UVH_SCRATCH5_ALIAS_2_32 0x788 - - -/* ========================================================================= */ -/* UVXH_EVENT_OCCURRED2 */ -/* ========================================================================= */ -#define UVXH_EVENT_OCCURRED2 0x70100UL - -#define UV2H_EVENT_OCCURRED2_32 0xb68 -#define UV3H_EVENT_OCCURRED2_32 0xb68 -#define UV4H_EVENT_OCCURRED2_32 0x608 -#define UVH_EVENT_OCCURRED2_32 ( \ - is_uv2_hub() ? UV2H_EVENT_OCCURRED2_32 : \ - is_uv3_hub() ? 
UV3H_EVENT_OCCURRED2_32 : \ - /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_32) - - -#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0 -#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1 -#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2 -#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3 -#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4 -#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5 -#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6 -#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7 -#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8 -#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9 -#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10 -#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11 -#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12 -#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13 -#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14 -#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15 -#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16 -#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17 -#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18 -#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19 -#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20 -#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21 -#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22 -#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23 -#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24 -#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25 -#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26 -#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27 -#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28 -#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29 -#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30 -#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31 -#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL -#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL -#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL -#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL -#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL -#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL -#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL -#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL -#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL -#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL -#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL -#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL -#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL -#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL -#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL -#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL -#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL -#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL -#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL -#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL -#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL -#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL -#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL -#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL -#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL -#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL -#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL -#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL -#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL -#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL -#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL -#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL - -#define UV3H_EVENT_OCCURRED2_RTC_0_SHFT 0 -#define 
UV3H_EVENT_OCCURRED2_RTC_1_SHFT 1 -#define UV3H_EVENT_OCCURRED2_RTC_2_SHFT 2 -#define UV3H_EVENT_OCCURRED2_RTC_3_SHFT 3 -#define UV3H_EVENT_OCCURRED2_RTC_4_SHFT 4 -#define UV3H_EVENT_OCCURRED2_RTC_5_SHFT 5 -#define UV3H_EVENT_OCCURRED2_RTC_6_SHFT 6 -#define UV3H_EVENT_OCCURRED2_RTC_7_SHFT 7 -#define UV3H_EVENT_OCCURRED2_RTC_8_SHFT 8 -#define UV3H_EVENT_OCCURRED2_RTC_9_SHFT 9 -#define UV3H_EVENT_OCCURRED2_RTC_10_SHFT 10 -#define UV3H_EVENT_OCCURRED2_RTC_11_SHFT 11 -#define UV3H_EVENT_OCCURRED2_RTC_12_SHFT 12 -#define UV3H_EVENT_OCCURRED2_RTC_13_SHFT 13 -#define UV3H_EVENT_OCCURRED2_RTC_14_SHFT 14 -#define UV3H_EVENT_OCCURRED2_RTC_15_SHFT 15 -#define UV3H_EVENT_OCCURRED2_RTC_16_SHFT 16 -#define UV3H_EVENT_OCCURRED2_RTC_17_SHFT 17 -#define UV3H_EVENT_OCCURRED2_RTC_18_SHFT 18 -#define UV3H_EVENT_OCCURRED2_RTC_19_SHFT 19 -#define UV3H_EVENT_OCCURRED2_RTC_20_SHFT 20 -#define UV3H_EVENT_OCCURRED2_RTC_21_SHFT 21 -#define UV3H_EVENT_OCCURRED2_RTC_22_SHFT 22 -#define UV3H_EVENT_OCCURRED2_RTC_23_SHFT 23 -#define UV3H_EVENT_OCCURRED2_RTC_24_SHFT 24 -#define UV3H_EVENT_OCCURRED2_RTC_25_SHFT 25 -#define UV3H_EVENT_OCCURRED2_RTC_26_SHFT 26 -#define UV3H_EVENT_OCCURRED2_RTC_27_SHFT 27 -#define UV3H_EVENT_OCCURRED2_RTC_28_SHFT 28 -#define UV3H_EVENT_OCCURRED2_RTC_29_SHFT 29 -#define UV3H_EVENT_OCCURRED2_RTC_30_SHFT 30 -#define UV3H_EVENT_OCCURRED2_RTC_31_SHFT 31 -#define UV3H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL -#define UV3H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL -#define UV3H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL -#define UV3H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL -#define UV3H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL -#define UV3H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL -#define UV3H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL -#define UV3H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL -#define UV3H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL -#define UV3H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL -#define UV3H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL -#define UV3H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL -#define UV3H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL -#define UV3H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL -#define UV3H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL -#define UV3H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL -#define UV3H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL -#define UV3H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL -#define UV3H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL -#define UV3H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL -#define UV3H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL -#define UV3H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL -#define UV3H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL -#define UV3H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL -#define UV3H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL -#define UV3H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL -#define UV3H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL -#define UV3H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL -#define UV3H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL -#define UV3H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL -#define UV3H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL -#define UV3H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL + /* UV5 unique struct */ + struct uv5h_rtc1_int_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long 
status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s5; -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_SHFT 0 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_SHFT 1 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_SHFT 2 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_SHFT 3 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_SHFT 4 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_SHFT 5 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_SHFT 6 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_SHFT 7 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_SHFT 8 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_SHFT 9 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_SHFT 10 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_SHFT 11 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_SHFT 12 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_SHFT 13 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_SHFT 14 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_SHFT 15 -#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT 16 -#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT 17 -#define UV4H_EVENT_OCCURRED2_RTC_0_SHFT 18 -#define UV4H_EVENT_OCCURRED2_RTC_1_SHFT 19 -#define UV4H_EVENT_OCCURRED2_RTC_2_SHFT 20 -#define UV4H_EVENT_OCCURRED2_RTC_3_SHFT 21 -#define UV4H_EVENT_OCCURRED2_RTC_4_SHFT 22 -#define UV4H_EVENT_OCCURRED2_RTC_5_SHFT 23 -#define UV4H_EVENT_OCCURRED2_RTC_6_SHFT 24 -#define UV4H_EVENT_OCCURRED2_RTC_7_SHFT 25 -#define UV4H_EVENT_OCCURRED2_RTC_8_SHFT 26 -#define UV4H_EVENT_OCCURRED2_RTC_9_SHFT 27 -#define UV4H_EVENT_OCCURRED2_RTC_10_SHFT 28 -#define UV4H_EVENT_OCCURRED2_RTC_11_SHFT 29 -#define UV4H_EVENT_OCCURRED2_RTC_12_SHFT 30 -#define UV4H_EVENT_OCCURRED2_RTC_13_SHFT 31 -#define UV4H_EVENT_OCCURRED2_RTC_14_SHFT 32 -#define UV4H_EVENT_OCCURRED2_RTC_15_SHFT 33 -#define UV4H_EVENT_OCCURRED2_RTC_16_SHFT 34 -#define UV4H_EVENT_OCCURRED2_RTC_17_SHFT 35 -#define UV4H_EVENT_OCCURRED2_RTC_18_SHFT 36 -#define UV4H_EVENT_OCCURRED2_RTC_19_SHFT 37 -#define UV4H_EVENT_OCCURRED2_RTC_20_SHFT 38 -#define UV4H_EVENT_OCCURRED2_RTC_21_SHFT 39 -#define UV4H_EVENT_OCCURRED2_RTC_22_SHFT 40 -#define UV4H_EVENT_OCCURRED2_RTC_23_SHFT 41 -#define UV4H_EVENT_OCCURRED2_RTC_24_SHFT 42 -#define UV4H_EVENT_OCCURRED2_RTC_25_SHFT 43 -#define UV4H_EVENT_OCCURRED2_RTC_26_SHFT 44 -#define UV4H_EVENT_OCCURRED2_RTC_27_SHFT 45 -#define UV4H_EVENT_OCCURRED2_RTC_28_SHFT 46 -#define UV4H_EVENT_OCCURRED2_RTC_29_SHFT 47 -#define UV4H_EVENT_OCCURRED2_RTC_30_SHFT 48 -#define UV4H_EVENT_OCCURRED2_RTC_31_SHFT 49 -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000001UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000002UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000004UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000008UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000010UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000020UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000040UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000080UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000100UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_MASK 
0x0000000000000200UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000000400UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000000800UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000001000UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000002000UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000004000UL -#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000008000UL -#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK 0x0000000000010000UL -#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK 0x0000000000020000UL -#define UV4H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000040000UL -#define UV4H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000080000UL -#define UV4H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000100000UL -#define UV4H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000200000UL -#define UV4H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000400000UL -#define UV4H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000800000UL -#define UV4H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000001000000UL -#define UV4H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000002000000UL -#define UV4H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000004000000UL -#define UV4H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000008000000UL -#define UV4H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000010000000UL -#define UV4H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000020000000UL -#define UV4H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000040000000UL -#define UV4H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000080000000UL -#define UV4H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000100000000UL -#define UV4H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000200000000UL -#define UV4H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000400000000UL -#define UV4H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000800000000UL -#define UV4H_EVENT_OCCURRED2_RTC_18_MASK 0x0000001000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_19_MASK 0x0000002000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_20_MASK 0x0000004000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_21_MASK 0x0000008000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_22_MASK 0x0000010000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_23_MASK 0x0000020000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_24_MASK 0x0000040000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_25_MASK 0x0000080000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_26_MASK 0x0000100000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_27_MASK 0x0000200000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_28_MASK 0x0000400000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_29_MASK 0x0000800000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_30_MASK 0x0001000000000000UL -#define UV4H_EVENT_OCCURRED2_RTC_31_MASK 0x0002000000000000UL + /* UV4 unique struct */ + struct uv4h_rtc1_int_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s4; -#define UVXH_EVENT_OCCURRED2_RTC_1_MASK ( \ - is_uv2_hub() ? UV2H_EVENT_OCCURRED2_RTC_1_MASK : \ - is_uv3_hub() ? 
UV3H_EVENT_OCCURRED2_RTC_1_MASK : \ - /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_RTC_1_MASK) + /* UV3 unique struct */ + struct uv3h_rtc1_int_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ + } s3; -union uvh_event_occurred2_u { - unsigned long v; - struct uv2h_event_occurred2_s { - unsigned long rtc_0:1; /* RW */ - unsigned long rtc_1:1; /* RW */ - unsigned long rtc_2:1; /* RW */ - unsigned long rtc_3:1; /* RW */ - unsigned long rtc_4:1; /* RW */ - unsigned long rtc_5:1; /* RW */ - unsigned long rtc_6:1; /* RW */ - unsigned long rtc_7:1; /* RW */ - unsigned long rtc_8:1; /* RW */ - unsigned long rtc_9:1; /* RW */ - unsigned long rtc_10:1; /* RW */ - unsigned long rtc_11:1; /* RW */ - unsigned long rtc_12:1; /* RW */ - unsigned long rtc_13:1; /* RW */ - unsigned long rtc_14:1; /* RW */ - unsigned long rtc_15:1; /* RW */ - unsigned long rtc_16:1; /* RW */ - unsigned long rtc_17:1; /* RW */ - unsigned long rtc_18:1; /* RW */ - unsigned long rtc_19:1; /* RW */ - unsigned long rtc_20:1; /* RW */ - unsigned long rtc_21:1; /* RW */ - unsigned long rtc_22:1; /* RW */ - unsigned long rtc_23:1; /* RW */ - unsigned long rtc_24:1; /* RW */ - unsigned long rtc_25:1; /* RW */ - unsigned long rtc_26:1; /* RW */ - unsigned long rtc_27:1; /* RW */ - unsigned long rtc_28:1; /* RW */ - unsigned long rtc_29:1; /* RW */ - unsigned long rtc_30:1; /* RW */ - unsigned long rtc_31:1; /* RW */ - unsigned long rsvd_32_63:32; + /* UV2 unique struct */ + struct uv2h_rtc1_int_config_s { + unsigned long vector_:8; /* RW */ + unsigned long dm:3; /* RW */ + unsigned long destmode:1; /* RW */ + unsigned long status:1; /* RO */ + unsigned long p:1; /* RO */ + unsigned long rsvd_14:1; + unsigned long t:1; /* RO */ + unsigned long m:1; /* RW */ + unsigned long rsvd_17_31:15; + unsigned long apic_id:32; /* RW */ } s2; - struct uv3h_event_occurred2_s { - unsigned long rtc_0:1; /* RW */ - unsigned long rtc_1:1; /* RW */ - unsigned long rtc_2:1; /* RW */ - unsigned long rtc_3:1; /* RW */ - unsigned long rtc_4:1; /* RW */ - unsigned long rtc_5:1; /* RW */ - unsigned long rtc_6:1; /* RW */ - unsigned long rtc_7:1; /* RW */ - unsigned long rtc_8:1; /* RW */ - unsigned long rtc_9:1; /* RW */ - unsigned long rtc_10:1; /* RW */ - unsigned long rtc_11:1; /* RW */ - unsigned long rtc_12:1; /* RW */ - unsigned long rtc_13:1; /* RW */ - unsigned long rtc_14:1; /* RW */ - unsigned long rtc_15:1; /* RW */ - unsigned long rtc_16:1; /* RW */ - unsigned long rtc_17:1; /* RW */ - unsigned long rtc_18:1; /* RW */ - unsigned long rtc_19:1; /* RW */ - unsigned long rtc_20:1; /* RW */ - unsigned long rtc_21:1; /* RW */ - unsigned long rtc_22:1; /* RW */ - unsigned long rtc_23:1; /* RW */ - unsigned long rtc_24:1; /* RW */ - unsigned long rtc_25:1; /* RW */ - unsigned long rtc_26:1; /* RW */ - unsigned long rtc_27:1; /* RW */ - unsigned long rtc_28:1; /* RW */ - unsigned long rtc_29:1; /* RW */ - unsigned long rtc_30:1; /* RW */ - unsigned long rtc_31:1; /* RW */ - unsigned long rsvd_32_63:32; - } s3; - struct uv4h_event_occurred2_s { - unsigned long message_accelerator_int0:1; /* RW */ - unsigned long message_accelerator_int1:1; /* RW */ - unsigned long message_accelerator_int2:1; /* RW */ - unsigned long message_accelerator_int3:1; /* RW */ - unsigned long 
message_accelerator_int4:1; /* RW */ - unsigned long message_accelerator_int5:1; /* RW */ - unsigned long message_accelerator_int6:1; /* RW */ - unsigned long message_accelerator_int7:1; /* RW */ - unsigned long message_accelerator_int8:1; /* RW */ - unsigned long message_accelerator_int9:1; /* RW */ - unsigned long message_accelerator_int10:1; /* RW */ - unsigned long message_accelerator_int11:1; /* RW */ - unsigned long message_accelerator_int12:1; /* RW */ - unsigned long message_accelerator_int13:1; /* RW */ - unsigned long message_accelerator_int14:1; /* RW */ - unsigned long message_accelerator_int15:1; /* RW */ - unsigned long rtc_interval_int:1; /* RW */ - unsigned long bau_dashboard_int:1; /* RW */ - unsigned long rtc_0:1; /* RW */ - unsigned long rtc_1:1; /* RW */ - unsigned long rtc_2:1; /* RW */ - unsigned long rtc_3:1; /* RW */ - unsigned long rtc_4:1; /* RW */ - unsigned long rtc_5:1; /* RW */ - unsigned long rtc_6:1; /* RW */ - unsigned long rtc_7:1; /* RW */ - unsigned long rtc_8:1; /* RW */ - unsigned long rtc_9:1; /* RW */ - unsigned long rtc_10:1; /* RW */ - unsigned long rtc_11:1; /* RW */ - unsigned long rtc_12:1; /* RW */ - unsigned long rtc_13:1; /* RW */ - unsigned long rtc_14:1; /* RW */ - unsigned long rtc_15:1; /* RW */ - unsigned long rtc_16:1; /* RW */ - unsigned long rtc_17:1; /* RW */ - unsigned long rtc_18:1; /* RW */ - unsigned long rtc_19:1; /* RW */ - unsigned long rtc_20:1; /* RW */ - unsigned long rtc_21:1; /* RW */ - unsigned long rtc_22:1; /* RW */ - unsigned long rtc_23:1; /* RW */ - unsigned long rtc_24:1; /* RW */ - unsigned long rtc_25:1; /* RW */ - unsigned long rtc_26:1; /* RW */ - unsigned long rtc_27:1; /* RW */ - unsigned long rtc_28:1; /* RW */ - unsigned long rtc_29:1; /* RW */ - unsigned long rtc_30:1; /* RW */ - unsigned long rtc_31:1; /* RW */ - unsigned long rsvd_50_63:14; - } s4; }; /* ========================================================================= */ -/* UVXH_EVENT_OCCURRED2_ALIAS */ +/* UVH_SCRATCH5 */ /* ========================================================================= */ -#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL - -#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70 -#define UV3H_EVENT_OCCURRED2_ALIAS_32 0xb70 -#define UV4H_EVENT_OCCURRED2_ALIAS_32 0x610 -#define UVH_EVENT_OCCURRED2_ALIAS_32 ( \ - is_uv2_hub() ? UV2H_EVENT_OCCURRED2_ALIAS_32 : \ - is_uv3_hub() ? UV3H_EVENT_OCCURRED2_ALIAS_32 : \ - /*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_ALIAS_32) +#define UVH_SCRATCH5 ( \ + is_uv(UV5) ? 0xb0200UL : \ + is_uv(UV4) ? 0xb0200UL : \ + is_uv(UV3) ? 0x2d0200UL : \ + is_uv(UV2) ? 0x2d0200UL : \ + 0) +#define UV5H_SCRATCH5 0xb0200UL +#define UV4H_SCRATCH5 0xb0200UL +#define UV3H_SCRATCH5 0x2d0200UL +#define UV2H_SCRATCH5 0x2d0200UL +/* UVH common defines*/ +#define UVH_SCRATCH5_SCRATCH5_SHFT 0 +#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -/* ========================================================================= */ -/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */ -/* ========================================================================= */ -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2 0xc8130UL -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 : \ - is_uv3_hub() ? 
UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2) +/* UVXH common defines */ +#define UVXH_SCRATCH5_SCRATCH5_SHFT 0 +#define UVXH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0 -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0 -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0xa10 -#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2_32 ( \ - is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 : \ - is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 : \ - /*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32) +/* UVYH common defines */ +#define UVYH_SCRATCH5_SCRATCH5_SHFT 0 +#define UVYH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 -#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL +/* UV5 unique defines */ +#define UV5H_SCRATCH5_SCRATCH5_SHFT 0 +#define UV5H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 -#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL +/* UV4 unique defines */ +#define UV4H_SCRATCH5_SCRATCH5_SHFT 0 +#define UV4H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 -#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL +/* UV3 unique defines */ +#define UV3H_SCRATCH5_SCRATCH5_SHFT 0 +#define UV3H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 -#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL +/* UV2 unique defines */ +#define UV2H_SCRATCH5_SCRATCH5_SHFT 0 +#define UV2H_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -union uvxh_lb_bau_sb_activation_status_2_u { +union uvh_scratch5_u { unsigned long v; - struct uvxh_lb_bau_sb_activation_status_2_s { - unsigned long aux_error:64; /* RW */ - } sx; - struct uv2h_lb_bau_sb_activation_status_2_s { - unsigned long aux_error:64; /* RW */ - } s2; - struct uv3h_lb_bau_sb_activation_status_2_s { - unsigned long aux_error:64; /* RW */ - } s3; - struct uv4h_lb_bau_sb_activation_status_2_s { - unsigned long aux_error:64; /* RW */ - } s4; -}; -/* ========================================================================= */ -/* UV3H_GR0_GAM_GR_CONFIG */ -/* ========================================================================= */ -#define UV3H_GR0_GAM_GR_CONFIG 0xc00028UL - -#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0 -#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10 -#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL -#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL + /* UVH common struct */ + struct uvh_scratch5_s { + unsigned long scratch5:64; /* RW */ + } s; -union uv3h_gr0_gam_gr_config_u { - unsigned long v; - struct uv3h_gr0_gam_gr_config_s { - unsigned long m_skt:6; /* RW */ - unsigned long undef_6_9:4; /* Undefined */ - unsigned long subspace:1; /* RW */ - unsigned long reserved:53; - } s3; -}; + /* UVXH common struct */ + struct uvxh_scratch5_s { + unsigned long scratch5:64; /* RW */ + } sx; -/* ========================================================================= */ -/* UV4H_LB_PROC_INTD_QUEUE_FIRST */ -/* ========================================================================= */ -#define UV4H_LB_PROC_INTD_QUEUE_FIRST 0xa4100UL + /* UVYH common struct */ + struct uvyh_scratch5_s { + unsigned long scratch5:64; /* RW */ + } sy; -#define 
UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_SHFT 6 -#define UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffc0UL + /* UV5 unique struct */ + struct uv5h_scratch5_s { + unsigned long scratch5:64; /* RW */ + } s5; -union uv4h_lb_proc_intd_queue_first_u { - unsigned long v; - struct uv4h_lb_proc_intd_queue_first_s { - unsigned long undef_0_5:6; /* Undefined */ - unsigned long first_payload_address:40; /* RW */ + /* UV4 unique struct */ + struct uv4h_scratch5_s { + unsigned long scratch5:64; /* RW */ } s4; -}; -/* ========================================================================= */ -/* UV4H_LB_PROC_INTD_QUEUE_LAST */ -/* ========================================================================= */ -#define UV4H_LB_PROC_INTD_QUEUE_LAST 0xa4108UL - -#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_SHFT 5 -#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffe0UL + /* UV3 unique struct */ + struct uv3h_scratch5_s { + unsigned long scratch5:64; /* RW */ + } s3; -union uv4h_lb_proc_intd_queue_last_u { - unsigned long v; - struct uv4h_lb_proc_intd_queue_last_s { - unsigned long undef_0_4:5; /* Undefined */ - unsigned long last_payload_address:41; /* RW */ - } s4; + /* UV2 unique struct */ + struct uv2h_scratch5_s { + unsigned long scratch5:64; /* RW */ + } s2; }; /* ========================================================================= */ -/* UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR */ +/* UVH_SCRATCH5_ALIAS */ /* ========================================================================= */ -#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR 0xa4118UL +#define UVH_SCRATCH5_ALIAS ( \ + is_uv(UV5) ? 0xb0208UL : \ + is_uv(UV4) ? 0xb0208UL : \ + is_uv(UV3) ? 0x2d0208UL : \ + is_uv(UV2) ? 0x2d0208UL : \ + 0) +#define UV5H_SCRATCH5_ALIAS 0xb0208UL +#define UV4H_SCRATCH5_ALIAS 0xb0208UL +#define UV3H_SCRATCH5_ALIAS 0x2d0208UL +#define UV2H_SCRATCH5_ALIAS 0x2d0208UL -#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_SHFT 0 -#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_MASK 0x00000000000000ffUL - -union uv4h_lb_proc_intd_soft_ack_clear_u { - unsigned long v; - struct uv4h_lb_proc_intd_soft_ack_clear_s { - unsigned long soft_ack_pending_flags:8; /* WP */ - } s4; -}; /* ========================================================================= */ -/* UV4H_LB_PROC_INTD_SOFT_ACK_PENDING */ +/* UVH_SCRATCH5_ALIAS_2 */ /* ========================================================================= */ -#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING 0xa4110UL - -#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_SHFT 0 -#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_MASK 0x00000000000000ffUL +#define UVH_SCRATCH5_ALIAS_2 ( \ + is_uv(UV5) ? 0xb0210UL : \ + is_uv(UV4) ? 0xb0210UL : \ + is_uv(UV3) ? 0x2d0210UL : \ + is_uv(UV2) ? 
0x2d0210UL : \ + 0) +#define UV5H_SCRATCH5_ALIAS_2 0xb0210UL +#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL +#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL +#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL -union uv4h_lb_proc_intd_soft_ack_pending_u { - unsigned long v; - struct uv4h_lb_proc_intd_soft_ack_pending_s { - unsigned long soft_ack_flags:8; /* RW */ - } s4; -}; #endif /* _ASM_X86_UV_UV_MMRS_H */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 6807153c0410..dde5b3f1e7cd 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -4,29 +4,22 @@ #include <asm/bootparam.h> +struct ghcb; struct mpc_bus; struct mpc_cpu; +struct pt_regs; struct mpc_table; struct cpuinfo_x86; +struct irq_domain; /** * struct x86_init_mpparse - platform specific mpparse ops - * @mpc_record: platform specific mpc record accounting * @setup_ioapic_ids: platform specific ioapic id override - * @mpc_apic_id: platform specific mpc apic id assignment - * @smp_read_mpc_oem: platform specific oem mpc table setup - * @mpc_oem_pci_bus: platform specific pci bus setup (default NULL) - * @mpc_oem_bus_info: platform specific mpc bus info * @find_smp_config: find the smp configuration * @get_smp_config: get the smp configuration */ struct x86_init_mpparse { - void (*mpc_record)(unsigned int mode); void (*setup_ioapic_ids)(void); - int (*mpc_apic_id)(struct mpc_cpu *m); - void (*smp_read_mpc_oem)(struct mpc_table *mpc); - void (*mpc_oem_pci_bus)(struct mpc_bus *m); - void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); void (*find_smp_config)(void); void (*get_smp_config)(unsigned int early); }; @@ -52,12 +45,14 @@ struct x86_init_resources { * @intr_init: interrupt init code * @intr_mode_select: interrupt delivery mode selection * @intr_mode_init: interrupt delivery mode setup + * @create_pci_msi_domain: Create the PCI/MSI interrupt domain */ struct x86_init_irqs { void (*pre_vector_init)(void); void (*intr_init)(void); void (*intr_mode_select)(void); void (*intr_mode_init)(void); + struct irq_domain *(*create_pci_msi_domain)(void); }; /** @@ -236,10 +231,22 @@ struct x86_legacy_features { /** * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks * - * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely) + * @pin_vcpu: pin current vcpu to specified physical + * cpu (run rarely) + * @sev_es_hcall_prepare: Load additional hypervisor-specific + * state into the GHCB when doing a VMMCALL under + * SEV-ES. Called from the #VC exception handler. + * @sev_es_hcall_finish: Copies state from the GHCB back into the + * processor (or pt_regs). Also runs checks on the + * state returned from the hypervisor after a + * VMMCALL under SEV-ES. Needs to return 'false' + * if the checks fail. Called from the #VC + * exception handler. 
*/ struct x86_hyper_runtime { void (*pin_vcpu)(int cpu); + void (*sev_es_hcall_prepare)(struct ghcb *ghcb, struct pt_regs *regs); + bool (*sev_es_hcall_finish)(struct ghcb *ghcb, struct pt_regs *regs); }; /** @@ -283,9 +290,6 @@ struct x86_platform_ops { struct pci_dev; struct x86_msi_ops { - int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); - void (*teardown_msi_irq)(unsigned int irq); - void (*teardown_msi_irqs)(struct pci_dev *dev); void (*restore_msi_irqs)(struct pci_dev *dev); }; diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h index 2e8a30f06c74..a7a3403645e5 100644 --- a/arch/x86/include/uapi/asm/svm.h +++ b/arch/x86/include/uapi/asm/svm.h @@ -29,6 +29,7 @@ #define SVM_EXIT_WRITE_DR6 0x036 #define SVM_EXIT_WRITE_DR7 0x037 #define SVM_EXIT_EXCP_BASE 0x040 +#define SVM_EXIT_LAST_EXCP 0x05f #define SVM_EXIT_INTR 0x060 #define SVM_EXIT_NMI 0x061 #define SVM_EXIT_SMI 0x062 @@ -80,6 +81,16 @@ #define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402 +/* SEV-ES software-defined VMGEXIT events */ +#define SVM_VMGEXIT_MMIO_READ 0x80000001 +#define SVM_VMGEXIT_MMIO_WRITE 0x80000002 +#define SVM_VMGEXIT_NMI_COMPLETE 0x80000003 +#define SVM_VMGEXIT_AP_HLT_LOOP 0x80000004 +#define SVM_VMGEXIT_AP_JUMP_TABLE 0x80000005 +#define SVM_VMGEXIT_SET_AP_JUMP_TABLE 0 +#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1 +#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff + #define SVM_EXIT_ERR -1 #define SVM_EXIT_REASONS \ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index e77261db2391..04ceea8f4a89 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -20,6 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg +CFLAGS_REMOVE_sev-es.o = -pg endif KASAN_SANITIZE_head$(BITS).o := n @@ -27,6 +28,7 @@ KASAN_SANITIZE_dumpstack.o := n KASAN_SANITIZE_dumpstack_$(BITS).o := n KASAN_SANITIZE_stacktrace.o := n KASAN_SANITIZE_paravirt.o := n +KASAN_SANITIZE_sev-es.o := n # With some compiler versions the generated code results in boot hangs, caused # by several compilation units. To be safe, disable all instrumentation. 
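The x86_init.h and Makefile hunks above wire in the SEV-ES plumbing: struct x86_hyper_runtime gains sev_es_hcall_prepare()/sev_es_hcall_finish() callbacks for hypercalls issued from the #VC exception handler, and the new sev-es.o object is built with profiling and KASAN disabled. The sketch below is illustrative only and is not part of the patch; it shows how a guest hypervisor backend might implement and register those callbacks. The example_* names and the particular registers copied are assumptions made for the example, while the ghcb_set_*()/ghcb_*_is_valid() accessors and the x86_platform.hyper hook follow the SEV-ES series this merge is based on.

/*
 * Illustrative sketch only -- not part of the patch above. Assumes the
 * GHCB accessor helpers from <asm/svm.h>; the example_* names and the
 * register set of this hypothetical hypercall ABI are made up.
 */
#include <asm/ptrace.h>
#include <asm/svm.h>
#include <asm/x86_init.h>

static void example_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* Expose the registers the hypercall ABI consumes to the hypervisor. */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool example_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* Fail the VMMCALL if the hypervisor did not hand back RAX. */
	if (!ghcb_rax_is_valid(ghcb))
		return false;

	regs->ax = ghcb->save.rax;
	return true;
}

static void __init example_guest_init(void)
{
	x86_platform.hyper.sev_es_hcall_prepare = example_sev_es_hcall_prepare;
	x86_platform.hyper.sev_es_hcall_finish  = example_sev_es_hcall_finish;
}

Per the kernel-doc added above, the #VC handler invokes the prepare callback to load hypervisor-specific state into the GHCB before the VMMCALL and the finish callback to validate and copy back the returned state, treating a false return as a failed check.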
@@ -68,6 +70,7 @@ obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o obj-y += irqflags.o +obj-y += static_call.o obj-y += process.o obj-y += fpu/ @@ -145,6 +148,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o +obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index cdaab30880b9..4adbe65afe23 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -1103,6 +1103,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs) */ goto out_put; + case RET_INSN_OPCODE: + int3_emulate_ret(regs); + break; + case CALL_INSN_OPCODE: int3_emulate_call(regs, (long)ip + tp->rel32); break; @@ -1277,6 +1281,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, switch (tp->opcode) { case INT3_INSN_OPCODE: + case RET_INSN_OPCODE: break; case CALL_INSN_OPCODE: diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index e89031e9c847..9ac696487b13 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -32,6 +32,7 @@ #include <linux/gfp.h> #include <linux/atomic.h> #include <linux/dma-direct.h> +#include <linux/dma-map-ops.h> #include <asm/mtrr.h> #include <asm/proto.h> #include <asm/iommu.h> @@ -96,8 +97,7 @@ static unsigned long alloc_iommu(struct device *dev, int size, base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), PAGE_SIZE) >> PAGE_SHIFT; - boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1, - PAGE_SIZE) >> PAGE_SHIFT; + boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT); spin_lock_irqsave(&iommu_bitmap_lock, flags); offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit, @@ -468,7 +468,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, { void *vaddr; - vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs); + vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs); if (!vaddr || !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24)) return vaddr; @@ -480,7 +480,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, goto out_free; return vaddr; out_free: - dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs); + dma_direct_free(dev, size, vaddr, *dma_addr, attrs); return NULL; } @@ -490,7 +490,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, unsigned long attrs) { gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); - dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs); + dma_direct_free(dev, size, vaddr, dma_addr, attrs); } static int no_agp; @@ -678,6 +678,8 @@ static const struct dma_map_ops gart_dma_ops = { .get_sgtable = dma_common_get_sgtable, .dma_supported = dma_direct_supported, .get_required_mask = dma_direct_get_required_mask, + .alloc_pages = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, }; static void gart_iommu_shutdown(void) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 5f943b938167..b3eef1d5c903 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1429,6 +1429,9 @@ void __init apic_intr_mode_init(void) break; } + if (x86_platform.apic_post_init) + x86_platform.apic_post_init(); + apic_bsp_setup(upmode); } diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 
21f9c7f11779..7b3c7e0d4a09 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -860,10 +860,10 @@ void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node, { init_irq_alloc_info(info, NULL); info->type = X86_IRQ_ALLOC_TYPE_IOAPIC; - info->ioapic_node = node; - info->ioapic_trigger = trigger; - info->ioapic_polarity = polarity; - info->ioapic_valid = 1; + info->ioapic.node = node; + info->ioapic.trigger = trigger; + info->ioapic.polarity = polarity; + info->ioapic.valid = 1; } #ifndef CONFIG_ACPI @@ -878,32 +878,32 @@ static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst, copy_irq_alloc_info(dst, src); dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC; - dst->ioapic_id = mpc_ioapic_id(ioapic_idx); - dst->ioapic_pin = pin; - dst->ioapic_valid = 1; - if (src && src->ioapic_valid) { - dst->ioapic_node = src->ioapic_node; - dst->ioapic_trigger = src->ioapic_trigger; - dst->ioapic_polarity = src->ioapic_polarity; + dst->devid = mpc_ioapic_id(ioapic_idx); + dst->ioapic.pin = pin; + dst->ioapic.valid = 1; + if (src && src->ioapic.valid) { + dst->ioapic.node = src->ioapic.node; + dst->ioapic.trigger = src->ioapic.trigger; + dst->ioapic.polarity = src->ioapic.polarity; } else { - dst->ioapic_node = NUMA_NO_NODE; + dst->ioapic.node = NUMA_NO_NODE; if (acpi_get_override_irq(gsi, &trigger, &polarity) >= 0) { - dst->ioapic_trigger = trigger; - dst->ioapic_polarity = polarity; + dst->ioapic.trigger = trigger; + dst->ioapic.polarity = polarity; } else { /* * PCI interrupts are always active low level * triggered. */ - dst->ioapic_trigger = IOAPIC_LEVEL; - dst->ioapic_polarity = IOAPIC_POL_LOW; + dst->ioapic.trigger = IOAPIC_LEVEL; + dst->ioapic.polarity = IOAPIC_POL_LOW; } } } static int ioapic_alloc_attr_node(struct irq_alloc_info *info) { - return (info && info->ioapic_valid) ? info->ioapic_node : NUMA_NO_NODE; + return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE; } static void mp_register_handler(unsigned int irq, unsigned long trigger) @@ -933,14 +933,14 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) * pin with real trigger and polarity attributes. 
*/ if (irq < nr_legacy_irqs() && data->count == 1) { - if (info->ioapic_trigger != data->trigger) - mp_register_handler(irq, info->ioapic_trigger); - data->entry.trigger = data->trigger = info->ioapic_trigger; - data->entry.polarity = data->polarity = info->ioapic_polarity; + if (info->ioapic.trigger != data->trigger) + mp_register_handler(irq, info->ioapic.trigger); + data->entry.trigger = data->trigger = info->ioapic.trigger; + data->entry.polarity = data->polarity = info->ioapic.polarity; } - return data->trigger == info->ioapic_trigger && - data->polarity == info->ioapic_polarity; + return data->trigger == info->ioapic.trigger && + data->polarity == info->ioapic.polarity; } static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi, @@ -1002,7 +1002,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain, if (!mp_check_pin_attr(irq, info)) return -EBUSY; if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic, - info->ioapic_pin)) + info->ioapic.pin)) return -ENOMEM; } else { info->flags |= X86_IRQ_ALLOC_LEGACY; @@ -2092,8 +2092,8 @@ static int mp_alloc_timer_irq(int ioapic, int pin) struct irq_alloc_info info; ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0); - info.ioapic_id = mpc_ioapic_id(ioapic); - info.ioapic_pin = pin; + info.devid = mpc_ioapic_id(ioapic); + info.ioapic.pin = pin; mutex_lock(&ioapic_mutex); irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info); mutex_unlock(&ioapic_mutex); @@ -2297,9 +2297,9 @@ static int mp_irqdomain_create(int ioapic) return 0; init_irq_alloc_info(&info, NULL); - info.type = X86_IRQ_ALLOC_TYPE_IOAPIC; - info.ioapic_id = mpc_ioapic_id(ioapic); - parent = irq_remapping_get_ir_irq_domain(&info); + info.type = X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT; + info.devid = mpc_ioapic_id(ioapic); + parent = irq_remapping_get_irq_domain(&info); if (!parent) parent = x86_vector_domain; else @@ -2933,9 +2933,9 @@ int mp_ioapic_registered(u32 gsi_base) static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data, struct irq_alloc_info *info) { - if (info && info->ioapic_valid) { - data->trigger = info->ioapic_trigger; - data->polarity = info->ioapic_polarity; + if (info && info->ioapic.valid) { + data->trigger = info->ioapic.trigger; + data->polarity = info->ioapic.polarity; } else if (acpi_get_override_irq(gsi, &data->trigger, &data->polarity) < 0) { /* PCI interrupts are always active low level triggered. */ @@ -2981,7 +2981,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, return -EINVAL; ioapic = mp_irqdomain_ioapic_idx(domain); - pin = info->ioapic_pin; + pin = info->ioapic.pin; if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0) return -EEXIST; @@ -2989,7 +2989,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, if (!data) return -ENOMEM; - info->ioapic_entry = &data->entry; + info->ioapic.entry = &data->entry; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); if (ret < 0) { kfree(data); @@ -2997,7 +2997,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, } INIT_LIST_HEAD(&data->irq_2_pin); - irq_data->hwirq = info->ioapic_pin; + irq_data->hwirq = info->ioapic.pin; irq_data->chip = (domain->parent == x86_vector_domain) ? 
&ioapic_chip : &ioapic_ir_chip; irq_data->chip_data = data; @@ -3007,8 +3007,8 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); local_irq_save(flags); - if (info->ioapic_entry) - mp_setup_entry(cfg, data, info->ioapic_entry); + if (info->ioapic.entry) + mp_setup_entry(cfg, data, info->ioapic.entry); mp_register_handler(virq, data->trigger); if (virq < nr_legacy_irqs()) legacy_pic->mask(virq); diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index c2b2911feeef..6313f0a05db7 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -21,7 +21,7 @@ #include <asm/apic.h> #include <asm/irq_remapping.h> -static struct irq_domain *msi_default_domain; +struct irq_domain *x86_pci_msi_default_domain __ro_after_init; static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg) { @@ -45,7 +45,7 @@ static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg) MSI_DATA_VECTOR(cfg->vector); } -static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { __irq_msi_compose_msg(irqd_cfg(data), msg); } @@ -177,40 +177,10 @@ static struct irq_chip pci_msi_controller = { .irq_mask = pci_msi_mask_irq, .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_compose_msi_msg = irq_msi_compose_msg, .irq_set_affinity = msi_set_affinity, .flags = IRQCHIP_SKIP_SET_WAKE, }; -int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - struct irq_domain *domain; - struct irq_alloc_info info; - - init_irq_alloc_info(&info, NULL); - info.type = X86_IRQ_ALLOC_TYPE_MSI; - info.msi_dev = dev; - - domain = irq_remapping_get_irq_domain(&info); - if (domain == NULL) - domain = msi_default_domain; - if (domain == NULL) - return -ENOSYS; - - return msi_domain_alloc_irqs(domain, &dev->dev, nvec); -} - -void native_teardown_msi_irq(unsigned int irq) -{ - irq_domain_free_irqs(irq, 1); -} - -static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info, - msi_alloc_info_t *arg) -{ - return arg->msi_hwirq; -} - int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg) { @@ -218,11 +188,10 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, struct msi_desc *desc = first_pci_msi_entry(pdev); init_irq_alloc_info(arg, NULL); - arg->msi_dev = pdev; if (desc->msi_attrib.is_msix) { - arg->type = X86_IRQ_ALLOC_TYPE_MSIX; + arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX; } else { - arg->type = X86_IRQ_ALLOC_TYPE_MSI; + arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI; arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; } @@ -230,16 +199,8 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, } EXPORT_SYMBOL_GPL(pci_msi_prepare); -void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) -{ - arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc); -} -EXPORT_SYMBOL_GPL(pci_msi_set_desc); - static struct msi_domain_ops pci_msi_domain_ops = { - .get_hwirq = pci_msi_get_hwirq, .msi_prepare = pci_msi_prepare, - .set_desc = pci_msi_set_desc, }; static struct msi_domain_info pci_msi_domain_info = { @@ -251,25 +212,32 @@ static struct msi_domain_info pci_msi_domain_info = { .handler_name = "edge", }; -void __init arch_init_msi_domain(struct irq_domain *parent) +struct irq_domain * __init native_create_pci_msi_domain(void) { struct fwnode_handle *fn; + struct 
irq_domain *d; if (disable_apic) - return; + return NULL; fn = irq_domain_alloc_named_fwnode("PCI-MSI"); - if (fn) { - msi_default_domain = - pci_msi_create_irq_domain(fn, &pci_msi_domain_info, - parent); - } - if (!msi_default_domain) { + if (!fn) + return NULL; + + d = pci_msi_create_irq_domain(fn, &pci_msi_domain_info, + x86_vector_domain); + if (!d) { irq_domain_free_fwnode(fn); - pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); + pr_warn("Failed to initialize PCI-MSI irqdomain.\n"); } else { - msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; + d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; } + return d; +} + +void __init x86_create_pci_msi_domain(void) +{ + x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain(); } #ifdef CONFIG_IRQ_REMAP @@ -279,7 +247,6 @@ static struct irq_chip pci_msi_ir_controller = { .irq_mask = pci_msi_mask_irq, .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, .flags = IRQCHIP_SKIP_SET_WAKE, }; @@ -321,35 +288,28 @@ static struct irq_chip dmar_msi_controller = { .irq_ack = irq_chip_ack_parent, .irq_set_affinity = msi_domain_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_compose_msi_msg = irq_msi_compose_msg, .irq_write_msi_msg = dmar_msi_write_msg, .flags = IRQCHIP_SKIP_SET_WAKE, }; -static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info, - msi_alloc_info_t *arg) -{ - return arg->dmar_id; -} - static int dmar_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { - irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL, - handle_edge_irq, arg->dmar_data, "edge"); + irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL, + handle_edge_irq, arg->data, "edge"); return 0; } static struct msi_domain_ops dmar_msi_domain_ops = { - .get_hwirq = dmar_msi_get_hwirq, .msi_init = dmar_msi_init, }; static struct msi_domain_info dmar_msi_domain_info = { .ops = &dmar_msi_domain_ops, .chip = &dmar_msi_controller, + .flags = MSI_FLAG_USE_DEF_DOM_OPS, }; static struct irq_domain *dmar_get_irq_domain(void) @@ -384,8 +344,9 @@ int dmar_alloc_hwirq(int id, int node, void *arg) init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_DMAR; - info.dmar_id = id; - info.dmar_data = arg; + info.devid = id; + info.hwirq = id; + info.data = arg; return irq_domain_alloc_irqs(domain, 1, node, &info); } @@ -419,24 +380,17 @@ static struct irq_chip hpet_msi_controller __ro_after_init = { .irq_ack = irq_chip_ack_parent, .irq_set_affinity = msi_domain_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_compose_msi_msg = irq_msi_compose_msg, .irq_write_msi_msg = hpet_msi_write_msg, .flags = IRQCHIP_SKIP_SET_WAKE, }; -static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info, - msi_alloc_info_t *arg) -{ - return arg->hpet_index; -} - static int hpet_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); - irq_domain_set_info(domain, virq, arg->hpet_index, info->chip, NULL, - handle_edge_irq, arg->hpet_data, "edge"); + irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL, + handle_edge_irq, arg->data, "edge"); return 0; } @@ -448,7 +402,6 @@ static void hpet_msi_free(struct irq_domain *domain, } static struct msi_domain_ops hpet_msi_domain_ops = { - .get_hwirq = hpet_msi_get_hwirq, 
.msi_init = hpet_msi_init, .msi_free = hpet_msi_free, }; @@ -456,6 +409,7 @@ static struct msi_domain_ops hpet_msi_domain_ops = { static struct msi_domain_info hpet_msi_domain_info = { .ops = &hpet_msi_domain_ops, .chip = &hpet_msi_controller, + .flags = MSI_FLAG_USE_DEF_DOM_OPS, }; struct irq_domain *hpet_create_irq_domain(int hpet_id) @@ -476,9 +430,9 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id) domain_info->data = (void *)(long)hpet_id; init_irq_alloc_info(&info, NULL); - info.type = X86_IRQ_ALLOC_TYPE_HPET; - info.hpet_id = hpet_id; - parent = irq_remapping_get_ir_irq_domain(&info); + info.type = X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT; + info.devid = hpet_id; + parent = irq_remapping_get_irq_domain(&info); if (parent == NULL) parent = x86_vector_domain; else @@ -506,9 +460,9 @@ int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc, init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_HPET; - info.hpet_data = hc; - info.hpet_id = hpet_dev_id(domain); - info.hpet_index = dev_num; + info.data = hc; + info.devid = hpet_dev_id(domain); + info.hwirq = dev_num; return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); } diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 99ee61c9ba54..67b6f7c049ec 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -170,9 +170,6 @@ void __init default_setup_apic_routing(void) if (apic->setup_apic_routing) apic->setup_apic_routing(); - - if (x86_platform.apic_post_init) - x86_platform.apic_post_init(); } void __init generic_apic_probe(void) diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index bd3835d6b535..c46720f185c0 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -32,9 +32,6 @@ void __init default_setup_apic_routing(void) break; } } - - if (x86_platform.apic_post_init) - x86_platform.apic_post_init(); } int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index f8a56b5dc29f..1eac53632786 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -714,8 +714,6 @@ int __init arch_early_irq_init(void) BUG_ON(x86_vector_domain == NULL); irq_set_default_host(x86_vector_domain); - arch_init_msi_domain(x86_vector_domain); - BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); /* @@ -824,6 +822,7 @@ static struct irq_chip lapic_controller = { .name = "APIC", .irq_ack = apic_ack_edge, .irq_set_affinity = apic_set_affinity, + .irq_compose_msi_msg = x86_vector_msi_compose_msg, .irq_retrigger = apic_retrigger_irq, }; diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 0b6eea3f54e6..714233cee0b5 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -5,6 +5,7 @@ * * SGI UV APIC functions (note: not an Intel compatible APIC) * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved. 
*/ #include <linux/crash_dump.h> @@ -29,19 +30,24 @@ static int uv_hubbed_system; static int uv_hubless_system; static u64 gru_start_paddr, gru_end_paddr; static union uvh_apicid uvh_apicid; +static int uv_node_id; -/* Unpack OEM/TABLE ID's to be NULL terminated strings */ +/* Unpack AT/OEM/TABLE ID's to be NULL terminated strings */ +static u8 uv_archtype[UV_AT_SIZE]; static u8 oem_id[ACPI_OEM_ID_SIZE + 1]; static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; -/* Information derived from CPUID: */ +/* Information derived from CPUID and some UV MMRs */ static struct { unsigned int apicid_shift; unsigned int apicid_mask; unsigned int socketid_shift; /* aka pnode_shift for UV2/3 */ unsigned int pnode_mask; + unsigned int nasid_shift; unsigned int gpa_shift; unsigned int gnode_shift; + unsigned int m_skt; + unsigned int n_skt; } uv_cpuid; static int uv_min_hub_revision_id; @@ -77,6 +83,9 @@ static unsigned long __init uv_early_read_mmr(unsigned long addr) static inline bool is_GRU_range(u64 start, u64 end) { + if (!gru_start_paddr) + return false; + return start >= gru_start_paddr && end <= gru_end_paddr; } @@ -85,43 +94,102 @@ static bool uv_is_untracked_pat_range(u64 start, u64 end) return is_ISA_range(start, end) || is_GRU_range(start, end); } -static int __init early_get_pnodeid(void) +static void __init early_get_pnodeid(void) { - union uvh_node_id_u node_id; - union uvh_rh_gam_config_mmr_u m_n_config; int pnode; - /* Currently, all blades have same revision number */ + uv_cpuid.m_skt = 0; + if (UVH_RH10_GAM_ADDR_MAP_CONFIG) { + union uvh_rh10_gam_addr_map_config_u m_n_config; + + m_n_config.v = uv_early_read_mmr(UVH_RH10_GAM_ADDR_MAP_CONFIG); + uv_cpuid.n_skt = m_n_config.s.n_skt; + uv_cpuid.nasid_shift = 0; + } else if (UVH_RH_GAM_ADDR_MAP_CONFIG) { + union uvh_rh_gam_addr_map_config_u m_n_config; + + m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_ADDR_MAP_CONFIG); + uv_cpuid.n_skt = m_n_config.s.n_skt; + if (is_uv(UV3)) + uv_cpuid.m_skt = m_n_config.s3.m_skt; + if (is_uv(UV2)) + uv_cpuid.m_skt = m_n_config.s2.m_skt; + uv_cpuid.nasid_shift = 1; + } else { + unsigned long GAM_ADDR_MAP_CONFIG = 0; + + WARN(GAM_ADDR_MAP_CONFIG == 0, + "UV: WARN: GAM_ADDR_MAP_CONFIG is not available\n"); + uv_cpuid.n_skt = 0; + uv_cpuid.nasid_shift = 0; + } + + if (is_uv(UV4|UVY)) + uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */ + + uv_cpuid.pnode_mask = (1 << uv_cpuid.n_skt) - 1; + pnode = (uv_node_id >> uv_cpuid.nasid_shift) & uv_cpuid.pnode_mask; + uv_cpuid.gpa_shift = 46; /* Default unless changed */ + + pr_info("UV: n_skt:%d pnmsk:%x pn:%x\n", + uv_cpuid.n_skt, uv_cpuid.pnode_mask, pnode); +} + +/* Running on a UV Hubbed system, determine which UV Hub Type it is */ +static int __init early_set_hub_type(void) +{ + union uvh_node_id_u node_id; + + /* + * The NODE_ID MMR is always at offset 0. + * Contains the chip part # + revision. + * Node_id field started with 15 bits, + * ... now 7 but upper 8 are masked to 0. + * All blades/nodes have the same part # and hub revision. 
+ */ node_id.v = uv_early_read_mmr(UVH_NODE_ID); - m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR); - uv_min_hub_revision_id = node_id.s.revision; + uv_node_id = node_id.sx.node_id; switch (node_id.s.part_number) { - case UV2_HUB_PART_NUMBER: - case UV2_HUB_PART_NUMBER_X: - uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; + + case UV5_HUB_PART_NUMBER: + uv_min_hub_revision_id = node_id.s.revision + + UV5_HUB_REVISION_BASE; + uv_hub_type_set(UV5); break; + + /* UV4/4A only have a revision difference */ + case UV4_HUB_PART_NUMBER: + uv_min_hub_revision_id = node_id.s.revision + + UV4_HUB_REVISION_BASE; + uv_hub_type_set(UV4); + if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE) + uv_hub_type_set(UV4|UV4A); + break; + case UV3_HUB_PART_NUMBER: case UV3_HUB_PART_NUMBER_X: - uv_min_hub_revision_id += UV3_HUB_REVISION_BASE; + uv_min_hub_revision_id = node_id.s.revision + + UV3_HUB_REVISION_BASE; + uv_hub_type_set(UV3); break; - /* Update: UV4A has only a modified revision to indicate HUB fixes */ - case UV4_HUB_PART_NUMBER: - uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1; - uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */ + case UV2_HUB_PART_NUMBER: + case UV2_HUB_PART_NUMBER_X: + uv_min_hub_revision_id = node_id.s.revision + + UV2_HUB_REVISION_BASE - 1; + uv_hub_type_set(UV2); break; + + default: + return 0; } - uv_hub_info->hub_revision = uv_min_hub_revision_id; - uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1; - pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask; - uv_cpuid.gpa_shift = 46; /* Default unless changed */ + pr_info("UV: part#:%x rev:%d rev_id:%d UVtype:0x%x\n", + node_id.s.part_number, node_id.s.revision, + uv_min_hub_revision_id, is_uv(~0)); - pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n", - node_id.s.revision, node_id.s.part_number, node_id.s.node_id, - m_n_config.s.n_skt, uv_cpuid.pnode_mask, pnode); - return pnode; + return 1; } static void __init uv_tsc_check_sync(void) @@ -130,38 +198,41 @@ static void __init uv_tsc_check_sync(void) int sync_state; int mmr_shift; char *state; - bool valid; - /* Accommodate different UV arch BIOSes */ + /* Different returns from different UV BIOS versions */ mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR); mmr_shift = is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT; sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK; + /* Check if TSC is valid for all sockets */ switch (sync_state) { case UVH_TSC_SYNC_VALID: state = "in sync"; - valid = true; + mark_tsc_async_resets("UV BIOS"); break; - case UVH_TSC_SYNC_INVALID: - state = "unstable"; - valid = false; + /* If BIOS state unknown, don't do anything */ + case UVH_TSC_SYNC_UNKNOWN: + state = "unknown"; break; + + /* Otherwise, BIOS indicates problem with TSC */ default: - state = "unknown: assuming valid"; - valid = true; + state = "unstable"; + mark_tsc_unstable("UV BIOS"); break; } pr_info("UV: TSC sync state from BIOS:0%d(%s)\n", sync_state, state); - - /* Mark flag that says TSC != 0 is valid for socket 0 */ - if (valid) - mark_tsc_async_resets("UV BIOS"); - else - mark_tsc_unstable("UV BIOS"); } +/* Selector for (4|4A|5) structs */ +#define uvxy_field(sname, field, undef) ( \ + is_uv(UV4A) ? sname.s4a.field : \ + is_uv(UV4) ? sname.s4.field : \ + is_uv(UV3) ? 
sname.s3.field : \ + undef) + /* [Copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */ #define SMT_LEVEL 0 /* Leaf 0xb SMT level */ @@ -221,29 +292,110 @@ static void __init uv_stringify(int len, char *to, char *from) strncpy(to, from, len-1); } -static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id) +/* Find UV arch type entry in UVsystab */ +static unsigned long __init early_find_archtype(struct uv_systab *st) +{ + int i; + + for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) { + unsigned long ptr = st->entry[i].offset; + + if (!ptr) + continue; + ptr += (unsigned long)st; + if (st->entry[i].type == UV_SYSTAB_TYPE_ARCH_TYPE) + return ptr; + } + return 0; +} + +/* Validate UV arch type field in UVsystab */ +static int __init decode_arch_type(unsigned long ptr) +{ + struct uv_arch_type_entry *uv_ate = (struct uv_arch_type_entry *)ptr; + int n = strlen(uv_ate->archtype); + + if (n > 0 && n < sizeof(uv_ate->archtype)) { + pr_info("UV: UVarchtype received from BIOS\n"); + uv_stringify(UV_AT_SIZE, uv_archtype, uv_ate->archtype); + return 1; + } + return 0; +} + +/* Determine if UV arch type entry might exist in UVsystab */ +static int __init early_get_arch_type(void) { - int pnodeid; - int uv_apic; + unsigned long uvst_physaddr, uvst_size, ptr; + struct uv_systab *st; + u32 rev; + int ret; + + uvst_physaddr = get_uv_systab_phys(0); + if (!uvst_physaddr) + return 0; + + st = early_memremap_ro(uvst_physaddr, sizeof(struct uv_systab)); + if (!st) { + pr_err("UV: Cannot access UVsystab, remap failed\n"); + return 0; + } + rev = st->revision; + if (rev < UV_SYSTAB_VERSION_UV5) { + early_memunmap(st, sizeof(struct uv_systab)); + return 0; + } + + uvst_size = st->size; + early_memunmap(st, sizeof(struct uv_systab)); + st = early_memremap_ro(uvst_physaddr, uvst_size); + if (!st) { + pr_err("UV: Cannot access UVarchtype, remap failed\n"); + return 0; + } + + ptr = early_find_archtype(st); + if (!ptr) { + early_memunmap(st, uvst_size); + return 0; + } + + ret = decode_arch_type(ptr); + early_memunmap(st, uvst_size); + return ret; +} + +static int __init uv_set_system_type(char *_oem_id) +{ + /* Save OEM_ID passed from ACPI MADT */ uv_stringify(sizeof(oem_id), oem_id, _oem_id); - uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id); - if (strncmp(oem_id, "SGI", 3) != 0) { - if (strncmp(oem_id, "NSGI", 4) != 0) + /* Check if BIOS sent us a UVarchtype */ + if (!early_get_arch_type()) + + /* If not use OEM ID for UVarchtype */ + uv_stringify(UV_AT_SIZE, uv_archtype, _oem_id); + + /* Check if not hubbed */ + if (strncmp(uv_archtype, "SGI", 3) != 0) { + + /* (Not hubbed), check if not hubless */ + if (strncmp(uv_archtype, "NSGI", 4) != 0) + + /* (Not hubless), not a UV */ return 0; - /* UV4 Hubless, CH, (0x11:UV4+Any) */ - if (strncmp(oem_id, "NSGI4", 5) == 0) + /* UV4 Hubless: CH */ + if (strncmp(uv_archtype, "NSGI4", 5) == 0) uv_hubless_system = 0x11; - /* UV3 Hubless, UV300/MC990X w/o hub (0x9:UV3+Any) */ + /* UV3 Hubless: UV300/MC990X w/o hub */ else uv_hubless_system = 0x9; - pr_info("UV: OEM IDs %s/%s, HUBLESS(0x%x)\n", - oem_id, oem_table_id, uv_hubless_system); - + pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n", + oem_id, oem_table_id, uv_system_type, uv_hubless_system); return 0; } @@ -252,60 +404,83 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id) return 0; } - /* Set up early hub type field in uv_hub_info for Node 0 */ - uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; + /* Set hubbed type if 
true */ + uv_hub_info->hub_revision = + !strncmp(uv_archtype, "SGI5", 4) ? UV5_HUB_REVISION_BASE : + !strncmp(uv_archtype, "SGI4", 4) ? UV4_HUB_REVISION_BASE : + !strncmp(uv_archtype, "SGI3", 4) ? UV3_HUB_REVISION_BASE : + !strcmp(uv_archtype, "SGI2") ? UV2_HUB_REVISION_BASE : 0; + + switch (uv_hub_info->hub_revision) { + case UV5_HUB_REVISION_BASE: + uv_hubbed_system = 0x21; + uv_hub_type_set(UV5); + break; - /* - * Determine UV arch type. - * SGI2: UV2000/3000 - * SGI3: UV300 (truncated to 4 chars because of different varieties) - * SGI4: UV400 (truncated to 4 chars because of different varieties) - */ - if (!strncmp(oem_id, "SGI4", 4)) { - uv_hub_info->hub_revision = UV4_HUB_REVISION_BASE; + case UV4_HUB_REVISION_BASE: uv_hubbed_system = 0x11; + uv_hub_type_set(UV4); + break; - } else if (!strncmp(oem_id, "SGI3", 4)) { - uv_hub_info->hub_revision = UV3_HUB_REVISION_BASE; + case UV3_HUB_REVISION_BASE: uv_hubbed_system = 0x9; + uv_hub_type_set(UV3); + break; - } else if (!strcmp(oem_id, "SGI2")) { - uv_hub_info->hub_revision = UV2_HUB_REVISION_BASE; + case UV2_HUB_REVISION_BASE: uv_hubbed_system = 0x5; + uv_hub_type_set(UV2); + break; - } else { - uv_hub_info->hub_revision = 0; - goto badbios; + default: + return 0; } - pnodeid = early_get_pnodeid(); - early_get_apic_socketid_shift(); + /* Get UV hub chip part number & revision */ + early_set_hub_type(); + /* Other UV setup functions */ + early_get_pnodeid(); + early_get_apic_socketid_shift(); x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; x86_platform.nmi_init = uv_nmi_init; + uv_tsc_check_sync(); + + return 1; +} + +/* Called early to probe for the correct APIC driver */ +static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id) +{ + /* Set up early hub info fields for Node 0 */ + uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; + + /* If not UV, return. 
*/ + if (likely(uv_set_system_type(_oem_id) == 0)) + return 0; + + /* Save and Decode OEM Table ID */ + uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id); - if (!strcmp(oem_table_id, "UVX")) { - /* This is the most common hardware variant: */ + /* This is the most common hardware variant, x2apic mode */ + if (!strcmp(oem_table_id, "UVX")) uv_system_type = UV_X2APIC; - uv_apic = 0; - } else if (!strcmp(oem_table_id, "UVL")) { - /* Only used for very small systems: */ + /* Only used for very small systems, usually 1 chassis, legacy mode */ + else if (!strcmp(oem_table_id, "UVL")) uv_system_type = UV_LEGACY_APIC; - uv_apic = 0; - } else { + else goto badbios; - } - pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", oem_id, oem_table_id, uv_system_type, uv_min_hub_revision_id, uv_apic); - uv_tsc_check_sync(); + pr_info("UV: OEM IDs %s/%s, System/UVType %d/0x%x, HUB RevID %d\n", + oem_id, oem_table_id, uv_system_type, is_uv(UV_ANY), + uv_min_hub_revision_id); - return uv_apic; + return 0; badbios: - pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id); - pr_err("Current UV Type or BIOS not supported\n"); + pr_err("UV: UVarchtype:%s not supported\n", uv_archtype); BUG(); } @@ -673,12 +848,12 @@ static struct apic apic_x2apic_uv_x __ro_after_init = { }; #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3 -#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT +#define DEST_SHIFT UVXH_RH_GAM_ALIAS_0_REDIRECT_CONFIG_DEST_BASE_SHFT static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) { - union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias; - union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; + union uvh_rh_gam_alias_2_overlay_config_u alias; + union uvh_rh_gam_alias_2_redirect_config_u redirect; unsigned long m_redirect; unsigned long m_overlay; int i; @@ -686,16 +861,16 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) { switch (i) { case 0: - m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR; - m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR; + m_redirect = UVH_RH_GAM_ALIAS_0_REDIRECT_CONFIG; + m_overlay = UVH_RH_GAM_ALIAS_0_OVERLAY_CONFIG; break; case 1: - m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR; - m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR; + m_redirect = UVH_RH_GAM_ALIAS_1_REDIRECT_CONFIG; + m_overlay = UVH_RH_GAM_ALIAS_1_OVERLAY_CONFIG; break; case 2: - m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR; - m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR; + m_redirect = UVH_RH_GAM_ALIAS_2_REDIRECT_CONFIG; + m_overlay = UVH_RH_GAM_ALIAS_2_OVERLAY_CONFIG; break; } alias.v = uv_read_local_mmr(m_overlay); @@ -710,6 +885,7 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) } enum map_type {map_wb, map_uc}; +static const char * const mt[] = { "WB", "UC" }; static __init void map_high(char *id, unsigned long base, int pshift, int bshift, int max_pnode, enum map_type map_type) { @@ -721,23 +897,36 @@ static __init void map_high(char *id, unsigned long base, int pshift, int bshift pr_info("UV: Map %s_HI base address NULL\n", id); return; } - pr_debug("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes); if (map_type == map_uc) init_extra_mapping_uc(paddr, bytes); else init_extra_mapping_wb(paddr, bytes); + + pr_info("UV: Map %s_HI 0x%lx - 0x%lx %s (%d segments)\n", + id, paddr, paddr + bytes, mt[map_type], max_pnode + 1); } 
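/*
 * Editor's note -- illustrative sketch, not part of the patch above.  The
 * overlay MMRs decoded by map_mmr_high()/map_gru_high() hand map_high() a
 * base value plus two shifts, and the pr_info() added in this hunk prints
 * the resulting window.  Assuming the usual UV address arithmetic (the body
 * of map_high() is not shown in this hunk), the mapped range works out as
 * below; uv_calc_map_range() is a hypothetical helper, only base, pshift,
 * bshift and max_pnode come from the signature shown.
 */
struct uv_map_range {
	unsigned long paddr;	/* first byte of the overlay window */
	unsigned long bytes;	/* one (1UL << bshift) window per pnode */
};

static inline struct uv_map_range uv_calc_map_range(unsigned long base,
						    int pshift, int bshift,
						    int max_pnode)
{
	struct uv_map_range r;

	r.paddr = base << pshift;			/* MMR base field -> physical address */
	r.bytes = (1UL << bshift) * (max_pnode + 1);	/* pnodes 0..max_pnode */
	return r;
}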
static __init void map_gru_high(int max_pnode) { - union uvh_rh_gam_gru_overlay_config_mmr_u gru; - int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT; - unsigned long mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK; - unsigned long base; + union uvh_rh_gam_gru_overlay_config_u gru; + unsigned long mask, base; + int shift; + + if (UVH_RH_GAM_GRU_OVERLAY_CONFIG) { + gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG); + shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT; + mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_BASE_MASK; + } else if (UVH_RH10_GAM_GRU_OVERLAY_CONFIG) { + gru.v = uv_read_local_mmr(UVH_RH10_GAM_GRU_OVERLAY_CONFIG); + shift = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_SHFT; + mask = UVH_RH10_GAM_GRU_OVERLAY_CONFIG_BASE_MASK; + } else { + pr_err("UV: GRU unavailable (no MMR)\n"); + return; + } - gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); if (!gru.s.enable) { - pr_info("UV: GRU disabled\n"); + pr_info("UV: GRU disabled (by BIOS)\n"); return; } @@ -749,62 +938,104 @@ static __init void map_gru_high(int max_pnode) static __init void map_mmr_high(int max_pnode) { - union uvh_rh_gam_mmr_overlay_config_mmr_u mmr; - int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT; + unsigned long base; + int shift; + bool enable; + + if (UVH_RH10_GAM_MMR_OVERLAY_CONFIG) { + union uvh_rh10_gam_mmr_overlay_config_u mmr; + + mmr.v = uv_read_local_mmr(UVH_RH10_GAM_MMR_OVERLAY_CONFIG); + enable = mmr.s.enable; + base = mmr.s.base; + shift = UVH_RH10_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT; + } else if (UVH_RH_GAM_MMR_OVERLAY_CONFIG) { + union uvh_rh_gam_mmr_overlay_config_u mmr; + + mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG); + enable = mmr.s.enable; + base = mmr.s.base; + shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_BASE_SHFT; + } else { + pr_err("UV:%s:RH_GAM_MMR_OVERLAY_CONFIG MMR undefined?\n", + __func__); + return; + } - mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); - if (mmr.s.enable) - map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); + if (enable) + map_high("MMR", base, shift, shift, max_pnode, map_uc); else pr_info("UV: MMR disabled\n"); } -/* UV3/4 have identical MMIOH overlay configs, UV4A is slightly different */ -static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode) -{ - unsigned long overlay; - unsigned long mmr; - unsigned long base; - unsigned long nasid_mask; - unsigned long m_overlay; - int i, n, shift, m_io, max_io; - int nasid, lnasid, fi, li; - char *id; - - if (index == 0) { - id = "MMIOH0"; - m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR; - overlay = uv_read_local_mmr(m_overlay); - base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK; - mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR; - m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK) - >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT; - shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT; - n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH; - nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK; - } else { - id = "MMIOH1"; - m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR; - overlay = uv_read_local_mmr(m_overlay); - base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK; - mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR; - m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK) - >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT; - shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT; - n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH; - nasid_mask = 
UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK; +/* Arch specific ENUM cases */ +enum mmioh_arch { + UV2_MMIOH = -1, + UVY_MMIOH0, UVY_MMIOH1, + UVX_MMIOH0, UVX_MMIOH1, +}; + +/* Calculate and Map MMIOH Regions */ +static void __init calc_mmioh_map(enum mmioh_arch index, + int min_pnode, int max_pnode, + int shift, unsigned long base, int m_io, int n_io) +{ + unsigned long mmr, nasid_mask; + int nasid, min_nasid, max_nasid, lnasid, mapped; + int i, fi, li, n, max_io; + char id[8]; + + /* One (UV2) mapping */ + if (index == UV2_MMIOH) { + strncpy(id, "MMIOH", sizeof(id)); + max_io = max_pnode; + mapped = 0; + goto map_exit; } - pr_info("UV: %s overlay 0x%lx base:0x%lx m_io:%d\n", id, overlay, base, m_io); - if (!(overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)) { - pr_info("UV: %s disabled\n", id); + + /* small and large MMIOH mappings */ + switch (index) { + case UVY_MMIOH0: + mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0; + nasid_mask = UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK; + n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH; + min_nasid = min_pnode; + max_nasid = max_pnode; + mapped = 1; + break; + case UVY_MMIOH1: + mmr = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1; + nasid_mask = UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK; + n = UVH_RH10_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH; + min_nasid = min_pnode; + max_nasid = max_pnode; + mapped = 1; + break; + case UVX_MMIOH0: + mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0; + nasid_mask = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_MASK; + n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_DEPTH; + min_nasid = min_pnode * 2; + max_nasid = max_pnode * 2; + mapped = 1; + break; + case UVX_MMIOH1: + mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1; + nasid_mask = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_MASK; + n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_DEPTH; + min_nasid = min_pnode * 2; + max_nasid = max_pnode * 2; + mapped = 1; + break; + default: + pr_err("UV:%s:Invalid mapping type:%d\n", __func__, index); return; } - /* Convert to NASID: */ - min_pnode *= 2; - max_pnode *= 2; - max_io = lnasid = fi = li = -1; + /* enum values chosen so (index mod 2) is MMIOH 0/1 (low/high) */ + snprintf(id, sizeof(id), "MMIOH%d", index%2); + max_io = lnasid = fi = li = -1; for (i = 0; i < n; i++) { unsigned long m_redirect = mmr + i * 8; unsigned long redirect = uv_read_local_mmr(m_redirect); @@ -814,9 +1045,12 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode) pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n", id, redirect, m_redirect, nasid); - /* Invalid NASID: */ - if (nasid < min_pnode || max_pnode < nasid) + /* Invalid NASID check */ + if (nasid < min_nasid || max_nasid < nasid) { + pr_err("UV:%s:Invalid NASID:%x (range:%x..%x)\n", + __func__, index, min_nasid, max_nasid); nasid = -1; + } if (nasid == lnasid) { li = i; @@ -839,7 +1073,8 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode) } addr1 = (base << shift) + f * (1ULL << m_io); addr2 = (base << shift) + (l + 1) * (1ULL << m_io); - pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", id, fi, li, lnasid, addr1, addr2); + pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", + id, fi, li, lnasid, addr1, addr2); if (max_io < l) max_io = l; } @@ -847,49 +1082,93 @@ static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode) lnasid = nasid; } - pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", id, base, shift, m_io, max_io); +map_exit: + pr_info("UV: %s base:0x%lx shift:%d m_io:%d max_io:%d max_pnode:0x%x\n", + id, 
base, shift, m_io, max_io, max_pnode); - if (max_io >= 0) + if (max_io >= 0 && !mapped) map_high(id, base, shift, m_io, max_io, map_uc); } static __init void map_mmioh_high(int min_pnode, int max_pnode) { - union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; - unsigned long mmr, base; - int shift, enable, m_io, n_io; + /* UVY flavor */ + if (UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0) { + union uvh_rh10_gam_mmioh_overlay_config0_u mmioh0; + union uvh_rh10_gam_mmioh_overlay_config1_u mmioh1; + + mmioh0.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0); + if (unlikely(mmioh0.s.enable == 0)) + pr_info("UV: MMIOH0 disabled\n"); + else + calc_mmioh_map(UVY_MMIOH0, min_pnode, max_pnode, + UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT, + mmioh0.s.base, mmioh0.s.m_io, mmioh0.s.n_io); - if (is_uv3_hub() || is_uv4_hub()) { - /* Map both MMIOH regions: */ - map_mmioh_high_uv34(0, min_pnode, max_pnode); - map_mmioh_high_uv34(1, min_pnode, max_pnode); + mmioh1.v = uv_read_local_mmr(UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1); + if (unlikely(mmioh1.s.enable == 0)) + pr_info("UV: MMIOH1 disabled\n"); + else + calc_mmioh_map(UVY_MMIOH1, min_pnode, max_pnode, + UVH_RH10_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT, + mmioh1.s.base, mmioh1.s.m_io, mmioh1.s.n_io); return; } + /* UVX flavor */ + if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0) { + union uvh_rh_gam_mmioh_overlay_config0_u mmioh0; + union uvh_rh_gam_mmioh_overlay_config1_u mmioh1; + + mmioh0.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0); + if (unlikely(mmioh0.s.enable == 0)) + pr_info("UV: MMIOH0 disabled\n"); + else { + unsigned long base = uvxy_field(mmioh0, base, 0); + int m_io = uvxy_field(mmioh0, m_io, 0); + int n_io = uvxy_field(mmioh0, n_io, 0); + + calc_mmioh_map(UVX_MMIOH0, min_pnode, max_pnode, + UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_BASE_SHFT, + base, m_io, n_io); + } - if (is_uv2_hub()) { - mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR; - shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; - mmioh.v = uv_read_local_mmr(mmr); - enable = !!mmioh.s2.enable; - base = mmioh.s2.base; - m_io = mmioh.s2.m_io; - n_io = mmioh.s2.n_io; - - if (enable) { - max_pnode &= (1 << n_io) - 1; - pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n", - base, shift, m_io, n_io, max_pnode); - map_high("MMIOH", base, shift, m_io, max_pnode, map_uc); - } else { - pr_info("UV: MMIOH disabled\n"); + mmioh1.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1); + if (unlikely(mmioh1.s.enable == 0)) + pr_info("UV: MMIOH1 disabled\n"); + else { + unsigned long base = uvxy_field(mmioh1, base, 0); + int m_io = uvxy_field(mmioh1, m_io, 0); + int n_io = uvxy_field(mmioh1, n_io, 0); + + calc_mmioh_map(UVX_MMIOH1, min_pnode, max_pnode, + UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_BASE_SHFT, + base, m_io, n_io); } + return; + } + + /* UV2 flavor */ + if (UVH_RH_GAM_MMIOH_OVERLAY_CONFIG) { + union uvh_rh_gam_mmioh_overlay_config_u mmioh; + + mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG); + if (unlikely(mmioh.s2.enable == 0)) + pr_info("UV: MMIOH disabled\n"); + else + calc_mmioh_map(UV2_MMIOH, min_pnode, max_pnode, + UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_BASE_SHFT, + mmioh.s2.base, mmioh.s2.m_io, mmioh.s2.n_io); + return; } } static __init void map_low_mmrs(void) { - init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); - init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); + if (UV_GLOBAL_MMR32_BASE) + init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); + + if (UV_LOCAL_MMR_BASE) + init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); 
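/*
 * Editor's note -- illustrative sketch, not part of the patch.  The reworked
 * map_mmioh_high()/map_low_mmrs() above select the hub flavor by testing the
 * MMR address macros themselves (UVH_RH10_*, UVH_RH_*, ...), which the
 * generated per-hub headers are assumed to define as 0 on generations where
 * the register does not exist, so a plain `if (CONSTANT)` picks the flavor at
 * run time.  A minimal, hypothetical stand-alone example of that dispatch
 * pattern (the EXAMPLE_* names are made up):
 */
#define EXAMPLE_UV5_MMIOH_CONFIG	0x473000UL	/* nonzero: register exists on this hub */
#define EXAMPLE_UV2_MMIOH_CONFIG	0UL		/* zero: absent, branch is dead code */

static int example_pick_mmioh_flavor(void)
{
	if (EXAMPLE_UV5_MMIOH_CONFIG)	/* taken on a hub that defines the MMR */
		return 5;
	if (EXAMPLE_UV2_MMIOH_CONFIG)	/* compiles away when the macro is 0 */
		return 2;
	return 0;			/* no MMIOH overlay register at all */
}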
} static __init void uv_rtc_init(void) @@ -909,85 +1188,6 @@ static __init void uv_rtc_init(void) } } -/* - * percpu heartbeat timer - */ -static void uv_heartbeat(struct timer_list *timer) -{ - unsigned char bits = uv_scir_info->state; - - /* Flip heartbeat bit: */ - bits ^= SCIR_CPU_HEARTBEAT; - - /* Is this CPU idle? */ - if (idle_cpu(raw_smp_processor_id())) - bits &= ~SCIR_CPU_ACTIVITY; - else - bits |= SCIR_CPU_ACTIVITY; - - /* Update system controller interface reg: */ - uv_set_scir_bits(bits); - - /* Enable next timer period: */ - mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); -} - -static int uv_heartbeat_enable(unsigned int cpu) -{ - while (!uv_cpu_scir_info(cpu)->enabled) { - struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer; - - uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); - timer_setup(timer, uv_heartbeat, TIMER_PINNED); - timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; - add_timer_on(timer, cpu); - uv_cpu_scir_info(cpu)->enabled = 1; - - /* Also ensure that boot CPU is enabled: */ - cpu = 0; - } - return 0; -} - -#ifdef CONFIG_HOTPLUG_CPU -static int uv_heartbeat_disable(unsigned int cpu) -{ - if (uv_cpu_scir_info(cpu)->enabled) { - uv_cpu_scir_info(cpu)->enabled = 0; - del_timer(&uv_cpu_scir_info(cpu)->timer); - } - uv_set_cpu_scir_bits(cpu, 0xff); - return 0; -} - -static __init void uv_scir_register_cpu_notifier(void) -{ - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/x2apic-uvx:online", - uv_heartbeat_enable, uv_heartbeat_disable); -} - -#else /* !CONFIG_HOTPLUG_CPU */ - -static __init void uv_scir_register_cpu_notifier(void) -{ -} - -static __init int uv_init_heartbeat(void) -{ - int cpu; - - if (is_uv_system()) { - for_each_online_cpu(cpu) - uv_heartbeat_enable(cpu); - } - - return 0; -} - -late_initcall(uv_init_heartbeat); - -#endif /* !CONFIG_HOTPLUG_CPU */ - /* Direct Legacy VGA I/O traffic to designated IOH */ static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags) { @@ -1027,26 +1227,22 @@ struct mn { unsigned char n_lshift; }; +/* Initialize caller's MN struct and fill in values */ static void get_mn(struct mn *mnp) { - union uvh_rh_gam_config_mmr_u m_n_config; - union uv3h_gr0_gam_gr_config_u m_gr_config; - - /* Make sure the whole structure is well initialized: */ memset(mnp, 0, sizeof(*mnp)); - - m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR); - mnp->n_val = m_n_config.s.n_skt; - - if (is_uv4_hub()) { + mnp->n_val = uv_cpuid.n_skt; + if (is_uv(UV4|UVY)) { mnp->m_val = 0; mnp->n_lshift = 0; } else if (is_uv3_hub()) { - mnp->m_val = m_n_config.s3.m_skt; - m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG); + union uvyh_gr0_gam_gr_config_u m_gr_config; + + mnp->m_val = uv_cpuid.m_skt; + m_gr_config.v = uv_read_local_mmr(UVH_GR0_GAM_GR_CONFIG); mnp->n_lshift = m_gr_config.s3.m_skt; } else if (is_uv2_hub()) { - mnp->m_val = m_n_config.s2.m_skt; + mnp->m_val = uv_cpuid.m_skt; mnp->n_lshift = mnp->m_val == 40 ? 40 : 39; } mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0; @@ -1054,7 +1250,6 @@ static void get_mn(struct mn *mnp) static void __init uv_init_hub_info(struct uv_hub_info_s *hi) { - union uvh_node_id_u node_id; struct mn mn; get_mn(&mn); @@ -1067,7 +1262,9 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi) hi->m_shift = mn.m_shift; hi->n_lshift = mn.n_lshift ? 
mn.n_lshift : 0; hi->hub_revision = uv_hub_info->hub_revision; + hi->hub_type = uv_hub_info->hub_type; hi->pnode_mask = uv_cpuid.pnode_mask; + hi->nasid_shift = uv_cpuid.nasid_shift; hi->min_pnode = _min_pnode; hi->min_socket = _min_socket; hi->pnode_to_socket = _pnode_to_socket; @@ -1076,9 +1273,8 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi) hi->gr_table_len = _gr_table_len; hi->gr_table = _gr_table; - node_id.v = uv_read_local_mmr(UVH_NODE_ID); uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val); - hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1; + hi->gnode_extra = (uv_node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1; if (mn.m_val) hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val; @@ -1090,7 +1286,9 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi) hi->gpa_shift = uv_gp_table->gpa_shift; hi->gpa_mask = (1UL << hi->gpa_shift) - 1; } else { - hi->global_mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE; + hi->global_mmr_base = + uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG) & + ~UV_MMR_ENABLE; hi->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT; } @@ -1101,7 +1299,11 @@ static void __init uv_init_hub_info(struct uv_hub_info_s *hi) /* Show system specific info: */ pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n", hi->n_val, hi->m_val, hi->m_shift, hi->n_lshift); pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n", hi->gpa_mask, hi->gpa_shift, hi->pnode_mask, hi->apic_pnode_shift); - pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift, hi->global_gru_base, hi->global_gru_shift); + pr_info("UV: mmr_base/shift:0x%lx/%ld\n", hi->global_mmr_base, hi->global_mmr_shift); + if (hi->global_gru_base) + pr_info("UV: gru_base/shift:0x%lx/%ld\n", + hi->global_gru_base, hi->global_gru_shift); + pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n", hi->gnode_upper, hi->gnode_extra); } @@ -1173,21 +1375,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) pr_info("UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n", index, _min_socket, _max_socket, _min_pnode, _max_pnode); } +/* Walk through UVsystab decoding the fields */ static int __init decode_uv_systab(void) { struct uv_systab *st; int i; - /* If system is uv3 or lower, there is no extended UVsystab */ - if (is_uv_hubbed(0xfffffe) < uv(4) && is_uv_hubless(0xfffffe) < uv(4)) - return 0; /* No extended UVsystab required */ - + /* Get mapped UVsystab pointer */ st = uv_systab; + + /* If UVsystab is version 1, there is no extended UVsystab */ + if (st && st->revision == UV_SYSTAB_VERSION_1) + return 0; + if ((!st) || (st->revision < UV_SYSTAB_VERSION_UV4_LATEST)) { int rev = st ? 
st->revision : 0; - pr_err("UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n", rev, UV_SYSTAB_VERSION_UV4_LATEST); - pr_err("UV: Cannot support UV operations, switching to generic PC\n"); + pr_err("UV: BIOS UVsystab mismatch, (%x < %x)\n", + rev, UV_SYSTAB_VERSION_UV4_LATEST); + pr_err("UV: Does not support UV, switch to non-UV x86_64\n"); uv_system_type = UV_NONE; return -EINVAL; @@ -1199,7 +1405,8 @@ static int __init decode_uv_systab(void) if (!ptr) continue; - ptr = ptr + (unsigned long)st; + /* point to payload */ + ptr += (unsigned long)st; switch (st->entry[i].type) { case UV_SYSTAB_TYPE_GAM_PARAMS: @@ -1209,32 +1416,49 @@ static int __init decode_uv_systab(void) case UV_SYSTAB_TYPE_GAM_RNG_TBL: decode_gam_rng_tbl(ptr); break; + + case UV_SYSTAB_TYPE_ARCH_TYPE: + /* already processed in early startup */ + break; + + default: + pr_err("UV:%s:Unrecognized UV_SYSTAB_TYPE:%d, skipped\n", + __func__, st->entry[i].type); + break; } } return 0; } -/* - * Set up physical blade translations from UVH_NODE_PRESENT_TABLE - * .. NB: UVH_NODE_PRESENT_TABLE is going away, - * .. being replaced by GAM Range Table - */ +/* Set up physical blade translations from UVH_NODE_PRESENT_TABLE */ static __init void boot_init_possible_blades(struct uv_hub_info_s *hub_info) { + unsigned long np; int i, uv_pb = 0; - pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH); - for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) { - unsigned long np; - - np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8); - if (np) + if (UVH_NODE_PRESENT_TABLE) { + pr_info("UV: NODE_PRESENT_DEPTH = %d\n", + UVH_NODE_PRESENT_TABLE_DEPTH); + for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) { + np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8); pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np); - + uv_pb += hweight64(np); + } + } + if (UVH_NODE_PRESENT_0) { + np = uv_read_local_mmr(UVH_NODE_PRESENT_0); + pr_info("UV: NODE_PRESENT_0 = 0x%016lx\n", np); + uv_pb += hweight64(np); + } + if (UVH_NODE_PRESENT_1) { + np = uv_read_local_mmr(UVH_NODE_PRESENT_1); + pr_info("UV: NODE_PRESENT_1 = 0x%016lx\n", np); uv_pb += hweight64(np); } if (uv_possible_blades != uv_pb) uv_possible_blades = uv_pb; + + pr_info("UV: number nodes/possible blades %d\n", uv_pb); } static void __init build_socket_tables(void) @@ -1253,7 +1477,7 @@ static void __init build_socket_tables(void) pr_info("UV: No UVsystab socket table, ignoring\n"); return; } - pr_crit("UV: Error: UVsystab address translations not available!\n"); + pr_err("UV: Error: UVsystab address translations not available!\n"); BUG(); } @@ -1379,9 +1603,9 @@ static int __maybe_unused proc_hubless_show(struct seq_file *file, void *data) return 0; } -static int __maybe_unused proc_oemid_show(struct seq_file *file, void *data) +static int __maybe_unused proc_archtype_show(struct seq_file *file, void *data) { - seq_printf(file, "%s/%s\n", oem_id, oem_table_id); + seq_printf(file, "%s/%s\n", uv_archtype, oem_table_id); return 0; } @@ -1390,7 +1614,7 @@ static __init void uv_setup_proc_files(int hubless) struct proc_dir_entry *pde; pde = proc_mkdir(UV_PROC_NODE, NULL); - proc_create_single("oemid", 0, pde, proc_oemid_show); + proc_create_single("archtype", 0, pde, proc_archtype_show); if (hubless) proc_create_single("hubless", 0, pde, proc_hubless_show); else @@ -1429,7 +1653,8 @@ static void __init uv_system_init_hub(void) struct uv_hub_info_s hub_info = {0}; int bytes, cpu, nodeid; unsigned short min_pnode = 9999, max_pnode = 0; - char *hub = is_uv4_hub() ? 
"UV400" : + char *hub = is_uv5_hub() ? "UV500" : + is_uv4_hub() ? "UV400" : is_uv3_hub() ? "UV300" : is_uv2_hub() ? "UV2000/3000" : NULL; @@ -1441,12 +1666,14 @@ static void __init uv_system_init_hub(void) map_low_mmrs(); - /* Get uv_systab for decoding: */ + /* Get uv_systab for decoding, setup UV BIOS calls */ uv_bios_init(); /* If there's an UVsystab problem then abort UV init: */ - if (decode_uv_systab() < 0) + if (decode_uv_systab() < 0) { + pr_err("UV: Mangled UVsystab format\n"); return; + } build_socket_tables(); build_uv_gr_table(); @@ -1517,8 +1744,6 @@ static void __init uv_system_init_hub(void) uv_hub_info_list(numa_node_id)->pnode = pnode; else if (uv_cpu_hub_info(cpu)->pnode == 0xffff) uv_cpu_hub_info(cpu)->pnode = pnode; - - uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid); } for_each_node(nodeid) { @@ -1547,7 +1772,6 @@ static void __init uv_system_init_hub(void) uv_nmi_setup(); uv_cpu_init(); - uv_scir_register_cpu_notifier(); uv_setup_proc_files(0); /* Register Legacy VGA I/O redirection handler: */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index dcc3d943c68f..6062ce586b95 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -614,7 +614,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) * If BIOS has not enabled SME then don't advertise the * SME feature (set in scattered.c). * For SEV: If BIOS has not enabled SEV then don't advertise the - * SEV feature (set in scattered.c). + * SEV and SEV_ES feature (set in scattered.c). * * In all cases, since support for SME and SEV requires long mode, * don't advertise the feature under CONFIG_X86_32. @@ -645,6 +645,7 @@ clear_all: setup_clear_cpu_cap(X86_FEATURE_SME); clear_sev: setup_clear_cpu_cap(X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV_ES); } } diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index c5cf336e5077..345f7d905db6 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -65,6 +65,9 @@ static void init_c3(struct cpuinfo_x86 *c) c->x86_cache_alignment = c->x86_clflush_size * 2; set_cpu_cap(c, X86_FEATURE_REP_GOOD); } + + if (c->x86 >= 7) + set_cpu_cap(c, X86_FEATURE_REP_GOOD); } enum { @@ -90,18 +93,15 @@ enum { static void early_init_centaur(struct cpuinfo_x86 *c) { - switch (c->x86) { #ifdef CONFIG_X86_32 - case 5: - /* Emulate MTRRs using Centaur's MCR. */ + /* Emulate MTRRs using Centaur's MCR. 
*/ + if (c->x86 == 5) set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); - break; #endif - case 6: - if (c->x86_model >= 0xf) - set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); - break; - } + if ((c->x86 == 6 && c->x86_model >= 0xf) || + (c->x86 >= 7)) + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #endif @@ -145,9 +145,8 @@ static void init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } - switch (c->x86) { #ifdef CONFIG_X86_32 - case 5: + if (c->x86 == 5) { switch (c->x86_model) { case 4: name = "C6"; @@ -207,12 +206,10 @@ static void init_centaur(struct cpuinfo_x86 *c) c->x86_cache_size = (cc>>24)+(dd>>24); } sprintf(c->x86_model_id, "WinChip %s", name); - break; + } #endif - case 6: + if (c->x86 == 6 || c->x86 >= 7) init_c3(c); - break; - } #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c5d6f17d9b9d..35ad8480c464 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -23,6 +23,7 @@ #include <linux/syscore_ops.h> #include <linux/pgtable.h> +#include <asm/cmdline.h> #include <asm/stackprotector.h> #include <asm/perf_event.h> #include <asm/mmu_context.h> @@ -359,7 +360,7 @@ void native_write_cr0(unsigned long val) unsigned long bits_missing = 0; set_register: - asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order)); + asm volatile("mov %0,%%cr0": "+r" (val) : : "memory"); if (static_branch_likely(&cr_pinning)) { if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) { @@ -378,7 +379,7 @@ void native_write_cr4(unsigned long val) unsigned long bits_changed = 0; set_register: - asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); + asm volatile("mov %0,%%cr4": "+r" (val) : : "memory"); if (static_branch_likely(&cr_pinning)) { if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) { @@ -1221,6 +1222,59 @@ static void detect_nopl(void) } /* + * We parse cpu parameters early because fpu__init_system() is executed + * before parse_early_param(). + */ +static void __init cpu_parse_early_param(void) +{ + char arg[128]; + char *argptr = arg; + int arglen, res, bit; + +#ifdef CONFIG_X86_32 + if (cmdline_find_option_bool(boot_command_line, "no387")) +#ifdef CONFIG_MATH_EMULATION + setup_clear_cpu_cap(X86_FEATURE_FPU); +#else + pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n"); +#endif + + if (cmdline_find_option_bool(boot_command_line, "nofxsr")) + setup_clear_cpu_cap(X86_FEATURE_FXSR); +#endif + + if (cmdline_find_option_bool(boot_command_line, "noxsave")) + setup_clear_cpu_cap(X86_FEATURE_XSAVE); + + if (cmdline_find_option_bool(boot_command_line, "noxsaveopt")) + setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + + if (cmdline_find_option_bool(boot_command_line, "noxsaves")) + setup_clear_cpu_cap(X86_FEATURE_XSAVES); + + arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg)); + if (arglen <= 0) + return; + + pr_info("Clearing CPUID bits:"); + do { + res = get_option(&argptr, &bit); + if (res == 0 || res == 3) + break; + + /* If the argument was too long, the last bit may be cut off */ + if (res == 1 && arglen >= sizeof(arg)) + break; + + if (bit >= 0 && bit < NCAPINTS * 32) { + pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit)); + setup_clear_cpu_cap(bit); + } + } while (res == 2); + pr_cont("\n"); +} + +/* * Do minimum CPU detection early. * Fields really needed: vendor, cpuid_level, family, model, mask, * cache alignment. 
@@ -1255,6 +1309,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) get_cpu_cap(c); get_cpu_address_sizes(c); setup_force_cpu_cap(X86_FEATURE_CPUID); + cpu_parse_early_param(); if (this_cpu->c_early_init) this_cpu->c_early_init(c); @@ -1413,15 +1468,7 @@ static void generic_identify(struct cpuinfo_x86 *c) * ESPFIX issue, we can change this. */ #ifdef CONFIG_X86_32 -# ifdef CONFIG_PARAVIRT_XXL - do { - extern void native_iret(void); - if (pv_ops.cpu.iret == native_iret) - set_cpu_bug(c, X86_BUG_ESPFIX); - } while (0); -# else set_cpu_bug(c, X86_BUG_ESPFIX); -# endif #endif } @@ -1829,6 +1876,8 @@ static inline void tss_setup_ist(struct tss_struct *tss) tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); + /* Only mapped when SEV-ES is active */ + tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); } #else /* CONFIG_X86_64 */ @@ -1861,6 +1910,29 @@ static inline void tss_setup_io_bitmap(struct tss_struct *tss) } /* + * Setup everything needed to handle exceptions from the IDT, including the IST + * exceptions which use paranoid_entry(). + */ +void cpu_init_exception_handling(void) +{ + struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); + int cpu = raw_smp_processor_id(); + + /* paranoid_entry() gets the CPU number from the GDT */ + setup_getcpu(cpu); + + /* IST vectors need TSS to be set up. */ + tss_setup_ist(tss); + tss_setup_io_bitmap(tss); + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); + + load_TR_desc(); + + /* Finally load the IDT */ + load_current_idt(); +} + +/* * cpu_init() initializes state that is per-CPU. Some data is already * initialized (naturally) in the bootstrap process, such as the GDT * and IDT. 
We reload them nevertheless, this function acts as a diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 3cbe24ca80ab..d502241995a3 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -69,6 +69,8 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, + { X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES }, + { X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA }, {} }; diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 99be063fcb1b..0c6b02dd744c 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -132,49 +132,49 @@ static enum smca_bank_types smca_get_bank_type(unsigned int bank) } static struct smca_hwid smca_hwid_mcatypes[] = { - /* { bank_type, hwid_mcatype, xec_bitmap } */ + /* { bank_type, hwid_mcatype } */ /* Reserved type */ - { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 }, + { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0) }, /* ZN Core (HWID=0xB0) MCA types */ - { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF }, - { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF }, - { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, - { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF }, - { SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF }, + { SMCA_LS, HWID_MCATYPE(0xB0, 0x0) }, + { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10) }, + { SMCA_IF, HWID_MCATYPE(0xB0, 0x1) }, + { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2) }, + { SMCA_DE, HWID_MCATYPE(0xB0, 0x3) }, /* HWID 0xB0 MCATYPE 0x4 is Reserved */ - { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0xFFF }, - { SMCA_FP, HWID_MCATYPE(0xB0, 0x6), 0x7F }, - { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF }, + { SMCA_EX, HWID_MCATYPE(0xB0, 0x5) }, + { SMCA_FP, HWID_MCATYPE(0xB0, 0x6) }, + { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7) }, /* Data Fabric MCA types */ - { SMCA_CS, HWID_MCATYPE(0x2E, 0x0), 0x1FF }, - { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0x1F }, - { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2), 0x3FFF }, + { SMCA_CS, HWID_MCATYPE(0x2E, 0x0) }, + { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1) }, + { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2) }, /* Unified Memory Controller MCA type */ - { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0xFF }, + { SMCA_UMC, HWID_MCATYPE(0x96, 0x0) }, /* Parameter Block MCA type */ - { SMCA_PB, HWID_MCATYPE(0x05, 0x0), 0x1 }, + { SMCA_PB, HWID_MCATYPE(0x05, 0x0) }, /* Platform Security Processor MCA type */ - { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0), 0x1 }, - { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1), 0x3FFFF }, + { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0) }, + { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1) }, /* System Management Unit MCA type */ - { SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 }, - { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1), 0x7FF }, + { SMCA_SMU, HWID_MCATYPE(0x01, 0x0) }, + { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1) }, /* Microprocessor 5 Unit MCA type */ - { SMCA_MP5, HWID_MCATYPE(0x01, 0x2), 0x3FF }, + { SMCA_MP5, HWID_MCATYPE(0x01, 0x2) }, /* Northbridge IO Unit MCA type */ - { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0), 0x1F }, + { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0) }, /* PCI Express Unit MCA type */ - { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0), 0x1F }, + { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) }, }; struct smca_bank smca_banks[MAX_NR_BANKS]; diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index fc4f8c04bdb5..1c08cb9eb9f6 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -40,7 +40,6 @@ #include <linux/debugfs.h> 
#include <linux/irq_work.h> #include <linux/export.h> -#include <linux/jump_label.h> #include <linux/set_memory.h> #include <linux/sync_core.h> #include <linux/task_work.h> @@ -373,42 +372,105 @@ static int msr_to_offset(u32 msr) return -1; } +__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr) +{ + pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", + (unsigned int)regs->cx, regs->ip, (void *)regs->ip); + + show_stack_regs(regs); + + panic("MCA architectural violation!\n"); + + while (true) + cpu_relax(); + + return true; +} + /* MSR access wrappers used for error injection */ -static u64 mce_rdmsrl(u32 msr) +static noinstr u64 mce_rdmsrl(u32 msr) { - u64 v; + DECLARE_ARGS(val, low, high); if (__this_cpu_read(injectm.finished)) { - int offset = msr_to_offset(msr); + int offset; + u64 ret; + + instrumentation_begin(); + offset = msr_to_offset(msr); if (offset < 0) - return 0; - return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); - } + ret = 0; + else + ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); - if (rdmsrl_safe(msr, &v)) { - WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr); - /* - * Return zero in case the access faulted. This should - * not happen normally but can happen if the CPU does - * something weird, or if the code is buggy. - */ - v = 0; + instrumentation_end(); + + return ret; } - return v; + /* + * RDMSR on MCA MSRs should not fault. If they do, this is very much an + * architectural violation and needs to be reported to hw vendor. Panic + * the box to not allow any further progress. + */ + asm volatile("1: rdmsr\n" + "2:\n" + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault) + : EAX_EDX_RET(val, low, high) : "c" (msr)); + + + return EAX_EDX_VAL(val, low, high); +} + +__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr) +{ + pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", + (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax, + regs->ip, (void *)regs->ip); + + show_stack_regs(regs); + + panic("MCA architectural violation!\n"); + + while (true) + cpu_relax(); + + return true; } -static void mce_wrmsrl(u32 msr, u64 v) +static noinstr void mce_wrmsrl(u32 msr, u64 v) { + u32 low, high; + if (__this_cpu_read(injectm.finished)) { - int offset = msr_to_offset(msr); + int offset; + + instrumentation_begin(); + offset = msr_to_offset(msr); if (offset >= 0) *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; + + instrumentation_end(); + return; } - wrmsrl(msr, v); + + low = (u32)v; + high = (u32)(v >> 32); + + /* See comment in mce_rdmsrl() */ + asm volatile("1: wrmsr\n" + "2:\n" + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault) + : : "c" (msr), "a"(low), "d" (high) : "memory"); } /* @@ -745,7 +807,7 @@ log_it: goto clear_it; mce_read_aux(&m, i); - m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); + m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false); /* * Don't get the IP here because it's unlikely to * have anything to do with the actual error location. 
@@ -794,7 +856,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, quirk_no_way_out(i, m, regs); m->bank = i; - if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { mce_read_aux(m, i); *msg = tmp; return 1; @@ -872,7 +934,6 @@ static void mce_reign(void) struct mce *m = NULL; int global_worst = 0; char *msg = NULL; - char *nmsg = NULL; /* * This CPU is the Monarch and the other CPUs have run @@ -880,12 +941,10 @@ static void mce_reign(void) * Grade the severity of the errors of all the CPUs. */ for_each_possible_cpu(cpu) { - int severity = mce_severity(&per_cpu(mces_seen, cpu), - mca_cfg.tolerant, - &nmsg, true); - if (severity > global_worst) { - msg = nmsg; - global_worst = severity; + struct mce *mtmp = &per_cpu(mces_seen, cpu); + + if (mtmp->severity > global_worst) { + global_worst = mtmp->severity; m = &per_cpu(mces_seen, cpu); } } @@ -895,8 +954,11 @@ static void mce_reign(void) * This dumps all the mces in the log buffer and stops the * other CPUs. */ - if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) + if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { + /* call mce_severity() to get "msg" for panic */ + mce_severity(m, NULL, mca_cfg.tolerant, &msg, true); mce_panic("Fatal machine check", m, msg); + } /* * For UC somewhere we let the CPU who detects it handle it. @@ -1105,7 +1167,7 @@ static noinstr bool mce_check_crashing_cpu(void) return false; } -static void __mc_scan_banks(struct mce *m, struct mce *final, +static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final, unsigned long *toclear, unsigned long *valid_banks, int no_way_out, int *worst) { @@ -1140,7 +1202,7 @@ static void __mc_scan_banks(struct mce *m, struct mce *final, /* Set taint even when machine check was not enabled. 
*/ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - severity = mce_severity(m, cfg->tolerant, NULL, true); + severity = mce_severity(m, regs, cfg->tolerant, NULL, true); /* * When machine check was for corrected/deferred handler don't @@ -1188,13 +1250,34 @@ static void kill_me_maybe(struct callback_head *cb) if (!p->mce_ripv) flags |= MF_MUST_KILL; - if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags)) { + if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) && + !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) { set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); + sync_core(); return; } - pr_err("Memory error not recovered"); - kill_me_now(cb); + if (p->mce_vaddr != (void __user *)-1l) { + force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT); + } else { + pr_err("Memory error not recovered"); + kill_me_now(cb); + } +} + +static void queue_task_work(struct mce *m, int kill_it) +{ + current->mce_addr = m->addr; + current->mce_kflags = m->kflags; + current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); + current->mce_whole_page = whole_page(m); + + if (kill_it) + current->mce_kill_me.func = kill_me_now; + else + current->mce_kill_me.func = kill_me_maybe; + + task_work_add(current, &current->mce_kill_me, true); } /* @@ -1291,7 +1374,7 @@ noinstr void do_machine_check(struct pt_regs *regs) order = mce_start(&no_way_out); } - __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst); + __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst); if (!no_way_out) mce_clear_state(toclear); @@ -1313,7 +1396,7 @@ noinstr void do_machine_check(struct pt_regs *regs) * make sure we have the right "msg". */ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { - mce_severity(&m, cfg->tolerant, &msg, true); + mce_severity(&m, regs, cfg->tolerant, &msg, true); mce_panic("Local fatal machine check!", &m, msg); } } @@ -1330,25 +1413,16 @@ noinstr void do_machine_check(struct pt_regs *regs) if (worst > 0) irq_work_queue(&mce_irq_work); - mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); - - sync_core(); - if (worst != MCE_AR_SEVERITY && !kill_it) - return; + goto out; /* Fault was in user mode and we need to take some action */ if ((m.cs & 3) == 3) { /* If this triggers there is no way to recover. Die hard. */ BUG_ON(!on_thread_stack() || !user_mode(regs)); - current->mce_addr = m.addr; - current->mce_ripv = !!(m.mcgstatus & MCG_STATUS_RIPV); - current->mce_whole_page = whole_page(&m); - current->mce_kill_me.func = kill_me_maybe; - if (kill_it) - current->mce_kill_me.func = kill_me_now; - task_work_add(current, &current->mce_kill_me, true); + queue_task_work(&m, kill_it); + } else { /* * Handle an MCE which has happened in kernel space but from @@ -1363,7 +1437,12 @@ noinstr void do_machine_check(struct pt_regs *regs) if (!fixup_exception(regs, X86_TRAP_MC, 0, 0)) mce_panic("Failed kernel mode recovery", &m, msg); } + + if (m.kflags & MCE_IN_KERNEL_COPYIN) + queue_task_work(&m, kill_it); } +out: + mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); } EXPORT_SYMBOL_GPL(do_machine_check); @@ -2064,7 +2143,7 @@ void mce_disable_bank(int bank) and older. * mce=nobootlog Don't log MCEs from before booting.
* mce=bios_cmci_threshold Don't program the CMCI threshold - * mce=recovery force enable memcpy_mcsafe() + * mce=recovery force enable copy_mc_fragile() */ static int __init mcheck_enable(char *str) { @@ -2672,13 +2751,10 @@ static void __init mcheck_debugfs_init(void) static void __init mcheck_debugfs_init(void) { } #endif -DEFINE_STATIC_KEY_FALSE(mcsafe_key); -EXPORT_SYMBOL_GPL(mcsafe_key); - static int __init mcheck_late_init(void) { if (mca_cfg.recovery) - static_branch_inc(&mcsafe_key); + enable_copy_mc_fragile(); mcheck_debugfs_init(); diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c index 03e51053592a..100fbeebdc72 100644 --- a/arch/x86/kernel/cpu/mce/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c @@ -67,7 +67,9 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val, unlock: mutex_unlock(&mce_chrdev_read_mutex); - mce->kflags |= MCE_HANDLED_MCELOG; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + mce->kflags |= MCE_HANDLED_MCELOG; + return NOTIFY_OK; } diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 6473070b5da4..88dcc79cfb07 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -38,7 +38,8 @@ int mce_gen_pool_add(struct mce *mce); int mce_gen_pool_init(void); struct llist_node *mce_gen_pool_prepare_records(void); -extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp); +extern int (*mce_severity)(struct mce *a, struct pt_regs *regs, + int tolerant, char **msg, bool is_excp); struct dentry *mce_get_debugfs_dir(void); extern mce_banks_t mce_banks_ce_disabled; @@ -185,4 +186,14 @@ extern bool amd_filter_mce(struct mce *m); static inline bool amd_filter_mce(struct mce *m) { return false; }; #endif +__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr); + +__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr); + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index e1da619add19..83df991314c5 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -9,9 +9,14 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/debugfs.h> -#include <asm/mce.h> #include <linux/uaccess.h> +#include <asm/mce.h> +#include <asm/intel-family.h> +#include <asm/traps.h> +#include <asm/insn.h> +#include <asm/insn-eval.h> + #include "internal.h" /* @@ -40,9 +45,14 @@ static struct severity { unsigned char context; unsigned char excp; unsigned char covered; + unsigned char cpu_model; + unsigned char cpu_minstepping; + unsigned char bank_lo, bank_hi; char *msg; } severities[] = { #define MCESEV(s, m, c...) 
{ .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } +#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h +#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s #define KERNEL .context = IN_KERNEL #define USER .context = IN_USER #define KERNEL_RECOV .context = IN_KERNEL_RECOV @@ -90,14 +100,9 @@ static struct severity { EXCP, KERNEL_RECOV, MCGMASK(MCG_STATUS_RIPV, 0) ), MCESEV( - DEFERRED, "Deferred error", - NOSER, MASK(MCI_STATUS_UC|MCI_STATUS_DEFERRED|MCI_STATUS_POISON, MCI_STATUS_DEFERRED) - ), - MCESEV( KEEP, "Corrected error", NOSER, BITCLR(MCI_STATUS_UC) ), - /* * known AO MCACODs reported via MCE or CMC: * @@ -113,6 +118,18 @@ static struct severity { AO, "Action optional: last level cache writeback error", SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB) ), + /* + * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured + * to report uncorrected errors using CMCI with a special signature. + * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported + * in one of the memory controller banks. + * Set severity to "AO" for same action as normal patrol scrub error. + */ + MCESEV( + AO, "Uncorrected Patrol Scrub Error", + SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0), + MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18) + ), /* ignore OVER for UCNA */ MCESEV( @@ -198,6 +215,47 @@ static struct severity { #define mc_recoverable(mcg) (((mcg) & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) == \ (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) +static bool is_copy_from_user(struct pt_regs *regs) +{ + u8 insn_buf[MAX_INSN_SIZE]; + struct insn insn; + unsigned long addr; + + if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE)) + return false; + + kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE); + insn_get_opcode(&insn); + if (!insn.opcode.got) + return false; + + switch (insn.opcode.value) { + /* MOV mem,reg */ + case 0x8A: case 0x8B: + /* MOVZ mem,reg */ + case 0xB60F: case 0xB70F: + insn_get_modrm(&insn); + insn_get_sib(&insn); + if (!insn.modrm.got || !insn.sib.got) + return false; + addr = (unsigned long)insn_get_addr_ref(&insn, regs); + break; + /* REP MOVS */ + case 0xA4: case 0xA5: + addr = regs->si; + break; + default: + return false; + } + + if (fault_in_kernel_space(addr)) + return false; + + current->mce_vaddr = (void __user *)addr; + + return true; +} + /* * If mcgstatus indicated that ip/cs on the stack were * no good, then "m->cs" will be zero and we will have @@ -209,15 +267,25 @@ static struct severity { * distinguish an exception taken in user from from one * taken in the kernel. */ -static int error_context(struct mce *m) +static int error_context(struct mce *m, struct pt_regs *regs) { + enum handler_type t; + if ((m->cs & 3) == 3) return IN_USER; + if (!mc_recoverable(m->mcgstatus)) + return IN_KERNEL; - if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) { + t = ex_get_fault_handler_type(m->ip); + if (t == EX_HANDLER_FAULT) { m->kflags |= MCE_IN_KERNEL_RECOV; return IN_KERNEL_RECOV; } + if (t == EX_HANDLER_UACCESS && regs && is_copy_from_user(regs)) { + m->kflags |= MCE_IN_KERNEL_RECOV; + m->kflags |= MCE_IN_KERNEL_COPYIN; + return IN_KERNEL_RECOV; + } return IN_KERNEL; } @@ -253,9 +321,10 @@ static int mce_severity_amd_smca(struct mce *m, enum context err_ctx) * See AMD Error Scope Hierarchy table in a newer BKDG. 
For example * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features" */ -static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp) +static int mce_severity_amd(struct mce *m, struct pt_regs *regs, int tolerant, + char **msg, bool is_excp) { - enum context ctx = error_context(m); + enum context ctx = error_context(m, regs); /* Processor Context Corrupt, no need to fumble too much, die! */ if (m->status & MCI_STATUS_PCC) @@ -305,10 +374,11 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc return MCE_KEEP_SEVERITY; } -static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp) +static int mce_severity_intel(struct mce *m, struct pt_regs *regs, + int tolerant, char **msg, bool is_excp) { enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP); - enum context ctx = error_context(m); + enum context ctx = error_context(m, regs); struct severity *s; for (s = severities;; s++) { @@ -324,6 +394,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e continue; if (s->excp && excp != s->excp) continue; + if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model) + continue; + if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping) + continue; + if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi)) + continue; if (msg) *msg = s->msg; s->covered = 1; @@ -336,7 +412,7 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e } /* Default to mce_severity_intel */ -int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) = +int (*mce_severity)(struct mce *m, struct pt_regs *regs, int tolerant, char **msg, bool is_excp) = mce_severity_intel; void __init mcheck_vendor_init_severity(void) diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 31125448b174..05ef1f4550cb 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -55,9 +55,14 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback) set_irq_regs(old_regs); } -void hv_setup_vmbus_irq(void (*handler)(void)) +int hv_setup_vmbus_irq(int irq, void (*handler)(void)) { + /* + * The 'irq' argument is ignored on x86/x64 because a hard-coded + * interrupt vector is used for Hyper-V interrupts. + */ vmbus_handler = handler; + return 0; } void hv_remove_vmbus_irq(void) @@ -248,7 +253,7 @@ static void __init ms_hyperv_init_platform(void) hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF); } - if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && + if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS && ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { x86_platform.calibrate_tsc = hv_get_tsc_khz; x86_platform.calibrate_cpu = hv_get_tsc_khz; @@ -270,7 +275,7 @@ static void __init ms_hyperv_init_platform(void) crash_kexec_post_notifiers = true; #ifdef CONFIG_X86_LOCAL_APIC - if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && + if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS && ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { /* * Get the APIC frequency. 
@@ -296,7 +301,7 @@ static void __init ms_hyperv_init_platform(void) machine_ops.shutdown = hv_machine_shutdown; machine_ops.crash_shutdown = hv_machine_crash_shutdown; #endif - if (ms_hyperv.features & HV_X64_ACCESS_TSC_INVARIANT) { + if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) { wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1); setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); } else { @@ -330,7 +335,7 @@ static void __init ms_hyperv_init_platform(void) alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_hyperv_callback); /* Setup the IDT for reenlightenment notifications */ - if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT) { + if (ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT) { alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR, asm_sysvec_hyperv_reenlightenment); } diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 6a9df71c1b9e..e5f4ee8f4c3b 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -168,6 +168,7 @@ struct rdt_resource rdt_resources_all[] = { .name = "MB", .domains = domain_init(RDT_RESOURCE_MBA), .cache_level = 3, + .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, @@ -254,22 +255,30 @@ static bool __get_mem_config_intel(struct rdt_resource *r) { union cpuid_0x10_3_eax eax; union cpuid_0x10_x_edx edx; - u32 ebx, ecx; + u32 ebx, ecx, max_delay; cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full); r->num_closid = edx.split.cos_max + 1; - r->membw.max_delay = eax.split.max_delay + 1; + max_delay = eax.split.max_delay + 1; r->default_ctrl = MAX_MBA_BW; + r->membw.arch_needs_linear = true; if (ecx & MBA_IS_LINEAR) { r->membw.delay_linear = true; - r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay; - r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay; + r->membw.min_bw = MAX_MBA_BW - max_delay; + r->membw.bw_gran = MAX_MBA_BW - max_delay; } else { if (!rdt_get_mb_table(r)) return false; + r->membw.arch_needs_linear = false; } r->data_width = 3; + if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA)) + r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; + else + r->membw.throttle_mode = THREAD_THROTTLE_MAX; + thread_throttle_mode_init(); + r->alloc_capable = true; r->alloc_enabled = true; @@ -288,7 +297,13 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r) /* AMD does not use delay */ r->membw.delay_linear = false; + r->membw.arch_needs_linear = false; + /* + * AMD does not use memory delay throttle model to control + * the allocation like Intel does. 
+ */ + r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; r->membw.min_bw = 0; r->membw.bw_gran = 1; /* Max value is 2048, Data width should be 4 in decimal */ @@ -346,19 +361,6 @@ static void rdt_get_cdp_l2_config(void) rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE); } -static int get_cache_id(int cpu, int level) -{ - struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); - int i; - - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == level) - return ci->info_list[i].id; - } - - return -1; -} - static void mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { @@ -556,13 +558,13 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) */ static void domain_add_cpu(int cpu, struct rdt_resource *r) { - int id = get_cache_id(cpu, r->cache_level); + int id = get_cpu_cacheinfo_id(cpu, r->cache_level); struct list_head *add_pos = NULL; struct rdt_domain *d; d = rdt_find_domain(r, id, &add_pos); if (IS_ERR(d)) { - pr_warn("Could't find cache id for cpu %d\n", cpu); + pr_warn("Couldn't find cache id for CPU %d\n", cpu); return; } @@ -602,12 +604,12 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) static void domain_remove_cpu(int cpu, struct rdt_resource *r) { - int id = get_cache_id(cpu, r->cache_level); + int id = get_cpu_cacheinfo_id(cpu, r->cache_level); struct rdt_domain *d; d = rdt_find_domain(r, id, NULL); if (IS_ERR_OR_NULL(d)) { - pr_warn("Could't find cache id for cpu %d\n", cpu); + pr_warn("Couldn't find cache id for CPU %d\n", cpu); return; } @@ -918,12 +920,12 @@ static __init void rdt_init_res_defs_intel(void) r->rid == RDT_RESOURCE_L3CODE || r->rid == RDT_RESOURCE_L2 || r->rid == RDT_RESOURCE_L2DATA || - r->rid == RDT_RESOURCE_L2CODE) - r->cbm_validate = cbm_validate_intel; - else if (r->rid == RDT_RESOURCE_MBA) { + r->rid == RDT_RESOURCE_L2CODE) { + r->cache.arch_has_sparse_bitmaps = false; + r->cache.arch_has_empty_bitmaps = false; + } else if (r->rid == RDT_RESOURCE_MBA) { r->msr_base = MSR_IA32_MBA_THRTL_BASE; r->msr_update = mba_wrmsr_intel; - r->parse_ctrlval = parse_bw_intel; } } } @@ -938,12 +940,12 @@ static __init void rdt_init_res_defs_amd(void) r->rid == RDT_RESOURCE_L3CODE || r->rid == RDT_RESOURCE_L2 || r->rid == RDT_RESOURCE_L2DATA || - r->rid == RDT_RESOURCE_L2CODE) - r->cbm_validate = cbm_validate_amd; - else if (r->rid == RDT_RESOURCE_MBA) { + r->rid == RDT_RESOURCE_L2CODE) { + r->cache.arch_has_sparse_bitmaps = true; + r->cache.arch_has_empty_bitmaps = true; + } else if (r->rid == RDT_RESOURCE_MBA) { r->msr_base = MSR_IA32_MBA_BW_BASE; r->msr_update = mba_wrmsr_amd; - r->parse_ctrlval = parse_bw_amd; } } } diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 934c8fb8a64a..c877642e8a14 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -23,53 +23,6 @@ /* * Check whether MBA bandwidth percentage value is correct. The value is - * checked against the minimum and maximum bandwidth values specified by - * the hardware. The allocated bandwidth percentage is rounded to the next - * control step available on the hardware. 
- */ -static bool bw_validate_amd(char *buf, unsigned long *data, - struct rdt_resource *r) -{ - unsigned long bw; - int ret; - - ret = kstrtoul(buf, 10, &bw); - if (ret) { - rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); - return false; - } - - if (bw < r->membw.min_bw || bw > r->default_ctrl) { - rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, - r->membw.min_bw, r->default_ctrl); - return false; - } - - *data = roundup(bw, (unsigned long)r->membw.bw_gran); - return true; -} - -int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d) -{ - unsigned long bw_val; - - if (d->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - if (!bw_validate_amd(data->buf, &bw_val, r)) - return -EINVAL; - - d->new_ctrl = bw_val; - d->have_new_ctrl = true; - - return 0; -} - -/* - * Check whether MBA bandwidth percentage value is correct. The value is * checked against the minimum and max bandwidth values specified by the * hardware. The allocated bandwidth percentage is rounded to the next * control step available on the hardware. @@ -82,7 +35,7 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) /* * Only linear delay values is supported for current Intel SKUs. */ - if (!r->membw.delay_linear) { + if (!r->membw.delay_linear && r->membw.arch_needs_linear) { rdt_last_cmd_puts("No support for non-linear MB domains\n"); return false; } @@ -104,8 +57,8 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return true; } -int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d) +int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) { unsigned long bw_val; @@ -123,12 +76,14 @@ int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, } /* - * Check whether a cache bit mask is valid. The SDM says: + * Check whether a cache bit mask is valid. + * For Intel the SDM says: * Please note that all (and only) contiguous '1' combinations * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). * Additionally Haswell requires at least two bits set. + * AMD allows non-contiguous bitmasks. */ -bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) +static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) { unsigned long first_bit, zero_bit, val; unsigned int cbm_len = r->cache.cbm_len; @@ -140,7 +95,8 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) return false; } - if (val == 0 || val > r->default_ctrl) { + if ((!r->cache.arch_has_empty_bitmaps && val == 0) || + val > r->default_ctrl) { rdt_last_cmd_puts("Mask out of range\n"); return false; } @@ -148,7 +104,9 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) first_bit = find_first_bit(&val, cbm_len); zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) { + /* Are non-contiguous bitmaps allowed? */ + if (!r->cache.arch_has_sparse_bitmaps && + (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); return false; } @@ -164,30 +122,6 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) } /* - * Check whether a cache bit mask is valid. 
AMD allows non-contiguous - * bitmasks - */ -bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r) -{ - unsigned long val; - int ret; - - ret = kstrtoul(buf, 16, &val); - if (ret) { - rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); - return false; - } - - if (val > r->default_ctrl) { - rdt_last_cmd_puts("Mask out of range\n"); - return false; - } - - *data = val; - return true; -} - -/* * Read one cache bit mask (hex). Check that it is valid for the current * resource type. */ @@ -212,7 +146,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, return -EINVAL; } - if (!r->cbm_validate(data->buf, &cbm_val, r)) + if (!cbm_validate(data->buf, &cbm_val, r)) return -EINVAL; if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 5ffa32256b3b..80fa997fae60 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -283,7 +283,6 @@ struct rftype { * struct mbm_state - status for each MBM counter in each domain * @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes) * @prev_msr Value of IA32_QM_CTR for this RMID last time we read it - * @chunks_bw Total local data moved. Used for bandwidth calculation * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting * @prev_bw The most recent bandwidth in MBps * @delta_bw Difference between the current and previous bandwidth @@ -292,7 +291,6 @@ struct rftype { struct mbm_state { u64 chunks; u64 prev_msr; - u64 chunks_bw; u64 prev_bw_msr; u32 prev_bw; u32 delta_bw; @@ -360,6 +358,8 @@ struct msr_param { * in a cache bit mask * @shareable_bits: Bitmask of shareable resource with other * executing entities + * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_empty_bitmaps: True if the '0' bitmap is valid. */ struct rdt_cache { unsigned int cbm_len; @@ -367,25 +367,43 @@ struct rdt_cache { unsigned int cbm_idx_mult; unsigned int cbm_idx_offset; unsigned int shareable_bits; + bool arch_has_sparse_bitmaps; + bool arch_has_empty_bitmaps; +}; + +/** + * enum membw_throttle_mode - System's memory bandwidth throttling mode + * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system + * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core + * always using smallest bandwidth percentage + * assigned to threads, aka "max throttling" + * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread + */ +enum membw_throttle_mode { + THREAD_THROTTLE_UNDEFINED = 0, + THREAD_THROTTLE_MAX, + THREAD_THROTTLE_PER_THREAD, }; /** * struct rdt_membw - Memory bandwidth allocation related data - * @max_delay: Max throttle delay. Delay is the hardware - * representation for memory bandwidth. 
* @min_bw: Minimum memory bandwidth percentage user can request * @bw_gran: Granularity at which the memory bandwidth is allocated * @delay_linear: True if memory B/W delay is in linear scale + * @arch_needs_linear: True if we can't configure non-linear resources + * @throttle_mode: Bandwidth throttling mode when threads request + * different memory bandwidths * @mba_sc: True if MBA software controller(mba_sc) is enabled * @mb_map: Mapping of memory B/W percentage to memory B/W delay */ struct rdt_membw { - u32 max_delay; - u32 min_bw; - u32 bw_gran; - u32 delay_linear; - bool mba_sc; - u32 *mb_map; + u32 min_bw; + u32 bw_gran; + u32 delay_linear; + bool arch_needs_linear; + enum membw_throttle_mode throttle_mode; + bool mba_sc; + u32 *mb_map; }; static inline bool is_llc_occupancy_enabled(void) @@ -437,7 +455,6 @@ struct rdt_parse_data { * @cache: Cache allocation related data * @format_str: Per resource format string to show domain value * @parse_ctrlval: Per resource function pointer to parse control values - * @cbm_validate Cache bitmask validate function * @evt_list: List of monitoring events * @num_rmid: Number of RMIDs available * @mon_scale: cqm counter * mon_scale = occupancy in bytes @@ -464,7 +481,6 @@ struct rdt_resource { int (*parse_ctrlval)(struct rdt_parse_data *data, struct rdt_resource *r, struct rdt_domain *d); - bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r); struct list_head evt_list; int num_rmid; unsigned int mon_scale; @@ -474,10 +490,8 @@ struct rdt_resource { int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, struct rdt_domain *d); -int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d); -int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d); +int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); extern struct mutex rdtgroup_mutex; @@ -609,8 +623,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); -bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r); -bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); +void __init thread_throttle_mode_init(void); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 837d7d012b7b..54dffe574e67 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -279,8 +279,7 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr) return; chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width); - m->chunks_bw += chunks; - m->chunks = m->chunks_bw; + m->chunks += chunks; cur_bw = (chunks * r->mon_scale) >> 20; if (m->delta_comp) @@ -478,19 +477,13 @@ void cqm_handle_limbo(struct work_struct *work) mutex_lock(&rdtgroup_mutex); r = &rdt_resources_all[RDT_RESOURCE_L3]; - d = get_domain_from_cpu(cpu, r); - - if (!d) { - pr_warn_once("Failure to get domain for limbo worker\n"); - goto out_unlock; - } + d = container_of(work, struct rdt_domain, cqm_limbo.work); __check_limbo(d, false); if (has_busy_rmid(r, d)) schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); -out_unlock: mutex_unlock(&rdtgroup_mutex); } @@ -520,10 +513,7 @@ void mbm_handle_overflow(struct 
work_struct *work) goto out_unlock; r = &rdt_resources_all[RDT_RESOURCE_L3]; - - d = get_domain_from_cpu(cpu, r); - if (!d) - goto out_unlock; + d = container_of(work, struct rdt_domain, mbm_over.work); list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { mbm_update(r, d, prgrp->mon.rmid); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 3f844f14fc0a..b494187632b2 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -592,6 +592,18 @@ static int __rdtgroup_move_task(struct task_struct *tsk, return ret; } +static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (rdt_alloc_capable && + (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); +} + +static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (rdt_mon_capable && + (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); +} + /** * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group * @r: Resource group @@ -607,8 +619,7 @@ int rdtgroup_tasks_assigned(struct rdtgroup *r) rcu_read_lock(); for_each_process_thread(p, t) { - if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) || - (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { ret = 1; break; } @@ -706,8 +717,7 @@ static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) rcu_read_lock(); for_each_process_thread(p, t) { - if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) || - (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) + if (is_closid_match(t, r) || is_rmid_match(t, r)) seq_printf(s, "%d\n", t->pid); } rcu_read_unlock(); @@ -1017,6 +1027,19 @@ static int max_threshold_occ_show(struct kernfs_open_file *of, return 0; } +static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) + seq_puts(seq, "per-thread\n"); + else + seq_puts(seq, "max\n"); + + return 0; +} + static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -1513,6 +1536,17 @@ static struct rftype res_common_files[] = { .seq_show = rdt_delay_linear_show, .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, }, + /* + * Platform specific which (if any) capabilities are provided by + * thread_throttle_mode. Defer "fflags" initialization to platform + * discovery. 
+ */ + { + .name = "thread_throttle_mode", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_thread_throttle_mode_show, + }, { .name = "max_threshold_occupancy", .mode = 0644, @@ -1583,7 +1617,7 @@ static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) lockdep_assert_held(&rdtgroup_mutex); for (rft = rfts; rft < rfts + len; rft++) { - if ((fflags & rft->fflags) == rft->fflags) { + if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { ret = rdtgroup_add_file(kn, rft); if (ret) goto error; @@ -1600,6 +1634,33 @@ error: return ret; } +static struct rftype *rdtgroup_get_rftype_by_name(const char *name) +{ + struct rftype *rfts, *rft; + int len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + return rft; + } + + return NULL; +} + +void __init thread_throttle_mode_init(void) +{ + struct rftype *rft; + + rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); + if (!rft) + return; + + rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB; +} + /** * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file * @r: The resource group with which the file is associated. @@ -2245,18 +2306,6 @@ static int reset_all_ctrls(struct rdt_resource *r) return 0; } -static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (rdt_alloc_capable && - (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); -} - -static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (rdt_mon_capable && - (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); -} - /* * Move tasks from one to the other group. If @from is NULL, then all tasks * in the systems are moved unconditionally (used for teardown). @@ -3196,7 +3245,7 @@ int __init rdtgroup_init(void) * It may also be ok since that would enable debugging of RDT before * resctrl is mounted. * The reason why the debugfs directory is created here and not in - * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and + * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and * during the debugfs directory creation also &sb->s_type->i_mutex_key * (the lockdep class of inode->i_rwsem). Other filesystem * interactions (eg. 
SyS_getdents) have the lock ordering: diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 62b137c3c97a..866c9a9bcdee 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -35,12 +35,15 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 }, { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 }, { X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 }, + { X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 }, { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 }, { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 }, + { X86_FEATURE_SEV_ES, CPUID_EAX, 3, 0x8000001f, 0 }, + { X86_FEATURE_SME_COHERENT, CPUID_EAX, 10, 0x8000001f, 0 }, { 0, 0, 0, 0, 0 } }; diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 9b6fafa69be9..924571fe5864 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -33,6 +33,7 @@ #include <asm/timer.h> #include <asm/apic.h> #include <asm/vmware.h> +#include <asm/svm.h> #undef pr_fmt #define pr_fmt(fmt) "vmware: " fmt @@ -476,10 +477,49 @@ static bool __init vmware_legacy_x2apic_available(void) (eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0; } +#ifdef CONFIG_AMD_MEM_ENCRYPT +static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb, + struct pt_regs *regs) +{ + /* Copy VMWARE specific Hypercall parameters to the GHCB */ + ghcb_set_rip(ghcb, regs->ip); + ghcb_set_rbx(ghcb, regs->bx); + ghcb_set_rcx(ghcb, regs->cx); + ghcb_set_rdx(ghcb, regs->dx); + ghcb_set_rsi(ghcb, regs->si); + ghcb_set_rdi(ghcb, regs->di); + ghcb_set_rbp(ghcb, regs->bp); +} + +static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) +{ + if (!(ghcb_rbx_is_valid(ghcb) && + ghcb_rcx_is_valid(ghcb) && + ghcb_rdx_is_valid(ghcb) && + ghcb_rsi_is_valid(ghcb) && + ghcb_rdi_is_valid(ghcb) && + ghcb_rbp_is_valid(ghcb))) + return false; + + regs->bx = ghcb->save.rbx; + regs->cx = ghcb->save.rcx; + regs->dx = ghcb->save.rdx; + regs->si = ghcb->save.rsi; + regs->di = ghcb->save.rdi; + regs->bp = ghcb->save.rbp; + + return true; +} +#endif + const __initconst struct hypervisor_x86 x86_hyper_vmware = { - .name = "VMware", - .detect = vmware_platform, - .type = X86_HYPER_VMWARE, - .init.init_platform = vmware_platform_setup, - .init.x2apic_available = vmware_legacy_x2apic_available, + .name = "VMware", + .detect = vmware_platform, + .type = X86_HYPER_VMWARE, + .init.init_platform = vmware_platform_setup, + .init.x2apic_available = vmware_legacy_x2apic_available, +#ifdef CONFIG_AMD_MEM_ENCRYPT + .runtime.sev_es_hcall_prepare = vmware_sev_es_hcall_prepare, + .runtime.sev_es_hcall_finish = vmware_sev_es_hcall_finish, +#endif }; diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index a0e8fc7d85f1..ddffd80f5c52 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -229,8 +229,8 @@ static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, it = &of_ioapic_type[type_index]; ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity); - tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); - tmp.ioapic_pin = fwspec->param[0]; + tmp.devid = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); + tmp.ioapic.pin = fwspec->param[0]; return mp_irqdomain_alloc(domain, virq, 
nr_irqs, &tmp); } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 48ce44576947..25c06b67e7e0 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -29,8 +29,8 @@ static int die_counter; static struct pt_regs exec_summary_regs; -bool in_task_stack(unsigned long *stack, struct task_struct *task, - struct stack_info *info) +bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task, + struct stack_info *info) { unsigned long *begin = task_stack_page(task); unsigned long *end = task_stack_page(task) + THREAD_SIZE; @@ -46,7 +46,8 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, return true; } -bool in_entry_stack(unsigned long *stack, struct stack_info *info) +/* Called from get_stack_info_noinstr - so must be noinstr too */ +bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info) { struct entry_stack *ss = cpu_entry_stack(smp_processor_id()); @@ -115,7 +116,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl) unsigned long prologue = regs->ip - PROLOGUE_SIZE; if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) { - printk("%sCode: Bad RIP value.\n", loglvl); + printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n", + loglvl, prologue); } else { printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes, diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 4a94d38cd141..1dd851397bd9 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -24,11 +24,13 @@ static const char * const exception_stack_names[] = { [ ESTACK_NMI ] = "NMI", [ ESTACK_DB ] = "#DB", [ ESTACK_MCE ] = "#MC", + [ ESTACK_VC ] = "#VC", + [ ESTACK_VC2 ] = "#VC2", }; const char *stack_type_name(enum stack_type type) { - BUILD_BUG_ON(N_EXCEPTION_STACKS != 4); + BUILD_BUG_ON(N_EXCEPTION_STACKS != 6); if (type == STACK_TYPE_IRQ) return "IRQ"; @@ -79,16 +81,18 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = { EPAGERANGE(NMI), EPAGERANGE(DB), EPAGERANGE(MCE), + EPAGERANGE(VC), + EPAGERANGE(VC2), }; -static bool in_exception_stack(unsigned long *stack, struct stack_info *info) +static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info) { unsigned long begin, end, stk = (unsigned long)stack; const struct estack_pages *ep; struct pt_regs *regs; unsigned int k; - BUILD_BUG_ON(N_EXCEPTION_STACKS != 4); + BUILD_BUG_ON(N_EXCEPTION_STACKS != 6); begin = (unsigned long)__this_cpu_read(cea_exception_stacks); /* @@ -122,7 +126,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info) return true; } -static bool in_irq_stack(unsigned long *stack, struct stack_info *info) +static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info) { unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr); unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long)); @@ -147,32 +151,38 @@ static bool in_irq_stack(unsigned long *stack, struct stack_info *info) return true; } -int get_stack_info(unsigned long *stack, struct task_struct *task, - struct stack_info *info, unsigned long *visit_mask) +bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, + struct stack_info *info) { - if (!stack) - goto unknown; - - task = task ? 
: current; - if (in_task_stack(stack, task, info)) - goto recursion_check; + return true; if (task != current) - goto unknown; + return false; if (in_exception_stack(stack, info)) - goto recursion_check; + return true; if (in_irq_stack(stack, info)) - goto recursion_check; + return true; if (in_entry_stack(stack, info)) - goto recursion_check; + return true; + + return false; +} + +int get_stack_info(unsigned long *stack, struct task_struct *task, + struct stack_info *info, unsigned long *visit_mask) +{ + task = task ? : current; - goto unknown; + if (!stack) + goto unknown; + + if (!get_stack_info_noinstr(stack, task, info)) + goto unknown; -recursion_check: /* * Make sure we don't iterate through any given stack more than once. * If it comes up a second time then there's something wrong going on: diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 983cd53ed4c9..22aad412f965 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -305,6 +305,20 @@ static int __init cpcompare(const void *a, const void *b) return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr); } +static bool e820_nomerge(enum e820_type type) +{ + /* + * These types may indicate distinct platform ranges aligned to + * numa node, protection domain, performance domain, or other + * boundaries. Do not merge them. + */ + if (type == E820_TYPE_PRAM) + return true; + if (type == E820_TYPE_SOFT_RESERVED) + return true; + return false; +} + int __init e820__update_table(struct e820_table *table) { struct e820_entry *entries = table->entries; @@ -380,7 +394,7 @@ int __init e820__update_table(struct e820_table *table) } /* Continue building up new map based on this information: */ - if (current_type != last_type || current_type == E820_TYPE_PRAM) { + if (current_type != last_type || e820_nomerge(current_type)) { if (last_type != 0) { new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr; /* Move forward only if the new size was non-zero: */ diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 61ddc3a5e5c2..701f196d7c68 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -5,7 +5,6 @@ #include <asm/fpu/internal.h> #include <asm/tlbflush.h> #include <asm/setup.h> -#include <asm/cmdline.h> #include <linux/sched.h> #include <linux/sched/task.h> @@ -238,51 +237,11 @@ static void __init fpu__init_system_ctx_switch(void) } /* - * We parse fpu parameters early because fpu__init_system() is executed - * before parse_early_param(). 
- */ -static void __init fpu__init_parse_early_param(void) -{ - char arg[32]; - char *argptr = arg; - int bit; - -#ifdef CONFIG_X86_32 - if (cmdline_find_option_bool(boot_command_line, "no387")) -#ifdef CONFIG_MATH_EMULATION - setup_clear_cpu_cap(X86_FEATURE_FPU); -#else - pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n"); -#endif - - if (cmdline_find_option_bool(boot_command_line, "nofxsr")) - setup_clear_cpu_cap(X86_FEATURE_FXSR); -#endif - - if (cmdline_find_option_bool(boot_command_line, "noxsave")) - setup_clear_cpu_cap(X86_FEATURE_XSAVE); - - if (cmdline_find_option_bool(boot_command_line, "noxsaveopt")) - setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); - - if (cmdline_find_option_bool(boot_command_line, "noxsaves")) - setup_clear_cpu_cap(X86_FEATURE_XSAVES); - - if (cmdline_find_option(boot_command_line, "clearcpuid", arg, - sizeof(arg)) && - get_option(&argptr, &bit) && - bit >= 0 && - bit < NCAPINTS * 32) - setup_clear_cpu_cap(bit); -} - -/* * Called on the boot CPU once per system bootup, to set up the initial * FPU state that is later cloned into all processes: */ void __init fpu__init_system(struct cpuinfo_x86 *c) { - fpu__init_parse_early_param(); fpu__init_system_early_generic(c); /* diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 038e19c0019e..5d8047441a0a 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -37,6 +37,7 @@ static const char *xfeature_names[] = "AVX-512 ZMM_Hi256" , "Processor Trace (unused)" , "Protection Keys User registers", + "PASID state", "unknown xstate feature" , }; @@ -51,6 +52,7 @@ static short xsave_cpuid_features[] __initdata = { X86_FEATURE_AVX512F, X86_FEATURE_INTEL_PT, X86_FEATURE_PKU, + X86_FEATURE_ENQCMD, }; /* @@ -318,6 +320,7 @@ static void __init print_xstate_features(void) print_xstate_feature(XFEATURE_MASK_ZMM_Hi256); print_xstate_feature(XFEATURE_MASK_Hi16_ZMM); print_xstate_feature(XFEATURE_MASK_PKRU); + print_xstate_feature(XFEATURE_MASK_PASID); } /* @@ -592,6 +595,7 @@ static void check_xstate_against_struct(int nr) XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state); XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM, struct avx_512_hi16_state); XCHECK_SZ(sz, nr, XFEATURE_PKRU, struct pkru_state); + XCHECK_SZ(sz, nr, XFEATURE_PASID, struct ia32_pasid_state); /* * Make *SURE* to add any feature numbers in below if @@ -601,7 +605,7 @@ static void check_xstate_against_struct(int nr) if ((nr < XFEATURE_YMM) || (nr >= XFEATURE_MAX) || (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) || - ((nr >= XFEATURE_RSRVD_COMP_10) && (nr <= XFEATURE_LBR))) { + ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) { WARN_ONCE(1, "no structure for xstate: %d\n", nr); XSTATE_WARN_ON(1); } @@ -1398,3 +1402,60 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, return 0; } #endif /* CONFIG_PROC_PID_ARCH_STATUS */ + +#ifdef CONFIG_IOMMU_SUPPORT +void update_pasid(void) +{ + u64 pasid_state; + u32 pasid; + + if (!cpu_feature_enabled(X86_FEATURE_ENQCMD)) + return; + + if (!current->mm) + return; + + pasid = READ_ONCE(current->mm->pasid); + /* Set the valid bit in the PASID MSR/state only for valid pasid. */ + pasid_state = pasid == PASID_DISABLED ? + pasid : pasid | MSR_IA32_PASID_VALID; + + /* + * No need to hold fregs_lock() since the task's fpstate won't + * be changed by others (e.g. ptrace) while the task is being + * switched to or is in IPI. + */ + if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { + /* The MSR is active and can be directly updated. 
*/ + wrmsrl(MSR_IA32_PASID, pasid_state); + } else { + struct fpu *fpu = &current->thread.fpu; + struct ia32_pasid_state *ppasid_state; + struct xregs_state *xsave; + + /* + * The CPU's xstate registers are not currently active. Just + * update the PASID state in the memory buffer here. The + * PASID MSR will be loaded when returning to user mode. + */ + xsave = &fpu->state.xsave; + xsave->header.xfeatures |= XFEATURE_MASK_PASID; + ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID); + /* + * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state + * won't be NULL and no need to check its value. + * + * Only update the task's PASID state when it's different + * from the mm's pasid. + */ + if (ppasid_state->pasid != pasid_state) { + /* + * Invalid fpregs so that state restoring will pick up + * the PASID state. + */ + __fpu_invalidate_fpregs_state(fpu); + ppasid_state->pasid = pasid_state; + } + } +} +#endif /* CONFIG_IOMMU_SUPPORT */ diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index cbb71c1b574f..4199f25c0063 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -36,6 +36,11 @@ #include <asm/microcode.h> #include <asm/kasan.h> #include <asm/fixmap.h> +#include <asm/realmode.h> +#include <asm/desc.h> +#include <asm/extable.h> +#include <asm/trapnr.h> +#include <asm/sev-es.h> /* * Manage page tables very early on. @@ -61,6 +66,24 @@ unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4; EXPORT_SYMBOL(vmemmap_base); #endif +/* + * GDT used on the boot CPU before switching to virtual addresses. + */ +static struct desc_struct startup_gdt[GDT_ENTRIES] = { + [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), + [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), + [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), +}; + +/* + * Address needs to be set at runtime because it references the startup_gdt + * while the kernel still uses a direct mapping. + */ +static struct desc_ptr startup_gdt_descr = { + .size = sizeof(startup_gdt), + .address = 0, +}; + #define __head __section(.head.text) static void __head *fixup_pointer(void *ptr, unsigned long physaddr) @@ -297,7 +320,7 @@ static void __init reset_early_page_tables(void) } /* Create a new PMD entry */ -int __init __early_make_pgtable(unsigned long address, pmdval_t pmd) +bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd) { unsigned long physaddr = address - __PAGE_OFFSET; pgdval_t pgd, *pgd_p; @@ -307,7 +330,7 @@ int __init __early_make_pgtable(unsigned long address, pmdval_t pmd) /* Invalid address or early pgt is done ? */ if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt)) - return -1; + return false; again: pgd_p = &early_top_pgt[pgd_index(address)].pgd; @@ -364,10 +387,10 @@ again: } pmd_p[pmd_index(address)] = pmd; - return 0; + return true; } -int __init early_make_pgtable(unsigned long address) +static bool __init early_make_pgtable(unsigned long address) { unsigned long physaddr = address - __PAGE_OFFSET; pmdval_t pmd; @@ -377,6 +400,19 @@ int __init early_make_pgtable(unsigned long address) return __early_make_pgtable(address, pmd); } +void __init do_early_exception(struct pt_regs *regs, int trapnr) +{ + if (trapnr == X86_TRAP_PF && + early_make_pgtable(native_read_cr2())) + return; + + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) && + trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs)) + return; + + early_fixup_exception(regs, trapnr); +} + /* Don't add a printk in there. printk relies on the PDA which is not initialized yet.
*/ static void __init clear_bss(void) @@ -489,3 +525,81 @@ void __init x86_64_start_reservations(char *real_mode_data) start_kernel(); } + +/* + * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is + * used until the idt_table takes over. On the boot CPU this happens in + * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases + * this happens in the functions called from head_64.S. + * + * The idt_table can't be used that early because all the code modifying it is + * in idt.c and can be instrumented by tracing or KASAN, which both don't work + * during early CPU bringup. Also the idt_table has the runtime vectors + * configured which require certain CPU state to be setup already (like TSS), + * which also hasn't happened yet in early CPU bringup. + */ +static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data; + +static struct desc_ptr bringup_idt_descr = { + .size = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1, + .address = 0, /* Set at runtime */ +}; + +static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler) +{ +#ifdef CONFIG_AMD_MEM_ENCRYPT + struct idt_data data; + gate_desc desc; + + init_idt_data(&data, n, handler); + idt_init_desc(&desc, &data); + native_write_idt_entry(idt, n, &desc); +#endif +} + +/* This runs while still in the direct mapping */ +static void startup_64_load_idt(unsigned long physbase) +{ + struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase); + gate_desc *idt = fixup_pointer(bringup_idt_table, physbase); + + + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) { + void *handler; + + /* VMM Communication Exception */ + handler = fixup_pointer(vc_no_ghcb, physbase); + set_bringup_idt_handler(idt, X86_TRAP_VC, handler); + } + + desc->address = (unsigned long)idt; + native_load_idt(desc); +} + +/* This is used when running on kernel addresses */ +void early_setup_idt(void) +{ + /* VMM Communication Exception */ + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) + set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb); + + bringup_idt_descr.address = (unsigned long)bringup_idt_table; + native_load_idt(&bringup_idt_descr); +} + +/* + * Setup boot CPU state needed before kernel switches to virtual addresses. + */ +void __head startup_64_setup_env(unsigned long physbase) +{ + /* Load GDT */ + startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase); + native_load_gdt(&startup_gdt_descr); + + /* New GDT is live - reload data segment registers */ + asm volatile("movl %%eax, %%ds\n" + "movl %%eax, %%ss\n" + "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); + + startup_64_load_idt(physbase); +} diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 16da4ac01597..7eb2a1c87969 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -73,6 +73,20 @@ SYM_CODE_START_NOALIGN(startup_64) /* Set up the stack for verify_cpu(), similar to initial_stack below */ leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp + leaq _text(%rip), %rdi + pushq %rsi + call startup_64_setup_env + popq %rsi + + /* Now switch to __KERNEL_CS so IRET works reliably */ + pushq $__KERNEL_CS + leaq .Lon_kernel_cs(%rip), %rax + pushq %rax + lretq + +.Lon_kernel_cs: + UNWIND_HINT_EMPTY + /* Sanitize CPU configuration */ call verify_cpu @@ -112,6 +126,18 @@ SYM_CODE_START(secondary_startup_64) call verify_cpu /* + * The secondary_startup_64_no_verify entry point is only used by + * SEV-ES guests. 
In those guests the call to verify_cpu() would cause + * #VC exceptions which can not be handled at this stage of secondary + * CPU bringup. + * + * All non SEV-ES systems, especially Intel systems, need to execute + * verify_cpu() above to make sure NX is enabled. + */ +SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) + UNWIND_HINT_EMPTY + + /* * Retrieve the modifier (SME encryption mask if SME is active) to be * added to the initial pgdir entry that will be programmed into CR3. */ @@ -144,33 +170,6 @@ SYM_CODE_START(secondary_startup_64) 1: UNWIND_HINT_EMPTY - /* Check if nx is implemented */ - movl $0x80000001, %eax - cpuid - movl %edx,%edi - - /* Setup EFER (Extended Feature Enable Register) */ - movl $MSR_EFER, %ecx - rdmsr - btsl $_EFER_SCE, %eax /* Enable System Call */ - btl $20,%edi /* No Execute supported? */ - jnc 1f - btsl $_EFER_NX, %eax - btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) -1: wrmsr /* Make changes effective */ - - /* Setup cr0 */ - movl $CR0_STATE, %eax - /* Make changes effective */ - movq %rax, %cr0 - - /* Setup a boot time stack */ - movq initial_stack(%rip), %rsp - - /* zero EFLAGS after setting rsp */ - pushq $0 - popfq - /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace @@ -205,6 +204,41 @@ SYM_CODE_START(secondary_startup_64) movl initial_gs+4(%rip),%edx wrmsr + /* + * Setup a boot time stack - Any secondary CPU will have lost its stack + * by now because the cr3-switch above unmaps the real-mode stack + */ + movq initial_stack(%rip), %rsp + + /* Setup and Load IDT */ + pushq %rsi + call early_setup_idt + popq %rsi + + /* Check if nx is implemented */ + movl $0x80000001, %eax + cpuid + movl %edx,%edi + + /* Setup EFER (Extended Feature Enable Register) */ + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_SCE, %eax /* Enable System Call */ + btl $20,%edi /* No Execute supported? */ + jnc 1f + btsl $_EFER_NX, %eax + btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) +1: wrmsr /* Make changes effective */ + + /* Setup cr0 */ + movl $CR0_STATE, %eax + /* Make changes effective */ + movq %rax, %cr0 + + /* zero EFLAGS after setting rsp */ + pushq $0 + popfq + /* rsi is pointer to real mode structure with interesting info. pass it to C */ movq %rsi, %rdi @@ -259,11 +293,47 @@ SYM_CODE_START(start_cpu0) SYM_CODE_END(start_cpu0) #endif +#ifdef CONFIG_AMD_MEM_ENCRYPT +/* + * VC Exception handler used during early boot when running on kernel + * addresses, but before the switch to the idt_table can be made. + * The early_idt_handler_array can't be used here because it calls into a lot + * of __init code and this handler is also used during CPU offlining/onlining. + * Therefore this handler ends up in the .text section so that it stays around + * when .init.text is freed. 
+ */ +SYM_CODE_START_NOALIGN(vc_boot_ghcb) + UNWIND_HINT_IRET_REGS offset=8 + + /* Build pt_regs */ + PUSH_AND_CLEAR_REGS + + /* Call C handler */ + movq %rsp, %rdi + movq ORIG_RAX(%rsp), %rsi + movq initial_vc_handler(%rip), %rax + ANNOTATE_RETPOLINE_SAFE + call *%rax + + /* Unwind pt_regs */ + POP_REGS + + /* Remove Error Code */ + addq $8, %rsp + + /* Pure iret required here - don't use INTERRUPT_RETURN */ + iretq +SYM_CODE_END(vc_boot_ghcb) +#endif + /* Both SMP bootup and ACPI suspend change these variables */ __REFDATA .balign 8 SYM_DATA(initial_code, .quad x86_64_start_kernel) SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data)) +#ifdef CONFIG_AMD_MEM_ENCRYPT +SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) +#endif /* * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder @@ -319,22 +389,43 @@ SYM_CODE_START_LOCAL(early_idt_handler_common) pushq %r15 /* pt_regs->r15 */ UNWIND_HINT_REGS - cmpq $14,%rsi /* Page fault? */ - jnz 10f - GET_CR2_INTO(%rdi) /* can clobber %rax if pv */ - call early_make_pgtable - andl %eax,%eax - jz 20f /* All good */ - -10: movq %rsp,%rdi /* RDI = pt_regs; RSI is already trapnr */ - call early_fixup_exception + call do_early_exception -20: decl early_recursion_flag(%rip) jmp restore_regs_and_return_to_kernel SYM_CODE_END(early_idt_handler_common) +#ifdef CONFIG_AMD_MEM_ENCRYPT +/* + * VC Exception handler used during very early boot. The + * early_idt_handler_array can't be used because it returns via the + * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early. + * + * This handler will end up in the .init.text section and not be + * available to boot secondary CPUs. + */ +SYM_CODE_START_NOALIGN(vc_no_ghcb) + UNWIND_HINT_IRET_REGS offset=8 + + /* Build pt_regs */ + PUSH_AND_CLEAR_REGS + + /* Call C handler */ + movq %rsp, %rdi + movq ORIG_RAX(%rsp), %rsi + call do_vc_no_ghcb + + /* Unwind pt_regs */ + POP_REGS + + /* Remove Error Code */ + addq $8, %rsp + + /* Pure iret required here - don't use INTERRUPT_RETURN */ + iretq +SYM_CODE_END(vc_no_ghcb) +#endif #define SYM_DATA_START_PAGE_ALIGNED(name) \ SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE) diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index b98ff620ba77..03aa33b58165 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -442,42 +442,6 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, } /* - * Dump the debug register contents to the user. - * We can't dump our per cpu values because it - * may contain cpu wide breakpoint, something that - * doesn't belong to the current task. 
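Also for reference: early_idt_handler_common now funnels every early exception through do_early_exception(), whose body is not in this hunk. Judging from the assembly it replaces (page fault handled via early_make_pgtable(), everything else via early_fixup_exception()), it is assumed to reduce to roughly the following, with native_read_cr2() standing in for the old GET_CR2_INTO and the early_recursion_flag bookkeeping omitted:

void do_early_exception(struct pt_regs *regs, int trapnr)
{
	if (trapnr == X86_TRAP_PF &&
	    !early_make_pgtable(native_read_cr2()))
		return;			/* page-table entry built, just retry */

	early_fixup_exception(regs, trapnr);
}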
- * - * TODO: include non-ptrace user breakpoints (perf) - */ -void aout_dump_debugregs(struct user *dump) -{ - int i; - int dr7 = 0; - struct perf_event *bp; - struct arch_hw_breakpoint *info; - struct thread_struct *thread = ¤t->thread; - - for (i = 0; i < HBP_NUM; i++) { - bp = thread->ptrace_bps[i]; - - if (bp && !bp->attr.disabled) { - dump->u_debugreg[i] = bp->attr.bp_addr; - info = counter_arch_bp(bp); - dr7 |= encode_dr7(i, info->len, info->type); - } else { - dump->u_debugreg[i] = 0; - } - } - - dump->u_debugreg[4] = 0; - dump->u_debugreg[5] = 0; - dump->u_debugreg[6] = current->thread.debugreg6; - - dump->u_debugreg[7] = dr7; -} -EXPORT_SYMBOL_GPL(aout_dump_debugregs); - -/* * Release the user breakpoints used by ptrace */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) @@ -490,7 +454,7 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) t->ptrace_bps[i] = NULL; } - t->debugreg6 = 0; + t->virtual_dr6 = 0; t->ptrace_dr7 = 0; } @@ -500,7 +464,7 @@ void hw_breakpoint_restore(void) set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1); set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2); set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3); - set_debugreg(current->thread.debugreg6, 6); + set_debugreg(DR6_RESERVED, 6); set_debugreg(__this_cpu_read(cpu_dr7), 7); } EXPORT_SYMBOL_GPL(hw_breakpoint_restore); @@ -523,10 +487,10 @@ EXPORT_SYMBOL_GPL(hw_breakpoint_restore); */ static int hw_breakpoint_handler(struct die_args *args) { - int i, cpu, rc = NOTIFY_STOP; + int i, rc = NOTIFY_STOP; struct perf_event *bp; - unsigned long dr6; unsigned long *dr6_p; + unsigned long dr6; /* The DR6 value is pointed by args->err */ dr6_p = (unsigned long *)ERR_PTR(args->err); @@ -540,14 +504,6 @@ static int hw_breakpoint_handler(struct die_args *args) if ((dr6 & DR_TRAP_BITS) == 0) return NOTIFY_DONE; - /* - * Assert that local interrupts are disabled - * Reset the DRn bits in the virtualized register value. - * The ptrace trigger routine will add in whatever is needed. 
- */ - current->thread.debugreg6 &= ~DR_TRAP_BITS; - cpu = get_cpu(); - /* Handle all the breakpoints that were triggered */ for (i = 0; i < HBP_NUM; ++i) { if (likely(!(dr6 & (DR_TRAP0 << i)))) @@ -561,7 +517,7 @@ static int hw_breakpoint_handler(struct die_args *args) */ rcu_read_lock(); - bp = per_cpu(bp_per_reg[i], cpu); + bp = this_cpu_read(bp_per_reg[i]); /* * Reset the 'i'th TRAP bit in dr6 to denote completion of * exception handling @@ -592,12 +548,10 @@ static int hw_breakpoint_handler(struct die_args *args) * breakpoints (to generate signals) and b) when the system has * taken exception due to multiple causes */ - if ((current->thread.debugreg6 & DR_TRAP_BITS) || + if ((current->thread.virtual_dr6 & DR_TRAP_BITS) || (dr6 & (~DR_TRAP_BITS))) rc = NOTIFY_DONE; - put_cpu(); - return rc; } diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 7ecf9babf0cb..ee1a283f8e96 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -11,13 +11,6 @@ #include <asm/desc.h> #include <asm/hw_irq.h> -struct idt_data { - unsigned int vector; - unsigned int segment; - struct idt_bits bits; - const void *addr; -}; - #define DPL0 0x0 #define DPL3 0x3 @@ -149,9 +142,6 @@ static const __initconst struct idt_data apic_idts[] = { # ifdef CONFIG_IRQ_WORK INTG(IRQ_WORK_VECTOR, asm_sysvec_irq_work), # endif -# ifdef CONFIG_X86_UV - INTG(UV_BAU_MESSAGE, asm_sysvec_uv_bau_message), -# endif INTG(SPURIOUS_APIC_VECTOR, asm_sysvec_spurious_apic_interrupt), INTG(ERROR_APIC_VECTOR, asm_sysvec_error_interrupt), #endif @@ -178,20 +168,6 @@ bool idt_is_f00f_address(unsigned long address) } #endif -static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) -{ - unsigned long addr = (unsigned long) d->addr; - - gate->offset_low = (u16) addr; - gate->segment = (u16) d->segment; - gate->bits = d->bits; - gate->offset_middle = (u16) (addr >> 16); -#ifdef CONFIG_X86_64 - gate->offset_high = (u32) (addr >> 32); - gate->reserved = 0; -#endif -} - static __init void idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys) { @@ -209,14 +185,7 @@ static __init void set_intr_gate(unsigned int n, const void *addr) { struct idt_data data; - BUG_ON(n > 0xFF); - - memset(&data, 0, sizeof(data)); - data.vector = n; - data.addr = addr; - data.segment = __KERNEL_CS; - data.bits.type = GATE_INTERRUPT; - data.bits.p = 1; + init_idt_data(&data, n, addr); idt_setup_from_table(idt_table, &data, 1, false); } @@ -257,11 +226,14 @@ static const __initconst struct idt_data early_pf_idts[] = { * cpu_init() when the TSS has been initialized. 
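A note on init_idt_data(): set_intr_gate() above and set_bringup_idt_handler() in head64.c now share it, and its body is not shown in this hunk. Judging from the open-coded lines it replaces in set_intr_gate(), it is assumed to amount to:

static inline void init_idt_data(struct idt_data *data, unsigned int n,
				 const void *addr)
{
	BUG_ON(n > 0xFF);

	memset(data, 0, sizeof(*data));
	data->vector	= n;
	data->addr	= addr;
	data->segment	= __KERNEL_CS;
	data->bits.type	= GATE_INTERRUPT;
	data->bits.p	= 1;
}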
*/ static const __initconst struct idt_data ist_idts[] = { - ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB), - ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI), - ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF), + ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB), + ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI), + ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF), #ifdef CONFIG_X86_MCE - ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE), + ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE), +#endif +#ifdef CONFIG_AMD_MEM_ENCRYPT + ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC), #endif }; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index c2f02f308ecf..ff7878df96b4 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -629,9 +629,10 @@ static void kgdb_hw_overflow_handler(struct perf_event *event, struct task_struct *tsk = current; int i; - for (i = 0; i < 4; i++) + for (i = 0; i < 4; i++) { if (breakinfo[i].enabled) - tsk->thread.debugreg6 |= (DR_TRAP0 << i); + tsk->thread.virtual_dr6 |= (DR_TRAP0 << i); + } } void kgdb_arch_late(void) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index fdadc37d72af..547c7abb39f5 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -38,9 +38,9 @@ #include <linux/kdebug.h> #include <linux/kallsyms.h> #include <linux/ftrace.h> -#include <linux/frame.h> #include <linux/kasan.h> #include <linux/moduleloader.h> +#include <linux/objtool.h> #include <linux/vmalloc.h> #include <linux/pgtable.h> @@ -767,124 +767,21 @@ asm( NOKPROBE_SYMBOL(kretprobe_trampoline); STACK_FRAME_NON_STANDARD(kretprobe_trampoline); + /* * Called from kretprobe_trampoline */ __used __visible void *trampoline_handler(struct pt_regs *regs) { - struct kretprobe_instance *ri = NULL; - struct hlist_head *head, empty_rp; - struct hlist_node *tmp; - unsigned long flags, orig_ret_address = 0; - unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; - kprobe_opcode_t *correct_ret_addr = NULL; - void *frame_pointer; - bool skipped = false; - - /* - * Set a dummy kprobe for avoiding kretprobe recursion. - * Since kretprobe never run in kprobe handler, kprobe must not - * be running at this point. - */ - kprobe_busy_begin(); - - INIT_HLIST_HEAD(&empty_rp); - kretprobe_hash_lock(current, &head, &flags); /* fixup registers */ regs->cs = __KERNEL_CS; #ifdef CONFIG_X86_32 - regs->cs |= get_kernel_rpl(); regs->gs = 0; #endif - /* We use pt_regs->sp for return address holder. */ - frame_pointer = ®s->sp; - regs->ip = trampoline_address; + regs->ip = (unsigned long)&kretprobe_trampoline; regs->orig_ax = ~0UL; - /* - * It is possible to have multiple instances associated with a given - * task either because multiple functions in the call path have - * return probes installed on them, and/or more than one - * return probe was registered for a target function. - * - * We can handle this because: - * - instances are always pushed into the head of the list - * - when multiple return probes are registered for the same - * function, the (chronologically) first instance's ret_addr - * will be the real return address, and all the rest will - * point to kretprobe_trampoline. - */ - hlist_for_each_entry(ri, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - /* - * Return probes must be pushed on this hash list correct - * order (same as return order) so that it can be popped - * correctly. 
However, if we find it is pushed it incorrect - * order, this means we find a function which should not be - * probed, because the wrong order entry is pushed on the - * path of processing other kretprobe itself. - */ - if (ri->fp != frame_pointer) { - if (!skipped) - pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n"); - skipped = true; - continue; - } - - orig_ret_address = (unsigned long)ri->ret_addr; - if (skipped) - pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n", - ri->rp->kp.addr); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_assert(ri, orig_ret_address, trampoline_address); - - correct_ret_addr = ri->ret_addr; - hlist_for_each_entry_safe(ri, tmp, head, hlist) { - if (ri->task != current) - /* another task is sharing our hash bucket */ - continue; - if (ri->fp != frame_pointer) - continue; - - orig_ret_address = (unsigned long)ri->ret_addr; - if (ri->rp && ri->rp->handler) { - __this_cpu_write(current_kprobe, &ri->rp->kp); - ri->ret_addr = correct_ret_addr; - ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, &kprobe_busy); - } - - recycle_rp_inst(ri, &empty_rp); - - if (orig_ret_address != trampoline_address) - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - break; - } - - kretprobe_hash_unlock(current, &flags); - - kprobe_busy_end(); - - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { - hlist_del(&ri->hlist); - kfree(ri); - } - return (void *)orig_ret_address; + return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, ®s->sp); } NOKPROBE_SYMBOL(trampoline_handler); diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 40f380461e6d..041f0b50bc27 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -16,8 +16,9 @@ #include <linux/kdebug.h> #include <linux/kallsyms.h> #include <linux/ftrace.h> -#include <linux/frame.h> +#include <linux/objtool.h> #include <linux/pgtable.h> +#include <linux/static_call.h> #include <asm/text-patching.h> #include <asm/cacheflush.h> @@ -181,7 +182,6 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) /* Save skipped registers */ regs->cs = __KERNEL_CS; #ifdef CONFIG_X86_32 - regs->cs |= get_kernel_rpl(); regs->gs = 0; #endif regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE; @@ -210,7 +210,8 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) /* Check whether the address range is reserved */ if (ftrace_text_reserved(src, src + len - 1) || alternatives_text_reserved(src, src + len - 1) || - jump_label_text_reserved(src, src + len - 1)) + jump_label_text_reserved(src, src + len - 1) || + static_call_text_reserved(src, src + len - 1)) return -EBUSY; return len; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 9663ba31347c..1c0f2560a41c 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -36,6 +36,8 @@ #include <asm/hypervisor.h> #include <asm/tlb.h> #include <asm/cpuidle_haltpoll.h> +#include <asm/ptrace.h> +#include <asm/svm.h> DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled); @@ -744,13 +746,34 @@ static void __init kvm_init_platform(void) x86_platform.apic_post_init = kvm_apic_init; } +#if defined(CONFIG_AMD_MEM_ENCRYPT) +static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs 
*regs) +{ + /* RAX and CPL are already in the GHCB */ + ghcb_set_rbx(ghcb, regs->bx); + ghcb_set_rcx(ghcb, regs->cx); + ghcb_set_rdx(ghcb, regs->dx); + ghcb_set_rsi(ghcb, regs->si); +} + +static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) +{ + /* No checking of the return state needed */ + return true; +} +#endif + const __initconst struct hypervisor_x86 x86_hyper_kvm = { - .name = "KVM", - .detect = kvm_detect, - .type = X86_HYPER_KVM, - .init.guest_late_init = kvm_guest_init, - .init.x2apic_available = kvm_para_available, - .init.init_platform = kvm_init_platform, + .name = "KVM", + .detect = kvm_detect, + .type = X86_HYPER_KVM, + .init.guest_late_init = kvm_guest_init, + .init.x2apic_available = kvm_para_available, + .init.init_platform = kvm_init_platform, +#if defined(CONFIG_AMD_MEM_ENCRYPT) + .runtime.sev_es_hcall_prepare = kvm_sev_es_hcall_prepare, + .runtime.sev_es_hcall_finish = kvm_sev_es_hcall_finish, +#endif }; static __init int activate_jump_labels(void) diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index baa21090c9be..8f06449aab27 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -24,7 +24,6 @@ #include <asm/irqdomain.h> #include <asm/mtrr.h> #include <asm/mpspec.h> -#include <asm/io_apic.h> #include <asm/proto.h> #include <asm/bios_ebda.h> #include <asm/e820/api.h> @@ -46,11 +45,6 @@ static int __init mpf_checksum(unsigned char *mp, int len) return sum & 0xFF; } -int __init default_mpc_apic_id(struct mpc_cpu *m) -{ - return m->apicid; -} - static void __init MP_processor_info(struct mpc_cpu *m) { int apicid; @@ -61,7 +55,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) return; } - apicid = x86_init.mpparse.mpc_apic_id(m); + apicid = m->apicid; if (m->cpuflag & CPU_BOOTPROCESSOR) { bootup_cpu = " (Bootup-CPU)"; @@ -73,7 +67,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) } #ifdef CONFIG_X86_IO_APIC -void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str) +static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str) { memcpy(str, m->bustype, 6); str[6] = 0; @@ -84,7 +78,7 @@ static void __init MP_bus_info(struct mpc_bus *m) { char str[7]; - x86_init.mpparse.mpc_oem_bus_info(m, str); + mpc_oem_bus_info(m, str); #if MAX_MP_BUSSES < 256 if (m->busid >= MAX_MP_BUSSES) { @@ -100,9 +94,6 @@ static void __init MP_bus_info(struct mpc_bus *m) mp_bus_id_to_type[m->busid] = MP_BUS_ISA; #endif } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { - if (x86_init.mpparse.mpc_oem_pci_bus) - x86_init.mpparse.mpc_oem_pci_bus(m); - clear_bit(m->busid, mp_bus_not_pci); #ifdef CONFIG_EISA mp_bus_id_to_type[m->busid] = MP_BUS_PCI; @@ -198,8 +189,6 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt) 1, mpc, mpc->length, 1); } -void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { } - static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) { char str[16]; @@ -218,14 +207,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) if (early) return 1; - if (mpc->oemptr) - x86_init.mpparse.smp_read_mpc_oem(mpc); - - /* - * Now process the configuration blocks. - */ - x86_init.mpparse.mpc_record(0); - + /* Now process the configuration blocks. 
*/ while (count < mpc->length) { switch (*mpt) { case MP_PROCESSOR: @@ -256,7 +238,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) count = mpc->length; break; } - x86_init.mpparse.mpc_record(1); } if (!num_processors) diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 49dcfb85e773..c0d409810658 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -80,18 +80,30 @@ static ssize_t msr_read(struct file *file, char __user *buf, static int filter_write(u32 reg) { + /* + * MSRs writes usually happen all at once, and can easily saturate kmsg. + * Only allow one message every 30 seconds. + * + * It's possible to be smarter here and do it (for example) per-MSR, but + * it would certainly be more complex, and this is enough at least to + * avoid saturating the ring buffer. + */ + static DEFINE_RATELIMIT_STATE(fw_rs, 30 * HZ, 1); + switch (allow_writes) { case MSR_WRITES_ON: return 0; case MSR_WRITES_OFF: return -EPERM; default: break; } + if (!__ratelimit(&fw_rs)) + return 0; + if (reg == MSR_IA32_ENERGY_PERF_BIAS) return 0; - pr_err_ratelimited("Write to unrecognized MSR 0x%x by %s\n" - "Please report to x86@kernel.org\n", - reg, current->comm); + pr_err("Write to unrecognized MSR 0x%x by %s (pid: %d). Please report to x86@kernel.org.\n", + reg, current->comm, current->pid); return 0; } diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 4fc9954a9560..4bc77aaf1303 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -33,6 +33,7 @@ #include <asm/reboot.h> #include <asm/cache.h> #include <asm/nospec-branch.h> +#include <asm/sev-es.h> #define CREATE_TRACE_POINTS #include <trace/events/nmi.h> @@ -102,7 +103,6 @@ fs_initcall(nmi_warning_debugfs); static void nmi_check_duration(struct nmiaction *action, u64 duration) { - u64 whole_msecs = READ_ONCE(action->max_duration); int remainder_ns, decimal_msecs; if (duration < nmi_longest_ns || duration < action->max_duration) @@ -110,12 +110,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) action->max_duration = duration; - remainder_ns = do_div(whole_msecs, (1000 * 1000)); + remainder_ns = do_div(duration, (1000 * 1000)); decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", - action->handler, whole_msecs, decimal_msecs); + action->handler, duration, decimal_msecs); } static int nmi_handle(unsigned int type, struct pt_regs *regs) @@ -477,6 +477,12 @@ DEFINE_IDTENTRY_RAW(exc_nmi) { bool irq_state; + /* + * Re-enable NMIs right here when running as an SEV-ES guest. This might + * cause nested NMIs, but those can be handled safely. + */ + sev_es_nmi_complete(); + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) return; @@ -488,6 +494,12 @@ DEFINE_IDTENTRY_RAW(exc_nmi) this_cpu_write(nmi_cr2, read_cr2()); nmi_restart: + /* + * Needs to happen before DR7 is accessed, because the hypervisor can + * intercept DR7 reads/writes, turning those into #VC exceptions. 
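On the msr.c hunk above: the dedicated DEFINE_RATELIMIT_STATE() replaces pr_err_ratelimited(), and when the limit trips the write is still permitted (filter_write() returns 0), only the log line is suppressed. The same pattern is reusable for any noisy warning; a minimal sketch (noisy_rs and warn_throttled are made-up names):

static DEFINE_RATELIMIT_STATE(noisy_rs, 30 * HZ, 1);	/* one message per 30s */

static void warn_throttled(void)
{
	if (__ratelimit(&noisy_rs))
		pr_warn("this would otherwise flood the log\n");
}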
+ */ + sev_es_ist_enter(regs); + this_cpu_write(nmi_dr7, local_db_save()); irq_state = idtentry_enter_nmi(regs); @@ -501,6 +513,8 @@ nmi_restart: local_db_restore(this_cpu_read(nmi_dr7)); + sev_es_ist_exit(); + if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) write_cr2(this_cpu_read(nmi_cr2)); if (this_cpu_dec_return(nmi_state)) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index de2138ba38e5..6c3407ba6ee9 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -263,13 +263,8 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) struct pv_info pv_info = { .name = "bare hardware", #ifdef CONFIG_PARAVIRT_XXL - .kernel_rpl = 0, - .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ - -#ifdef CONFIG_X86_64 .extra_user_64bit_cs = __USER_CS, #endif -#endif }; /* 64-bit pagetable entries */ @@ -305,9 +300,7 @@ struct paravirt_patch_template pv_ops = { .cpu.load_idt = native_load_idt, .cpu.store_tr = native_store_tr, .cpu.load_tls = native_load_tls, -#ifdef CONFIG_X86_64 .cpu.load_gs_index = native_load_gs_index, -#endif .cpu.write_ldt_entry = native_write_ldt_entry, .cpu.write_gdt_entry = native_write_gdt_entry, .cpu.write_idt_entry = native_write_idt_entry, @@ -317,9 +310,7 @@ struct paravirt_patch_template pv_ops = { .cpu.load_sp0 = native_load_sp0, -#ifdef CONFIG_X86_64 .cpu.usergs_sysret64 = native_usergs_sysret64, -#endif .cpu.iret = native_iret, .cpu.swapgs = native_swapgs, @@ -369,24 +360,16 @@ struct paravirt_patch_template pv_ops = { .mmu.release_p4d = paravirt_nop, .mmu.set_pte = native_set_pte, - .mmu.set_pte_at = native_set_pte_at, .mmu.set_pmd = native_set_pmd, .mmu.ptep_modify_prot_start = __ptep_modify_prot_start, .mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit, -#if CONFIG_PGTABLE_LEVELS >= 3 -#ifdef CONFIG_X86_PAE - .mmu.set_pte_atomic = native_set_pte_atomic, - .mmu.pte_clear = native_pte_clear, - .mmu.pmd_clear = native_pmd_clear, -#endif .mmu.set_pud = native_set_pud, .mmu.pmd_val = PTE_IDENT, .mmu.make_pmd = PTE_IDENT, -#if CONFIG_PGTABLE_LEVELS >= 4 .mmu.pud_val = PTE_IDENT, .mmu.make_pud = PTE_IDENT, @@ -398,8 +381,6 @@ struct paravirt_patch_template pv_ops = { .mmu.set_pgd = native_set_pgd, #endif /* CONFIG_PGTABLE_LEVELS >= 5 */ -#endif /* CONFIG_PGTABLE_LEVELS >= 4 */ -#endif /* CONFIG_PGTABLE_LEVELS >= 3 */ .mmu.pte_val = PTE_IDENT, .mmu.pgd_val = PTE_IDENT, diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c index 3eff63c090d2..ace6e334cb39 100644 --- a/arch/x86/kernel/paravirt_patch.c +++ b/arch/x86/kernel/paravirt_patch.c @@ -26,14 +26,10 @@ struct patch_xxl { const unsigned char mmu_read_cr3[3]; const unsigned char mmu_write_cr3[3]; const unsigned char irq_restore_fl[2]; -# ifdef CONFIG_X86_64 const unsigned char cpu_wbinvd[2]; const unsigned char cpu_usergs_sysret64[6]; const unsigned char cpu_swapgs[3]; const unsigned char mov64[3]; -# else - const unsigned char cpu_iret[1]; -# endif }; static const struct patch_xxl patch_data_xxl = { @@ -42,7 +38,6 @@ static const struct patch_xxl patch_data_xxl = { .irq_save_fl = { 0x9c, 0x58 }, // pushf; pop %[re]ax .mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax .mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax -# ifdef CONFIG_X86_64 .mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3 .irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq .cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd @@ -50,19 +45,11 @@ static const struct patch_xxl patch_data_xxl = { 0x48, 0x0f, 0x07 }, // swapgs; sysretq .cpu_swapgs = { 0x0f, 
0x01, 0xf8 }, // swapgs .mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax -# else - .mmu_write_cr3 = { 0x0f, 0x22, 0xd8 }, // mov %eax, %cr3 - .irq_restore_fl = { 0x50, 0x9d }, // push %eax; popf - .cpu_iret = { 0xcf }, // iret -# endif }; unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len) { -#ifdef CONFIG_X86_64 return PATCH(xxl, mov64, insn_buff, len); -#endif - return 0; } # endif /* CONFIG_PARAVIRT_XXL */ @@ -98,13 +85,9 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr, PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len); PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len); -# ifdef CONFIG_X86_64 PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len); PATCH_CASE(cpu, swapgs, xxl, insn_buff, len); PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len); -# else - PATCH_CASE(cpu, iret, xxl, insn_buff, len); -# endif #endif #ifdef CONFIG_PARAVIRT_SPINLOCKS diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 5dcedad21dff..de234e7a8962 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/dma-map-ops.h> #include <linux/dma-direct.h> -#include <linux/dma-debug.h> #include <linux/iommu.h> #include <linux/dmar.h> #include <linux/export.h> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 9afefe325acb..df342bedea88 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -407,7 +407,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void) { unsigned long gsbase; - if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); @@ -422,7 +422,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void) void x86_gsbase_write_cpu_inactive(unsigned long gsbase) { - if (static_cpu_has(X86_FEATURE_FSGSBASE)) { + if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); @@ -439,7 +439,7 @@ unsigned long x86_fsbase_read_task(struct task_struct *task) if (task == current) fsbase = x86_fsbase_read_cpu(); - else if (static_cpu_has(X86_FEATURE_FSGSBASE) || + else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.fsindex == 0)) fsbase = task->thread.fsbase; else @@ -454,7 +454,7 @@ unsigned long x86_gsbase_read_task(struct task_struct *task) if (task == current) gsbase = x86_gsbase_read_cpu_inactive(); - else if (static_cpu_has(X86_FEATURE_FSGSBASE) || + else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.gsindex == 0)) gsbase = task->thread.gsbase; else diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index e7537c5440bb..bedca011459c 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -465,7 +465,7 @@ static void ptrace_triggered(struct perf_event *bp, break; } - thread->debugreg6 |= (DR_TRAP0 << i); + thread->virtual_dr6 |= (DR_TRAP0 << i); } /* @@ -601,7 +601,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) if (bp) val = bp->hw.info.address; } else if (n == 6) { - val = thread->debugreg6; + val = thread->virtual_dr6 ^ DR6_RESERVED; /* Flip back to arch polarity */ } else if (n == 7) { val = thread->ptrace_dr7; } @@ -657,7 +657,7 @@ static int ptrace_set_debugreg(struct task_struct *tsk, int n, if (n < HBP_NUM) { rc = ptrace_set_breakpoint_addr(tsk, n, val); } else if (n == 6) { - thread->debugreg6 = val; + thread->virtual_dr6 = val ^ DR6_RESERVED; /* Flip to positive polarity */ rc = 0; } else if (n == 7) { rc = ptrace_write_dr7(tsk, val); diff --git 
a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 1b10717c9321..6d0df6a58873 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -8,6 +8,7 @@ #include <asm/hpet.h> #include <asm/setup.h> +#include <asm/mce.h> #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) @@ -624,10 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, amd_disable_seq_and_redirect_scrub); -#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) -#include <linux/jump_label.h> -#include <asm/string_64.h> - /* Ivy Bridge, Haswell, Broadwell */ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev) { @@ -636,7 +633,7 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev) pci_read_config_dword(pdev, 0x84, &capid0); if (capid0 & 0x10) - static_branch_inc(&mcsafe_key); + enable_copy_mc_fragile(); } /* Skylake */ @@ -653,7 +650,7 @@ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev) * enabled, so memory machine check recovery is also enabled. */ if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0)) - static_branch_inc(&mcsafe_key); + enable_copy_mc_fragile(); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap); @@ -661,7 +658,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap); #endif -#endif bool x86_apple_machine; EXPORT_SYMBOL(x86_apple_machine); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index a515e2d230b7..db115943e8bd 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -10,7 +10,7 @@ #include <linux/sched.h> #include <linux/tboot.h> #include <linux/delay.h> -#include <linux/frame.h> +#include <linux/objtool.h> #include <linux/pgtable.h> #include <acpi/reboot.h> #include <asm/io.h> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 3511736fbc74..84f581c91db4 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -7,6 +7,7 @@ */ #include <linux/console.h> #include <linux/crash_dump.h> +#include <linux/dma-map-ops.h> #include <linux/dmi.h> #include <linux/efi.h> #include <linux/init_ohci1394_dma.h> @@ -19,6 +20,8 @@ #include <linux/hugetlb.h> #include <linux/tboot.h> #include <linux/usb/xhci-dbgp.h> +#include <linux/static_call.h> +#include <linux/swiotlb.h> #include <uapi/linux/mount.h> @@ -263,16 +266,12 @@ static void __init relocate_initrd(void) u64 area_size = PAGE_ALIGN(ramdisk_size); /* We need to move the initrd down into directly mapped mem */ - relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), - area_size, PAGE_SIZE); - + relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, + PFN_PHYS(max_pfn_mapped)); if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", ramdisk_size); - /* Note: this includes all the mem currently occupied by - the initrd, we rely on that fact to keep the data intact. 
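A general note on the setup.c changes: relocate_initrd() here, and the crashkernel reservations further down, replace the two-step memblock_find_in_range() plus memblock_reserve() sequence with a single memblock_phys_alloc_range() call that searches and reserves at once, dropping the now-redundant reserve error handling. With generic placeholder variables, the transformation is mechanical:

	/* before: find a hole, then remember to reserve it */
	base = memblock_find_in_range(start, end, size, align);
	if (!base)
		return -ENOMEM;
	memblock_reserve(base, size);

	/* after: one call does both - note the different argument order */
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;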
*/ - memblock_reserve(relocated_ramdisk, area_size); initrd_start = relocated_ramdisk + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", @@ -299,13 +298,13 @@ static void __init early_reserve_initrd(void) memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image); } + static void __init reserve_initrd(void) { /* Assume only end is not page aligned */ u64 ramdisk_image = get_ramdisk_image(); u64 ramdisk_size = get_ramdisk_size(); u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - u64 mapped_size; if (!boot_params.hdr.type_of_loader || !ramdisk_image || !ramdisk_size) @@ -313,12 +312,6 @@ static void __init reserve_initrd(void) initrd_start = 0; - mapped_size = memblock_mem_size(max_pfn_mapped); - if (ramdisk_size >= (mapped_size>>1)) - panic("initrd too large to handle, " - "disabling initrd (%lld needed, %lld available)\n", - ramdisk_size, mapped_size>>1); - printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image, ramdisk_end - 1); @@ -430,13 +423,13 @@ static int __init reserve_crashkernel_low(void) { #ifdef CONFIG_X86_64 unsigned long long base, low_base = 0, low_size = 0; - unsigned long total_low_mem; + unsigned long low_mem_limit; int ret; - total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT)); + low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX); /* crashkernel=Y,low */ - ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); + ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base); if (ret) { /* * two parts from kernel/dma/swiotlb.c: @@ -454,23 +447,17 @@ static int __init reserve_crashkernel_low(void) return 0; } - low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN); + low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX); if (!low_base) { pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n", (unsigned long)(low_size >> 20)); return -ENOMEM; } - ret = memblock_reserve(low_base, low_size); - if (ret) { - pr_err("%s: Error reserving crashkernel low memblock.\n", __func__); - return ret; - } - - pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n", + pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n", (unsigned long)(low_size >> 20), (unsigned long)(low_base >> 20), - (unsigned long)(total_low_mem >> 20)); + (unsigned long)(low_mem_limit >> 20)); crashk_low_res.start = low_base; crashk_low_res.end = low_base + low_size - 1; @@ -514,13 +501,13 @@ static void __init reserve_crashkernel(void) * unless "crashkernel=size[KMG],high" is specified. 
*/ if (!high) - crash_base = memblock_find_in_range(CRASH_ALIGN, - CRASH_ADDR_LOW_MAX, - crash_size, CRASH_ALIGN); + crash_base = memblock_phys_alloc_range(crash_size, + CRASH_ALIGN, CRASH_ALIGN, + CRASH_ADDR_LOW_MAX); if (!crash_base) - crash_base = memblock_find_in_range(CRASH_ALIGN, - CRASH_ADDR_HIGH_MAX, - crash_size, CRASH_ALIGN); + crash_base = memblock_phys_alloc_range(crash_size, + CRASH_ALIGN, CRASH_ALIGN, + CRASH_ADDR_HIGH_MAX); if (!crash_base) { pr_info("crashkernel reservation failed - No suitable area found.\n"); return; @@ -528,19 +515,13 @@ static void __init reserve_crashkernel(void) } else { unsigned long long start; - start = memblock_find_in_range(crash_base, - crash_base + crash_size, - crash_size, 1 << 20); + start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base, + crash_base + crash_size); if (start != crash_base) { pr_info("crashkernel reservation failed - memory is in use.\n"); return; } } - ret = memblock_reserve(crash_base, crash_size); - if (ret) { - pr_err("%s: Error reserving crashkernel memblock.\n", __func__); - return; - } if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) { memblock_free(crash_base, crash_size); @@ -849,6 +830,7 @@ void __init setup_arch(char **cmdline_p) early_cpu_init(); arch_init_ideal_nops(); jump_label_init(); + static_call_init(); early_ioremap_init(); setup_olpc_ofw_pgd(); @@ -1077,6 +1059,7 @@ void __init setup_arch(char **cmdline_p) efi_fake_memmap(); efi_find_mirror(); efi_esrt_init(); + efi_mokvar_table_init(); /* * The EFI specification says that boot service code won't be @@ -1218,6 +1201,7 @@ void __init setup_arch(char **cmdline_p) prefill_possible_map(); init_cpu_to_node(); + init_gi_nodes(); io_apic_init_mappings(); diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c new file mode 100644 index 000000000000..5f83ccaab877 --- /dev/null +++ b/arch/x86/kernel/sev-es-shared.c @@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD Encrypted Register State Support + * + * Author: Joerg Roedel <jroedel@suse.de> + * + * This file is not compiled stand-alone. It contains code shared + * between the pre-decompression boot code and the running Linux kernel + * and is included directly into both code-bases. + */ + +#ifndef __BOOT_COMPRESSED +#define error(v) pr_err(v) +#define has_cpuflag(f) boot_cpu_has(f) +#endif + +static bool __init sev_es_check_cpu_features(void) +{ + if (!has_cpuflag(X86_FEATURE_RDRAND)) { + error("RDRAND instruction not supported - no trusted source of randomness available\n"); + return false; + } + + return true; +} + +static void sev_es_terminate(unsigned int reason) +{ + u64 val = GHCB_SEV_TERMINATE; + + /* + * Tell the hypervisor what went wrong - only reason-set 0 is + * currently supported. 
+ */ + val |= GHCB_SEV_TERMINATE_REASON(0, reason); + + /* Request Guest Termination from Hypvervisor */ + sev_es_wr_ghcb_msr(val); + VMGEXIT(); + + while (true) + asm volatile("hlt\n" : : : "memory"); +} + +static bool sev_es_negotiate_protocol(void) +{ + u64 val; + + /* Do the GHCB protocol version negotiation */ + sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ); + VMGEXIT(); + val = sev_es_rd_ghcb_msr(); + + if (GHCB_INFO(val) != GHCB_SEV_INFO) + return false; + + if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR || + GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR) + return false; + + return true; +} + +static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb) +{ + memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); +} + +static bool vc_decoding_needed(unsigned long exit_code) +{ + /* Exceptions don't require to decode the instruction */ + return !(exit_code >= SVM_EXIT_EXCP_BASE && + exit_code <= SVM_EXIT_LAST_EXCP); +} + +static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt, + struct pt_regs *regs, + unsigned long exit_code) +{ + enum es_result ret = ES_OK; + + memset(ctxt, 0, sizeof(*ctxt)); + ctxt->regs = regs; + + if (vc_decoding_needed(exit_code)) + ret = vc_decode_insn(ctxt); + + return ret; +} + +static void vc_finish_insn(struct es_em_ctxt *ctxt) +{ + ctxt->regs->ip += ctxt->insn.length; +} + +static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, + struct es_em_ctxt *ctxt, + u64 exit_code, u64 exit_info_1, + u64 exit_info_2) +{ + enum es_result ret; + + /* Fill in protocol and format specifiers */ + ghcb->protocol_version = GHCB_PROTOCOL_MAX; + ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; + + ghcb_set_sw_exit_code(ghcb, exit_code); + ghcb_set_sw_exit_info_1(ghcb, exit_info_1); + ghcb_set_sw_exit_info_2(ghcb, exit_info_2); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + if ((ghcb->save.sw_exit_info_1 & 0xffffffff) == 1) { + u64 info = ghcb->save.sw_exit_info_2; + unsigned long v; + + info = ghcb->save.sw_exit_info_2; + v = info & SVM_EVTINJ_VEC_MASK; + + /* Check if exception information from hypervisor is sane. */ + if ((info & SVM_EVTINJ_VALID) && + ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) && + ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) { + ctxt->fi.vector = v; + if (info & SVM_EVTINJ_VALID_ERR) + ctxt->fi.error_code = info >> 32; + ret = ES_EXCEPTION; + } else { + ret = ES_VMM_ERROR; + } + } else { + ret = ES_OK; + } + + return ret; +} + +/* + * Boot VC Handler - This is the first VC handler during boot, there is no GHCB + * page yet, so it only supports the MSR based communication with the + * hypervisor and only the CPUID exit-code. 
+ */ +void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) +{ + unsigned int fn = lower_bits(regs->ax, 32); + unsigned long val; + + /* Only CPUID is supported via MSR protocol */ + if (exit_code != SVM_EXIT_CPUID) + goto fail; + + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX)); + VMGEXIT(); + val = sev_es_rd_ghcb_msr(); + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) + goto fail; + regs->ax = val >> 32; + + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX)); + VMGEXIT(); + val = sev_es_rd_ghcb_msr(); + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) + goto fail; + regs->bx = val >> 32; + + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX)); + VMGEXIT(); + val = sev_es_rd_ghcb_msr(); + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) + goto fail; + regs->cx = val >> 32; + + sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX)); + VMGEXIT(); + val = sev_es_rd_ghcb_msr(); + if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP) + goto fail; + regs->dx = val >> 32; + + /* Skip over the CPUID two-byte opcode */ + regs->ip += 2; + + return; + +fail: + sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE); + VMGEXIT(); + + /* Shouldn't get here - if we do halt the machine */ + while (true) + asm volatile("hlt\n"); +} + +static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, + void *src, char *buf, + unsigned int data_size, + unsigned int count, + bool backwards) +{ + int i, b = backwards ? -1 : 1; + enum es_result ret = ES_OK; + + for (i = 0; i < count; i++) { + void *s = src + (i * data_size * b); + char *d = buf + (i * data_size); + + ret = vc_read_mem(ctxt, s, d, data_size); + if (ret != ES_OK) + break; + } + + return ret; +} + +static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt, + void *dst, char *buf, + unsigned int data_size, + unsigned int count, + bool backwards) +{ + int i, s = backwards ? 
-1 : 1; + enum es_result ret = ES_OK; + + for (i = 0; i < count; i++) { + void *d = dst + (i * data_size * s); + char *b = buf + (i * data_size); + + ret = vc_write_mem(ctxt, d, b, data_size); + if (ret != ES_OK) + break; + } + + return ret; +} + +#define IOIO_TYPE_STR BIT(2) +#define IOIO_TYPE_IN 1 +#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR) +#define IOIO_TYPE_OUT 0 +#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR) + +#define IOIO_REP BIT(3) + +#define IOIO_ADDR_64 BIT(9) +#define IOIO_ADDR_32 BIT(8) +#define IOIO_ADDR_16 BIT(7) + +#define IOIO_DATA_32 BIT(6) +#define IOIO_DATA_16 BIT(5) +#define IOIO_DATA_8 BIT(4) + +#define IOIO_SEG_ES (0 << 10) +#define IOIO_SEG_DS (3 << 10) + +static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) +{ + struct insn *insn = &ctxt->insn; + *exitinfo = 0; + + switch (insn->opcode.bytes[0]) { + /* INS opcodes */ + case 0x6c: + case 0x6d: + *exitinfo |= IOIO_TYPE_INS; + *exitinfo |= IOIO_SEG_ES; + *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; + break; + + /* OUTS opcodes */ + case 0x6e: + case 0x6f: + *exitinfo |= IOIO_TYPE_OUTS; + *exitinfo |= IOIO_SEG_DS; + *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; + break; + + /* IN immediate opcodes */ + case 0xe4: + case 0xe5: + *exitinfo |= IOIO_TYPE_IN; + *exitinfo |= (u64)insn->immediate.value << 16; + break; + + /* OUT immediate opcodes */ + case 0xe6: + case 0xe7: + *exitinfo |= IOIO_TYPE_OUT; + *exitinfo |= (u64)insn->immediate.value << 16; + break; + + /* IN register opcodes */ + case 0xec: + case 0xed: + *exitinfo |= IOIO_TYPE_IN; + *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; + break; + + /* OUT register opcodes */ + case 0xee: + case 0xef: + *exitinfo |= IOIO_TYPE_OUT; + *exitinfo |= (ctxt->regs->dx & 0xffff) << 16; + break; + + default: + return ES_DECODE_FAILED; + } + + switch (insn->opcode.bytes[0]) { + case 0x6c: + case 0x6e: + case 0xe4: + case 0xe6: + case 0xec: + case 0xee: + /* Single byte opcodes */ + *exitinfo |= IOIO_DATA_8; + break; + default: + /* Length determined by instruction parsing */ + *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16 + : IOIO_DATA_32; + } + switch (insn->addr_bytes) { + case 2: + *exitinfo |= IOIO_ADDR_16; + break; + case 4: + *exitinfo |= IOIO_ADDR_32; + break; + case 8: + *exitinfo |= IOIO_ADDR_64; + break; + } + + if (insn_has_rep_prefix(insn)) + *exitinfo |= IOIO_REP; + + return ES_OK; +} + +static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + u64 exit_info_1, exit_info_2; + enum es_result ret; + + ret = vc_ioio_exitinfo(ctxt, &exit_info_1); + if (ret != ES_OK) + return ret; + + if (exit_info_1 & IOIO_TYPE_STR) { + + /* (REP) INS/OUTS */ + + bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF); + unsigned int io_bytes, exit_bytes; + unsigned int ghcb_count, op_count; + unsigned long es_base; + u64 sw_scratch; + + /* + * For the string variants with rep prefix the amount of in/out + * operations per #VC exception is limited so that the kernel + * has a chance to take interrupts and re-schedule while the + * instruction is emulated. + */ + io_bytes = (exit_info_1 >> 4) & 0x7; + ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes; + + op_count = (exit_info_1 & IOIO_REP) ? 
regs->cx : 1; + exit_info_2 = min(op_count, ghcb_count); + exit_bytes = exit_info_2 * io_bytes; + + es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); + + /* Read bytes of OUTS into the shared buffer */ + if (!(exit_info_1 & IOIO_TYPE_IN)) { + ret = vc_insn_string_read(ctxt, + (void *)(es_base + regs->si), + ghcb->shared_buffer, io_bytes, + exit_info_2, df); + if (ret) + return ret; + } + + /* + * Issue an VMGEXIT to the HV to consume the bytes from the + * shared buffer or to have it write them into the shared buffer + * depending on the instruction: OUTS or INS. + */ + sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer); + ghcb_set_sw_scratch(ghcb, sw_scratch); + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, + exit_info_1, exit_info_2); + if (ret != ES_OK) + return ret; + + /* Read bytes from shared buffer into the guest's destination. */ + if (exit_info_1 & IOIO_TYPE_IN) { + ret = vc_insn_string_write(ctxt, + (void *)(es_base + regs->di), + ghcb->shared_buffer, io_bytes, + exit_info_2, df); + if (ret) + return ret; + + if (df) + regs->di -= exit_bytes; + else + regs->di += exit_bytes; + } else { + if (df) + regs->si -= exit_bytes; + else + regs->si += exit_bytes; + } + + if (exit_info_1 & IOIO_REP) + regs->cx -= exit_info_2; + + ret = regs->cx ? ES_RETRY : ES_OK; + + } else { + + /* IN/OUT into/from rAX */ + + int bits = (exit_info_1 & 0x70) >> 1; + u64 rax = 0; + + if (!(exit_info_1 & IOIO_TYPE_IN)) + rax = lower_bits(regs->ax, bits); + + ghcb_set_rax(ghcb, rax); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0); + if (ret != ES_OK) + return ret; + + if (exit_info_1 & IOIO_TYPE_IN) { + if (!ghcb_rax_is_valid(ghcb)) + return ES_VMM_ERROR; + regs->ax = lower_bits(ghcb->save.rax, bits); + } + } + + return ret; +} + +static enum es_result vc_handle_cpuid(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + u32 cr4 = native_read_cr4(); + enum es_result ret; + + ghcb_set_rax(ghcb, regs->ax); + ghcb_set_rcx(ghcb, regs->cx); + + if (cr4 & X86_CR4_OSXSAVE) + /* Safe to read xcr0 */ + ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK)); + else + /* xgetbv will cause #GP - use reset value for xcr0 */ + ghcb_set_xcr0(ghcb, 1); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && + ghcb_rbx_is_valid(ghcb) && + ghcb_rcx_is_valid(ghcb) && + ghcb_rdx_is_valid(ghcb))) + return ES_VMM_ERROR; + + regs->ax = ghcb->save.rax; + regs->bx = ghcb->save.rbx; + regs->cx = ghcb->save.rcx; + regs->dx = ghcb->save.rdx; + + return ES_OK; +} + +static enum es_result vc_handle_rdtsc(struct ghcb *ghcb, + struct es_em_ctxt *ctxt, + unsigned long exit_code) +{ + bool rdtscp = (exit_code == SVM_EXIT_RDTSCP); + enum es_result ret; + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) && + (!rdtscp || ghcb_rcx_is_valid(ghcb)))) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + ctxt->regs->dx = ghcb->save.rdx; + if (rdtscp) + ctxt->regs->cx = ghcb->save.rcx; + + return ES_OK; +} diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c new file mode 100644 index 000000000000..4a96726fbaf8 --- /dev/null +++ b/arch/x86/kernel/sev-es.c @@ -0,0 +1,1404 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#define pr_fmt(fmt) 
"SEV-ES: " fmt + +#include <linux/sched/debug.h> /* For show_regs() */ +#include <linux/percpu-defs.h> +#include <linux/mem_encrypt.h> +#include <linux/lockdep.h> +#include <linux/printk.h> +#include <linux/mm_types.h> +#include <linux/set_memory.h> +#include <linux/memblock.h> +#include <linux/kernel.h> +#include <linux/mm.h> + +#include <asm/cpu_entry_area.h> +#include <asm/stacktrace.h> +#include <asm/sev-es.h> +#include <asm/insn-eval.h> +#include <asm/fpu/internal.h> +#include <asm/processor.h> +#include <asm/realmode.h> +#include <asm/traps.h> +#include <asm/svm.h> +#include <asm/smp.h> +#include <asm/cpu.h> + +#define DR7_RESET_VALUE 0x400 + +/* For early boot hypervisor communication in SEV-ES enabled guests */ +static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE); + +/* + * Needs to be in the .data section because we need it NULL before bss is + * cleared + */ +static struct ghcb __initdata *boot_ghcb; + +/* #VC handler runtime per-CPU data */ +struct sev_es_runtime_data { + struct ghcb ghcb_page; + + /* Physical storage for the per-CPU IST stack of the #VC handler */ + char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE); + + /* + * Physical storage for the per-CPU fall-back stack of the #VC handler. + * The fall-back stack is used when it is not safe to switch back to the + * interrupted stack in the #VC entry code. + */ + char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE); + + /* + * Reserve one page per CPU as backup storage for the unencrypted GHCB. + * It is needed when an NMI happens while the #VC handler uses the real + * GHCB, and the NMI handler itself is causing another #VC exception. In + * that case the GHCB content of the first handler needs to be backed up + * and restored. + */ + struct ghcb backup_ghcb; + + /* + * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions. + * There is no need for it to be atomic, because nothing is written to + * the GHCB between the read and the write of ghcb_active. So it is safe + * to use it when a nested #VC exception happens before the write. + * + * This is necessary for example in the #VC->NMI->#VC case when the NMI + * happens while the first #VC handler uses the GHCB. When the NMI code + * raises a second #VC handler it might overwrite the contents of the + * GHCB written by the first handler. To avoid this the content of the + * GHCB is saved and restored when the GHCB is detected to be in use + * already. + */ + bool ghcb_active; + bool backup_ghcb_active; + + /* + * Cached DR7 value - write it on DR7 writes and return it on reads. + * That value will never make it to the real hardware DR7 as debugging + * is currently unsupported in SEV-ES guests. 
+ */ + unsigned long dr7; +}; + +struct ghcb_state { + struct ghcb *ghcb; +}; + +static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data); +DEFINE_STATIC_KEY_FALSE(sev_es_enable_key); + +/* Needed in vc_early_forward_exception */ +void do_early_exception(struct pt_regs *regs, int trapnr); + +static void __init setup_vc_stacks(int cpu) +{ + struct sev_es_runtime_data *data; + struct cpu_entry_area *cea; + unsigned long vaddr; + phys_addr_t pa; + + data = per_cpu(runtime_data, cpu); + cea = get_cpu_entry_area(cpu); + + /* Map #VC IST stack */ + vaddr = CEA_ESTACK_BOT(&cea->estacks, VC); + pa = __pa(data->ist_stack); + cea_set_pte((void *)vaddr, pa, PAGE_KERNEL); + + /* Map VC fall-back stack */ + vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2); + pa = __pa(data->fallback_stack); + cea_set_pte((void *)vaddr, pa, PAGE_KERNEL); +} + +static __always_inline bool on_vc_stack(unsigned long sp) +{ + return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC))); +} + +/* + * This function handles the case when an NMI is raised in the #VC exception + * handler entry code. In this case, the IST entry for #VC must be adjusted, so + * that any subsequent #VC exception will not overwrite the stack contents of the + * interrupted #VC handler. + * + * The IST entry is adjusted unconditionally so that it can be also be + * unconditionally adjusted back in sev_es_ist_exit(). Otherwise a nested + * sev_es_ist_exit() call may adjust back the IST entry too early. + */ +void noinstr __sev_es_ist_enter(struct pt_regs *regs) +{ + unsigned long old_ist, new_ist; + + /* Read old IST entry */ + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + + /* Make room on the IST stack */ + if (on_vc_stack(regs->sp)) + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist); + else + new_ist = old_ist - sizeof(old_ist); + + /* Store old IST entry */ + *(unsigned long *)new_ist = old_ist; + + /* Set new IST entry */ + this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist); +} + +void noinstr __sev_es_ist_exit(void) +{ + unsigned long ist; + + /* Read IST entry */ + ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + + if (WARN_ON(ist == __this_cpu_ist_top_va(VC))) + return; + + /* Read back old IST entry and write it to the TSS */ + this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist); +} + +static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state) +{ + struct sev_es_runtime_data *data; + struct ghcb *ghcb; + + data = this_cpu_read(runtime_data); + ghcb = &data->ghcb_page; + + if (unlikely(data->ghcb_active)) { + /* GHCB is already in use - save its contents */ + + if (unlikely(data->backup_ghcb_active)) + return NULL; + + /* Mark backup_ghcb active before writing to it */ + data->backup_ghcb_active = true; + + state->ghcb = &data->backup_ghcb; + + /* Backup GHCB content */ + *state->ghcb = *ghcb; + } else { + state->ghcb = NULL; + data->ghcb_active = true; + } + + return ghcb; +} + +static __always_inline void sev_es_put_ghcb(struct ghcb_state *state) +{ + struct sev_es_runtime_data *data; + struct ghcb *ghcb; + + data = this_cpu_read(runtime_data); + ghcb = &data->ghcb_page; + + if (state->ghcb) { + /* Restore GHCB from Backup */ + *ghcb = *state->ghcb; + data->backup_ghcb_active = false; + state->ghcb = NULL; + } else { + data->ghcb_active = false; + } +} + +/* Needed in vc_early_forward_exception */ +void do_early_exception(struct pt_regs *regs, int trapnr); + +static inline u64 sev_es_rd_ghcb_msr(void) +{ + return 
__rdmsr(MSR_AMD64_SEV_ES_GHCB); +} + +static inline void sev_es_wr_ghcb_msr(u64 val) +{ + u32 low, high; + + low = (u32)(val); + high = (u32)(val >> 32); + + native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high); +} + +static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, + unsigned char *buffer) +{ + return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); +} + +static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + enum es_result ret; + int res; + + if (user_mode(ctxt->regs)) { + res = insn_fetch_from_user(ctxt->regs, buffer); + if (!res) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; + } + + if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res)) + return ES_DECODE_FAILED; + } else { + res = vc_fetch_insn_kernel(ctxt, buffer); + if (res) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; + } + + insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1); + insn_get_length(&ctxt->insn); + } + + ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED; + + return ret; +} + +static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, + char *dst, char *buf, size_t size) +{ + unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; + char __user *target = (char __user *)dst; + u64 d8; + u32 d4; + u16 d2; + u8 d1; + + switch (size) { + case 1: + memcpy(&d1, buf, 1); + if (put_user(d1, target)) + goto fault; + break; + case 2: + memcpy(&d2, buf, 2); + if (put_user(d2, target)) + goto fault; + break; + case 4: + memcpy(&d4, buf, 4); + if (put_user(d4, target)) + goto fault; + break; + case 8: + memcpy(&d8, buf, 8); + if (put_user(d8, target)) + goto fault; + break; + default: + WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); + return ES_UNSUPPORTED; + } + + return ES_OK; + +fault: + if (user_mode(ctxt->regs)) + error_code |= X86_PF_USER; + + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = error_code; + ctxt->fi.cr2 = (unsigned long)dst; + + return ES_EXCEPTION; +} + +static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, + char *src, char *buf, size_t size) +{ + unsigned long error_code = X86_PF_PROT; + char __user *s = (char __user *)src; + u64 d8; + u32 d4; + u16 d2; + u8 d1; + + switch (size) { + case 1: + if (get_user(d1, s)) + goto fault; + memcpy(buf, &d1, 1); + break; + case 2: + if (get_user(d2, s)) + goto fault; + memcpy(buf, &d2, 2); + break; + case 4: + if (get_user(d4, s)) + goto fault; + memcpy(buf, &d4, 4); + break; + case 8: + if (get_user(d8, s)) + goto fault; + memcpy(buf, &d8, 8); + break; + default: + WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); + return ES_UNSUPPORTED; + } + + return ES_OK; + +fault: + if (user_mode(ctxt->regs)) + error_code |= X86_PF_USER; + + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = error_code; + ctxt->fi.cr2 = (unsigned long)src; + + return ES_EXCEPTION; +} + +static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned long vaddr, phys_addr_t *paddr) +{ + unsigned long va = (unsigned long)vaddr; + unsigned int level; + phys_addr_t pa; + pgd_t *pgd; + pte_t *pte; + + pgd = __va(read_cr3_pa()); + pgd = &pgd[pgd_index(va)]; + pte = lookup_address_in_pgd(pgd, va, &level); + if (!pte) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.cr2 = vaddr; + ctxt->fi.error_code = 0; + + if (user_mode(ctxt->regs)) + ctxt->fi.error_code |= X86_PF_USER; + + return 
false; + } + + pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; + pa |= va & ~page_level_mask(level); + + *paddr = pa; + + return true; +} + +/* Include code shared with pre-decompression boot stage */ +#include "sev-es-shared.c" + +void noinstr __sev_es_nmi_complete(void) +{ + struct ghcb_state state; + struct ghcb *ghcb; + + ghcb = sev_es_get_ghcb(&state); + + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE); + ghcb_set_sw_exit_info_1(ghcb, 0); + ghcb_set_sw_exit_info_2(ghcb, 0); + + sev_es_wr_ghcb_msr(__pa_nodebug(ghcb)); + VMGEXIT(); + + sev_es_put_ghcb(&state); +} + +static u64 get_jump_table_addr(void) +{ + struct ghcb_state state; + unsigned long flags; + struct ghcb *ghcb; + u64 ret = 0; + + local_irq_save(flags); + + ghcb = sev_es_get_ghcb(&state); + + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE); + ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE); + ghcb_set_sw_exit_info_2(ghcb, 0); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + if (ghcb_sw_exit_info_1_is_valid(ghcb) && + ghcb_sw_exit_info_2_is_valid(ghcb)) + ret = ghcb->save.sw_exit_info_2; + + sev_es_put_ghcb(&state); + + local_irq_restore(flags); + + return ret; +} + +int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) +{ + u16 startup_cs, startup_ip; + phys_addr_t jump_table_pa; + u64 jump_table_addr; + u16 __iomem *jump_table; + + jump_table_addr = get_jump_table_addr(); + + /* On UP guests there is no jump table so this is not a failure */ + if (!jump_table_addr) + return 0; + + /* Check if AP Jump Table is page-aligned */ + if (jump_table_addr & ~PAGE_MASK) + return -EINVAL; + + jump_table_pa = jump_table_addr & PAGE_MASK; + + startup_cs = (u16)(rmh->trampoline_start >> 4); + startup_ip = (u16)(rmh->sev_es_trampoline_start - + rmh->trampoline_start); + + jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE); + if (!jump_table) + return -EIO; + + writew(startup_ip, &jump_table[0]); + writew(startup_cs, &jump_table[1]); + + iounmap(jump_table); + + return 0; +} + +/* + * This is needed by the OVMF UEFI firmware which will use whatever it finds in + * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu + * runtime GHCBs used by the kernel are also mapped in the EFI page-table. + */ +int __init sev_es_efi_map_ghcbs(pgd_t *pgd) +{ + struct sev_es_runtime_data *data; + unsigned long address, pflags; + int cpu; + u64 pfn; + + if (!sev_es_active()) + return 0; + + pflags = _PAGE_NX | _PAGE_RW; + + for_each_possible_cpu(cpu) { + data = per_cpu(runtime_data, cpu); + + address = __pa(&data->ghcb_page); + pfn = address >> PAGE_SHIFT; + + if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags)) + return 1; + } + + return 0; +} + +static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + enum es_result ret; + u64 exit_info_1; + + /* Is it a WRMSR? */ + exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0; + + ghcb_set_rcx(ghcb, regs->cx); + if (exit_info_1) { + ghcb_set_rax(ghcb, regs->ax); + ghcb_set_rdx(ghcb, regs->dx); + } + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0); + + if ((ret == ES_OK) && (!exit_info_1)) { + regs->ax = ghcb->save.rax; + regs->dx = ghcb->save.rdx; + } + + return ret; +} + +/* + * This function runs on the first #VC exception after the kernel + * switched to virtual addresses. + */ +static bool __init sev_es_setup_ghcb(void) +{ + /* First make sure the hypervisor talks a supported protocol. 
*/ + if (!sev_es_negotiate_protocol()) + return false; + + /* + * Clear the boot_ghcb. The first exception comes in before the bss + * section is cleared. + */ + memset(&boot_ghcb_page, 0, PAGE_SIZE); + + /* Alright - Make the boot-ghcb public */ + boot_ghcb = &boot_ghcb_page; + + return true; +} + +#ifdef CONFIG_HOTPLUG_CPU +static void sev_es_ap_hlt_loop(void) +{ + struct ghcb_state state; + struct ghcb *ghcb; + + ghcb = sev_es_get_ghcb(&state); + + while (true) { + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP); + ghcb_set_sw_exit_info_1(ghcb, 0); + ghcb_set_sw_exit_info_2(ghcb, 0); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + /* Wakeup signal? */ + if (ghcb_sw_exit_info_2_is_valid(ghcb) && + ghcb->save.sw_exit_info_2) + break; + } + + sev_es_put_ghcb(&state); +} + +/* + * Play_dead handler when running under SEV-ES. This is needed because + * the hypervisor can't deliver an SIPI request to restart the AP. + * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the + * hypervisor wakes it up again. + */ +static void sev_es_play_dead(void) +{ + play_dead_common(); + + /* IRQs now disabled */ + + sev_es_ap_hlt_loop(); + + /* + * If we get here, the VCPU was woken up again. Jump to CPU + * startup code to get it back online. + */ + start_cpu0(); +} +#else /* CONFIG_HOTPLUG_CPU */ +#define sev_es_play_dead native_play_dead +#endif /* CONFIG_HOTPLUG_CPU */ + +#ifdef CONFIG_SMP +static void __init sev_es_setup_play_dead(void) +{ + smp_ops.play_dead = sev_es_play_dead; +} +#else +static inline void sev_es_setup_play_dead(void) { } +#endif + +static void __init alloc_runtime_data(int cpu) +{ + struct sev_es_runtime_data *data; + + data = memblock_alloc(sizeof(*data), PAGE_SIZE); + if (!data) + panic("Can't allocate SEV-ES runtime data"); + + per_cpu(runtime_data, cpu) = data; +} + +static void __init init_ghcb(int cpu) +{ + struct sev_es_runtime_data *data; + int err; + + data = per_cpu(runtime_data, cpu); + + err = early_set_memory_decrypted((unsigned long)&data->ghcb_page, + sizeof(data->ghcb_page)); + if (err) + panic("Can't map GHCBs unencrypted"); + + memset(&data->ghcb_page, 0, sizeof(data->ghcb_page)); + + data->ghcb_active = false; + data->backup_ghcb_active = false; +} + +void __init sev_es_init_vc_handling(void) +{ + int cpu; + + BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE); + + if (!sev_es_active()) + return; + + if (!sev_es_check_cpu_features()) + panic("SEV-ES CPU Features missing"); + + /* Enable SEV-ES special handling */ + static_branch_enable(&sev_es_enable_key); + + /* Initialize per-cpu GHCB pages */ + for_each_possible_cpu(cpu) { + alloc_runtime_data(cpu); + init_ghcb(cpu); + setup_vc_stacks(cpu); + } + + sev_es_setup_play_dead(); + + /* Secondary CPUs use the runtime #VC handler */ + initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication; +} + +static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt) +{ + int trapnr = ctxt->fi.vector; + + if (trapnr == X86_TRAP_PF) + native_write_cr2(ctxt->fi.cr2); + + ctxt->regs->orig_ax = ctxt->fi.error_code; + do_early_exception(ctxt->regs, trapnr); +} + +static long *vc_insn_get_reg(struct es_em_ctxt *ctxt) +{ + long *reg_array; + int offset; + + reg_array = (long *)ctxt->regs; + offset = insn_get_modrm_reg_off(&ctxt->insn, ctxt->regs); + + if (offset < 0) + return NULL; + + offset /= sizeof(long); + + return reg_array + offset; +} + +static long *vc_insn_get_rm(struct es_em_ctxt *ctxt) +{ + long *reg_array; + int offset; + + 
reg_array = (long *)ctxt->regs; + offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs); + + if (offset < 0) + return NULL; + + offset /= sizeof(long); + + return reg_array + offset; +} +static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned int bytes, bool read) +{ + u64 exit_code, exit_info_1, exit_info_2; + unsigned long ghcb_pa = __pa(ghcb); + phys_addr_t paddr; + void __user *ref; + + ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs); + if (ref == (void __user *)-1L) + return ES_UNSUPPORTED; + + exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE; + + if (!vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr)) { + if (!read) + ctxt->fi.error_code |= X86_PF_WRITE; + + return ES_EXCEPTION; + } + + exit_info_1 = paddr; + /* Can never be greater than 8 */ + exit_info_2 = bytes; + + ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer)); + + return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2); +} + +static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct insn *insn = &ctxt->insn; + unsigned int bytes = 0; + enum es_result ret; + int sign_byte; + long *reg_data; + + switch (insn->opcode.bytes[1]) { + /* MMIO Read w/ zero-extension */ + case 0xb6: + bytes = 1; + fallthrough; + case 0xb7: + if (!bytes) + bytes = 2; + + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + /* Zero extend based on operand size */ + reg_data = vc_insn_get_reg(ctxt); + if (!reg_data) + return ES_DECODE_FAILED; + + memset(reg_data, 0, insn->opnd_bytes); + + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + + /* MMIO Read w/ sign-extension */ + case 0xbe: + bytes = 1; + fallthrough; + case 0xbf: + if (!bytes) + bytes = 2; + + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + /* Sign extend based on operand size */ + reg_data = vc_insn_get_reg(ctxt); + if (!reg_data) + return ES_DECODE_FAILED; + + if (bytes == 1) { + u8 *val = (u8 *)ghcb->shared_buffer; + + sign_byte = (*val & 0x80) ? 0xff : 0x00; + } else { + u16 *val = (u16 *)ghcb->shared_buffer; + + sign_byte = (*val & 0x8000) ? 0xff : 0x00; + } + memset(reg_data, sign_byte, insn->opnd_bytes); + + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + + default: + ret = ES_UNSUPPORTED; + } + + return ret; +} + +/* + * The MOVS instruction has two memory operands, which raises the + * problem that it is not known whether the access to the source or the + * destination caused the #VC exception (and hence whether an MMIO read + * or write operation needs to be emulated). + * + * Instead of playing games with walking page-tables and trying to guess + * whether the source or destination is an MMIO range, split the move + * into two operations, a read and a write with only one memory operand. + * This will cause a nested #VC exception on the MMIO address which can + * then be handled. + * + * This implementation has the benefit that it also supports MOVS where + * source _and_ destination are MMIO regions. + * + * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a + * rare operation. If it turns out to be a performance problem the split + * operations can be moved to memcpy_fromio() and memcpy_toio(). 
+ */ +static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt, + unsigned int bytes) +{ + unsigned long ds_base, es_base; + unsigned char *src, *dst; + unsigned char buffer[8]; + enum es_result ret; + bool rep; + int off; + + ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS); + es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); + + if (ds_base == -1L || es_base == -1L) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } + + src = ds_base + (unsigned char *)ctxt->regs->si; + dst = es_base + (unsigned char *)ctxt->regs->di; + + ret = vc_read_mem(ctxt, src, buffer, bytes); + if (ret != ES_OK) + return ret; + + ret = vc_write_mem(ctxt, dst, buffer, bytes); + if (ret != ES_OK) + return ret; + + if (ctxt->regs->flags & X86_EFLAGS_DF) + off = -bytes; + else + off = bytes; + + ctxt->regs->si += off; + ctxt->regs->di += off; + + rep = insn_has_rep_prefix(&ctxt->insn); + if (rep) + ctxt->regs->cx -= 1; + + if (!rep || ctxt->regs->cx == 0) + return ES_OK; + else + return ES_RETRY; +} + +static enum es_result vc_handle_mmio(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct insn *insn = &ctxt->insn; + unsigned int bytes = 0; + enum es_result ret; + long *reg_data; + + switch (insn->opcode.bytes[0]) { + /* MMIO Write */ + case 0x88: + bytes = 1; + fallthrough; + case 0x89: + if (!bytes) + bytes = insn->opnd_bytes; + + reg_data = vc_insn_get_reg(ctxt); + if (!reg_data) + return ES_DECODE_FAILED; + + memcpy(ghcb->shared_buffer, reg_data, bytes); + + ret = vc_do_mmio(ghcb, ctxt, bytes, false); + break; + + case 0xc6: + bytes = 1; + fallthrough; + case 0xc7: + if (!bytes) + bytes = insn->opnd_bytes; + + memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes); + + ret = vc_do_mmio(ghcb, ctxt, bytes, false); + break; + + /* MMIO Read */ + case 0x8a: + bytes = 1; + fallthrough; + case 0x8b: + if (!bytes) + bytes = insn->opnd_bytes; + + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + reg_data = vc_insn_get_reg(ctxt); + if (!reg_data) + return ES_DECODE_FAILED; + + /* Zero-extend for 32-bit operation */ + if (bytes == 4) + *reg_data = 0; + + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + + /* MOVS instruction */ + case 0xa4: + bytes = 1; + fallthrough; + case 0xa5: + if (!bytes) + bytes = insn->opnd_bytes; + + ret = vc_handle_mmio_movs(ctxt, bytes); + break; + /* Two-Byte Opcodes */ + case 0x0f: + ret = vc_handle_mmio_twobyte_ops(ghcb, ctxt); + break; + default: + ret = ES_UNSUPPORTED; + } + + return ret; +} + +static enum es_result vc_handle_dr7_write(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct sev_es_runtime_data *data = this_cpu_read(runtime_data); + long val, *reg = vc_insn_get_rm(ctxt); + enum es_result ret; + + if (!reg) + return ES_DECODE_FAILED; + + val = *reg; + + /* Upper 32 bits must be written as zeroes */ + if (val >> 32) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } + + /* Clear out other reserved bits and set bit 10 */ + val = (val & 0xffff23ffL) | BIT(10); + + /* Early non-zero writes to DR7 are not supported */ + if (!data && (val & ~DR7_RESET_VALUE)) + return ES_UNSUPPORTED; + + /* Using a value of 0 for ExitInfo1 means RAX holds the value */ + ghcb_set_rax(ghcb, val); + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0); + if (ret != ES_OK) + return ret; + + if (data) + data->dr7 = val; + + return ES_OK; +} + +static enum es_result vc_handle_dr7_read(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct 
sev_es_runtime_data *data = this_cpu_read(runtime_data); + long *reg = vc_insn_get_rm(ctxt); + + if (!reg) + return ES_DECODE_FAILED; + + if (data) + *reg = data->dr7; + else + *reg = DR7_RESET_VALUE; + + return ES_OK; +} + +static enum es_result vc_handle_wbinvd(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0); +} + +static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + enum es_result ret; + + ghcb_set_rcx(ghcb, ctxt->regs->cx); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb))) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + ctxt->regs->dx = ghcb->save.rdx; + + return ES_OK; +} + +static enum es_result vc_handle_monitor(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* + * Treat it as a NOP and do not leak a physical address to the + * hypervisor. + */ + return ES_OK; +} + +static enum es_result vc_handle_mwait(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* Treat the same as MONITOR/MONITORX */ + return ES_OK; +} + +static enum es_result vc_handle_vmmcall(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + enum es_result ret; + + ghcb_set_rax(ghcb, ctxt->regs->ax); + ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0); + + if (x86_platform.hyper.sev_es_hcall_prepare) + x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0); + if (ret != ES_OK) + return ret; + + if (!ghcb_rax_is_valid(ghcb)) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + + /* + * Call sev_es_hcall_finish() after regs->ax is already set. + * This allows the hypervisor handler to overwrite it again if + * necessary. + */ + if (x86_platform.hyper.sev_es_hcall_finish && + !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs)) + return ES_VMM_ERROR; + + return ES_OK; +} + +static enum es_result vc_handle_trap_ac(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* + * Calling ecx_alignment_check() directly does not work, because it + * enables IRQs and the GHCB is active. Forward the exception and call + * it later from vc_forward_exception(). + */ + ctxt->fi.vector = X86_TRAP_AC; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; +} + +static __always_inline void vc_handle_trap_db(struct pt_regs *regs) +{ + if (user_mode(regs)) + noist_exc_debug(regs); + else + exc_debug(regs); +} + +static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt, + struct ghcb *ghcb, + unsigned long exit_code) +{ + enum es_result result; + + switch (exit_code) { + case SVM_EXIT_READ_DR7: + result = vc_handle_dr7_read(ghcb, ctxt); + break; + case SVM_EXIT_WRITE_DR7: + result = vc_handle_dr7_write(ghcb, ctxt); + break; + case SVM_EXIT_EXCP_BASE + X86_TRAP_AC: + result = vc_handle_trap_ac(ghcb, ctxt); + break; + case SVM_EXIT_RDTSC: + case SVM_EXIT_RDTSCP: + result = vc_handle_rdtsc(ghcb, ctxt, exit_code); + break; + case SVM_EXIT_RDPMC: + result = vc_handle_rdpmc(ghcb, ctxt); + break; + case SVM_EXIT_INVD: + pr_err_ratelimited("#VC exception for INVD??? 
Seriously???\n"); + result = ES_UNSUPPORTED; + break; + case SVM_EXIT_CPUID: + result = vc_handle_cpuid(ghcb, ctxt); + break; + case SVM_EXIT_IOIO: + result = vc_handle_ioio(ghcb, ctxt); + break; + case SVM_EXIT_MSR: + result = vc_handle_msr(ghcb, ctxt); + break; + case SVM_EXIT_VMMCALL: + result = vc_handle_vmmcall(ghcb, ctxt); + break; + case SVM_EXIT_WBINVD: + result = vc_handle_wbinvd(ghcb, ctxt); + break; + case SVM_EXIT_MONITOR: + result = vc_handle_monitor(ghcb, ctxt); + break; + case SVM_EXIT_MWAIT: + result = vc_handle_mwait(ghcb, ctxt); + break; + case SVM_EXIT_NPF: + result = vc_handle_mmio(ghcb, ctxt); + break; + default: + /* + * Unexpected #VC exception + */ + result = ES_UNSUPPORTED; + } + + return result; +} + +static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt) +{ + long error_code = ctxt->fi.error_code; + int trapnr = ctxt->fi.vector; + + ctxt->regs->orig_ax = ctxt->fi.error_code; + + switch (trapnr) { + case X86_TRAP_GP: + exc_general_protection(ctxt->regs, error_code); + break; + case X86_TRAP_UD: + exc_invalid_op(ctxt->regs); + break; + case X86_TRAP_AC: + exc_alignment_check(ctxt->regs, error_code); + break; + default: + pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n"); + BUG(); + } +} + +static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs) +{ + unsigned long sp = (unsigned long)regs; + + return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2)); +} + +/* + * Main #VC exception handler. It is called when the entry code was able to + * switch off the IST to a safe kernel stack. + * + * With the current implementation it is always possible to switch to a safe + * stack because #VC exceptions only happen at known places, like intercepted + * instructions or accesses to MMIO areas/IO ports. They can also happen with + * code instrumentation when the hypervisor intercepts #DB, but the critical + * paths are forbidden to be instrumented, so #DB exceptions currently also + * only happen in safe places. + */ +DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication) +{ + struct sev_es_runtime_data *data = this_cpu_read(runtime_data); + struct ghcb_state state; + struct es_em_ctxt ctxt; + enum es_result result; + struct ghcb *ghcb; + + lockdep_assert_irqs_disabled(); + + /* + * Handle #DB before calling into !noinstr code to avoid recursive #DB. + */ + if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) { + vc_handle_trap_db(regs); + return; + } + + instrumentation_begin(); + + /* + * This is invoked through an interrupt gate, so IRQs are disabled. The + * code below might walk page-tables for user or kernel addresses, so + * keep the IRQs disabled to protect us against concurrent TLB flushes. + */ + + ghcb = sev_es_get_ghcb(&state); + if (!ghcb) { + /* + * Mark GHCBs inactive so that panic() is able to print the + * message. + */ + data->ghcb_active = false; + data->backup_ghcb_active = false; + + panic("Unable to handle #VC exception! 
GHCB and Backup GHCB are already in use"); + } + + vc_ghcb_invalidate(ghcb); + result = vc_init_em_ctxt(&ctxt, regs, error_code); + + if (result == ES_OK) + result = vc_handle_exitcode(&ctxt, ghcb, error_code); + + sev_es_put_ghcb(&state); + + /* Done - now check the result */ + switch (result) { + case ES_OK: + vc_finish_insn(&ctxt); + break; + case ES_UNSUPPORTED: + pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n", + error_code, regs->ip); + goto fail; + case ES_VMM_ERROR: + pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", + error_code, regs->ip); + goto fail; + case ES_DECODE_FAILED: + pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", + error_code, regs->ip); + goto fail; + case ES_EXCEPTION: + vc_forward_exception(&ctxt); + break; + case ES_RETRY: + /* Nothing to do */ + break; + default: + pr_emerg("Unknown result in %s():%d\n", __func__, result); + /* + * Emulating the instruction which caused the #VC exception + * failed - can't continue so print debug information + */ + BUG(); + } + +out: + instrumentation_end(); + + return; + +fail: + if (user_mode(regs)) { + /* + * Do not kill the machine if user-space triggered the + * exception. Send SIGBUS instead and let user-space deal with + * it. + */ + force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0); + } else { + pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n", + result); + + /* Show some debug info */ + show_regs(regs); + + /* Ask hypervisor to sev_es_terminate */ + sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST); + + /* If that fails and we get here - just panic */ + panic("Returned from Terminate-Request to Hypervisor\n"); + } + + goto out; +} + +/* This handler runs on the #VC fall-back stack. 
It can cause further #VC exceptions */ +DEFINE_IDTENTRY_VC_IST(exc_vmm_communication) +{ + instrumentation_begin(); + panic("Can't handle #VC exception from unsupported context\n"); + instrumentation_end(); +} + +DEFINE_IDTENTRY_VC(exc_vmm_communication) +{ + if (likely(!on_vc_fallback_stack(regs))) + safe_stack_exc_vmm_communication(regs, error_code); + else + ist_exc_vmm_communication(regs, error_code); +} + +bool __init handle_vc_boot_ghcb(struct pt_regs *regs) +{ + unsigned long exit_code = regs->orig_ax; + struct es_em_ctxt ctxt; + enum es_result result; + + /* Do initial setup or terminate the guest */ + if (unlikely(boot_ghcb == NULL && !sev_es_setup_ghcb())) + sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST); + + vc_ghcb_invalidate(boot_ghcb); + + result = vc_init_em_ctxt(&ctxt, regs, exit_code); + if (result == ES_OK) + result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code); + + /* Done - now check the result */ + switch (result) { + case ES_OK: + vc_finish_insn(&ctxt); + break; + case ES_UNSUPPORTED: + early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_VMM_ERROR: + early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_DECODE_FAILED: + early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_EXCEPTION: + vc_early_forward_exception(&ctxt); + break; + case ES_RETRY: + /* Nothing to do */ + break; + default: + BUG(); + } + + return true; + +fail: + show_regs(regs); + + while (true) + halt(); +} diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index 9ccbf0576cd0..a7f3e12cfbdb 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -27,7 +27,7 @@ static inline void signal_compat_build_tests(void) */ BUILD_BUG_ON(NSIGILL != 11); BUILD_BUG_ON(NSIGFPE != 15); - BUILD_BUG_ON(NSIGSEGV != 7); + BUILD_BUG_ON(NSIGSEGV != 9); BUILD_BUG_ON(NSIGBUS != 5); BUILD_BUG_ON(NSIGTRAP != 5); BUILD_BUG_ON(NSIGCHLD != 6); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f5ef689dd62a..de776b2e6046 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -227,7 +227,7 @@ static void notrace start_secondary(void *unused) load_cr3(swapper_pg_dir); __flush_tlb_all(); #endif - load_current_idt(); + cpu_init_exception_handling(); cpu_init(); x86_cpuinit.early_percpu_clock_init(); preempt_disable(); diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 2fd698e28e4d..8627fda8d993 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -18,13 +18,13 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct unwind_state state; unsigned long addr; - if (regs && !consume_entry(cookie, regs->ip, false)) + if (regs && !consume_entry(cookie, regs->ip)) return; for (unwind_start(&state, task, regs, NULL); !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); - if (!addr || !consume_entry(cookie, addr, false)) + if (!addr || !consume_entry(cookie, addr)) break; } } @@ -72,7 +72,7 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, if (!addr) return -EINVAL; - if (!consume_entry(cookie, addr, false)) + if (!consume_entry(cookie, addr)) return -EINVAL; } @@ -114,7 +114,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, { 
const void __user *fp = (const void __user *)regs->bp; - if (!consume_entry(cookie, regs->ip, false)) + if (!consume_entry(cookie, regs->ip)) return; while (1) { @@ -128,7 +128,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, break; if (!frame.ret_addr) break; - if (!consume_entry(cookie, frame.ret_addr, false)) + if (!consume_entry(cookie, frame.ret_addr)) break; fp = frame.next_fp; } diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c new file mode 100644 index 000000000000..ca9a380d9c0b --- /dev/null +++ b/arch/x86/kernel/static_call.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/static_call.h> +#include <linux/memory.h> +#include <linux/bug.h> +#include <asm/text-patching.h> + +enum insn_type { + CALL = 0, /* site call */ + NOP = 1, /* site cond-call */ + JMP = 2, /* tramp / site tail-call */ + RET = 3, /* tramp / site cond-tail-call */ +}; + +static void __ref __static_call_transform(void *insn, enum insn_type type, void *func) +{ + int size = CALL_INSN_SIZE; + const void *code; + + switch (type) { + case CALL: + code = text_gen_insn(CALL_INSN_OPCODE, insn, func); + break; + + case NOP: + code = ideal_nops[NOP_ATOMIC5]; + break; + + case JMP: + code = text_gen_insn(JMP32_INSN_OPCODE, insn, func); + break; + + case RET: + code = text_gen_insn(RET_INSN_OPCODE, insn, func); + size = RET_INSN_SIZE; + break; + } + + if (memcmp(insn, code, size) == 0) + return; + + if (unlikely(system_state == SYSTEM_BOOTING)) + return text_poke_early(insn, code, size); + + text_poke_bp(insn, code, size, NULL); +} + +static void __static_call_validate(void *insn, bool tail) +{ + u8 opcode = *(u8 *)insn; + + if (tail) { + if (opcode == JMP32_INSN_OPCODE || + opcode == RET_INSN_OPCODE) + return; + } else { + if (opcode == CALL_INSN_OPCODE || + !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5)) + return; + } + + /* + * If we ever trigger this, our text is corrupt, we'll probably not live long. 
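For orientation, the file above is only the x86 back-end; a minimal, hypothetical consumer of the generic static_call API it serves could look like the sketch below. The hook names are made up for illustration; only DEFINE_STATIC_CALL(), static_call() and static_call_update() are the real interface.

    #include <linux/static_call.h>

    static int hook_a(int x) { return x + 1; }
    static int hook_b(int x) { return x * 2; }

    /* Trampoline (and, with HAVE_STATIC_CALL_INLINE, the call sites) start at hook_a. */
    DEFINE_STATIC_CALL(my_hook, hook_a);

    int use_hook(int x)
    {
        /* Compiles to a direct call - no indirect branch, no retpoline. */
        return static_call(my_hook)(x);
    }

    void switch_hook(void)
    {
        /* Re-targets the call; this is what ends up in arch_static_call_transform(). */
        static_call_update(my_hook, hook_b);
    }
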
+ */ + WARN_ONCE(1, "unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn); +} + +static inline enum insn_type __sc_insn(bool null, bool tail) +{ + /* + * Encode the following table without branches: + * + * tail null insn + * -----+-------+------ + * 0 | 0 | CALL + * 0 | 1 | NOP + * 1 | 0 | JMP + * 1 | 1 | RET + */ + return 2*tail + null; +} + +void arch_static_call_transform(void *site, void *tramp, void *func, bool tail) +{ + mutex_lock(&text_mutex); + + if (tramp) { + __static_call_validate(tramp, true); + __static_call_transform(tramp, __sc_insn(!func, true), func); + } + + if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) { + __static_call_validate(site, tail); + __static_call_transform(site, __sc_insn(!func, tail), func); + } + + mutex_unlock(&text_mutex); +} +EXPORT_SYMBOL_GPL(arch_static_call_transform); diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c index 720cde885042..6cf65397d225 100644 --- a/arch/x86/kernel/sys_ia32.c +++ b/arch/x86/kernel/sys_ia32.c @@ -251,6 +251,6 @@ COMPAT_SYSCALL_DEFINE5(ia32_clone, unsigned long, clone_flags, .tls = tls_val, }; - return _do_fork(&args); + return kernel_clone(&args); } #endif /* CONFIG_IA32_EMULATION */ diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 81a2fb711091..3c70fb34028b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -43,6 +43,7 @@ #include <asm/stacktrace.h> #include <asm/processor.h> #include <asm/debugreg.h> +#include <asm/realmode.h> #include <asm/text-patching.h> #include <asm/ftrace.h> #include <asm/traps.h> @@ -195,7 +196,7 @@ static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs) DEFINE_IDTENTRY(exc_divide_error) { - do_error_trap(regs, 0, "divide_error", X86_TRAP_DE, SIGFPE, + do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); } @@ -673,6 +674,50 @@ asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs) return regs; } +#ifdef CONFIG_AMD_MEM_ENCRYPT +asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs) +{ + unsigned long sp, *stack; + struct stack_info info; + struct pt_regs *regs_ret; + + /* + * In the SYSCALL entry path the RSP value comes from user-space - don't + * trust it and switch to the current kernel stack + */ + if (regs->ip >= (unsigned long)entry_SYSCALL_64 && + regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) { + sp = this_cpu_read(cpu_current_top_of_stack); + goto sync; + } + + /* + * From here on the RSP value is trusted. Now check whether entry + * happened from a safe stack. Not safe are the entry or unknown stacks, + * use the fall-back stack instead in this case. + */ + sp = regs->sp; + stack = (unsigned long *)sp; + + if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY || + info.type >= STACK_TYPE_EXCEPTION_LAST) + sp = __this_cpu_ist_top_va(VC2); + +sync: + /* + * Found a safe stack - switch to it as if the entry didn't happen via + * IST stack. The code below only copies pt_regs, the real switch happens + * in assembly code. + */ + sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret); + + regs_ret = (struct pt_regs *)sp; + *regs_ret = *regs; + + return regs_ret; +} +#endif + struct bad_iret_stack { void *error_entry_ret; struct pt_regs regs; @@ -745,9 +790,21 @@ static __always_inline unsigned long debug_read_clear_dr6(void) * Keep it simple: clear DR6 immediately. 
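As a side note on the hunk that follows: writing DR6_RESERVED back and XOR-ing the raw value converts every cause bit to positive polarity. A small standalone sketch, assuming DR6_RESERVED is 0xffff0ff0 (reserved bits and the active-low RTM bit read back as 1) - the mask value is an assumption, not taken from this hunk:

    #define ASSUMED_DR6_RESERVED 0xffff0ff0UL

    static unsigned long dr6_positive_polarity(unsigned long raw)
    {
        return raw ^ ASSUMED_DR6_RESERVED;
    }

    /*
     * Example: a data breakpoint in DR0 fired and no RTM event occurred, so the
     * hardware value reads 0xffff0ff1; the flip yields 0x00000001 (DR_TRAP0),
     * i.e. "bit set" now uniformly means "this event happened".
     */
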
*/ get_debugreg(dr6, 6); - set_debugreg(0, 6); - /* Filter out all the reserved bits which are preset to 1 */ - dr6 &= ~DR6_RESERVED; + set_debugreg(DR6_RESERVED, 6); + dr6 ^= DR6_RESERVED; /* Flip to positive polarity */ + + /* + * Clear the virtual DR6 value, ptrace routines will set bits here for + * things we want signals for. + */ + current->thread.virtual_dr6 = 0; + + /* + * The SDM says "The processor clears the BTF flag when it + * generates a debug exception." Clear TIF_BLOCKSTEP to keep + * TIF_BLOCKSTEP in sync with the hardware BTF flag. + */ + clear_thread_flag(TIF_BLOCKSTEP); return dr6; } @@ -776,74 +833,20 @@ static __always_inline unsigned long debug_read_clear_dr6(void) * * May run on IST stack. */ -static void handle_debug(struct pt_regs *regs, unsigned long dr6, bool user) -{ - struct task_struct *tsk = current; - bool user_icebp; - int si_code; - - /* - * The SDM says "The processor clears the BTF flag when it - * generates a debug exception." Clear TIF_BLOCKSTEP to keep - * TIF_BLOCKSTEP in sync with the hardware BTF flag. - */ - clear_thread_flag(TIF_BLOCKSTEP); - - /* - * If DR6 is zero, no point in trying to handle it. The kernel is - * not using INT1. - */ - if (!user && !dr6) - return; +static bool notify_debug(struct pt_regs *regs, unsigned long *dr6) +{ /* - * If dr6 has no reason to give us about the origin of this trap, - * then it's very likely the result of an icebp/int01 trap. - * User wants a sigtrap for that. + * Notifiers will clear bits in @dr6 to indicate the event has been + * consumed - hw_breakpoint_handler(), single_stop_cont(). + * + * Notifiers will set bits in @virtual_dr6 to indicate the desire + * for signals - ptrace_triggered(), kgdb_hw_overflow_handler(). */ - user_icebp = user && !dr6; - - /* Store the virtualized DR6 value */ - tsk->thread.debugreg6 = dr6; - -#ifdef CONFIG_KPROBES - if (kprobe_debug_handler(regs)) { - return; - } -#endif - - if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, 0, - SIGTRAP) == NOTIFY_STOP) { - return; - } - - /* It's safe to allow irq's after DR6 has been saved */ - cond_local_irq_enable(regs); - - if (v8086_mode(regs)) { - handle_vm86_trap((struct kernel_vm86_regs *) regs, 0, - X86_TRAP_DB); - goto out; - } - - if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) { - /* - * Historical junk that used to handle SYSENTER single-stepping. - * This should be unreachable now. If we survive for a while - * without anyone hitting this warning, we'll turn this into - * an oops. 
- */ - tsk->thread.debugreg6 &= ~DR_STEP; - set_tsk_thread_flag(tsk, TIF_SINGLESTEP); - regs->flags &= ~X86_EFLAGS_TF; - } - - si_code = get_si_code(tsk->thread.debugreg6); - if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) - send_sigtrap(regs, 0, si_code); + if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP) + return true; -out: - cond_local_irq_disable(regs); + return false; } static __always_inline void exc_debug_kernel(struct pt_regs *regs, @@ -877,8 +880,32 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs, if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs)) dr6 &= ~DR_STEP; - handle_debug(regs, dr6, false); + if (kprobe_debug_handler(regs)) + goto out; + + /* + * The kernel doesn't use INT1 + */ + if (!dr6) + goto out; + + if (notify_debug(regs, &dr6)) + goto out; + /* + * The kernel doesn't use TF single-step outside of: + * + * - Kprobes, consumed through kprobe_debug_handler() + * - KGDB, consumed through notify_debug() + * + * So if we get here with DR_STEP set, something is wonky. + * + * A known way to trigger this is through QEMU's GDB stub, + * which leaks #DB into the guest and causes IST recursion. + */ + if (WARN_ON_ONCE(dr6 & DR_STEP)) + regs->flags &= ~X86_EFLAGS_TF; +out: instrumentation_end(); idtentry_exit_nmi(regs, irq_state); @@ -888,6 +915,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs, static __always_inline void exc_debug_user(struct pt_regs *regs, unsigned long dr6) { + bool icebp; + /* * If something gets miswired and we end up here for a kernel mode * #DB, we will malfunction. @@ -906,8 +935,32 @@ static __always_inline void exc_debug_user(struct pt_regs *regs, irqentry_enter_from_user_mode(regs); instrumentation_begin(); - handle_debug(regs, dr6, true); + /* + * If dr6 has no reason to give us about the origin of this trap, + * then it's very likely the result of an icebp/int01 trap. + * User wants a sigtrap for that. + */ + icebp = !dr6; + + if (notify_debug(regs, &dr6)) + goto out; + /* It's safe to allow irq's after DR6 has been saved */ + local_irq_enable(); + + if (v8086_mode(regs)) { + handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB); + goto out_irq; + } + + /* Add the virtual_dr6 bits for signals. 
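A hypothetical user-space program that exercises the icebp path added above: the one-byte 0xf1 opcode raises #DB with no DR6 cause bits, so dr6 is zero at this point, icebp is true and the task receives a SIGTRAP.

    #include <signal.h>
    #include <stdio.h>

    static void on_sigtrap(int sig)
    {
        /* Delivered via send_sigtrap() below because icebp was detected. */
    }

    int main(void)
    {
        signal(SIGTRAP, on_sigtrap);
        asm volatile(".byte 0xf1");   /* ICEBP / INT1 */
        puts("resumed after icebp");
        return 0;
    }
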
*/ + dr6 |= current->thread.virtual_dr6; + if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp) + send_sigtrap(regs, 0, get_si_code(dr6)); + +out_irq: + local_irq_disable(); +out: instrumentation_end(); irqentry_exit_to_user_mode(regs); } @@ -1074,6 +1127,9 @@ void __init trap_init(void) /* Init cpu_entry_area before IST entries are set up */ setup_cpu_entry_areas(); + /* Init GHCB memory pages when running as an SEV-ES guest */ + sev_es_init_vc_handling(); + idt_setup_traps(); /* diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 49d925043171..f70dffc2771f 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -54,7 +54,7 @@ struct clocksource *art_related_clocksource; struct cyc2ns { struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */ - seqcount_t seq; /* 32 + 4 = 36 */ + seqcount_latch_t seq; /* 32 + 4 = 36 */ }; /* fits one cacheline */ @@ -73,14 +73,14 @@ __always_inline void cyc2ns_read_begin(struct cyc2ns_data *data) preempt_disable_notrace(); do { - seq = this_cpu_read(cyc2ns.seq.sequence); + seq = this_cpu_read(cyc2ns.seq.seqcount.sequence); idx = seq & 1; data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset); data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul); data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift); - } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence))); + } while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence))); } __always_inline void cyc2ns_read_end(void) @@ -186,7 +186,7 @@ static void __init cyc2ns_init_boot_cpu(void) { struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns); - seqcount_init(&c2n->seq); + seqcount_latch_init(&c2n->seq); __set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc()); } @@ -203,7 +203,7 @@ static void __init cyc2ns_init_secondary_cpus(void) for_each_possible_cpu(cpu) { if (cpu != this_cpu) { - seqcount_init(&c2n->seq); + seqcount_latch_init(&c2n->seq); c2n = per_cpu_ptr(&cyc2ns, cpu); c2n->data[0] = data[0]; c2n->data[1] = data[1]; diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index 8d5cbe1bbb3b..f6225bf22c02 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -45,11 +45,12 @@ * value that, lies close to the top of the kernel memory. The limit for the GDT * and the IDT are set to zero. * - * Given that SLDT and STR are not commonly used in programs that run on WineHQ - * or DOSEMU2, they are not emulated. - * - * The instruction smsw is emulated to return the value that the register CR0 + * The instruction SMSW is emulated to return the value that the register CR0 * has at boot time as set in the head_32. + * SLDT and STR are emulated to return the values that the kernel programmatically + * assigns: + * - SLDT returns (GDT_ENTRY_LDT * 8) if an LDT has been set, 0 if not. + * - STR returns (GDT_ENTRY_TSS * 8). * * Emulation is provided for both 32-bit and 64-bit processes. 
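A hypothetical user-space probe for the SLDT/STR spoofing described above; on a UMIP-enabled CPU both instructions trap to the kernel and the process only sees the emulated selectors:

    #include <stdio.h>

    int main(void)
    {
        unsigned short tr = 0, ldtr = 0;

        asm volatile("str %0"  : "=r"(tr));    /* expect GDT_ENTRY_TSS * 8        */
        asm volatile("sldt %0" : "=r"(ldtr));  /* expect GDT_ENTRY_LDT * 8, or 0  */

        printf("TR=%#x LDTR=%#x\n", tr, ldtr);
        return 0;
    }
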
* @@ -244,16 +245,34 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, *data_size += UMIP_GDT_IDT_LIMIT_SIZE; memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE); - } else if (umip_inst == UMIP_INST_SMSW) { - unsigned long dummy_value = CR0_STATE; + } else if (umip_inst == UMIP_INST_SMSW || umip_inst == UMIP_INST_SLDT || + umip_inst == UMIP_INST_STR) { + unsigned long dummy_value; + + if (umip_inst == UMIP_INST_SMSW) { + dummy_value = CR0_STATE; + } else if (umip_inst == UMIP_INST_STR) { + dummy_value = GDT_ENTRY_TSS * 8; + } else if (umip_inst == UMIP_INST_SLDT) { +#ifdef CONFIG_MODIFY_LDT_SYSCALL + down_read(¤t->mm->context.ldt_usr_sem); + if (current->mm->context.ldt) + dummy_value = GDT_ENTRY_LDT * 8; + else + dummy_value = 0; + up_read(¤t->mm->context.ldt_usr_sem); +#else + dummy_value = 0; +#endif + } /* - * Even though the CR0 register has 4 bytes, the number + * For these 3 instructions, the number * of bytes to be copied in the result buffer is determined * by whether the operand is a register or a memory location. * If operand is a register, return as many bytes as the operand * size. If operand is memory, return only the two least - * siginificant bytes of CR0. + * siginificant bytes. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) *data_size = insn->opnd_bytes; @@ -261,7 +280,6 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, *data_size = 2; memcpy(data, &dummy_value, *data_size); - /* STR and SLDT are not emulated */ } else { return -EINVAL; } @@ -317,63 +335,28 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) */ bool fixup_umip_exception(struct pt_regs *regs) { - int not_copied, nr_copied, reg_offset, dummy_data_size, umip_inst; - unsigned long seg_base = 0, *reg_addr; + int nr_copied, reg_offset, dummy_data_size, umip_inst; /* 10 bytes is the maximum size of the result of UMIP instructions */ unsigned char dummy_data[10] = { 0 }; unsigned char buf[MAX_INSN_SIZE]; + unsigned long *reg_addr; void __user *uaddr; struct insn insn; - int seg_defs; if (!regs) return false; - /* - * If not in user-space long mode, a custom code segment could be in - * use. This is true in protected mode (if the process defined a local - * descriptor table), or virtual-8086 mode. In most of the cases - * seg_base will be zero as in USER_CS. - */ - if (!user_64bit_mode(regs)) - seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS); - - if (seg_base == -1L) - return false; - - not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip), - sizeof(buf)); - nr_copied = sizeof(buf) - not_copied; + nr_copied = insn_fetch_from_user(regs, buf); /* - * The copy_from_user above could have failed if user code is protected - * by a memory protection key. Give up on emulation in such a case. - * Should we issue a page fault? + * The insn_fetch_from_user above could have failed if user code + * is protected by a memory protection key. Give up on emulation + * in such a case. Should we issue a page fault? */ if (!nr_copied) return false; - insn_init(&insn, buf, nr_copied, user_64bit_mode(regs)); - - /* - * Override the default operand and address sizes with what is specified - * in the code segment descriptor. The instruction decoder only sets - * the address size it to either 4 or 8 address bytes and does nothing - * for the operand bytes. This OK for most of the cases, but we could - * have special cases where, for instance, a 16-bit code segment - * descriptor is used. 
- * If there is an address override prefix, the instruction decoder - * correctly updates these values, even for 16-bit defaults. - */ - seg_defs = insn_get_code_seg_params(regs); - if (seg_defs == -EINVAL) - return false; - - insn.addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs); - insn.opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs); - - insn_get_length(&insn); - if (nr_copied < insn.length) + if (!insn_decode(&insn, regs, buf, nr_copied)) return false; umip_inst = identify_insn(&insn); @@ -383,10 +366,6 @@ bool fixup_umip_exception(struct pt_regs *regs) umip_pr_warn(regs, "%s instruction cannot be used by applications.\n", umip_insns[umip_inst]); - /* Do not emulate (spoof) SLDT or STR. */ - if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT) - return false; - umip_pr_warn(regs, "For now, expensive software emulation returns the result.\n"); if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size, diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index ec88bbe08a32..6a339ce328e0 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/objtool.h> #include <linux/module.h> #include <linux/sort.h> #include <asm/ptrace.h> @@ -127,12 +128,12 @@ static struct orc_entry null_orc_entry = { .sp_offset = sizeof(long), .sp_reg = ORC_REG_SP, .bp_reg = ORC_REG_UNDEFINED, - .type = ORC_TYPE_CALL + .type = UNWIND_HINT_TYPE_CALL }; /* Fake frame pointer entry -- used as a fallback for generated code */ static struct orc_entry orc_fp_entry = { - .type = ORC_TYPE_CALL, + .type = UNWIND_HINT_TYPE_CALL, .sp_reg = ORC_REG_BP, .sp_offset = 16, .bp_reg = ORC_REG_PREV_SP, @@ -531,7 +532,7 @@ bool unwind_next_frame(struct unwind_state *state) /* Find IP, SP and possibly regs: */ switch (orc->type) { - case ORC_TYPE_CALL: + case UNWIND_HINT_TYPE_CALL: ip_p = sp - sizeof(long); if (!deref_stack_reg(state, ip_p, &state->ip)) @@ -546,7 +547,7 @@ bool unwind_next_frame(struct unwind_state *state) state->signal = false; break; - case ORC_TYPE_REGS: + case UNWIND_HINT_TYPE_REGS: if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { orc_warn_current("can't access registers at %pB\n", (void *)orig_ip); @@ -559,7 +560,7 @@ bool unwind_next_frame(struct unwind_state *state) state->signal = true; break; - case ORC_TYPE_REGS_IRET: + case UNWIND_HINT_TYPE_REGS_PARTIAL: if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { orc_warn_current("can't access iret registers at %pB\n", (void *)orig_ip); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 9a03e5b23135..bf9e0adb5b7e 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -136,6 +136,7 @@ SECTIONS ENTRY_TEXT ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT + STATIC_CALL_TEXT *(.fixup) *(.gnu.warning) @@ -411,10 +412,47 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + ELF_DETAILS DISCARDS -} + /* + * Make sure that the .got.plt is either completely empty or it + * contains only the lazy dispatch entries. + */ + .got.plt (INFO) : { *(.got.plt) } + ASSERT(SIZEOF(.got.plt) == 0 || +#ifdef CONFIG_X86_64 + SIZEOF(.got.plt) == 0x18, +#else + SIZEOF(.got.plt) == 0xc, +#endif + "Unexpected GOT/PLT entries detected!") + + /* + * Sections that should stay zero sized, which is safer to + * explicitly check instead of blindly discarding. 
+ */ + .got : { + *(.got) *(.igot.*) + } + ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!") + + .plt : { + *(.plt) *(.plt.*) *(.iplt) + } + ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") + + .rel.dyn : { + *(.rel.*) *(.rel_*) + } + ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!") + + .rela.dyn : { + *(.rela.*) *(.rela_*) + } + ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") +} #ifdef CONFIG_X86_32 /* diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 123f1c1f1788..a3038d8deb6a 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -24,6 +24,7 @@ #include <asm/tsc.h> #include <asm/iommu.h> #include <asm/mach_traps.h> +#include <asm/irqdomain.h> void x86_init_noop(void) { } void __init x86_init_uint_noop(unsigned int unused) { } @@ -67,11 +68,7 @@ struct x86_init_ops x86_init __initdata = { }, .mpparse = { - .mpc_record = x86_init_uint_noop, .setup_ioapic_ids = x86_init_noop, - .mpc_apic_id = default_mpc_apic_id, - .smp_read_mpc_oem = default_smp_read_mpc_oem, - .mpc_oem_bus_info = default_mpc_oem_bus_info, .find_smp_config = default_find_smp_config, .get_smp_config = default_get_smp_config, }, @@ -80,7 +77,8 @@ struct x86_init_ops x86_init __initdata = { .pre_vector_init = init_ISA_irqs, .intr_init = native_init_IRQ, .intr_mode_select = apic_intr_mode_select, - .intr_mode_init = apic_intr_mode_init + .intr_mode_init = apic_intr_mode_init, + .create_pci_msi_domain = native_create_pci_msi_domain, }, .oem = { @@ -148,28 +146,10 @@ EXPORT_SYMBOL_GPL(x86_platform); #if defined(CONFIG_PCI_MSI) struct x86_msi_ops x86_msi __ro_after_init = { - .setup_msi_irqs = native_setup_msi_irqs, - .teardown_msi_irq = native_teardown_msi_irq, - .teardown_msi_irqs = default_teardown_msi_irqs, .restore_msi_irqs = default_restore_msi_irqs, }; /* MSI arch specific hooks */ -int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - return x86_msi.setup_msi_irqs(dev, nvec, type); -} - -void arch_teardown_msi_irqs(struct pci_dev *dev) -{ - x86_msi.teardown_msi_irqs(dev); -} - -void arch_teardown_msi_irq(unsigned int irq) -{ - x86_msi.teardown_msi_irq(irq); -} - void arch_restore_msi_irqs(struct pci_dev *dev) { x86_msi.restore_msi_irqs(dev); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 3fd6eec202d7..7456f9ad424b 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -371,7 +371,7 @@ void kvm_set_cpu_caps(void) F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) | - F(SERIALIZE) + F(SERIALIZE) | F(TSXLDTRK) ); /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. 
*/ diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 1d330564eed8..8c1e8334eff0 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -2000,20 +2000,20 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, break; case HYPERV_CPUID_FEATURES: - ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE; + ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; - ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE; + ent->eax |= HV_MSR_SYNIC_AVAILABLE; ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; - ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE; - ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE; - ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; - ent->eax |= HV_X64_MSR_RESET_AVAILABLE; + ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; + ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; + ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; + ent->eax |= HV_MSR_RESET_AVAILABLE; ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; - ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; - ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; + ent->eax |= HV_ACCESS_FREQUENCY_MSRS; + ent->eax |= HV_ACCESS_REENLIGHTENMENT; - ent->ebx |= HV_X64_POST_MESSAGES; - ent->ebx |= HV_X64_SIGNAL_EVENTS; + ent->ebx |= HV_POST_MESSAGES; + ent->ebx |= HV_SIGNAL_EVENTS; ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index a84a141a2ad2..8443a675715b 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -229,7 +229,8 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, return; idx = srcu_read_lock(&head->track_srcu); - hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) + hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, + srcu_read_lock_held(&head->track_srcu)) if (n->track_write) n->track_write(vcpu, gpa, new, bytes, n); srcu_read_unlock(&head->track_srcu, idx); @@ -254,7 +255,8 @@ void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot) return; idx = srcu_read_lock(&head->track_srcu); - hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) + hlist_for_each_entry_srcu(n, &head->track_notifier_list, node, + srcu_read_lock_held(&head->track_srcu)) if (n->track_flush_slot) n->track_flush_slot(kvm, slot, n); srcu_read_unlock(&head->track_srcu, idx); diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index e90bc436f584..598a769f1961 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1062,10 +1062,14 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, struct vmcb *hsave = svm->nested.hsave; struct vmcb __user *user_vmcb = (struct vmcb __user *) &user_kvm_nested_state->data.svm[0]; - struct vmcb_control_area ctl; - struct vmcb_save_area save; + struct vmcb_control_area *ctl; + struct vmcb_save_area *save; + int ret; u32 cr0; + BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) > + KVM_STATE_NESTED_SVM_VMCB_SIZE); + if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM) return -EINVAL; @@ -1097,13 +1101,22 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE) return -EINVAL; - if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl))) - return -EFAULT; - if (copy_from_user(&save, &user_vmcb->save, sizeof(save))) - return -EFAULT; - if (!nested_vmcb_check_controls(&ctl)) - return -EINVAL; + ret = -ENOMEM; + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); 
+ save = kzalloc(sizeof(*save), GFP_KERNEL); + if (!ctl || !save) + goto out_free; + + ret = -EFAULT; + if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl))) + goto out_free; + if (copy_from_user(save, &user_vmcb->save, sizeof(*save))) + goto out_free; + + ret = -EINVAL; + if (!nested_vmcb_check_controls(ctl)) + goto out_free; /* * Processor state contains L2 state. Check that it is @@ -1111,15 +1124,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, */ cr0 = kvm_read_cr0(vcpu); if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW)) - return -EINVAL; + goto out_free; /* * Validate host state saved from before VMRUN (see * nested_svm_check_permissions). * TODO: validate reserved bits for all saved state. */ - if (!(save.cr0 & X86_CR0_PG)) - return -EINVAL; + if (!(save->cr0 & X86_CR0_PG)) + goto out_free; /* * All checks done, we can enter guest mode. L1 control fields @@ -1128,10 +1141,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, * contains saved L1 state. */ copy_vmcb_control_area(&hsave->control, &svm->vmcb->control); - hsave->save = save; + hsave->save = *save; svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa; - load_nested_vmcb_control(svm, &ctl); + load_nested_vmcb_control(svm, ctl); nested_prepare_vmcb_control(svm); if (!nested_svm_vmrun_msrpm(svm)) @@ -1139,7 +1152,13 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, out_set_gif: svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); - return 0; + + ret = 0; +out_free: + kfree(save); + kfree(ctl); + + return ret; } struct kvm_x86_nested_ops svm_nested_ops = { diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7bf7bf734979..3c9a45efdd4d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages) uint8_t *page_virtual; unsigned long i; - if (npages == 0 || pages == NULL) + if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 || + pages == NULL) return; for (i = 0; i < npages; i++) { diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 91ea74ae71b8..9709c98d0d6c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -19,7 +19,7 @@ #include <linux/trace_events.h> #include <linux/slab.h> #include <linux/hashtable.h> -#include <linux/frame.h> +#include <linux/objtool.h> #include <linux/psp-sev.h> #include <linux/file.h> #include <linux/pagemap.h> @@ -4176,6 +4176,8 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { static int __init svm_init(void) { + __unused_size_checks(); + return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm), THIS_MODULE); } diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 1bb6b31eb646..19e2265956ba 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/frame.h> +#include <linux/objtool.h> #include <linux/percpu.h> #include <asm/debugreg.h> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 96979c09ebd1..f0a9954c49db 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -13,7 +13,6 @@ * Yaniv Kamay <yaniv@qumranet.com> */ -#include <linux/frame.h> #include <linux/highmem.h> #include <linux/hrtimer.h> #include <linux/kernel.h> @@ -22,6 +21,7 @@ #include <linux/moduleparam.h> #include <linux/mod_devicetable.h> #include <linux/mm.h> +#include <linux/objtool.h> #include <linux/sched.h> #include <linux/sched/smt.h> #include <linux/slab.h> diff --git 
a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index aa067859a70b..bad4dee4f0e4 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o lib-y := delay.o misc.o cmdline.o cpu.o lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += memcpy_$(BITS).o +lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index d1d768912368..4304320e51f4 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -253,28 +253,17 @@ EXPORT_SYMBOL(csum_partial) /* unsigned int csum_partial_copy_generic (const char *src, char *dst, - int len, int sum, int *src_err_ptr, int *dst_err_ptr) + int len) */ /* * Copy from ds while checksumming, otherwise like csum_partial - * - * The macros SRC and DST specify the type of access for the instruction. - * thus we can call a custom exception handler for all access types. - * - * FIXME: could someone double-check whether I haven't mixed up some SRC and - * DST definitions? It's damn hard to trigger all cases. I hope I got - * them all but there's no guarantee. */ -#define SRC(y...) \ +#define EXC(y...) \ 9999: y; \ _ASM_EXTABLE_UA(9999b, 6001f) -#define DST(y...) \ - 9999: y; \ - _ASM_EXTABLE_UA(9999b, 6002f) - #ifndef CONFIG_X86_USE_PPRO_CHECKSUM #define ARGBASE 16 @@ -285,20 +274,20 @@ SYM_FUNC_START(csum_partial_copy_generic) pushl %edi pushl %esi pushl %ebx - movl ARGBASE+16(%esp),%eax # sum movl ARGBASE+12(%esp),%ecx # len movl ARGBASE+4(%esp),%esi # src movl ARGBASE+8(%esp),%edi # dst + movl $-1, %eax # sum testl $2, %edi # Check alignment. jz 2f # Jump if alignment is ok. subl $2, %ecx # Alignment uses up two bytes. jae 1f # Jump if we had at least two bytes. addl $2, %ecx # ecx was < 2. Deal with it. jmp 4f -SRC(1: movw (%esi), %bx ) +EXC(1: movw (%esi), %bx ) addl $2, %esi -DST( movw %bx, (%edi) ) +EXC( movw %bx, (%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax @@ -306,34 +295,34 @@ DST( movw %bx, (%edi) ) movl %ecx, FP(%esp) shrl $5, %ecx jz 2f - testl %esi, %esi -SRC(1: movl (%esi), %ebx ) -SRC( movl 4(%esi), %edx ) + testl %esi, %esi # what's wrong with clc? 
+EXC(1: movl (%esi), %ebx ) +EXC( movl 4(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +EXC( movl %ebx, (%edi) ) adcl %edx, %eax -DST( movl %edx, 4(%edi) ) +EXC( movl %edx, 4(%edi) ) -SRC( movl 8(%esi), %ebx ) -SRC( movl 12(%esi), %edx ) +EXC( movl 8(%esi), %ebx ) +EXC( movl 12(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 8(%edi) ) +EXC( movl %ebx, 8(%edi) ) adcl %edx, %eax -DST( movl %edx, 12(%edi) ) +EXC( movl %edx, 12(%edi) ) -SRC( movl 16(%esi), %ebx ) -SRC( movl 20(%esi), %edx ) +EXC( movl 16(%esi), %ebx ) +EXC( movl 20(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 16(%edi) ) +EXC( movl %ebx, 16(%edi) ) adcl %edx, %eax -DST( movl %edx, 20(%edi) ) +EXC( movl %edx, 20(%edi) ) -SRC( movl 24(%esi), %ebx ) -SRC( movl 28(%esi), %edx ) +EXC( movl 24(%esi), %ebx ) +EXC( movl 28(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 24(%edi) ) +EXC( movl %ebx, 24(%edi) ) adcl %edx, %eax -DST( movl %edx, 28(%edi) ) +EXC( movl %edx, 28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi @@ -345,9 +334,9 @@ DST( movl %edx, 28(%edi) ) andl $0x1c, %edx je 4f shrl $2, %edx # This clears CF -SRC(3: movl (%esi), %ebx ) +EXC(3: movl (%esi), %ebx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +EXC( movl %ebx, (%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx @@ -357,39 +346,24 @@ DST( movl %ebx, (%edi) ) jz 7f cmpl $2, %ecx jb 5f -SRC( movw (%esi), %cx ) +EXC( movw (%esi), %cx ) leal 2(%esi), %esi -DST( movw %cx, (%edi) ) +EXC( movw %cx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx -SRC(5: movb (%esi), %cl ) -DST( movb %cl, (%edi) ) +EXC(5: movb (%esi), %cl ) +EXC( movb %cl, (%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: -5000: # Exception handler: .section .fixup, "ax" 6001: - movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) - - # zero the complete destination - computing the rest - # is too much work - movl ARGBASE+8(%esp), %edi # dst - movl ARGBASE+12(%esp), %ecx # len - xorl %eax,%eax - rep ; stosb - - jmp 5000b - -6002: - movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT,(%ebx) - jmp 5000b + xorl %eax, %eax + jmp 7b .previous @@ -405,14 +379,14 @@ SYM_FUNC_END(csum_partial_copy_generic) /* Version for PentiumII/PPro */ #define ROUND1(x) \ - SRC(movl x(%esi), %ebx ) ; \ + EXC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + EXC(movl %ebx, x(%edi) ) ; #define ROUND(x) \ - SRC(movl x(%esi), %ebx ) ; \ + EXC(movl x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + EXC(movl %ebx, x(%edi) ) ; #define ARGBASE 12 @@ -423,7 +397,7 @@ SYM_FUNC_START(csum_partial_copy_generic) movl ARGBASE+4(%esp),%esi #src movl ARGBASE+8(%esp),%edi #dst movl ARGBASE+12(%esp),%ecx #len - movl ARGBASE+16(%esp),%eax #sum + movl $-1, %eax #sum # movl %ecx, %edx movl %ecx, %ebx movl %esi, %edx @@ -439,7 +413,7 @@ SYM_FUNC_START(csum_partial_copy_generic) JMP_NOSPEC ebx 1: addl $64,%esi addl $64,%edi - SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) + EXC(movb -32(%edx),%bl) ; EXC(movb (%edx),%bl) ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) @@ -453,29 +427,20 @@ SYM_FUNC_START(csum_partial_copy_generic) jz 7f cmpl $2, %edx jb 5f -SRC( movw (%esi), %dx ) +EXC( movw (%esi), %dx ) leal 2(%esi), %esi -DST( movw %dx, (%edi) ) +EXC( movw %dx, (%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: -SRC( movb (%esi), %dl ) -DST( movb %dl, (%edi) ) +EXC( movb (%esi), %dl ) +EXC( movb %dl, (%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: .section .fixup, "ax" -6001: movl ARGBASE+20(%esp), 
%ebx # src_err_ptr - movl $-EFAULT, (%ebx) - # zero the complete destination (computing the rest is too much work) - movl ARGBASE+8(%esp),%edi # dst - movl ARGBASE+12(%esp),%ecx # len - xorl %eax,%eax - rep; stosb - jmp 7b -6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT, (%ebx) +6001: xorl %eax, %eax jmp 7b .previous diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c new file mode 100644 index 000000000000..c13e8c9ee926 --- /dev/null +++ b/arch/x86/lib/copy_mc.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */ + +#include <linux/jump_label.h> +#include <linux/uaccess.h> +#include <linux/export.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <asm/mce.h> + +#ifdef CONFIG_X86_MCE +/* + * See COPY_MC_TEST for self-test of the copy_mc_fragile() + * implementation. + */ +static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key); + +void enable_copy_mc_fragile(void) +{ + static_branch_inc(©_mc_fragile_key); +} +#define copy_mc_fragile_enabled (static_branch_unlikely(©_mc_fragile_key)) + +/* + * Similar to copy_user_handle_tail, probe for the write fault point, or + * source exception point. + */ +__visible notrace unsigned long +copy_mc_fragile_handle_tail(char *to, char *from, unsigned len) +{ + for (; len; --len, to++, from++) + if (copy_mc_fragile(to, from, 1)) + break; + return len; +} +#else +/* + * No point in doing careful copying, or consulting a static key when + * there is no #MC handler in the CONFIG_X86_MCE=n case. + */ +void enable_copy_mc_fragile(void) +{ +} +#define copy_mc_fragile_enabled (0) +#endif + +unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len); + +/** + * copy_mc_to_kernel - memory copy that handles source exceptions + * + * @dst: destination address + * @src: source address + * @len: number of bytes to copy + * + * Call into the 'fragile' version on systems that benefit from avoiding + * corner case poison consumption scenarios, For example, accessing + * poison across 2 cachelines with a single instruction. Almost all + * other uses case can use copy_mc_enhanced_fast_string() for a fast + * recoverable copy, or fallback to plain memcpy. + * + * Return 0 for success, or number of bytes not copied if there was an + * exception. + */ +unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len) +{ + if (copy_mc_fragile_enabled) + return copy_mc_fragile(dst, src, len); + if (static_cpu_has(X86_FEATURE_ERMS)) + return copy_mc_enhanced_fast_string(dst, src, len); + memcpy(dst, src, len); + return 0; +} +EXPORT_SYMBOL_GPL(copy_mc_to_kernel); + +unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len) +{ + unsigned long ret; + + if (copy_mc_fragile_enabled) { + __uaccess_begin(); + ret = copy_mc_fragile(dst, src, len); + __uaccess_end(); + return ret; + } + + if (static_cpu_has(X86_FEATURE_ERMS)) { + __uaccess_begin(); + ret = copy_mc_enhanced_fast_string(dst, src, len); + __uaccess_end(); + return ret; + } + + return copy_user_generic(dst, src, len); +} diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S new file mode 100644 index 000000000000..892d8915f609 --- /dev/null +++ b/arch/x86/lib/copy_mc_64.S @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. 
*/ + +#include <linux/linkage.h> +#include <asm/copy_mc_test.h> +#include <asm/export.h> +#include <asm/asm.h> + +#ifndef CONFIG_UML + +#ifdef CONFIG_X86_MCE +COPY_MC_TEST_CTL + +/* + * copy_mc_fragile - copy memory with indication if an exception / fault happened + * + * The 'fragile' version is opted into by platform quirks and takes + * pains to avoid unrecoverable corner cases like 'fast-string' + * instruction sequences, and consuming poison across a cacheline + * boundary. The non-fragile version is equivalent to memcpy() + * regardless of CPU machine-check-recovery capability. + */ +SYM_FUNC_START(copy_mc_fragile) + cmpl $8, %edx + /* Less than 8 bytes? Go to byte copy loop */ + jb .L_no_whole_words + + /* Check for bad alignment of source */ + testl $7, %esi + /* Already aligned */ + jz .L_8byte_aligned + + /* Copy one byte at a time until source is 8-byte aligned */ + movl %esi, %ecx + andl $7, %ecx + subl $8, %ecx + negl %ecx + subl %ecx, %edx +.L_read_leading_bytes: + movb (%rsi), %al + COPY_MC_TEST_SRC %rsi 1 .E_leading_bytes + COPY_MC_TEST_DST %rdi 1 .E_leading_bytes +.L_write_leading_bytes: + movb %al, (%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz .L_read_leading_bytes + +.L_8byte_aligned: + movl %edx, %ecx + andl $7, %edx + shrl $3, %ecx + jz .L_no_whole_words + +.L_read_words: + movq (%rsi), %r8 + COPY_MC_TEST_SRC %rsi 8 .E_read_words + COPY_MC_TEST_DST %rdi 8 .E_write_words +.L_write_words: + movq %r8, (%rdi) + addq $8, %rsi + addq $8, %rdi + decl %ecx + jnz .L_read_words + + /* Any trailing bytes? */ +.L_no_whole_words: + andl %edx, %edx + jz .L_done_memcpy_trap + + /* Copy trailing bytes */ + movl %edx, %ecx +.L_read_trailing_bytes: + movb (%rsi), %al + COPY_MC_TEST_SRC %rsi 1 .E_trailing_bytes + COPY_MC_TEST_DST %rdi 1 .E_trailing_bytes +.L_write_trailing_bytes: + movb %al, (%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz .L_read_trailing_bytes + + /* Copy successful. Return zero */ +.L_done_memcpy_trap: + xorl %eax, %eax +.L_done: + ret +SYM_FUNC_END(copy_mc_fragile) +EXPORT_SYMBOL_GPL(copy_mc_fragile) + + .section .fixup, "ax" + /* + * Return number of bytes not copied for any failure. Note that + * there is no "tail" handling since the source buffer is 8-byte + * aligned and poison is cacheline aligned. + */ +.E_read_words: + shll $3, %ecx +.E_leading_bytes: + addl %edx, %ecx +.E_trailing_bytes: + mov %ecx, %eax + jmp .L_done + + /* + * For write fault handling, given the destination is unaligned, + * we handle faults on multi-byte writes with a byte-by-byte + * copy up to the write-protected page. + */ +.E_write_words: + shll $3, %ecx + addl %edx, %ecx + movl %ecx, %edx + jmp copy_mc_fragile_handle_tail + + .previous + + _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes) + _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words) + _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes) + _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes) + _ASM_EXTABLE(.L_write_words, .E_write_words) + _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes) +#endif /* CONFIG_X86_MCE */ + +/* + * copy_mc_enhanced_fast_string - memory copy with exception handling + * + * Fast string copy + fault / exception handling. If the CPU does + * support machine check exception recovery, but does not support + * recovering from fast-string exceptions then this CPU needs to be + * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any + * machine check recovery support this version should be no slower than + * standard memcpy. 
+ */ +SYM_FUNC_START(copy_mc_enhanced_fast_string) + movq %rdi, %rax + movq %rdx, %rcx +.L_copy: + rep movsb + /* Copy successful. Return zero */ + xorl %eax, %eax + ret +SYM_FUNC_END(copy_mc_enhanced_fast_string) + + .section .fixup, "ax" +.E_copy: + /* + * On fault %rcx is updated such that the copy instruction could + * optionally be restarted at the fault position, i.e. it + * contains 'bytes remaining'. A non-zero return indicates error + * to copy_mc_generic() users, or indicate short transfers to + * user-copy routines. + */ + movq %rcx, %rax + ret + + .previous + + _ASM_EXTABLE_FAULT(.L_copy, .E_copy) +#endif /* !CONFIG_UML */ diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 816f128a6d52..77b9b2a3b5c8 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -15,6 +15,7 @@ #include <asm/asm.h> #include <asm/smap.h> #include <asm/export.h> +#include <asm/trapnr.h> .macro ALIGN_DESTINATION /* check for bad alignment of destination */ @@ -36,8 +37,8 @@ jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(100b, 103b) - _ASM_EXTABLE_UA(101b, 103b) + _ASM_EXTABLE_CPY(100b, 103b) + _ASM_EXTABLE_CPY(101b, 103b) .endm /* @@ -116,26 +117,26 @@ SYM_FUNC_START(copy_user_generic_unrolled) 60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */ .previous - _ASM_EXTABLE_UA(1b, 30b) - _ASM_EXTABLE_UA(2b, 30b) - _ASM_EXTABLE_UA(3b, 30b) - _ASM_EXTABLE_UA(4b, 30b) - _ASM_EXTABLE_UA(5b, 30b) - _ASM_EXTABLE_UA(6b, 30b) - _ASM_EXTABLE_UA(7b, 30b) - _ASM_EXTABLE_UA(8b, 30b) - _ASM_EXTABLE_UA(9b, 30b) - _ASM_EXTABLE_UA(10b, 30b) - _ASM_EXTABLE_UA(11b, 30b) - _ASM_EXTABLE_UA(12b, 30b) - _ASM_EXTABLE_UA(13b, 30b) - _ASM_EXTABLE_UA(14b, 30b) - _ASM_EXTABLE_UA(15b, 30b) - _ASM_EXTABLE_UA(16b, 30b) - _ASM_EXTABLE_UA(18b, 40b) - _ASM_EXTABLE_UA(19b, 40b) - _ASM_EXTABLE_UA(21b, 50b) - _ASM_EXTABLE_UA(22b, 50b) + _ASM_EXTABLE_CPY(1b, 30b) + _ASM_EXTABLE_CPY(2b, 30b) + _ASM_EXTABLE_CPY(3b, 30b) + _ASM_EXTABLE_CPY(4b, 30b) + _ASM_EXTABLE_CPY(5b, 30b) + _ASM_EXTABLE_CPY(6b, 30b) + _ASM_EXTABLE_CPY(7b, 30b) + _ASM_EXTABLE_CPY(8b, 30b) + _ASM_EXTABLE_CPY(9b, 30b) + _ASM_EXTABLE_CPY(10b, 30b) + _ASM_EXTABLE_CPY(11b, 30b) + _ASM_EXTABLE_CPY(12b, 30b) + _ASM_EXTABLE_CPY(13b, 30b) + _ASM_EXTABLE_CPY(14b, 30b) + _ASM_EXTABLE_CPY(15b, 30b) + _ASM_EXTABLE_CPY(16b, 30b) + _ASM_EXTABLE_CPY(18b, 40b) + _ASM_EXTABLE_CPY(19b, 40b) + _ASM_EXTABLE_CPY(21b, 50b) + _ASM_EXTABLE_CPY(22b, 50b) SYM_FUNC_END(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) @@ -180,8 +181,8 @@ SYM_FUNC_START(copy_user_generic_string) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, 11b) - _ASM_EXTABLE_UA(3b, 12b) + _ASM_EXTABLE_CPY(1b, 11b) + _ASM_EXTABLE_CPY(3b, 12b) SYM_FUNC_END(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) @@ -213,7 +214,7 @@ SYM_FUNC_START(copy_user_enhanced_fast_string) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, 12b) + _ASM_EXTABLE_CPY(1b, 12b) SYM_FUNC_END(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) @@ -221,6 +222,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) * Try to copy last bytes and clear the rest if needed. * Since protection fault in copy_from/to_user is not a normal situation, * it is not necessary to optimize tail handling. 
+ * Don't try to copy the tail if machine check happened * * Input: * rdi destination @@ -232,12 +234,25 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) */ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) movl %edx,%ecx + cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */ + je 3f 1: rep movsb 2: mov %ecx,%eax ASM_CLAC ret - _ASM_EXTABLE_UA(1b, 2b) + /* + * Return zero to pretend that this copy succeeded. This + * is counter-intuitive, but needed to prevent the code + * in lib/iov_iter.c from retrying and running back into + * the poison cache line again. The machine check handler + * will ensure that a SIGBUS is sent to the task. + */ +3: xorl %eax,%eax + ASM_CLAC + ret + + _ASM_EXTABLE_CPY(1b, 2b) SYM_CODE_END(.Lcopy_user_handle_tail) /* @@ -366,27 +381,27 @@ SYM_FUNC_START(__copy_user_nocache) jmp .Lcopy_user_handle_tail .previous - _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy) - _ASM_EXTABLE_UA(20b, .L_fixup_8b_copy) - _ASM_EXTABLE_UA(21b, .L_fixup_8b_copy) - _ASM_EXTABLE_UA(30b, .L_fixup_4b_copy) - _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy) - _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy) - _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy) + _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy) + _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy) + _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy) + _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy) + _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy) + _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy) + _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy) SYM_FUNC_END(__copy_user_nocache) EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index 3394a8ff7fd0..1fbd8ee9642d 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -18,9 +18,6 @@ * rdi source * rsi destination * edx len (32bit) - * ecx sum (32bit) - * r8 src_err_ptr (int) - * r9 dst_err_ptr (int) * * Output * eax 64bit sum. undefined in case of exception. @@ -31,44 +28,32 @@ .macro source 10: - _ASM_EXTABLE_UA(10b, .Lbad_source) + _ASM_EXTABLE_UA(10b, .Lfault) .endm .macro dest 20: - _ASM_EXTABLE_UA(20b, .Lbad_dest) + _ASM_EXTABLE_UA(20b, .Lfault) .endm - /* - * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a - * potentially unmapped kernel address. 
- */ - .macro ignore L=.Lignore -30: - _ASM_EXTABLE(30b, \L) - .endm - - SYM_FUNC_START(csum_partial_copy_generic) - cmpl $3*64, %edx - jle .Lignore - -.Lignore: - subq $7*8, %rsp - movq %rbx, 2*8(%rsp) - movq %r12, 3*8(%rsp) - movq %r14, 4*8(%rsp) - movq %r13, 5*8(%rsp) - movq %r15, 6*8(%rsp) + subq $5*8, %rsp + movq %rbx, 0*8(%rsp) + movq %r12, 1*8(%rsp) + movq %r14, 2*8(%rsp) + movq %r13, 3*8(%rsp) + movq %r15, 4*8(%rsp) - movq %r8, (%rsp) - movq %r9, 1*8(%rsp) - - movl %ecx, %eax + movl $-1, %eax + xorl %r9d, %r9d movl %edx, %ecx + cmpl $8, %ecx + jb .Lshort - xorl %r9d, %r9d - movq %rcx, %r12 + testb $7, %sil + jne .Lunaligned +.Laligned: + movl %ecx, %r12d shrq $6, %r12 jz .Lhandle_tail /* < 64 */ @@ -99,7 +84,12 @@ SYM_FUNC_START(csum_partial_copy_generic) source movq 56(%rdi), %r13 - ignore 2f +30: + /* + * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a + * potentially unmapped kernel address. + */ + _ASM_EXTABLE(30b, 2f) prefetcht0 5*64(%rdi) 2: adcq %rbx, %rax @@ -131,8 +121,6 @@ SYM_FUNC_START(csum_partial_copy_generic) dest movq %r13, 56(%rsi) -3: - leaq 64(%rdi), %rdi leaq 64(%rsi), %rsi @@ -142,8 +130,8 @@ SYM_FUNC_START(csum_partial_copy_generic) /* do last up to 56 bytes */ .Lhandle_tail: - /* ecx: count */ - movl %ecx, %r10d + /* ecx: count, rcx.63: the end result needs to be rol8 */ + movq %rcx, %r10 andl $63, %ecx shrl $3, %ecx jz .Lfold @@ -172,6 +160,7 @@ SYM_FUNC_START(csum_partial_copy_generic) .Lhandle_7: movl %r10d, %ecx andl $7, %ecx +.L1: /* .Lshort rejoins the common path here */ shrl $1, %ecx jz .Lhandle_1 movl $2, %edx @@ -203,26 +192,65 @@ SYM_FUNC_START(csum_partial_copy_generic) adcl %r9d, %eax /* carry */ .Lende: - movq 2*8(%rsp), %rbx - movq 3*8(%rsp), %r12 - movq 4*8(%rsp), %r14 - movq 5*8(%rsp), %r13 - movq 6*8(%rsp), %r15 - addq $7*8, %rsp + testq %r10, %r10 + js .Lwas_odd +.Lout: + movq 0*8(%rsp), %rbx + movq 1*8(%rsp), %r12 + movq 2*8(%rsp), %r14 + movq 3*8(%rsp), %r13 + movq 4*8(%rsp), %r15 + addq $5*8, %rsp ret +.Lshort: + movl %ecx, %r10d + jmp .L1 +.Lunaligned: + xorl %ebx, %ebx + testb $1, %sil + jne .Lodd +1: testb $2, %sil + je 2f + source + movw (%rdi), %bx + dest + movw %bx, (%rsi) + leaq 2(%rdi), %rdi + subq $2, %rcx + leaq 2(%rsi), %rsi + addq %rbx, %rax +2: testb $4, %sil + je .Laligned + source + movl (%rdi), %ebx + dest + movl %ebx, (%rsi) + leaq 4(%rdi), %rdi + subq $4, %rcx + leaq 4(%rsi), %rsi + addq %rbx, %rax + jmp .Laligned + +.Lodd: + source + movb (%rdi), %bl + dest + movb %bl, (%rsi) + leaq 1(%rdi), %rdi + leaq 1(%rsi), %rsi + /* decrement, set MSB */ + leaq -1(%rcx, %rcx), %rcx + rorq $1, %rcx + shll $8, %ebx + addq %rbx, %rax + jmp 1b + +.Lwas_odd: + roll $8, %eax + jmp .Lout - /* Exception handlers. Very simple, zeroing is done in the wrappers */ -.Lbad_source: - movq (%rsp), %rax - testq %rax, %rax - jz .Lende - movl $-EFAULT, (%rax) - jmp .Lende - -.Lbad_dest: - movq 8(%rsp), %rax - testq %rax, %rax - jz .Lende - movl $-EFAULT, (%rax) - jmp .Lende + /* Exception: just return 0 */ +.Lfault: + xorl %eax, %eax + jmp .Lout SYM_FUNC_END(csum_partial_copy_generic) diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index ee63d7576fd2..189344924a2b 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -21,52 +21,16 @@ * src and dst are best aligned to 64bits. 
*/ __wsum -csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum isum, int *errp) +csum_and_copy_from_user(const void __user *src, void *dst, int len) { - might_sleep(); - *errp = 0; + __wsum sum; + might_sleep(); if (!user_access_begin(src, len)) - goto out_err; - - /* - * Why 6, not 7? To handle odd addresses aligned we - * would need to do considerable complications to fix the - * checksum which is defined as an 16bit accumulator. The - * fix alignment code is primarily for performance - * compatibility with 32bit and that will handle odd - * addresses slowly too. - */ - if (unlikely((unsigned long)src & 6)) { - while (((unsigned long)src & 6) && len >= 2) { - __u16 val16; - - unsafe_get_user(val16, (const __u16 __user *)src, out); - - *(__u16 *)dst = val16; - isum = (__force __wsum)add32_with_carry( - (__force unsigned)isum, val16); - src += 2; - dst += 2; - len -= 2; - } - } - isum = csum_partial_copy_generic((__force const void *)src, - dst, len, isum, errp, NULL); - user_access_end(); - if (unlikely(*errp)) - goto out_err; - - return isum; - -out: + return 0; + sum = csum_partial_copy_generic((__force const void *)src, dst, len); user_access_end(); -out_err: - *errp = -EFAULT; - memset(dst, 0, len); - - return isum; + return sum; } EXPORT_SYMBOL(csum_and_copy_from_user); @@ -82,40 +46,16 @@ EXPORT_SYMBOL(csum_and_copy_from_user); * src and dst are best aligned to 64bits. */ __wsum -csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum isum, int *errp) +csum_and_copy_to_user(const void *src, void __user *dst, int len) { - __wsum ret; + __wsum sum; might_sleep(); - - if (!user_access_begin(dst, len)) { - *errp = -EFAULT; + if (!user_access_begin(dst, len)) return 0; - } - - if (unlikely((unsigned long)dst & 6)) { - while (((unsigned long)dst & 6) && len >= 2) { - __u16 val16 = *(__u16 *)src; - - isum = (__force __wsum)add32_with_carry( - (__force unsigned)isum, val16); - unsafe_put_user(val16, (__u16 __user *)dst, out); - src += 2; - dst += 2; - len -= 2; - } - } - - *errp = 0; - ret = csum_partial_copy_generic(src, (void __force *)dst, - len, isum, NULL, errp); - user_access_end(); - return ret; -out: + sum = csum_partial_copy_generic(src, (void __force *)dst, len); user_access_end(); - *errp = -EFAULT; - return isum; + return sum; } EXPORT_SYMBOL(csum_and_copy_to_user); @@ -129,9 +69,9 @@ EXPORT_SYMBOL(csum_and_copy_to_user); * Returns an 32bit unfolded checksum of the buffer. */ __wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len) { - return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len); } EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index c8a85b512796..2cd902e06062 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -35,6 +35,8 @@ #include <asm/smap.h> #include <asm/export.h> +#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC + .text SYM_FUNC_START(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX @@ -114,6 +116,52 @@ SYM_FUNC_START(__get_user_8) SYM_FUNC_END(__get_user_8) EXPORT_SYMBOL(__get_user_8) +/* .. 
and the same for __get_user, just without the range checks */ +SYM_FUNC_START(__get_user_nocheck_1) + ASM_STAC + ASM_BARRIER_NOSPEC +6: movzbl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_1) +EXPORT_SYMBOL(__get_user_nocheck_1) + +SYM_FUNC_START(__get_user_nocheck_2) + ASM_STAC + ASM_BARRIER_NOSPEC +7: movzwl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_2) +EXPORT_SYMBOL(__get_user_nocheck_2) + +SYM_FUNC_START(__get_user_nocheck_4) + ASM_STAC + ASM_BARRIER_NOSPEC +8: movl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_4) +EXPORT_SYMBOL(__get_user_nocheck_4) + +SYM_FUNC_START(__get_user_nocheck_8) + ASM_STAC + ASM_BARRIER_NOSPEC +#ifdef CONFIG_X86_64 +9: movq (%_ASM_AX),%rdx +#else +9: movl (%_ASM_AX),%edx +10: movl 4(%_ASM_AX),%ecx +#endif + xor %eax,%eax + ASM_CLAC + ret +SYM_FUNC_END(__get_user_nocheck_8) +EXPORT_SYMBOL(__get_user_nocheck_8) + SYM_CODE_START_LOCAL(.Lbad_get_user_clac) ASM_CLAC @@ -134,6 +182,7 @@ bad_get_user_8: SYM_CODE_END(.Lbad_get_user_8_clac) #endif +/* get_user */ _ASM_EXTABLE_UA(1b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(2b, .Lbad_get_user_clac) _ASM_EXTABLE_UA(3b, .Lbad_get_user_clac) @@ -143,3 +192,14 @@ SYM_CODE_END(.Lbad_get_user_8_clac) _ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac) _ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac) #endif + +/* __get_user */ + _ASM_EXTABLE_UA(6b, .Lbad_get_user_clac) + _ASM_EXTABLE_UA(7b, .Lbad_get_user_clac) + _ASM_EXTABLE_UA(8b, .Lbad_get_user_clac) +#ifdef CONFIG_X86_64 + _ASM_EXTABLE_UA(9b, .Lbad_get_user_clac) +#else + _ASM_EXTABLE_UA(9b, .Lbad_get_user_8_clac) + _ASM_EXTABLE_UA(10b, .Lbad_get_user_8_clac) +#endif diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 5e69603ff63f..58f7fb95c7f4 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -20,6 +20,7 @@ enum reg_type { REG_TYPE_RM = 0, + REG_TYPE_REG, REG_TYPE_INDEX, REG_TYPE_BASE, }; @@ -53,6 +54,30 @@ static bool is_string_insn(struct insn *insn) } /** + * insn_has_rep_prefix() - Determine if instruction has a REP prefix + * @insn: Instruction containing the prefix to inspect + * + * Returns: + * + * true if the instruction has a REP prefix, false if not. + */ +bool insn_has_rep_prefix(struct insn *insn) +{ + int i; + + insn_get_prefixes(insn); + + for (i = 0; i < insn->prefixes.nbytes; i++) { + insn_byte_t p = insn->prefixes.bytes[i]; + + if (p == 0xf2 || p == 0xf3) + return true; + } + + return false; +} + +/** * get_seg_reg_override_idx() - obtain segment register override index * @insn: Valid instruction with segment override prefixes * @@ -439,6 +464,13 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, regno += 8; break; + case REG_TYPE_REG: + regno = X86_MODRM_REG(insn->modrm.value); + + if (X86_REX_R(insn->rex_prefix.value)) + regno += 8; + break; + case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) @@ -808,6 +840,21 @@ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) } /** + * insn_get_modrm_reg_off() - Obtain register in reg part of the ModRM byte + * @insn: Instruction containing the ModRM byte + * @regs: Register values as seen when entering kernel mode + * + * Returns: + * + * The register indicated by the reg part of the ModRM byte. The + * register is obtained as an offset from the base of pt_regs. 
+ */ +int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs) +{ + return get_reg_offset(insn, regs, REG_TYPE_REG); +} + +/** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. * @regs: Register values as seen when entering kernel mode @@ -1367,3 +1414,86 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) return (void __user *)-1L; } } + +/** + * insn_fetch_from_user() - Copy instruction bytes from user-space memory + * @regs: Structure with register values as seen when entering kernel mode + * @buf: Array to store the fetched instruction + * + * Gets the linear address of the instruction and copies the instruction bytes + * to the buf. + * + * Returns: + * + * Number of instruction bytes copied. + * + * 0 if nothing was copied. + */ +int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE]) +{ + unsigned long seg_base = 0; + int not_copied; + + /* + * If not in user-space long mode, a custom code segment could be in + * use. This is true in protected mode (if the process defined a local + * descriptor table), or virtual-8086 mode. In most of the cases + * seg_base will be zero as in USER_CS. + */ + if (!user_64bit_mode(regs)) { + seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS); + if (seg_base == -1L) + return 0; + } + + + not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip), + MAX_INSN_SIZE); + + return MAX_INSN_SIZE - not_copied; +} + +/** + * insn_decode() - Decode an instruction + * @insn: Structure to store decoded instruction + * @regs: Structure with register values as seen when entering kernel mode + * @buf: Buffer containing the instruction bytes + * @buf_size: Number of instruction bytes available in buf + * + * Decodes the instruction provided in buf and stores the decoding results in + * insn. Also determines the correct address and operand sizes. + * + * Returns: + * + * True if instruction was decoded, False otherwise. + */ +bool insn_decode(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size) +{ + int seg_defs; + + insn_init(insn, buf, buf_size, user_64bit_mode(regs)); + + /* + * Override the default operand and address sizes with what is specified + * in the code segment descriptor. The instruction decoder only sets + * the address size it to either 4 or 8 address bytes and does nothing + * for the operand bytes. This OK for most of the cases, but we could + * have special cases where, for instance, a 16-bit code segment + * descriptor is used. + * If there is an address override prefix, the instruction decoder + * correctly updates these values, even for 16-bit defaults. 
+ */ + seg_defs = insn_get_code_seg_params(regs); + if (seg_defs == -EINVAL) + return false; + + insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs); + insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs); + + insn_get_length(insn); + if (buf_size < insn->length) + return false; + + return true; +} diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index bbcc05bcefad..037faac46b0c 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -4,7 +4,6 @@ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/cpufeatures.h> -#include <asm/mcsafe_test.h> #include <asm/alternative-asm.h> #include <asm/export.h> @@ -187,117 +186,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig) SYM_FUNC_END(memcpy_orig) .popsection - -#ifndef CONFIG_UML - -MCSAFE_TEST_CTL - -/* - * __memcpy_mcsafe - memory copy with machine check exception handling - * Note that we only catch machine checks when reading the source addresses. - * Writes to target are posted and don't generate machine checks. - */ -SYM_FUNC_START(__memcpy_mcsafe) - cmpl $8, %edx - /* Less than 8 bytes? Go to byte copy loop */ - jb .L_no_whole_words - - /* Check for bad alignment of source */ - testl $7, %esi - /* Already aligned */ - jz .L_8byte_aligned - - /* Copy one byte at a time until source is 8-byte aligned */ - movl %esi, %ecx - andl $7, %ecx - subl $8, %ecx - negl %ecx - subl %ecx, %edx -.L_read_leading_bytes: - movb (%rsi), %al - MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes - MCSAFE_TEST_DST %rdi 1 .E_leading_bytes -.L_write_leading_bytes: - movb %al, (%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz .L_read_leading_bytes - -.L_8byte_aligned: - movl %edx, %ecx - andl $7, %edx - shrl $3, %ecx - jz .L_no_whole_words - -.L_read_words: - movq (%rsi), %r8 - MCSAFE_TEST_SRC %rsi 8 .E_read_words - MCSAFE_TEST_DST %rdi 8 .E_write_words -.L_write_words: - movq %r8, (%rdi) - addq $8, %rsi - addq $8, %rdi - decl %ecx - jnz .L_read_words - - /* Any trailing bytes? */ -.L_no_whole_words: - andl %edx, %edx - jz .L_done_memcpy_trap - - /* Copy trailing bytes */ - movl %edx, %ecx -.L_read_trailing_bytes: - movb (%rsi), %al - MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes - MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes -.L_write_trailing_bytes: - movb %al, (%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz .L_read_trailing_bytes - - /* Copy successful. Return zero */ -.L_done_memcpy_trap: - xorl %eax, %eax -.L_done: - ret -SYM_FUNC_END(__memcpy_mcsafe) -EXPORT_SYMBOL_GPL(__memcpy_mcsafe) - - .section .fixup, "ax" - /* - * Return number of bytes not copied for any failure. Note that - * there is no "tail" handling since the source buffer is 8-byte - * aligned and poison is cacheline aligned. - */ -.E_read_words: - shll $3, %ecx -.E_leading_bytes: - addl %edx, %ecx -.E_trailing_bytes: - mov %ecx, %eax - jmp .L_done - - /* - * For write fault handling, given the destination is unaligned, - * we handle faults on multi-byte writes with a byte-by-byte - * copy up to the write-protected page. 
- */ -.E_write_words: - shll $3, %ecx - addl %edx, %ecx - movl %ecx, %edx - jmp mcsafe_handle_tail - - .previous - - _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes) - _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words) - _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes) - _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes) - _ASM_EXTABLE(.L_write_words, .E_write_words) - _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes) -#endif diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 7c7c92db8497..b34a17763f28 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -25,7 +25,9 @@ * Inputs: %eax[:%edx] contains the data * %ecx contains the address * - * Outputs: %eax is error code (0 or -EFAULT) + * Outputs: %ecx is error code (0 or -EFAULT) + * + * Clobbers: %ebx needed for task pointer * * These functions should not modify any other registers, * as they get called from within inline assembly. @@ -38,13 +40,15 @@ SYM_FUNC_START(__put_user_1) ENTER cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) ASM_STAC 1: movb %al,(%_ASM_CX) - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC ret SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) +EXPORT_SYMBOL(__put_user_nocheck_1) SYM_FUNC_START(__put_user_2) ENTER @@ -52,13 +56,15 @@ SYM_FUNC_START(__put_user_2) sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL) ASM_STAC 2: movw %ax,(%_ASM_CX) - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC ret SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) +EXPORT_SYMBOL(__put_user_nocheck_2) SYM_FUNC_START(__put_user_4) ENTER @@ -66,13 +72,15 @@ SYM_FUNC_START(__put_user_4) sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL) ASM_STAC 3: movl %eax,(%_ASM_CX) - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC ret SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) +EXPORT_SYMBOL(__put_user_nocheck_4) SYM_FUNC_START(__put_user_8) ENTER @@ -80,21 +88,23 @@ SYM_FUNC_START(__put_user_8) sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae .Lbad_put_user +SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL) ASM_STAC 4: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 5: movl %edx,4(%_ASM_CX) #endif - xor %eax,%eax + xor %ecx,%ecx ASM_CLAC RET SYM_FUNC_END(__put_user_8) EXPORT_SYMBOL(__put_user_8) +EXPORT_SYMBOL(__put_user_nocheck_8) SYM_CODE_START_LOCAL(.Lbad_put_user_clac) ASM_CLAC .Lbad_put_user: - movl $-EFAULT,%eax + movl $-EFAULT,%ecx RET SYM_CODE_END(.Lbad_put_user_clac) diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 1847e993ac63..508c81e97ab1 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -56,27 +56,6 @@ unsigned long clear_user(void __user *to, unsigned long n) } EXPORT_SYMBOL(clear_user); -/* - * Similar to copy_user_handle_tail, probe for the write fault point, - * but reuse __memcpy_mcsafe in case a new read error is encountered. - * clac() is handled in _copy_to_iter_mcsafe(). - */ -__visible notrace unsigned long -mcsafe_handle_tail(char *to, char *from, unsigned len) -{ - for (; len; --len, to++, from++) { - /* - * Call the assembly routine back directly since - * memcpy_mcsafe() may silently fallback to memcpy. 
- */ - unsigned long rem = __memcpy_mcsafe(to, from, 1); - - if (rem) - break; - } - return len; -} - #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE /** * clean_cache_range - write back a cache range with CLWB diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 770b613790b3..f5e1e60c9095 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -21,7 +21,8 @@ DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks); DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack); #endif -struct cpu_entry_area *get_cpu_entry_area(int cpu) +/* Is called from entry code, so must be noinstr */ +noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu) { unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 1d6cb07f4f86..b93d6cd08a7f 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -5,6 +5,7 @@ #include <xen/xen.h> #include <asm/fpu/internal.h> +#include <asm/sev-es.h> #include <asm/traps.h> #include <asm/kdebug.h> @@ -80,6 +81,18 @@ __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, } EXPORT_SYMBOL(ex_handler_uaccess); +__visible bool ex_handler_copy(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr, + unsigned long error_code, + unsigned long fault_addr) +{ + WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?"); + regs->ip = ex_fixup_addr(fixup); + regs->ax = trapnr; + return true; +} +EXPORT_SYMBOL(ex_handler_copy); + __visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr, unsigned long error_code, @@ -125,17 +138,21 @@ __visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup, } EXPORT_SYMBOL(ex_handler_clear_fs); -__visible bool ex_has_fault_handler(unsigned long ip) +enum handler_type ex_get_fault_handler_type(unsigned long ip) { const struct exception_table_entry *e; ex_handler_t handler; e = search_exception_tables(ip); if (!e) - return false; + return EX_HANDLER_NONE; handler = ex_fixup_handler(e); - - return handler == ex_handler_fault; + if (handler == ex_handler_fault) + return EX_HANDLER_FAULT; + else if (handler == ex_handler_uaccess || handler == ex_handler_copy) + return EX_HANDLER_UACCESS; + else + return EX_HANDLER_OTHER; } int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6e3e8a124903..42606a04ae85 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1128,7 +1128,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) return 0; } -static int fault_in_kernel_space(unsigned long address) +bool fault_in_kernel_space(unsigned long address) { /* * On 64-bit systems, the vsyscall page is at an address above diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a4ac13cc3fdc..b5a3fa4033d3 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -217,11 +217,6 @@ static void sync_global_pgds(unsigned long start, unsigned long end) sync_global_pgds_l4(start, end); } -void arch_sync_kernel_mappings(unsigned long start, unsigned long end) -{ - sync_global_pgds(start, end); -} - /* * NOTE: This function is marked __ref because it calls __init function * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. 
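The ex_get_fault_handler_type() classification added in the extable.c hunk above is meant for fault paths that must decide whether a trapping instruction has a recoverable fixup. A minimal sketch of such a caller, assuming only the enum handler_type values introduced above and that the declaration is visible (e.g. via asm/extable.h); the wrapper name and the policy it applies are illustrative, not the in-tree consumer:

	/*
	 * Hedged sketch: only ex_get_fault_handler_type() and the
	 * EX_HANDLER_* values come from the patch above; this caller
	 * is hypothetical.
	 */
	static bool example_ip_has_copy_fixup(unsigned long ip)
	{
		enum handler_type t = ex_get_fault_handler_type(ip);

		/*
		 * uaccess and copy fixups (ex_handler_uaccess/ex_handler_copy)
		 * report EX_HANDLER_UACCESS; explicit fault handlers report
		 * EX_HANDLER_FAULT. Anything else is not treated as a
		 * recoverable copy here.
		 */
		return t == EX_HANDLER_UACCESS || t == EX_HANDLER_FAULT;
	}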
@@ -1257,14 +1252,19 @@ static void __init preallocate_vmalloc_pages(void) if (!p4d) goto failed; - /* - * With 5-level paging the P4D level is not folded. So the PGDs - * are now populated and there is no need to walk down to the - * PUD level. - */ if (pgtable_l5_enabled()) continue; + /* + * The goal here is to allocate all possibly required + * hardware page tables pointed to by the top hardware + * level. + * + * On 4-level systems, the P4D layer is folded away and + * the above code does no preallocation. Below, go down + * to the pud _software_ level to ensure the second + * hardware level is allocated on 4-level systems too. + */ lvl = "pud"; pud = pud_alloc(&init_mm, p4d, addr); if (!pud) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 9f1177edc2e7..ebb7edc8bc0a 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -38,6 +38,7 @@ * section is later cleared. */ u64 sme_me_mask __section(.data) = 0; +u64 sev_status __section(.data) = 0; EXPORT_SYMBOL(sme_me_mask); DEFINE_STATIC_KEY_FALSE(sev_enable_key); EXPORT_SYMBOL_GPL(sev_enable_key); @@ -347,7 +348,13 @@ bool sme_active(void) bool sev_active(void) { - return sme_me_mask && sev_enabled; + return sev_status & MSR_AMD64_SEV_ENABLED; +} + +/* Needs to be called from non-instrumentable code */ +bool noinstr sev_es_active(void) +{ + return sev_status & MSR_AMD64_SEV_ES_ENABLED; } /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ @@ -400,6 +407,31 @@ void __init mem_encrypt_free_decrypted_mem(void) free_init_pages("unused decrypted", vaddr, vaddr_end); } +static void print_mem_encrypt_feature_info(void) +{ + pr_info("AMD Memory Encryption Features active:"); + + /* Secure Memory Encryption */ + if (sme_active()) { + /* + * SME is mutually exclusive with any of the SEV + * features below. + */ + pr_cont(" SME\n"); + return; + } + + /* Secure Encrypted Virtualization */ + if (sev_active()) + pr_cont(" SEV"); + + /* Encrypted Register State */ + if (sev_es_active()) + pr_cont(" SEV-ES"); + + pr_cont("\n"); +} + /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void) { @@ -415,8 +447,6 @@ void __init mem_encrypt_init(void) if (sev_active()) static_branch_enable(&sev_enable_key); - pr_info("AMD %s active\n", - sev_active() ? 
"Secure Encrypted Virtualization (SEV)" - : "Secure Memory Encryption (SME)"); + print_mem_encrypt_feature_info(); } diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index e2b0e2ac07bb..68d75379e06a 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -540,6 +540,9 @@ void __init sme_enable(struct boot_params *bp) if (!(msr & MSR_AMD64_SEV_ENABLED)) return; + /* Save SEV_STATUS to avoid reading MSR again */ + sev_status = msr; + /* SEV state cannot be controlled by a command line option */ sme_me_mask = me_mask; sev_enabled = true; diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index aa76ec2d359b..44148691d78b 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -37,14 +37,12 @@ static __init int numa_setup(char *opt) return -EINVAL; if (!strncmp(opt, "off", 3)) numa_off = 1; -#ifdef CONFIG_NUMA_EMU if (!strncmp(opt, "fake=", 5)) - numa_emu_cmdline(opt + 5); -#endif -#ifdef CONFIG_ACPI_NUMA + return numa_emu_cmdline(opt + 5); if (!strncmp(opt, "noacpi", 6)) - acpi_numa = -1; -#endif + disable_srat(); + if (!strncmp(opt, "nohmat", 6)) + disable_hmat(); return 0; } early_param("numa", numa_setup); @@ -516,7 +514,7 @@ static void __init numa_clear_kernel_node_hotplug(void) * memory ranges, because quirks such as trim_snb_memory() * reserve specific pages for Sandy Bridge graphics. ] */ - for_each_memblock(reserved, mb_region) { + for_each_reserved_mem_region(mb_region) { int nid = memblock_get_region_node(mb_region); if (nid != MAX_NUMNODES) @@ -748,6 +746,27 @@ static void __init init_memory_less_node(int nid) } /* + * A node may exist which has one or more Generic Initiators but no CPUs and no + * memory. + * + * This function must be called after init_cpu_to_node(), to ensure that any + * memoryless CPU nodes have already been brought online, and before the + * node_data[nid] is needed for zone list setup in build_all_zonelists(). + * + * When this function is called, any nodes containing either memory and/or CPUs + * will already be online and there is no need to do anything extra, even if + * they also contain one or more Generic Initiators. + */ +void __init init_gi_nodes(void) +{ + int nid; + + for_each_node_state(nid, N_GENERIC_INITIATOR) + if (!node_online(nid)) + init_memory_less_node(nid); +} + +/* * Setup early cpu_to_node. * * Populate cpu_to_node[] only if x86_cpu_to_apicid[], @@ -919,7 +938,6 @@ int phys_to_target_node(phys_addr_t start) return meminfo_to_nid(&numa_reserved_meminfo, start); } -EXPORT_SYMBOL_GPL(phys_to_target_node); int memory_add_physaddr_to_nid(u64 start) { diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 683cd12f4793..87d77cc52f86 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -13,9 +13,10 @@ static int emu_nid_to_phys[MAX_NUMNODES]; static char *emu_cmdline __initdata; -void __init numa_emu_cmdline(char *str) +int __init numa_emu_cmdline(char *str) { emu_cmdline = str; + return 0; } static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi) diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index d1b2a889f035..40baa90e74f4 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -1999,7 +1999,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) /* * Before changing the encryption attribute, we need to flush caches. 
*/ - cpa_flush(&cpa, 1); + cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT)); ret = __change_page_attr_set_clr(&cpa, 1); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 0951b47e64c1..11666ba19b62 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -14,7 +14,6 @@ #include <asm/nospec-branch.h> #include <asm/cache.h> #include <asm/apic.h> -#include <asm/uv/uv.h> #include "mm_internal.h" @@ -800,29 +799,6 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask, trace_tlb_flush(TLB_REMOTE_SEND_IPI, (info->end - info->start) >> PAGE_SHIFT); - if (is_uv_system()) { - /* - * This whole special case is confused. UV has a "Broadcast - * Assist Unit", which seems to be a fancy way to send IPIs. - * Back when x86 used an explicit TLB flush IPI, UV was - * optimized to use its own mechanism. These days, x86 uses - * smp_call_function_many(), but UV still uses a manual IPI, - * and that IPI's action is out of date -- it does a manual - * flush instead of calling flush_tlb_func_remote(). This - * means that the percpu tlb_gen variables won't be updated - * and we'll do pointless flushes on future context switches. - * - * Rather than hooking native_flush_tlb_others() here, I think - * that UV should be updated so that smp_call_function_many(), - * etc, are optimal on UV. - */ - cpumask = uv_flush_tlb_others(cpumask, info); - if (cpumask) - smp_call_function_many(cpumask, flush_tlb_func_remote, - (void *)info, 1); - return; - } - /* * If no page tables were freed, we can skip sending IPIs to * CPUs in lazy TLB mode. They will flush the CPU themselves diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 42b6709e6dc7..796506dcfc42 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -221,14 +221,48 @@ struct jit_context { /* Number of bytes emit_patch() needs to generate instructions */ #define X86_PATCH_SIZE 5 +/* Number of bytes that will be skipped on tailcall */ +#define X86_TAIL_CALL_OFFSET 11 -#define PROLOGUE_SIZE 25 +static void push_callee_regs(u8 **pprog, bool *callee_regs_used) +{ + u8 *prog = *pprog; + int cnt = 0; + + if (callee_regs_used[0]) + EMIT1(0x53); /* push rbx */ + if (callee_regs_used[1]) + EMIT2(0x41, 0x55); /* push r13 */ + if (callee_regs_used[2]) + EMIT2(0x41, 0x56); /* push r14 */ + if (callee_regs_used[3]) + EMIT2(0x41, 0x57); /* push r15 */ + *pprog = prog; +} + +static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) +{ + u8 *prog = *pprog; + int cnt = 0; + + if (callee_regs_used[3]) + EMIT2(0x41, 0x5F); /* pop r15 */ + if (callee_regs_used[2]) + EMIT2(0x41, 0x5E); /* pop r14 */ + if (callee_regs_used[1]) + EMIT2(0x41, 0x5D); /* pop r13 */ + if (callee_regs_used[0]) + EMIT1(0x5B); /* pop rbx */ + *pprog = prog; +} /* - * Emit x86-64 prologue code for BPF program and check its size. - * bpf_tail_call helper will skip it while jumping into another program + * Emit x86-64 prologue code for BPF program. 
+ * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes + * while jumping to another program */ -static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) +static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, + bool tail_call_reachable, bool is_subprog) { u8 *prog = *pprog; int cnt = X86_PATCH_SIZE; @@ -238,19 +272,19 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) */ memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt); prog += cnt; + if (!ebpf_from_cbpf) { + if (tail_call_reachable && !is_subprog) + EMIT2(0x31, 0xC0); /* xor eax, eax */ + else + EMIT2(0x66, 0x90); /* nop2 */ + } EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ /* sub rsp, rounded_stack_depth */ - EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); - EMIT1(0x53); /* push rbx */ - EMIT2(0x41, 0x55); /* push r13 */ - EMIT2(0x41, 0x56); /* push r14 */ - EMIT2(0x41, 0x57); /* push r15 */ - if (!ebpf_from_cbpf) { - /* zero init tail_call_cnt */ - EMIT2(0x6a, 0x00); - BUILD_BUG_ON(cnt != PROLOGUE_SIZE); - } + if (stack_depth) + EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); + if (tail_call_reachable) + EMIT1(0x50); /* push rax */ *pprog = prog; } @@ -314,13 +348,14 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, mutex_lock(&text_mutex); if (memcmp(ip, old_insn, X86_PATCH_SIZE)) goto out; + ret = 1; if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { if (text_live) text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); else memcpy(ip, new_insn, X86_PATCH_SIZE); + ret = 0; } - ret = 0; out: mutex_unlock(&text_mutex); return ret; @@ -337,6 +372,22 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true); } +static int get_pop_bytes(bool *callee_regs_used) +{ + int bytes = 0; + + if (callee_regs_used[3]) + bytes += 2; + if (callee_regs_used[2]) + bytes += 2; + if (callee_regs_used[1]) + bytes += 2; + if (callee_regs_used[0]) + bytes += 1; + + return bytes; +} + /* * Generate the following code: * @@ -351,12 +402,32 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, * goto *(prog->bpf_func + prologue_size); * out: */ -static void emit_bpf_tail_call_indirect(u8 **pprog) +static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, + u32 stack_depth) { + int tcc_off = -4 - round_up(stack_depth, 8); u8 *prog = *pprog; - int label1, label2, label3; + int pop_bytes = 0; + int off1 = 42; + int off2 = 31; + int off3 = 9; int cnt = 0; + /* count the additional bytes used for popping callee regs from stack + * that need to be taken into account for each of the offsets that + * are used for bailing out of the tail call + */ + pop_bytes = get_pop_bytes(callee_regs_used); + off1 += pop_bytes; + off2 += pop_bytes; + off3 += pop_bytes; + + if (stack_depth) { + off1 += 7; + off2 += 7; + off3 += 7; + } + /* * rdi - pointer to ctx * rsi - pointer to bpf_array @@ -370,72 +441,112 @@ static void emit_bpf_tail_call_indirect(u8 **pprog) EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); -#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */ +#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ - label1 = cnt; /* * if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */ + 
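	/*
	 * Illustrative sketch (an assumption derived from the EMIT*()
	 * sequences in this hunk, not taken from disassembly): the byte
	 * layout emit_prologue() generates for an eBPF main program with
	 * a reachable tail call and non-zero stack depth, showing why
	 * X86_TAIL_CALL_OFFSET is 11:
	 *
	 *   0x00  nop5                  patchable dispatch site
	 *   0x05  xor eax, eax          tail_call_cnt = 0 (subprogs get nop2)
	 *   0x07  push rbp
	 *   0x08  mov rbp, rsp
	 *   0x0b  sub rsp, stack_depth  <- bpf_func + X86_TAIL_CALL_OFFSET
	 *   0x12  push rax              spill tail_call_cnt
	 *
	 * The tail-call path below pops rax and rewinds its own stack_depth
	 * before jumping to the target at offset 11, so the target reuses
	 * the caller's rbp frame and re-spills the count carried in rax.
	 */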
EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) +#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE) EMIT2(X86_JA, OFFSET2); /* ja out */ - label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */ + EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ /* prog = array->ptrs[index]; */ - EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */ + EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ offsetof(struct bpf_array, ptrs)); /* * if (prog == NULL) * goto out; */ - EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ -#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE) + EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ +#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE) EMIT2(X86_JE, OFFSET3); /* je out */ - label3 = cnt; - /* goto *(prog->bpf_func + prologue_size); */ - EMIT4(0x48, 0x8B, 0x40, /* mov rax, qword ptr [rax + 32] */ - offsetof(struct bpf_prog, bpf_func)); - EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */ + *pprog = prog; + pop_callee_regs(pprog, callee_regs_used); + prog = *pprog; + + EMIT1(0x58); /* pop rax */ + if (stack_depth) + EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ + round_up(stack_depth, 8)); + /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ + EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ + offsetof(struct bpf_prog, bpf_func)); + EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ + X86_TAIL_CALL_OFFSET); /* - * Wow we're ready to jump into next BPF program + * Now we're ready to jump into next BPF program * rdi == ctx (1st arg) - * rax == prog->bpf_func + prologue_size + * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET */ - RETPOLINE_RAX_BPF_JIT(); + RETPOLINE_RCX_BPF_JIT(); /* out: */ - BUILD_BUG_ON(cnt - label1 != OFFSET1); - BUILD_BUG_ON(cnt - label2 != OFFSET2); - BUILD_BUG_ON(cnt - label3 != OFFSET3); *pprog = prog; } static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, - u8 **pprog, int addr, u8 *image) + u8 **pprog, int addr, u8 *image, + bool *callee_regs_used, u32 stack_depth) { + int tcc_off = -4 - round_up(stack_depth, 8); u8 *prog = *pprog; + int pop_bytes = 0; + int off1 = 20; + int poke_off; int cnt = 0; + /* count the additional bytes used for popping callee regs to stack + * that need to be taken into account for jump offset that is used for + * bailing out from of the tail call when limit is reached + */ + pop_bytes = get_pop_bytes(callee_regs_used); + off1 += pop_bytes; + + /* + * total bytes for: + * - nop5/ jmpq $off + * - pop callee regs + * - sub rsp, $val if depth > 0 + * - pop rax + */ + poke_off = X86_PATCH_SIZE + pop_bytes + 1; + if (stack_depth) { + poke_off += 7; + off1 += 7; + } + /* * if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */ + EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ - EMIT2(X86_JA, 14); /* ja out */ + EMIT2(X86_JA, off1); /* ja out */ EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */ + EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ - poke->ip = image + (addr - 
X86_PATCH_SIZE); - poke->adj_off = PROLOGUE_SIZE; + poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE); + poke->adj_off = X86_TAIL_CALL_OFFSET; + poke->tailcall_target = image + (addr - X86_PATCH_SIZE); + poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; + + emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, + poke->tailcall_bypass); + + *pprog = prog; + pop_callee_regs(pprog, callee_regs_used); + prog = *pprog; + EMIT1(0x58); /* pop rax */ + if (stack_depth) + EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE); prog += X86_PATCH_SIZE; @@ -453,7 +564,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) for (i = 0; i < prog->aux->size_poke_tab; i++) { poke = &prog->aux->poke_tab[i]; - WARN_ON_ONCE(READ_ONCE(poke->ip_stable)); + WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); if (poke->reason != BPF_POKE_REASON_TAIL_CALL) continue; @@ -464,18 +575,25 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) if (target) { /* Plain memcpy is used when image is not live yet * and still not locked as read-only. Once poke - * location is active (poke->ip_stable), any parallel - * bpf_arch_text_poke() might occur still on the - * read-write image until we finally locked it as - * read-only. Both modifications on the given image - * are under text_mutex to avoid interference. + * location is active (poke->tailcall_target_stable), + * any parallel bpf_arch_text_poke() might occur + * still on the read-write image until we finally + * locked it as read-only. Both modifications on + * the given image are under text_mutex to avoid + * interference. */ - ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL, + ret = __bpf_arch_text_poke(poke->tailcall_target, + BPF_MOD_JUMP, NULL, (u8 *)target->bpf_func + poke->adj_off, false); BUG_ON(ret < 0); + ret = __bpf_arch_text_poke(poke->tailcall_bypass, + BPF_MOD_JUMP, + (u8 *)poke->tailcall_target + + X86_PATCH_SIZE, NULL, false); + BUG_ON(ret < 0); } - WRITE_ONCE(poke->ip_stable, true); + WRITE_ONCE(poke->tailcall_target_stable, true); mutex_unlock(&array->aux->poke_mutex); } } @@ -652,19 +770,49 @@ static bool ex_handler_bpf(const struct exception_table_entry *x, return true; } +static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, + bool *regs_used, bool *tail_call_seen) +{ + int i; + + for (i = 1; i <= insn_cnt; i++, insn++) { + if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) + *tail_call_seen = true; + if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) + regs_used[0] = true; + if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) + regs_used[1] = true; + if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) + regs_used[2] = true; + if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) + regs_used[3] = true; + } +} + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, int oldproglen, struct jit_context *ctx) { + bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; struct bpf_insn *insn = bpf_prog->insnsi; + bool callee_regs_used[4] = {}; int insn_cnt = bpf_prog->len; + bool tail_call_seen = false; bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; int i, cnt = 0, excnt = 0; int proglen = 0; u8 *prog = temp; + detect_reg_usage(insn, insn_cnt, callee_regs_used, + &tail_call_seen); + + /* tail call's presence in current prog implies it is reachable */ + tail_call_reachable |= tail_call_seen; + emit_prologue(&prog, 
bpf_prog->aux->stack_depth, - bpf_prog_was_classic(bpf_prog)); + bpf_prog_was_classic(bpf_prog), tail_call_reachable, + bpf_prog->aux->func_idx != 0); + push_callee_regs(&prog, callee_regs_used); addrs[0] = prog - temp; for (i = 1; i <= insn_cnt; i++, insn++) { @@ -1102,16 +1250,27 @@ xadd: if (is_imm8(insn->off)) /* call */ case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; - if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) - return -EINVAL; + if (tail_call_reachable) { + EMIT3_off32(0x48, 0x8B, 0x85, + -(bpf_prog->aux->stack_depth + 8)); + if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) + return -EINVAL; + } else { + if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) + return -EINVAL; + } break; case BPF_JMP | BPF_TAIL_CALL: if (imm32) emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], - &prog, addrs[i], image); + &prog, addrs[i], image, + callee_regs_used, + bpf_prog->aux->stack_depth); else - emit_bpf_tail_call_indirect(&prog); + emit_bpf_tail_call_indirect(&prog, + callee_regs_used, + bpf_prog->aux->stack_depth); break; /* cond jump */ @@ -1294,12 +1453,7 @@ emit_jmp: seen_exit = true; /* Update cleanup_addr */ ctx->cleanup_addr = proglen; - if (!bpf_prog_was_classic(bpf_prog)) - EMIT1(0x5B); /* get rid of tail_call_cnt */ - EMIT2(0x41, 0x5F); /* pop r15 */ - EMIT2(0x41, 0x5E); /* pop r14 */ - EMIT2(0x41, 0x5D); /* pop r13 */ - EMIT1(0x5B); /* pop rbx */ + pop_callee_regs(&prog, callee_regs_used); EMIT1(0xC9); /* leave */ EMIT1(0xC3); /* ret */ break; @@ -1379,10 +1533,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, u8 *prog = *pprog; int cnt = 0; - if (emit_call(&prog, __bpf_prog_enter, prog)) - return -EINVAL; - /* remember prog start time returned by __bpf_prog_enter */ - emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); + if (p->aux->sleepable) { + if (emit_call(&prog, __bpf_prog_enter_sleepable, prog)) + return -EINVAL; + } else { + if (emit_call(&prog, __bpf_prog_enter, prog)) + return -EINVAL; + /* remember prog start time returned by __bpf_prog_enter */ + emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); + } /* arg1: lea rdi, [rbp - stack_size] */ EMIT4(0x48, 0x8D, 0x7D, -stack_size); @@ -1402,13 +1561,18 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, if (mod_ret) emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); - /* arg1: mov rdi, progs[i] */ - emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, - (u32) (long) p); - /* arg2: mov rsi, rbx <- start time in nsec */ - emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); - if (emit_call(&prog, __bpf_prog_exit, prog)) - return -EINVAL; + if (p->aux->sleepable) { + if (emit_call(&prog, __bpf_prog_exit_sleepable, prog)) + return -EINVAL; + } else { + /* arg1: mov rdi, progs[i] */ + emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, + (u32) (long) p); + /* arg2: mov rsi, rbx <- start time in nsec */ + emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); + if (emit_call(&prog, __bpf_prog_exit, prog)) + return -EINVAL; + } *pprog = prog; return 0; diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index df1d95913d4e..3507f456fcd0 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -19,6 +19,7 @@ #include <asm/smp.h> #include <asm/pci_x86.h> #include <asm/setup.h> +#include <asm/irqdomain.h> unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | PCI_PROBE_MMCONF; @@ -633,8 +634,9 @@ static void set_dev_domain_options(struct pci_dev *pdev) int pcibios_add_device(struct pci_dev *dev) { - 
struct setup_data *data; struct pci_setup_rom *rom; + struct irq_domain *msidom; + struct setup_data *data; u64 pa_data; pa_data = boot_params.hdr.setup_data; @@ -661,6 +663,20 @@ int pcibios_add_device(struct pci_dev *dev) memunmap(data); } set_dev_domain_options(dev); + + /* + * Setup the initial MSI domain of the device. If the underlying + * bus has a PCI/MSI irqdomain associated use the bus domain, + * otherwise set the default domain. This ensures that special irq + * domains e.g. VMD are preserved. The default ensures initial + * operation if irq remapping is not active. If irq remapping is + * active it will overwrite the domain pointer when the device is + * associated to a remapping domain. + */ + msidom = dev_get_msi_domain(&dev->bus->dev); + if (!msidom) + msidom = x86_pci_msi_default_domain; + dev_set_msi_domain(&dev->dev, msidom); return 0; } diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index 5fc617edf108..00bfa1ebad6c 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c @@ -3,16 +3,17 @@ #include <linux/init.h> #include <asm/pci_x86.h> #include <asm/x86_init.h> +#include <asm/irqdomain.h> /* arch_initcall has too random ordering, so call the initializers in the right sequence from here. */ static __init int pci_arch_init(void) { -#ifdef CONFIG_PCI_DIRECT - int type = 0; + int type; + + x86_create_pci_msi_domain(); type = pci_direct_probe(); -#endif if (!(pci_probe & PCI_PROBE_NOEARLY)) pci_mmcfg_early_init(); @@ -20,18 +21,16 @@ static __init int pci_arch_init(void) if (x86_init.pci.arch_init && !x86_init.pci.arch_init()) return 0; -#ifdef CONFIG_PCI_BIOS pci_pcbios_init(); -#endif + /* * don't check for raw_pci_ops here because we want pcbios as last * fallback, yet it's needed to run first to set pcibios_last_bus * in case legacy PCI probing is used. otherwise detecting peer busses * fails. */ -#ifdef CONFIG_PCI_DIRECT pci_direct_init(type); -#endif + if (!raw_pci_ops && !raw_pci_ext_ops) printk(KERN_ERR "PCI: Fatal: No config space access function found\n"); diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index c313d784efab..5701d5ba3df4 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -15,7 +15,6 @@ #include <asm/iommu.h> #define STA2X11_SWIOTLB_SIZE (4*1024*1024) -extern int swiotlb_late_init_with_default_size(size_t default_size); /* * We build a list of bus numbers that are under the ConneXt. 
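The pcibios_add_device() hunk earlier in this file picks the device's MSI irq domain by preferring the domain already attached to the bus (so special domains such as VMD are preserved) and falling back to x86_pci_msi_default_domain only when the bus has none. A minimal, self-contained C sketch of that fallback pattern, using made-up msi_domain/bus/device types rather than the kernel's, assuming nothing beyond what the hunk's comment states:

        #include <stdio.h>

        /* Hypothetical stand-ins for struct irq_domain and the arch default. */
        struct msi_domain { const char *name; };
        static struct msi_domain default_domain = { "arch-default-MSI" };

        struct bus    { struct msi_domain *msi_domain; };
        struct device { struct bus *bus; struct msi_domain *msi_domain; };

        /* Prefer the bus-provided domain (e.g. a VMD-style special domain),
         * otherwise fall back to the platform default. */
        static void assign_msi_domain(struct device *dev)
        {
                struct msi_domain *d = dev->bus->msi_domain;

                if (!d)
                        d = &default_domain;
                dev->msi_domain = d;
        }

        int main(void)
        {
                struct msi_domain vmd = { "VMD-MSI" };
                struct bus plain_bus = { NULL }, vmd_bus = { &vmd };
                struct device a = { &plain_bus, NULL }, b = { &vmd_bus, NULL };

                assign_msi_domain(&a);
                assign_msi_domain(&b);
                printf("a -> %s, b -> %s\n", a.msi_domain->name, b.msi_domain->name);
                return 0;
        }

In the hunk itself the same roles are played by dev_get_msi_domain(&dev->bus->dev) and x86_pci_msi_default_domain, with irq remapping free to overwrite the pointer later, as the added comment notes.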
The @@ -133,7 +132,7 @@ static void sta2x11_map_ep(struct pci_dev *pdev) struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev); struct device *dev = &pdev->dev; u32 amba_base, max_amba_addr; - int i; + int i, ret; if (!instance) return; @@ -141,7 +140,9 @@ static void sta2x11_map_ep(struct pci_dev *pdev) pci_read_config_dword(pdev, AHB_BASE(0), &amba_base); max_amba_addr = amba_base + STA2X11_AMBA_SIZE - 1; - dev->dma_pfn_offset = PFN_DOWN(-amba_base); + ret = dma_direct_set_offset(dev, 0, amba_base, STA2X11_AMBA_SIZE); + if (ret) + dev_err(dev, "sta2x11: could not set DMA offset\n"); dev->bus_dma_limit = max_amba_addr; pci_set_consistent_dma_mask(pdev, max_amba_addr); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 89395a5049bb..c552cd2d0632 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -157,6 +157,13 @@ static int acpi_register_gsi_xen(struct device *dev, u32 gsi, struct xen_pci_frontend_ops *xen_pci_frontend; EXPORT_SYMBOL_GPL(xen_pci_frontend); +struct xen_msi_ops { + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); + void (*teardown_msi_irqs)(struct pci_dev *dev); +}; + +static struct xen_msi_ops xen_msi_ops __ro_after_init; + static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { int irq, ret, i; @@ -372,28 +379,122 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev) WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret); } } -#endif +#else /* CONFIG_XEN_DOM0 */ +#define xen_initdom_setup_msi_irqs NULL +#define xen_initdom_restore_msi_irqs NULL +#endif /* !CONFIG_XEN_DOM0 */ static void xen_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *msidesc; + int i; + + for_each_pci_msi_entry(msidesc, dev) { + if (msidesc->irq) { + for (i = 0; i < msidesc->nvec_used; i++) + xen_destroy_irq(msidesc->irq + i); + } + } +} + +static void xen_pv_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *msidesc = first_pci_msi_entry(dev); - msidesc = first_pci_msi_entry(dev); if (msidesc->msi_attrib.is_msix) xen_pci_frontend_disable_msix(dev); else xen_pci_frontend_disable_msi(dev); - /* Free the IRQ's and the msidesc using the generic code. */ - default_teardown_msi_irqs(dev); + xen_teardown_msi_irqs(dev); } -static void xen_teardown_msi_irq(unsigned int irq) +static int xen_msi_domain_alloc_irqs(struct irq_domain *domain, + struct device *dev, int nvec) { - xen_destroy_irq(irq); + int type; + + if (WARN_ON_ONCE(!dev_is_pci(dev))) + return -EINVAL; + + if (first_msi_entry(dev)->msi_attrib.is_msix) + type = PCI_CAP_ID_MSIX; + else + type = PCI_CAP_ID_MSI; + + return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type); } -#endif +static void xen_msi_domain_free_irqs(struct irq_domain *domain, + struct device *dev) +{ + if (WARN_ON_ONCE(!dev_is_pci(dev))) + return; + + xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev)); +} + +static struct msi_domain_ops xen_pci_msi_domain_ops = { + .domain_alloc_irqs = xen_msi_domain_alloc_irqs, + .domain_free_irqs = xen_msi_domain_free_irqs, +}; + +static struct msi_domain_info xen_pci_msi_domain_info = { + .ops = &xen_pci_msi_domain_ops, +}; + +/* + * This irq domain is a blatant violation of the irq domain design, but + * distangling XEN into real irq domains is not a job for mere mortals with + * limited XENology. But it's the least dangerous way for a mere mortal to + * get rid of the arch_*_msi_irqs() hackery in order to store the irq + * domain pointer in struct device. This irq domain wrappery allows to do + * that without breaking XEN terminally. 
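The xen_msi_ops table introduced above replaces the old per-mode assignments of x86_msi hooks: xen_setup_pci_msi() below fills in the setup/teardown callbacks exactly once for PV dom0, PV domU or HVM, and the wrapper MSI domain's alloc/free hooks only ever dispatch through the table. A self-contained sketch of that dispatch pattern, with invented mode names and callbacks standing in for the Xen ones:

        #include <stdio.h>

        /* Invented stand-ins for the Xen setup/teardown callbacks. */
        struct msi_ops {
                int  (*setup)(int nvec);
                void (*teardown)(void);
        };

        static int  pv_setup(int nvec)  { printf("pv: setup %d vectors\n", nvec); return 0; }
        static int  hvm_setup(int nvec) { printf("hvm: setup %d vectors\n", nvec); return 0; }
        static void generic_teardown(void) { printf("teardown\n"); }

        static struct msi_ops msi_ops;  /* filled in once at init, like xen_msi_ops */

        enum mode { MODE_PV, MODE_HVM };

        static void setup_msi(enum mode m)
        {
                switch (m) {
                case MODE_PV:
                        msi_ops.setup = pv_setup;
                        break;
                case MODE_HVM:
                        msi_ops.setup = hvm_setup;
                        break;
                }
                msi_ops.teardown = generic_teardown;
        }

        /* The irq-domain wrapper only ever dispatches through the table. */
        static int  domain_alloc(int nvec) { return msi_ops.setup(nvec); }
        static void domain_free(void)      { msi_ops.teardown(); }

        int main(void)
        {
                setup_msi(MODE_HVM);
                domain_alloc(2);
                domain_free();
                return 0;
        }

The point, mirrored in xen_setup_pci_msi(), is that the mode decision happens once at init while the rest of the MSI path stays mode-agnostic behind the wrapper irq domain.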
+ */ +static __init struct irq_domain *xen_create_pci_msi_domain(void) +{ + struct irq_domain *d = NULL; + struct fwnode_handle *fn; + + fn = irq_domain_alloc_named_fwnode("XEN-MSI"); + if (fn) + d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL); + + /* FIXME: No idea how to survive if this fails */ + BUG_ON(!d); + + return d; +} + +static __init void xen_setup_pci_msi(void) +{ + if (xen_pv_domain()) { + if (xen_initial_domain()) { + xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs; + x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; + } else { + xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs; + } + xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs; + pci_msi_ignore_mask = 1; + } else if (xen_hvm_domain()) { + xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs; + xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs; + } else { + WARN_ON_ONCE(1); + return; + } + + /* + * Override the PCI/MSI irq domain init function. No point + * in allocating the native domain and never use it. + */ + x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain; +} + +#else /* CONFIG_PCI_MSI */ +static inline void xen_setup_pci_msi(void) { } +#endif /* CONFIG_PCI_MSI */ int __init pci_xen_init(void) { @@ -410,17 +511,12 @@ int __init pci_xen_init(void) /* Keep ACPI out of the picture */ acpi_noirq_set(); -#ifdef CONFIG_PCI_MSI - x86_msi.setup_msi_irqs = xen_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; - x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; - pci_msi_ignore_mask = 1; -#endif + xen_setup_pci_msi(); return 0; } #ifdef CONFIG_PCI_MSI -void __init xen_msi_init(void) +static void __init xen_hvm_msi_init(void) { if (!disable_apic) { /* @@ -435,9 +531,7 @@ void __init xen_msi_init(void) ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC))) return; } - - x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; + xen_setup_pci_msi(); } #endif @@ -460,7 +554,7 @@ int __init pci_xen_hvm_init(void) * We need to wait until after x2apic is initialized * before we can set MSI IRQ ops. */ - x86_platform.apic_post_init = xen_msi_init; + x86_platform.apic_post_init = xen_hvm_msi_init; #endif return 0; } @@ -470,12 +564,7 @@ int __init pci_xen_initial_domain(void) { int irq; -#ifdef CONFIG_PCI_MSI - x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; - x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; - pci_msi_ignore_mask = 1; -#endif + xen_setup_pci_msi(); __acpi_register_gsi = acpi_register_gsi_xen; __acpi_unregister_gsi = NULL; /* diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index d37ebe6e70d7..8a26e705cb06 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -90,6 +90,9 @@ static const unsigned long * const efi_tables[] = { &efi.tpm_log, &efi.tpm_final_log, &efi_rng_seed, +#ifdef CONFIG_LOAD_UEFI_KEYS + &efi.mokvar_table, +#endif }; u64 efi_setup; /* efi setup_data physical address */ diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6af4da1149ba..8f5759df7776 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -47,6 +47,7 @@ #include <asm/realmode.h> #include <asm/time.h> #include <asm/pgalloc.h> +#include <asm/sev-es.h> /* * We allocate runtime services regions top-down, starting from -4G, i.e. 
@@ -230,6 +231,15 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) } /* + * When SEV-ES is active, the GHCB as set by the kernel will be used + * by firmware. Create a 1:1 unencrypted mapping for each GHCB. + */ + if (sev_es_efi_map_ghcbs(pgd)) { + pr_err("Failed to create 1:1 mapping for the GHCBs!\n"); + return 1; + } + + /* * When making calls to the firmware everything needs to be 1:1 * mapped and addressable with 32-bit pointers. Map the kernel * text and allocate a new stack because we can't rely on the diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile index a3693c829e2e..224ff0504890 100644 --- a/arch/x86/platform/uv/Makefile +++ b/arch/x86/platform/uv/Makefile @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o +obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index a2f447dffea6..54511eaccf4d 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -2,8 +2,9 @@ /* * BIOS run time interface routines. * - * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. - * Copyright (c) Russ Anderson <rja@sgi.com> + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved. + * Copyright (c) Russ Anderson <rja@sgi.com> */ #include <linux/efi.h> @@ -170,16 +171,27 @@ int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus) (u64)decode, (u64)domain, (u64)bus, 0, 0); } -int uv_bios_init(void) +unsigned long get_uv_systab_phys(bool msg) { - uv_systab = NULL; if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) || !uv_systab_phys || efi_runtime_disabled()) { - pr_crit("UV: UVsystab: missing\n"); - return -EEXIST; + if (msg) + pr_crit("UV: UVsystab: missing\n"); + return 0; } + return uv_systab_phys; +} + +int uv_bios_init(void) +{ + unsigned long uv_systab_phys_addr; + + uv_systab = NULL; + uv_systab_phys_addr = get_uv_systab_phys(1); + if (!uv_systab_phys_addr) + return -EEXIST; - uv_systab = ioremap(uv_systab_phys, sizeof(struct uv_systab)); + uv_systab = ioremap(uv_systab_phys_addr, sizeof(struct uv_systab)); if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) { pr_err("UV: UVsystab: bad signature!\n"); iounmap(uv_systab); @@ -191,7 +203,7 @@ int uv_bios_init(void) int size = uv_systab->size; iounmap(uv_systab); - uv_systab = ioremap(uv_systab_phys, size); + uv_systab = ioremap(uv_systab_phys_addr, size); if (!uv_systab) { pr_err("UV: UVsystab: ioremap(%d) failed!\n", size); return -EFAULT; diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c deleted file mode 100644 index 62ea907668f8..000000000000 --- a/arch/x86/platform/uv/tlb_uv.c +++ /dev/null @@ -1,2097 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * SGI UltraViolet TLB flush routines. - * - * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI. 
- */ -#include <linux/seq_file.h> -#include <linux/proc_fs.h> -#include <linux/debugfs.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/delay.h> - -#include <asm/mmu_context.h> -#include <asm/uv/uv.h> -#include <asm/uv/uv_mmrs.h> -#include <asm/uv/uv_hub.h> -#include <asm/uv/uv_bau.h> -#include <asm/apic.h> -#include <asm/tsc.h> -#include <asm/irq_vectors.h> -#include <asm/timer.h> - -static struct bau_operations ops __ro_after_init; - -static int timeout_us; -static bool nobau = true; -static int nobau_perm; - -/* tunables: */ -static int max_concurr = MAX_BAU_CONCURRENT; -static int max_concurr_const = MAX_BAU_CONCURRENT; -static int plugged_delay = PLUGGED_DELAY; -static int plugsb4reset = PLUGSB4RESET; -static int giveup_limit = GIVEUP_LIMIT; -static int timeoutsb4reset = TIMEOUTSB4RESET; -static int ipi_reset_limit = IPI_RESET_LIMIT; -static int complete_threshold = COMPLETE_THRESHOLD; -static int congested_respns_us = CONGESTED_RESPONSE_US; -static int congested_reps = CONGESTED_REPS; -static int disabled_period = DISABLED_PERIOD; - -static struct tunables tunables[] = { - {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */ - {&plugged_delay, PLUGGED_DELAY}, - {&plugsb4reset, PLUGSB4RESET}, - {&timeoutsb4reset, TIMEOUTSB4RESET}, - {&ipi_reset_limit, IPI_RESET_LIMIT}, - {&complete_threshold, COMPLETE_THRESHOLD}, - {&congested_respns_us, CONGESTED_RESPONSE_US}, - {&congested_reps, CONGESTED_REPS}, - {&disabled_period, DISABLED_PERIOD}, - {&giveup_limit, GIVEUP_LIMIT} -}; - -static struct dentry *tunables_dir; - -/* these correspond to the statistics printed by ptc_seq_show() */ -static char *stat_description[] = { - "sent: number of shootdown messages sent", - "stime: time spent sending messages", - "numuvhubs: number of hubs targeted with shootdown", - "numuvhubs16: number times 16 or more hubs targeted", - "numuvhubs8: number times 8 or more hubs targeted", - "numuvhubs4: number times 4 or more hubs targeted", - "numuvhubs2: number times 2 or more hubs targeted", - "numuvhubs1: number times 1 hub targeted", - "numcpus: number of cpus targeted with shootdown", - "dto: number of destination timeouts", - "retries: destination timeout retries sent", - "rok: : destination timeouts successfully retried", - "resetp: ipi-style resource resets for plugs", - "resett: ipi-style resource resets for timeouts", - "giveup: fall-backs to ipi-style shootdowns", - "sto: number of source timeouts", - "bz: number of stay-busy's", - "throt: number times spun in throttle", - "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE", - "recv: shootdown messages received", - "rtime: time spent processing messages", - "all: shootdown all-tlb messages", - "one: shootdown one-tlb messages", - "mult: interrupts that found multiple messages", - "none: interrupts that found no messages", - "retry: number of retry messages processed", - "canc: number messages canceled by retries", - "nocan: number retries that found nothing to cancel", - "reset: number of ipi-style reset requests processed", - "rcan: number messages canceled by reset requests", - "disable: number times use of the BAU was disabled", - "enable: number times use of the BAU was re-enabled" -}; - -static int __init setup_bau(char *arg) -{ - int result; - - if (!arg) - return -EINVAL; - - result = strtobool(arg, &nobau); - if (result) - return result; - - /* we need to flip the logic here, so that bau=y sets nobau to false */ - nobau = !nobau; - - if (!nobau) - pr_info("UV BAU Enabled\n"); - else - pr_info("UV BAU Disabled\n"); - - 
return 0; -} -early_param("bau", setup_bau); - -/* base pnode in this partition */ -static int uv_base_pnode __read_mostly; - -static DEFINE_PER_CPU(struct ptc_stats, ptcstats); -static DEFINE_PER_CPU(struct bau_control, bau_control); -static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); - -static void -set_bau_on(void) -{ - int cpu; - struct bau_control *bcp; - - if (nobau_perm) { - pr_info("BAU not initialized; cannot be turned on\n"); - return; - } - nobau = false; - for_each_present_cpu(cpu) { - bcp = &per_cpu(bau_control, cpu); - bcp->nobau = false; - } - pr_info("BAU turned on\n"); - return; -} - -static void -set_bau_off(void) -{ - int cpu; - struct bau_control *bcp; - - nobau = true; - for_each_present_cpu(cpu) { - bcp = &per_cpu(bau_control, cpu); - bcp->nobau = true; - } - pr_info("BAU turned off\n"); - return; -} - -/* - * Determine the first node on a uvhub. 'Nodes' are used for kernel - * memory allocation. - */ -static int __init uvhub_to_first_node(int uvhub) -{ - int node, b; - - for_each_online_node(node) { - b = uv_node_to_blade_id(node); - if (uvhub == b) - return node; - } - return -1; -} - -/* - * Determine the apicid of the first cpu on a uvhub. - */ -static int __init uvhub_to_first_apicid(int uvhub) -{ - int cpu; - - for_each_present_cpu(cpu) - if (uvhub == uv_cpu_to_blade_id(cpu)) - return per_cpu(x86_cpu_to_apicid, cpu); - return -1; -} - -/* - * Free a software acknowledge hardware resource by clearing its Pending - * bit. This will return a reply to the sender. - * If the message has timed out, a reply has already been sent by the - * hardware but the resource has not been released. In that case our - * clear of the Timeout bit (as well) will free the resource. No reply will - * be sent (the hardware will only do one reply per message). - */ -static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp, - int do_acknowledge) -{ - unsigned long dw; - struct bau_pq_entry *msg; - - msg = mdp->msg; - if (!msg->canceled && do_acknowledge) { - dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec; - ops.write_l_sw_ack(dw); - } - msg->replied_to = 1; - msg->swack_vec = 0; -} - -/* - * Process the receipt of a RETRY message - */ -static void bau_process_retry_msg(struct msg_desc *mdp, - struct bau_control *bcp) -{ - int i; - int cancel_count = 0; - unsigned long msg_res; - unsigned long mmr = 0; - struct bau_pq_entry *msg = mdp->msg; - struct bau_pq_entry *msg2; - struct ptc_stats *stat = bcp->statp; - - stat->d_retries++; - /* - * cancel any message from msg+1 to the retry itself - */ - for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) { - if (msg2 > mdp->queue_last) - msg2 = mdp->queue_first; - if (msg2 == msg) - break; - - /* same conditions for cancellation as do_reset */ - if ((msg2->replied_to == 0) && (msg2->canceled == 0) && - (msg2->swack_vec) && ((msg2->swack_vec & - msg->swack_vec) == 0) && - (msg2->sending_cpu == msg->sending_cpu) && - (msg2->msg_type != MSG_NOOP)) { - mmr = ops.read_l_sw_ack(); - msg_res = msg2->swack_vec; - /* - * This is a message retry; clear the resources held - * by the previous message only if they timed out. - * If it has not timed out we have an unexpected - * situation to report. - */ - if (mmr & (msg_res << UV_SW_ACK_NPENDING)) { - unsigned long mr; - /* - * Is the resource timed out? - * Make everyone ignore the cancelled message. 
- */ - msg2->canceled = 1; - stat->d_canceled++; - cancel_count++; - mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; - ops.write_l_sw_ack(mr); - } - } - } - if (!cancel_count) - stat->d_nocanceled++; -} - -/* - * Do all the things a cpu should do for a TLB shootdown message. - * Other cpu's may come here at the same time for this message. - */ -static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, - int do_acknowledge) -{ - short socket_ack_count = 0; - short *sp; - struct atomic_short *asp; - struct ptc_stats *stat = bcp->statp; - struct bau_pq_entry *msg = mdp->msg; - struct bau_control *smaster = bcp->socket_master; - - /* - * This must be a normal message, or retry of a normal message - */ - if (msg->address == TLB_FLUSH_ALL) { - flush_tlb_local(); - stat->d_alltlb++; - } else { - flush_tlb_one_user(msg->address); - stat->d_onetlb++; - } - stat->d_requestee++; - - /* - * One cpu on each uvhub has the additional job on a RETRY - * of releasing the resource held by the message that is - * being retried. That message is identified by sending - * cpu number. - */ - if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master) - bau_process_retry_msg(mdp, bcp); - - /* - * This is a swack message, so we have to reply to it. - * Count each responding cpu on the socket. This avoids - * pinging the count's cache line back and forth between - * the sockets. - */ - sp = &smaster->socket_acknowledge_count[mdp->msg_slot]; - asp = (struct atomic_short *)sp; - socket_ack_count = atom_asr(1, asp); - if (socket_ack_count == bcp->cpus_in_socket) { - int msg_ack_count; - /* - * Both sockets dump their completed count total into - * the message's count. - */ - *sp = 0; - asp = (struct atomic_short *)&msg->acknowledge_count; - msg_ack_count = atom_asr(socket_ack_count, asp); - - if (msg_ack_count == bcp->cpus_in_uvhub) { - /* - * All cpus in uvhub saw it; reply - * (unless we are in the UV2 workaround) - */ - reply_to_message(mdp, bcp, do_acknowledge); - } - } - - return; -} - -/* - * Determine the first cpu on a pnode. - */ -static int pnode_to_first_cpu(int pnode, struct bau_control *smaster) -{ - int cpu; - struct hub_and_pnode *hpp; - - for_each_present_cpu(cpu) { - hpp = &smaster->thp[cpu]; - if (pnode == hpp->pnode) - return cpu; - } - return -1; -} - -/* - * Last resort when we get a large number of destination timeouts is - * to clear resources held by a given cpu. - * Do this with IPI so that all messages in the BAU message queue - * can be identified by their nonzero swack_vec field. - * - * This is entered for a single cpu on the uvhub. - * The sender want's this uvhub to free a specific message's - * swack resources. - */ -static void do_reset(void *ptr) -{ - int i; - struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id()); - struct reset_args *rap = (struct reset_args *)ptr; - struct bau_pq_entry *msg; - struct ptc_stats *stat = bcp->statp; - - stat->d_resets++; - /* - * We're looking for the given sender, and - * will free its swack resource. - * If all cpu's finally responded after the timeout, its - * message 'replied_to' was set. 
- */ - for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) { - unsigned long msg_res; - /* do_reset: same conditions for cancellation as - bau_process_retry_msg() */ - if ((msg->replied_to == 0) && - (msg->canceled == 0) && - (msg->sending_cpu == rap->sender) && - (msg->swack_vec) && - (msg->msg_type != MSG_NOOP)) { - unsigned long mmr; - unsigned long mr; - /* - * make everyone else ignore this message - */ - msg->canceled = 1; - /* - * only reset the resource if it is still pending - */ - mmr = ops.read_l_sw_ack(); - msg_res = msg->swack_vec; - mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res; - if (mmr & msg_res) { - stat->d_rcanceled++; - ops.write_l_sw_ack(mr); - } - } - } - return; -} - -/* - * Use IPI to get all target uvhubs to release resources held by - * a given sending cpu number. - */ -static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) -{ - int pnode; - int apnode; - int maskbits; - int sender = bcp->cpu; - cpumask_t *mask = bcp->uvhub_master->cpumask; - struct bau_control *smaster = bcp->socket_master; - struct reset_args reset_args; - - reset_args.sender = sender; - cpumask_clear(mask); - /* find a single cpu for each uvhub in this distribution mask */ - maskbits = sizeof(struct pnmask) * BITSPERBYTE; - /* each bit is a pnode relative to the partition base pnode */ - for (pnode = 0; pnode < maskbits; pnode++) { - int cpu; - if (!bau_uvhub_isset(pnode, distribution)) - continue; - apnode = pnode + bcp->partition_base_pnode; - cpu = pnode_to_first_cpu(apnode, smaster); - cpumask_set_cpu(cpu, mask); - } - - /* IPI all cpus; preemption is already disabled */ - smp_call_function_many(mask, do_reset, (void *)&reset_args, 1); - return; -} - -/* - * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative - * number, not an absolute. It converts a duration in cycles to a duration in - * ns. - */ -static inline unsigned long long cycles_2_ns(unsigned long long cyc) -{ - struct cyc2ns_data data; - unsigned long long ns; - - cyc2ns_read_begin(&data); - ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift); - cyc2ns_read_end(); - - return ns; -} - -/* - * The reverse of the above; converts a duration in ns to a duration in cycles. - */ -static inline unsigned long long ns_2_cycles(unsigned long long ns) -{ - struct cyc2ns_data data; - unsigned long long cyc; - - cyc2ns_read_begin(&data); - cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul; - cyc2ns_read_end(); - - return cyc; -} - -static inline unsigned long cycles_2_us(unsigned long long cyc) -{ - return cycles_2_ns(cyc) / NSEC_PER_USEC; -} - -static inline cycles_t sec_2_cycles(unsigned long sec) -{ - return ns_2_cycles(sec * NSEC_PER_SEC); -} - -static inline unsigned long long usec_2_cycles(unsigned long usec) -{ - return ns_2_cycles(usec * NSEC_PER_USEC); -} - -/* - * wait for all cpus on this hub to finish their sends and go quiet - * leaves uvhub_quiesce set so that no new broadcasts are started by - * bau_flush_send_and_wait() - */ -static inline void quiesce_local_uvhub(struct bau_control *hmaster) -{ - atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce); -} - -/* - * mark this quiet-requestor as done - */ -static inline void end_uvhub_quiesce(struct bau_control *hmaster) -{ - atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce); -} - -/* - * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register. - * But not currently used. 
- */ -static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc) -{ - return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1; -} - -/* - * Entered when a bau descriptor has gone into a permanent busy wait because - * of a hardware bug. - * Workaround the bug. - */ -static int handle_uv2_busy(struct bau_control *bcp) -{ - struct ptc_stats *stat = bcp->statp; - - stat->s_uv2_wars++; - bcp->busy = 1; - return FLUSH_GIVEUP; -} - -static int uv2_3_wait_completion(struct bau_desc *bau_desc, - struct bau_control *bcp, long try) -{ - unsigned long descriptor_stat; - cycles_t ttm; - u64 mmr_offset = bcp->status_mmr; - int right_shift = bcp->status_index; - int desc = bcp->uvhub_cpu; - long busy_reps = 0; - struct ptc_stats *stat = bcp->statp; - - descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc); - - /* spin on the status MMR, waiting for it to go idle */ - while (descriptor_stat != UV2H_DESC_IDLE) { - if (descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) { - /* - * A h/w bug on the destination side may - * have prevented the message being marked - * pending, thus it doesn't get replied to - * and gets continually nacked until it times - * out with a SOURCE_TIMEOUT. - */ - stat->s_stimeout++; - return FLUSH_GIVEUP; - } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) { - ttm = get_cycles(); - - /* - * Our retries may be blocked by all destination - * swack resources being consumed, and a timeout - * pending. In that case hardware returns the - * ERROR that looks like a destination timeout. - * Without using the extended status we have to - * deduce from the short time that this was a - * strong nack. - */ - if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { - bcp->conseccompletes = 0; - stat->s_plugged++; - /* FLUSH_RETRY_PLUGGED causes hang on boot */ - return FLUSH_GIVEUP; - } - stat->s_dtimeout++; - bcp->conseccompletes = 0; - /* FLUSH_RETRY_TIMEOUT causes hang on boot */ - return FLUSH_GIVEUP; - } else { - busy_reps++; - if (busy_reps > 1000000) { - /* not to hammer on the clock */ - busy_reps = 0; - ttm = get_cycles(); - if ((ttm - bcp->send_message) > bcp->timeout_interval) - return handle_uv2_busy(bcp); - } - /* - * descriptor_stat is still BUSY - */ - cpu_relax(); - } - descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc); - } - bcp->conseccompletes++; - return FLUSH_COMPLETE; -} - -/* - * Returns the status of current BAU message for cpu desc as a bit field - * [Error][Busy][Aux] - */ -static u64 read_status(u64 status_mmr, int index, int desc) -{ - u64 stat; - - stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1; - stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1; - - return stat; -} - -static int uv4_wait_completion(struct bau_desc *bau_desc, - struct bau_control *bcp, long try) -{ - struct ptc_stats *stat = bcp->statp; - u64 descriptor_stat; - u64 mmr = bcp->status_mmr; - int index = bcp->status_index; - int desc = bcp->uvhub_cpu; - - descriptor_stat = read_status(mmr, index, desc); - - /* spin on the status MMR, waiting for it to go idle */ - while (descriptor_stat != UV2H_DESC_IDLE) { - switch (descriptor_stat) { - case UV2H_DESC_SOURCE_TIMEOUT: - stat->s_stimeout++; - return FLUSH_GIVEUP; - - case UV2H_DESC_DEST_TIMEOUT: - stat->s_dtimeout++; - bcp->conseccompletes = 0; - return FLUSH_RETRY_TIMEOUT; - - case UV2H_DESC_DEST_STRONG_NACK: - stat->s_plugged++; - bcp->conseccompletes = 0; - return FLUSH_RETRY_PLUGGED; - - case UV2H_DESC_DEST_PUT_ERR: - bcp->conseccompletes = 0; - return 
FLUSH_GIVEUP; - - default: - /* descriptor_stat is still BUSY */ - cpu_relax(); - } - descriptor_stat = read_status(mmr, index, desc); - } - bcp->conseccompletes++; - return FLUSH_COMPLETE; -} - -/* - * Our retries are blocked by all destination sw ack resources being - * in use, and a timeout is pending. In that case hardware immediately - * returns the ERROR that looks like a destination timeout. - */ -static void destination_plugged(struct bau_desc *bau_desc, - struct bau_control *bcp, - struct bau_control *hmaster, struct ptc_stats *stat) -{ - udelay(bcp->plugged_delay); - bcp->plugged_tries++; - - if (bcp->plugged_tries >= bcp->plugsb4reset) { - bcp->plugged_tries = 0; - - quiesce_local_uvhub(hmaster); - - spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); - spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - - bcp->ipi_attempts++; - stat->s_resets_plug++; - } -} - -static void destination_timeout(struct bau_desc *bau_desc, - struct bau_control *bcp, struct bau_control *hmaster, - struct ptc_stats *stat) -{ - hmaster->max_concurr = 1; - bcp->timeout_tries++; - if (bcp->timeout_tries >= bcp->timeoutsb4reset) { - bcp->timeout_tries = 0; - - quiesce_local_uvhub(hmaster); - - spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); - spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - - bcp->ipi_attempts++; - stat->s_resets_timeout++; - } -} - -/* - * Stop all cpus on a uvhub from using the BAU for a period of time. - * This is reversed by check_enable. - */ -static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat) -{ - int tcpu; - struct bau_control *tbcp; - struct bau_control *hmaster; - cycles_t tm1; - - hmaster = bcp->uvhub_master; - spin_lock(&hmaster->disable_lock); - if (!bcp->baudisabled) { - stat->s_bau_disabled++; - tm1 = get_cycles(); - for_each_present_cpu(tcpu) { - tbcp = &per_cpu(bau_control, tcpu); - if (tbcp->uvhub_master == hmaster) { - tbcp->baudisabled = 1; - tbcp->set_bau_on_time = - tm1 + bcp->disabled_period; - } - } - } - spin_unlock(&hmaster->disable_lock); -} - -static void count_max_concurr(int stat, struct bau_control *bcp, - struct bau_control *hmaster) -{ - bcp->plugged_tries = 0; - bcp->timeout_tries = 0; - if (stat != FLUSH_COMPLETE) - return; - if (bcp->conseccompletes <= bcp->complete_threshold) - return; - if (hmaster->max_concurr >= hmaster->max_concurr_const) - return; - hmaster->max_concurr++; -} - -static void record_send_stats(cycles_t time1, cycles_t time2, - struct bau_control *bcp, struct ptc_stats *stat, - int completion_status, int try) -{ - cycles_t elapsed; - - if (time2 > time1) { - elapsed = time2 - time1; - stat->s_time += elapsed; - - if ((completion_status == FLUSH_COMPLETE) && (try == 1)) { - bcp->period_requests++; - bcp->period_time += elapsed; - if ((elapsed > usec_2_cycles(bcp->cong_response_us)) && - (bcp->period_requests > bcp->cong_reps) && - ((bcp->period_time / bcp->period_requests) > - usec_2_cycles(bcp->cong_response_us))) { - stat->s_congested++; - disable_for_period(bcp, stat); - } - } - } else - stat->s_requestor--; - - if (completion_status == FLUSH_COMPLETE && try > 1) - stat->s_retriesok++; - else if (completion_status == FLUSH_GIVEUP) { - stat->s_giveup++; - if (get_cycles() > bcp->period_end) - bcp->period_giveups = 0; - bcp->period_giveups++; - if (bcp->period_giveups == 1) - bcp->period_end = get_cycles() + bcp->disabled_period; - if (bcp->period_giveups > bcp->giveup_limit) { - disable_for_period(bcp, 
stat); - stat->s_giveuplimit++; - } - } -} - -/* - * Handle the completion status of a message send. - */ -static void handle_cmplt(int completion_status, struct bau_desc *bau_desc, - struct bau_control *bcp, struct bau_control *hmaster, - struct ptc_stats *stat) -{ - if (completion_status == FLUSH_RETRY_PLUGGED) - destination_plugged(bau_desc, bcp, hmaster, stat); - else if (completion_status == FLUSH_RETRY_TIMEOUT) - destination_timeout(bau_desc, bcp, hmaster, stat); -} - -/* - * Send a broadcast and wait for it to complete. - * - * The flush_mask contains the cpus the broadcast is to be sent to including - * cpus that are on the local uvhub. - * - * Returns 0 if all flushing represented in the mask was done. - * Returns 1 if it gives up entirely and the original cpu mask is to be - * returned to the kernel. - */ -static int uv_flush_send_and_wait(struct cpumask *flush_mask, - struct bau_control *bcp, - struct bau_desc *bau_desc) -{ - int seq_number = 0; - int completion_stat = 0; - long try = 0; - unsigned long index; - cycles_t time1; - cycles_t time2; - struct ptc_stats *stat = bcp->statp; - struct bau_control *hmaster = bcp->uvhub_master; - struct uv2_3_bau_msg_header *uv2_3_hdr = NULL; - - while (hmaster->uvhub_quiesce) - cpu_relax(); - - time1 = get_cycles(); - uv2_3_hdr = &bau_desc->header.uv2_3_hdr; - - do { - if (try == 0) { - uv2_3_hdr->msg_type = MSG_REGULAR; - seq_number = bcp->message_number++; - } else { - uv2_3_hdr->msg_type = MSG_RETRY; - stat->s_retry_messages++; - } - - uv2_3_hdr->sequence = seq_number; - index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu; - bcp->send_message = get_cycles(); - - write_mmr_activation(index); - - try++; - completion_stat = ops.wait_completion(bau_desc, bcp, try); - - handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat); - - if (bcp->ipi_attempts >= bcp->ipi_reset_limit) { - bcp->ipi_attempts = 0; - stat->s_overipilimit++; - completion_stat = FLUSH_GIVEUP; - break; - } - cpu_relax(); - } while ((completion_stat == FLUSH_RETRY_PLUGGED) || - (completion_stat == FLUSH_RETRY_TIMEOUT)); - - time2 = get_cycles(); - - count_max_concurr(completion_stat, bcp, hmaster); - - while (hmaster->uvhub_quiesce) - cpu_relax(); - - atomic_dec(&hmaster->active_descriptor_count); - - record_send_stats(time1, time2, bcp, stat, completion_stat, try); - - if (completion_stat == FLUSH_GIVEUP) - /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */ - return 1; - return 0; -} - -/* - * The BAU is disabled for this uvhub. When the disabled time period has - * expired re-enable it. - * Return 0 if it is re-enabled for all cpus on this uvhub. 
- */ -static int check_enable(struct bau_control *bcp, struct ptc_stats *stat) -{ - int tcpu; - struct bau_control *tbcp; - struct bau_control *hmaster; - - hmaster = bcp->uvhub_master; - spin_lock(&hmaster->disable_lock); - if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { - stat->s_bau_reenabled++; - for_each_present_cpu(tcpu) { - tbcp = &per_cpu(bau_control, tcpu); - if (tbcp->uvhub_master == hmaster) { - tbcp->baudisabled = 0; - tbcp->period_requests = 0; - tbcp->period_time = 0; - tbcp->period_giveups = 0; - } - } - spin_unlock(&hmaster->disable_lock); - return 0; - } - spin_unlock(&hmaster->disable_lock); - return -1; -} - -static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs, - int remotes, struct bau_desc *bau_desc) -{ - stat->s_requestor++; - stat->s_ntargcpu += remotes + locals; - stat->s_ntargremotes += remotes; - stat->s_ntarglocals += locals; - - /* uvhub statistics */ - hubs = bau_uvhub_weight(&bau_desc->distribution); - if (locals) { - stat->s_ntarglocaluvhub++; - stat->s_ntargremoteuvhub += (hubs - 1); - } else - stat->s_ntargremoteuvhub += hubs; - - stat->s_ntarguvhub += hubs; - - if (hubs >= 16) - stat->s_ntarguvhub16++; - else if (hubs >= 8) - stat->s_ntarguvhub8++; - else if (hubs >= 4) - stat->s_ntarguvhub4++; - else if (hubs >= 2) - stat->s_ntarguvhub2++; - else - stat->s_ntarguvhub1++; -} - -/* - * Translate a cpu mask to the uvhub distribution mask in the BAU - * activation descriptor. - */ -static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp, - struct bau_desc *bau_desc, int *localsp, int *remotesp) -{ - int cpu; - int pnode; - int cnt = 0; - struct hub_and_pnode *hpp; - - for_each_cpu(cpu, flush_mask) { - /* - * The distribution vector is a bit map of pnodes, relative - * to the partition base pnode (and the partition base nasid - * in the header). - * Translate cpu to pnode and hub using a local memory array. - */ - hpp = &bcp->socket_master->thp[cpu]; - pnode = hpp->pnode - bcp->partition_base_pnode; - bau_uvhub_set(pnode, &bau_desc->distribution); - cnt++; - if (hpp->uvhub == bcp->uvhub) - (*localsp)++; - else - (*remotesp)++; - } - if (!cnt) - return 1; - return 0; -} - -/* - * globally purge translation cache of a virtual address or all TLB's - * @cpumask: mask of all cpu's in which the address is to be removed - * @mm: mm_struct containing virtual address range - * @start: start virtual address to be removed from TLB - * @end: end virtual address to be remove from TLB - * @cpu: the current cpu - * - * This is the entry point for initiating any UV global TLB shootdown. - * - * Purges the translation caches of all specified processors of the given - * virtual address, or purges all TLB's on specified processors. - * - * The caller has derived the cpumask from the mm_struct. This function - * is called only if there are bits set in the mask. (e.g. flush_tlb_page()) - * - * The cpumask is converted into a uvhubmask of the uvhubs containing - * those cpus. - * - * Note that this function should be called with preemption disabled. - * - * Returns NULL if all remote flushing was done. - * Returns pointer to cpumask if some remote flushing remains to be - * done. The returned pointer is valid till preemption is re-enabled. 
- */ -const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, - const struct flush_tlb_info *info) -{ - unsigned int cpu = smp_processor_id(); - int locals = 0, remotes = 0, hubs = 0; - struct bau_desc *bau_desc; - struct cpumask *flush_mask; - struct ptc_stats *stat; - struct bau_control *bcp; - unsigned long descriptor_status, status, address; - - bcp = &per_cpu(bau_control, cpu); - - if (bcp->nobau) - return cpumask; - - stat = bcp->statp; - stat->s_enters++; - - if (bcp->busy) { - descriptor_status = - read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); - status = ((descriptor_status >> (bcp->uvhub_cpu * - UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1; - if (status == UV2H_DESC_BUSY) - return cpumask; - bcp->busy = 0; - } - - /* bau was disabled due to slow response */ - if (bcp->baudisabled) { - if (check_enable(bcp, stat)) { - stat->s_ipifordisabled++; - return cpumask; - } - } - - /* - * Each sending cpu has a per-cpu mask which it fills from the caller's - * cpu mask. All cpus are converted to uvhubs and copied to the - * activation descriptor. - */ - flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu); - /* don't actually do a shootdown of the local cpu */ - cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); - - if (cpumask_test_cpu(cpu, cpumask)) - stat->s_ntargself++; - - bau_desc = bcp->descriptor_base; - bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu); - bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); - if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes)) - return NULL; - - record_send_statistics(stat, locals, hubs, remotes, bau_desc); - - if (!info->end || (info->end - info->start) <= PAGE_SIZE) - address = info->start; - else - address = TLB_FLUSH_ALL; - - switch (bcp->uvhub_version) { - case UV_BAU_V2: - case UV_BAU_V3: - bau_desc->payload.uv2_3.address = address; - bau_desc->payload.uv2_3.sending_cpu = cpu; - break; - case UV_BAU_V4: - bau_desc->payload.uv4.address = address; - bau_desc->payload.uv4.sending_cpu = cpu; - bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER; - break; - } - - /* - * uv_flush_send_and_wait returns 0 if all cpu's were messaged, - * or 1 if it gave up and the original cpumask should be returned. - */ - if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc)) - return NULL; - else - return cpumask; -} - -/* - * Search the message queue for any 'other' unprocessed message with the - * same software acknowledge resource bit vector as the 'msg' message. - */ -static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg, - struct bau_control *bcp) -{ - struct bau_pq_entry *msg_next = msg + 1; - unsigned char swack_vec = msg->swack_vec; - - if (msg_next > bcp->queue_last) - msg_next = bcp->queue_first; - while (msg_next != msg) { - if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) && - (msg_next->swack_vec == swack_vec)) - return msg_next; - msg_next++; - if (msg_next > bcp->queue_last) - msg_next = bcp->queue_first; - } - return NULL; -} - -/* - * UV2 needs to work around a bug in which an arriving message has not - * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register. - * Such a message must be ignored. 
- */ -static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp) -{ - unsigned long mmr_image; - unsigned char swack_vec; - struct bau_pq_entry *msg = mdp->msg; - struct bau_pq_entry *other_msg; - - mmr_image = ops.read_l_sw_ack(); - swack_vec = msg->swack_vec; - - if ((swack_vec & mmr_image) == 0) { - /* - * This message was assigned a swack resource, but no - * reserved acknowlegment is pending. - * The bug has prevented this message from setting the MMR. - */ - /* - * Some message has set the MMR 'pending' bit; it might have - * been another message. Look for that message. - */ - other_msg = find_another_by_swack(msg, bcp); - if (other_msg) { - /* - * There is another. Process this one but do not - * ack it. - */ - bau_process_message(mdp, bcp, 0); - /* - * Let the natural processing of that other message - * acknowledge it. Don't get the processing of sw_ack's - * out of order. - */ - return; - } - } - - /* - * Either the MMR shows this one pending a reply or there is no - * other message using this sw_ack, so it is safe to acknowledge it. - */ - bau_process_message(mdp, bcp, 1); - - return; -} - -/* - * The BAU message interrupt comes here. (registered by set_intr_gate) - * See entry_64.S - * - * We received a broadcast assist message. - * - * Interrupts are disabled; this interrupt could represent - * the receipt of several messages. - * - * All cores/threads on this hub get this interrupt. - * The last one to see it does the software ack. - * (the resource will not be freed until noninterruptable cpus see this - * interrupt; hardware may timeout the s/w ack and reply ERROR) - */ -DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message) -{ - int count = 0; - cycles_t time_start; - struct bau_pq_entry *msg; - struct bau_control *bcp; - struct ptc_stats *stat; - struct msg_desc msgdesc; - - ack_APIC_irq(); - kvm_set_cpu_l1tf_flush_l1d(); - time_start = get_cycles(); - - bcp = &per_cpu(bau_control, smp_processor_id()); - stat = bcp->statp; - - msgdesc.queue_first = bcp->queue_first; - msgdesc.queue_last = bcp->queue_last; - - msg = bcp->bau_msg_head; - while (msg->swack_vec) { - count++; - - msgdesc.msg_slot = msg - msgdesc.queue_first; - msgdesc.msg = msg; - if (bcp->uvhub_version == UV_BAU_V2) - process_uv2_message(&msgdesc, bcp); - else - /* no error workaround for uv3 */ - bau_process_message(&msgdesc, bcp, 1); - - msg++; - if (msg > msgdesc.queue_last) - msg = msgdesc.queue_first; - bcp->bau_msg_head = msg; - } - stat->d_time += (get_cycles() - time_start); - if (!count) - stat->d_nomsg++; - else if (count > 1) - stat->d_multmsg++; -} - -/* - * Each target uvhub (i.e. a uvhub that has cpu's) needs to have - * shootdown message timeouts enabled. The timeout does not cause - * an interrupt, but causes an error message to be returned to - * the sender. - */ -static void __init enable_timeouts(void) -{ - int uvhub; - int nuvhubs; - int pnode; - unsigned long mmr_image; - - nuvhubs = uv_num_possible_blades(); - - for (uvhub = 0; uvhub < nuvhubs; uvhub++) { - if (!uv_blade_nr_possible_cpus(uvhub)) - continue; - - pnode = uv_blade_to_pnode(uvhub); - mmr_image = read_mmr_misc_control(pnode); - /* - * Set the timeout period and then lock it in, in three - * steps; captures and locks in the period. - * - * To program the period, the SOFT_ACK_MODE must be off. - */ - mmr_image &= ~(1L << SOFTACK_MSHIFT); - write_mmr_misc_control(pnode, mmr_image); - /* - * Set the 4-bit period. 
- */ - mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT); - mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT); - write_mmr_misc_control(pnode, mmr_image); - - mmr_image |= (1L << SOFTACK_MSHIFT); - if (is_uv2_hub()) { - /* do not touch the legacy mode bit */ - /* hw bug workaround; do not use extended status */ - mmr_image &= ~(1L << UV2_EXT_SHFT); - } else if (is_uv3_hub()) { - mmr_image &= ~(1L << PREFETCH_HINT_SHFT); - mmr_image |= (1L << SB_STATUS_SHFT); - } - write_mmr_misc_control(pnode, mmr_image); - } -} - -static void *ptc_seq_start(struct seq_file *file, loff_t *offset) -{ - if (*offset < num_possible_cpus()) - return offset; - return NULL; -} - -static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset) -{ - (*offset)++; - if (*offset < num_possible_cpus()) - return offset; - return NULL; -} - -static void ptc_seq_stop(struct seq_file *file, void *data) -{ -} - -/* - * Display the statistics thru /proc/sgi_uv/ptc_statistics - * 'data' points to the cpu number - * Note: see the descriptions in stat_description[]. - */ -static int ptc_seq_show(struct seq_file *file, void *data) -{ - struct ptc_stats *stat; - struct bau_control *bcp; - int cpu; - - cpu = *(loff_t *)data; - if (!cpu) { - seq_puts(file, - "# cpu bauoff sent stime self locals remotes ncpus localhub "); - seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 "); - seq_puts(file, - "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries "); - seq_puts(file, - "rok resetp resett giveup sto bz throt disable "); - seq_puts(file, - "enable wars warshw warwaits enters ipidis plugged "); - seq_puts(file, - "ipiover glim cong swack recv rtime all one mult "); - seq_puts(file, "none retry canc nocan reset rcan\n"); - } - if (cpu < num_possible_cpus() && cpu_online(cpu)) { - bcp = &per_cpu(bau_control, cpu); - if (bcp->nobau) { - seq_printf(file, "cpu %d bau disabled\n", cpu); - return 0; - } - stat = bcp->statp; - /* source side statistics */ - seq_printf(file, - "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", - cpu, bcp->nobau, stat->s_requestor, - cycles_2_us(stat->s_time), - stat->s_ntargself, stat->s_ntarglocals, - stat->s_ntargremotes, stat->s_ntargcpu, - stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub, - stat->s_ntarguvhub, stat->s_ntarguvhub16); - seq_printf(file, "%ld %ld %ld %ld %ld %ld ", - stat->s_ntarguvhub8, stat->s_ntarguvhub4, - stat->s_ntarguvhub2, stat->s_ntarguvhub1, - stat->s_dtimeout, stat->s_strongnacks); - seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ", - stat->s_retry_messages, stat->s_retriesok, - stat->s_resets_plug, stat->s_resets_timeout, - stat->s_giveup, stat->s_stimeout, - stat->s_busy, stat->s_throttles); - seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", - stat->s_bau_disabled, stat->s_bau_reenabled, - stat->s_uv2_wars, stat->s_uv2_wars_hw, - stat->s_uv2_war_waits, stat->s_enters, - stat->s_ipifordisabled, stat->s_plugged, - stat->s_overipilimit, stat->s_giveuplimit, - stat->s_congested); - - /* destination side statistics */ - seq_printf(file, - "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", - ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)), - stat->d_requestee, cycles_2_us(stat->d_time), - stat->d_alltlb, stat->d_onetlb, stat->d_multmsg, - stat->d_nomsg, stat->d_retries, stat->d_canceled, - stat->d_nocanceled, stat->d_resets, - stat->d_rcanceled); - } - return 0; -} - -/* - * Display the tunables thru debugfs - */ -static ssize_t tunables_read(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) -{ - char *buf; - int ret; 
- - buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n", - "max_concur plugged_delay plugsb4reset timeoutsb4reset", - "ipi_reset_limit complete_threshold congested_response_us", - "congested_reps disabled_period giveup_limit", - max_concurr, plugged_delay, plugsb4reset, - timeoutsb4reset, ipi_reset_limit, complete_threshold, - congested_respns_us, congested_reps, disabled_period, - giveup_limit); - - if (!buf) - return -ENOMEM; - - ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); - kfree(buf); - return ret; -} - -/* - * handle a write to /proc/sgi_uv/ptc_statistics - * -1: reset the statistics - * 0: display meaning of the statistics - */ -static ssize_t ptc_proc_write(struct file *file, const char __user *user, - size_t count, loff_t *data) -{ - int cpu; - int i; - int elements; - long input_arg; - char optstr[64]; - struct ptc_stats *stat; - - if (count == 0 || count > sizeof(optstr)) - return -EINVAL; - if (copy_from_user(optstr, user, count)) - return -EFAULT; - optstr[count - 1] = '\0'; - - if (!strcmp(optstr, "on")) { - set_bau_on(); - return count; - } else if (!strcmp(optstr, "off")) { - set_bau_off(); - return count; - } - - if (kstrtol(optstr, 10, &input_arg) < 0) { - pr_debug("%s is invalid\n", optstr); - return -EINVAL; - } - - if (input_arg == 0) { - elements = ARRAY_SIZE(stat_description); - pr_debug("# cpu: cpu number\n"); - pr_debug("Sender statistics:\n"); - for (i = 0; i < elements; i++) - pr_debug("%s\n", stat_description[i]); - } else if (input_arg == -1) { - for_each_present_cpu(cpu) { - stat = &per_cpu(ptcstats, cpu); - memset(stat, 0, sizeof(struct ptc_stats)); - } - } - - return count; -} - -static int local_atoi(const char *name) -{ - int val = 0; - - for (;; name++) { - switch (*name) { - case '0' ... '9': - val = 10*val+(*name-'0'); - break; - default: - return val; - } - } -} - -/* - * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables. - * Zero values reset them to defaults. - */ -static int parse_tunables_write(struct bau_control *bcp, char *instr, - int count) -{ - char *p; - char *q; - int cnt = 0; - int val; - int e = ARRAY_SIZE(tunables); - - p = instr + strspn(instr, WHITESPACE); - q = p; - for (; *p; p = q + strspn(q, WHITESPACE)) { - q = p + strcspn(p, WHITESPACE); - cnt++; - if (q == p) - break; - } - if (cnt != e) { - pr_info("bau tunable error: should be %d values\n", e); - return -EINVAL; - } - - p = instr + strspn(instr, WHITESPACE); - q = p; - for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) { - q = p + strcspn(p, WHITESPACE); - val = local_atoi(p); - switch (cnt) { - case 0: - if (val == 0) { - max_concurr = MAX_BAU_CONCURRENT; - max_concurr_const = MAX_BAU_CONCURRENT; - continue; - } - if (val < 1 || val > bcp->cpus_in_uvhub) { - pr_debug( - "Error: BAU max concurrent %d is invalid\n", - val); - return -EINVAL; - } - max_concurr = val; - max_concurr_const = val; - continue; - default: - if (val == 0) - *tunables[cnt].tunp = tunables[cnt].deflt; - else - *tunables[cnt].tunp = val; - continue; - } - } - return 0; -} - -/* - * Handle a write to debugfs. 
(/sys/kernel/debug/sgi_uv/bau_tunables) - */ -static ssize_t tunables_write(struct file *file, const char __user *user, - size_t count, loff_t *data) -{ - int cpu; - int ret; - char instr[100]; - struct bau_control *bcp; - - if (count == 0 || count > sizeof(instr)-1) - return -EINVAL; - if (copy_from_user(instr, user, count)) - return -EFAULT; - - instr[count] = '\0'; - - cpu = get_cpu(); - bcp = &per_cpu(bau_control, cpu); - ret = parse_tunables_write(bcp, instr, count); - put_cpu(); - if (ret) - return ret; - - for_each_present_cpu(cpu) { - bcp = &per_cpu(bau_control, cpu); - bcp->max_concurr = max_concurr; - bcp->max_concurr_const = max_concurr; - bcp->plugged_delay = plugged_delay; - bcp->plugsb4reset = plugsb4reset; - bcp->timeoutsb4reset = timeoutsb4reset; - bcp->ipi_reset_limit = ipi_reset_limit; - bcp->complete_threshold = complete_threshold; - bcp->cong_response_us = congested_respns_us; - bcp->cong_reps = congested_reps; - bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; - } - return count; -} - -static const struct seq_operations uv_ptc_seq_ops = { - .start = ptc_seq_start, - .next = ptc_seq_next, - .stop = ptc_seq_stop, - .show = ptc_seq_show -}; - -static int ptc_proc_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &uv_ptc_seq_ops); -} - -static int tunables_open(struct inode *inode, struct file *file) -{ - return 0; -} - -static const struct proc_ops uv_ptc_proc_ops = { - .proc_open = ptc_proc_open, - .proc_read = seq_read, - .proc_write = ptc_proc_write, - .proc_lseek = seq_lseek, - .proc_release = seq_release, -}; - -static const struct file_operations tunables_fops = { - .open = tunables_open, - .read = tunables_read, - .write = tunables_write, - .llseek = default_llseek, -}; - -static int __init uv_ptc_init(void) -{ - struct proc_dir_entry *proc_uv_ptc; - - if (!is_uv_system()) - return 0; - - proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL, - &uv_ptc_proc_ops); - if (!proc_uv_ptc) { - pr_err("unable to create %s proc entry\n", - UV_PTC_BASENAME); - return -EINVAL; - } - - tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL); - debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600, tunables_dir, NULL, - &tunables_fops); - return 0; -} - -/* - * Initialize the sending side's sending buffers. - */ -static void activation_descriptor_init(int node, int pnode, int base_pnode) -{ - int i; - int cpu; - unsigned long gpa; - unsigned long m; - unsigned long n; - size_t dsize; - struct bau_desc *bau_desc; - struct bau_desc *bd2; - struct uv2_3_bau_msg_header *uv2_3_hdr; - struct bau_control *bcp; - - /* - * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC) - * per cpu; and one per cpu on the uvhub (ADP_SZ) - */ - dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC; - bau_desc = kmalloc_node(dsize, GFP_KERNEL, node); - BUG_ON(!bau_desc); - - gpa = uv_gpa(bau_desc); - n = uv_gpa_to_gnode(gpa); - m = ops.bau_gpa_to_offset(gpa); - - /* the 14-bit pnode */ - write_mmr_descriptor_base(pnode, - (n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m)); - /* - * Initializing all 8 (ITEMS_PER_DESC) descriptors for each - * cpu even though we only use the first one; one descriptor can - * describe a broadcast to 256 uv hubs. - */ - for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) { - memset(bd2, 0, sizeof(struct bau_desc)); - /* - * BIOS uses legacy mode, but uv2 and uv3 hardware always - * uses native mode for selective broadcasts. 
- */ - uv2_3_hdr = &bd2->header.uv2_3_hdr; - uv2_3_hdr->swack_flag = 1; - uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); - uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID; - uv2_3_hdr->command = UV_NET_ENDPOINT_INTD; - } - for_each_present_cpu(cpu) { - if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu))) - continue; - bcp = &per_cpu(bau_control, cpu); - bcp->descriptor_base = bau_desc; - } -} - -/* - * initialize the destination side's receiving buffers - * entered for each uvhub in the partition - * - node is first node (kernel memory notion) on the uvhub - * - pnode is the uvhub's physical identifier - */ -static void pq_init(int node, int pnode) -{ - int cpu; - size_t plsize; - char *cp; - void *vp; - unsigned long gnode, first, last, tail; - struct bau_pq_entry *pqp; - struct bau_control *bcp; - - plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry); - vp = kmalloc_node(plsize, GFP_KERNEL, node); - BUG_ON(!vp); - - pqp = (struct bau_pq_entry *)vp; - cp = (char *)pqp + 31; - pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5); - - for_each_present_cpu(cpu) { - if (pnode != uv_cpu_to_pnode(cpu)) - continue; - /* for every cpu on this pnode: */ - bcp = &per_cpu(bau_control, cpu); - bcp->queue_first = pqp; - bcp->bau_msg_head = pqp; - bcp->queue_last = pqp + (DEST_Q_SIZE - 1); - } - - first = ops.bau_gpa_to_offset(uv_gpa(pqp)); - last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1))); - - /* - * Pre UV4, the gnode is required to locate the payload queue - * and the payload queue tail must be maintained by the kernel. - */ - bcp = &per_cpu(bau_control, smp_processor_id()); - if (bcp->uvhub_version <= UV_BAU_V3) { - tail = first; - gnode = uv_gpa_to_gnode(uv_gpa(pqp)); - first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail; - write_mmr_payload_tail(pnode, tail); - } - - ops.write_payload_first(pnode, first); - ops.write_payload_last(pnode, last); - - /* in effect, all msg_type's are set to MSG_NOOP */ - memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); -} - -/* - * Initialization of each UV hub's structures - */ -static void __init init_uvhub(int uvhub, int vector, int base_pnode) -{ - int node; - int pnode; - unsigned long apicid; - - node = uvhub_to_first_node(uvhub); - pnode = uv_blade_to_pnode(uvhub); - - activation_descriptor_init(node, pnode, base_pnode); - - pq_init(node, pnode); - /* - * The below initialization can't be in firmware because the - * messaging IRQ will be determined by the OS. - */ - apicid = uvhub_to_first_apicid(uvhub); - write_mmr_data_config(pnode, ((apicid << 32) | vector)); -} - -/* - * We will set BAU_MISC_CONTROL with a timeout period. - * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT. - * So the destination timeout period has to be calculated from them. 
- */ -static int calculate_destination_timeout(void) -{ - unsigned long mmr_image; - int mult1; - int base; - int ret; - - /* same destination timeout for uv2 and uv3 */ - /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */ - mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL); - mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT; - if (mmr_image & (1L << UV2_ACK_UNITS_SHFT)) - base = 80; - else - base = 10; - mult1 = mmr_image & UV2_ACK_MASK; - ret = mult1 * base; - - return ret; -} - -static void __init init_per_cpu_tunables(void) -{ - int cpu; - struct bau_control *bcp; - - for_each_present_cpu(cpu) { - bcp = &per_cpu(bau_control, cpu); - bcp->baudisabled = 0; - if (nobau) - bcp->nobau = true; - bcp->statp = &per_cpu(ptcstats, cpu); - /* time interval to catch a hardware stay-busy bug */ - bcp->timeout_interval = usec_2_cycles(2*timeout_us); - bcp->max_concurr = max_concurr; - bcp->max_concurr_const = max_concurr; - bcp->plugged_delay = plugged_delay; - bcp->plugsb4reset = plugsb4reset; - bcp->timeoutsb4reset = timeoutsb4reset; - bcp->ipi_reset_limit = ipi_reset_limit; - bcp->complete_threshold = complete_threshold; - bcp->cong_response_us = congested_respns_us; - bcp->cong_reps = congested_reps; - bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; - spin_lock_init(&bcp->queue_lock); - spin_lock_init(&bcp->uvhub_lock); - spin_lock_init(&bcp->disable_lock); - } -} - -/* - * Scan all cpus to collect blade and socket summaries. - */ -static int __init get_cpu_topology(int base_pnode, - struct uvhub_desc *uvhub_descs, - unsigned char *uvhub_mask) -{ - int cpu; - int pnode; - int uvhub; - int socket; - struct bau_control *bcp; - struct uvhub_desc *bdp; - struct socket_desc *sdp; - - for_each_present_cpu(cpu) { - bcp = &per_cpu(bau_control, cpu); - - memset(bcp, 0, sizeof(struct bau_control)); - - pnode = uv_cpu_hub_info(cpu)->pnode; - if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) { - pr_emerg( - "cpu %d pnode %d-%d beyond %d; BAU disabled\n", - cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE); - return 1; - } - - bcp->osnode = cpu_to_node(cpu); - bcp->partition_base_pnode = base_pnode; - - uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; - *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); - bdp = &uvhub_descs[uvhub]; - - bdp->num_cpus++; - bdp->uvhub = uvhub; - bdp->pnode = pnode; - - /* kludge: 'assuming' one node per socket, and assuming that - disabling a socket just leaves a gap in node numbers */ - socket = bcp->osnode & 1; - bdp->socket_mask |= (1 << socket); - sdp = &bdp->socket[socket]; - sdp->cpu_number[sdp->num_cpus] = cpu; - sdp->num_cpus++; - if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) { - pr_emerg("%d cpus per socket invalid\n", - sdp->num_cpus); - return 1; - } - } - return 0; -} - -/* - * Each socket is to get a local array of pnodes/hubs. - */ -static void make_per_cpu_thp(struct bau_control *smaster) -{ - int cpu; - size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus(); - - smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode); - for_each_present_cpu(cpu) { - smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode; - smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; - } -} - -/* - * Each uvhub is to get a local cpumask. 
- */ -static void make_per_hub_cpumask(struct bau_control *hmaster) -{ - int sz = sizeof(cpumask_t); - - hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode); -} - -/* - * Initialize all the per_cpu information for the cpu's on a given socket, - * given what has been gathered into the socket_desc struct. - * And reports the chosen hub and socket masters back to the caller. - */ -static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp, - struct bau_control **smasterp, - struct bau_control **hmasterp) -{ - int i, cpu, uvhub_cpu; - struct bau_control *bcp; - - for (i = 0; i < sdp->num_cpus; i++) { - cpu = sdp->cpu_number[i]; - bcp = &per_cpu(bau_control, cpu); - bcp->cpu = cpu; - if (i == 0) { - *smasterp = bcp; - if (!(*hmasterp)) - *hmasterp = bcp; - } - bcp->cpus_in_uvhub = bdp->num_cpus; - bcp->cpus_in_socket = sdp->num_cpus; - bcp->socket_master = *smasterp; - bcp->uvhub = bdp->uvhub; - if (is_uv2_hub()) - bcp->uvhub_version = UV_BAU_V2; - else if (is_uv3_hub()) - bcp->uvhub_version = UV_BAU_V3; - else if (is_uv4_hub()) - bcp->uvhub_version = UV_BAU_V4; - else { - pr_emerg("uvhub version not 1, 2, 3, or 4\n"); - return 1; - } - bcp->uvhub_master = *hmasterp; - uvhub_cpu = uv_cpu_blade_processor_id(cpu); - bcp->uvhub_cpu = uvhub_cpu; - - /* - * The ERROR and BUSY status registers are located pairwise over - * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits. - */ - if (uvhub_cpu < UV_CPUS_PER_AS) { - bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; - bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE; - } else { - bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1; - bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS) - * UV_ACT_STATUS_SIZE; - } - - if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { - pr_emerg("%d cpus per uvhub invalid\n", - bcp->uvhub_cpu); - return 1; - } - } - return 0; -} - -/* - * Summarize the blade and socket topology into the per_cpu structures. 
- */ -static int __init summarize_uvhub_sockets(int nuvhubs, - struct uvhub_desc *uvhub_descs, - unsigned char *uvhub_mask) -{ - int socket; - int uvhub; - unsigned short socket_mask; - - for (uvhub = 0; uvhub < nuvhubs; uvhub++) { - struct uvhub_desc *bdp; - struct bau_control *smaster = NULL; - struct bau_control *hmaster = NULL; - - if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8)))) - continue; - - bdp = &uvhub_descs[uvhub]; - socket_mask = bdp->socket_mask; - socket = 0; - while (socket_mask) { - struct socket_desc *sdp; - if ((socket_mask & 1)) { - sdp = &bdp->socket[socket]; - if (scan_sock(sdp, bdp, &smaster, &hmaster)) - return 1; - make_per_cpu_thp(smaster); - } - socket++; - socket_mask = (socket_mask >> 1); - } - make_per_hub_cpumask(hmaster); - } - return 0; -} - -/* - * initialize the bau_control structure for each cpu - */ -static int __init init_per_cpu(int nuvhubs, int base_part_pnode) -{ - struct uvhub_desc *uvhub_descs; - unsigned char *uvhub_mask = NULL; - - if (is_uv3_hub() || is_uv2_hub()) - timeout_us = calculate_destination_timeout(); - - uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL); - if (!uvhub_descs) - goto fail; - - uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); - if (!uvhub_mask) - goto fail; - - if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask)) - goto fail; - - if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask)) - goto fail; - - kfree(uvhub_descs); - kfree(uvhub_mask); - init_per_cpu_tunables(); - return 0; - -fail: - kfree(uvhub_descs); - kfree(uvhub_mask); - return 1; -} - -static const struct bau_operations uv2_3_bau_ops __initconst = { - .bau_gpa_to_offset = uv_gpa_to_offset, - .read_l_sw_ack = read_mmr_sw_ack, - .read_g_sw_ack = read_gmmr_sw_ack, - .write_l_sw_ack = write_mmr_sw_ack, - .write_g_sw_ack = write_gmmr_sw_ack, - .write_payload_first = write_mmr_payload_first, - .write_payload_last = write_mmr_payload_last, - .wait_completion = uv2_3_wait_completion, -}; - -static const struct bau_operations uv4_bau_ops __initconst = { - .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram, - .read_l_sw_ack = read_mmr_proc_sw_ack, - .read_g_sw_ack = read_gmmr_proc_sw_ack, - .write_l_sw_ack = write_mmr_proc_sw_ack, - .write_g_sw_ack = write_gmmr_proc_sw_ack, - .write_payload_first = write_mmr_proc_payload_first, - .write_payload_last = write_mmr_proc_payload_last, - .wait_completion = uv4_wait_completion, -}; - -/* - * Initialization of BAU-related structures - */ -static int __init uv_bau_init(void) -{ - int uvhub; - int pnode; - int nuvhubs; - int cur_cpu; - int cpus; - int vector; - cpumask_var_t *mask; - - if (!is_uv_system()) - return 0; - - if (is_uv4_hub()) - ops = uv4_bau_ops; - else if (is_uv3_hub()) - ops = uv2_3_bau_ops; - else if (is_uv2_hub()) - ops = uv2_3_bau_ops; - - nuvhubs = uv_num_possible_blades(); - if (nuvhubs < 2) { - pr_crit("UV: BAU disabled - insufficient hub count\n"); - goto err_bau_disable; - } - - for_each_possible_cpu(cur_cpu) { - mask = &per_cpu(uv_flush_tlb_mask, cur_cpu); - zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu)); - } - - uv_base_pnode = 0x7fffffff; - for (uvhub = 0; uvhub < nuvhubs; uvhub++) { - cpus = uv_blade_nr_possible_cpus(uvhub); - if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode)) - uv_base_pnode = uv_blade_to_pnode(uvhub); - } - - /* software timeouts are not supported on UV4 */ - if (is_uv3_hub() || is_uv2_hub()) - enable_timeouts(); - - if (init_per_cpu(nuvhubs, uv_base_pnode)) { - pr_crit("UV: BAU disabled - per CPU init failed\n"); - goto 
err_bau_disable; - } - - vector = UV_BAU_MESSAGE; - for_each_possible_blade(uvhub) { - if (uv_blade_nr_possible_cpus(uvhub)) - init_uvhub(uvhub, vector, uv_base_pnode); - } - - for_each_possible_blade(uvhub) { - if (uv_blade_nr_possible_cpus(uvhub)) { - unsigned long val; - unsigned long mmr; - pnode = uv_blade_to_pnode(uvhub); - /* INIT the bau */ - val = 1L << 63; - write_gmmr_activation(pnode, val); - mmr = 1; /* should be 1 to broadcast to both sockets */ - write_mmr_data_broadcast(pnode, mmr); - } - } - - return 0; - -err_bau_disable: - - for_each_possible_cpu(cur_cpu) - free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu)); - - set_bau_off(); - nobau_perm = 1; - - return -EINVAL; -} -core_initcall(uv_bau_init); -fs_initcall(uv_ptc_init); diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index abb6075397f0..18ca2261cc9a 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c @@ -90,15 +90,15 @@ static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq, ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); if (ret >= 0) { - if (info->uv_limit == UV_AFFINITY_CPU) + if (info->uv.limit == UV_AFFINITY_CPU) irq_set_status_flags(virq, IRQ_NO_BALANCING); else irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); - chip_data->pnode = uv_blade_to_pnode(info->uv_blade); - chip_data->offset = info->uv_offset; + chip_data->pnode = uv_blade_to_pnode(info->uv.blade); + chip_data->offset = info->uv.offset; irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data, - handle_percpu_irq, NULL, info->uv_name); + handle_percpu_irq, NULL, info->uv.name); } else { kfree(chip_data); } @@ -193,10 +193,10 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, init_irq_alloc_info(&info, cpumask_of(cpu)); info.type = X86_IRQ_ALLOC_TYPE_UV; - info.uv_limit = limit; - info.uv_blade = mmr_blade; - info.uv_offset = mmr_offset; - info.uv_name = irq_name; + info.uv.limit = limit; + info.uv.blade = mmr_blade; + info.uv.offset = mmr_offset; + info.uv.name = irq_name; return irq_domain_alloc_irqs(domain, 1, uv_blade_to_memory_nid(mmr_blade), &info); diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index 9d08ff5a755e..eafc530c8767 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -2,8 +2,9 @@ /* * SGI NMI support routines * - * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved. - * Copyright (c) Mike Travis + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP + * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved. 
+ * Copyright (c) Mike Travis */ #include <linux/cpu.h> @@ -54,6 +55,19 @@ static struct uv_hub_nmi_s **uv_hub_nmi_list; DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi); +/* Newer SMM NMI handler, not present in all systems */ +static unsigned long uvh_nmi_mmrx; /* UVH_EVENT_OCCURRED0/1 */ +static unsigned long uvh_nmi_mmrx_clear; /* UVH_EVENT_OCCURRED0/1_ALIAS */ +static int uvh_nmi_mmrx_shift; /* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */ +static char *uvh_nmi_mmrx_type; /* "EXTIO_INT0" */ + +/* Non-zero indicates newer SMM NMI handler present */ +static unsigned long uvh_nmi_mmrx_supported; /* UVH_EXTIO_INT0_BROADCAST */ + +/* Indicates to BIOS that we want to use the newer SMM NMI handler */ +static unsigned long uvh_nmi_mmrx_req; /* UVH_BIOS_KERNEL_MMR_ALIAS_2 */ +static int uvh_nmi_mmrx_req_shift; /* 62 */ + /* UV hubless values */ #define NMI_CONTROL_PORT 0x70 #define NMI_DUMMY_PORT 0x71 @@ -227,13 +241,41 @@ static inline bool uv_nmi_action_is(const char *action) /* Setup which NMI support is present in system */ static void uv_nmi_setup_mmrs(void) { - if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) { - uv_write_local_mmr(UVH_NMI_MMRX_REQ, - 1UL << UVH_NMI_MMRX_REQ_SHIFT); - nmi_mmr = UVH_NMI_MMRX; - nmi_mmr_clear = UVH_NMI_MMRX_CLEAR; - nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT; - pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE); + /* First determine arch specific MMRs to handshake with BIOS */ + if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) { + uvh_nmi_mmrx = UVH_EVENT_OCCURRED0; + uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS; + uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT; + uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0"; + + uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST; + uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2; + uvh_nmi_mmrx_req_shift = 62; + + } else if (UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK) { + uvh_nmi_mmrx = UVH_EVENT_OCCURRED1; + uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED1_ALIAS; + uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT; + uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0"; + + uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST; + uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2; + uvh_nmi_mmrx_req_shift = 62; + + } else { + pr_err("UV:%s:cannot find EVENT_OCCURRED*_EXTIO_INT0\n", + __func__); + return; + } + + /* Then find out if new NMI is supported */ + if (likely(uv_read_local_mmr(uvh_nmi_mmrx_supported))) { + uv_write_local_mmr(uvh_nmi_mmrx_req, + 1UL << uvh_nmi_mmrx_req_shift); + nmi_mmr = uvh_nmi_mmrx; + nmi_mmr_clear = uvh_nmi_mmrx_clear; + nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift; + pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type); } else { nmi_mmr = UVH_NMI_MMR; nmi_mmr_clear = UVH_NMI_MMR_CLEAR; @@ -1049,5 +1091,5 @@ void __init uv_nmi_setup_hubless(void) /* Ensure NMI enabled in Processor Interface Reg: */ uv_reassert_nmi(); uv_register_nmi_notifier(); - pr_info("UV: Hubless NMI enabled\n"); + pr_info("UV: PCH NMI enabled\n"); } diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index f82a1337a608..54663f3e00cb 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c @@ -2,6 +2,7 @@ /* * SGI RTC clock/timer routines. * + * (C) Copyright 2020 Hewlett Packard Enterprise Development LP * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved. 
* Copyright (c) Dimitri Sivanich */ @@ -52,7 +53,7 @@ struct uv_rtc_timer_head { struct { int lcpu; /* systemwide logical cpu number */ u64 expires; /* next timer expiration for this cpu */ - } cpu[1]; + } cpu[]; }; /* @@ -84,10 +85,8 @@ static void uv_rtc_send_IPI(int cpu) /* Check for an RTC interrupt pending */ static int uv_intr_pending(int pnode) { - if (is_uvx_hub()) - return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) & - UVXH_EVENT_OCCURRED2_RTC_1_MASK; - return 0; + return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED2) & + UVH_EVENT_OCCURRED2_RTC_1_MASK; } /* Setup interrupt and return non-zero if early expiration occurred. */ @@ -101,8 +100,8 @@ static int uv_setup_intr(int cpu, u64 expires) UVH_RTC1_INT_CONFIG_M_MASK); uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L); - uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS, - UVXH_EVENT_OCCURRED2_RTC_1_MASK); + uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED2_ALIAS, + UVH_EVENT_OCCURRED2_RTC_1_MASK); val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); @@ -148,9 +147,8 @@ static __init int uv_rtc_allocate_timers(void) struct uv_rtc_timer_head *head = blade_info[bid]; if (!head) { - head = kmalloc_node(sizeof(struct uv_rtc_timer_head) + - (uv_blade_nr_possible_cpus(bid) * - 2 * sizeof(u64)), + head = kmalloc_node(struct_size(head, cpu, + uv_blade_nr_possible_cpus(bid)), GFP_KERNEL, nid); if (!head) { uv_rtc_deallocate_timers(); diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 1ed1208931e0..22fda7d99159 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -9,6 +9,7 @@ #include <asm/realmode.h> #include <asm/tlbflush.h> #include <asm/crash.h> +#include <asm/sev-es.h> struct real_mode_header *real_mode_header; u32 *trampoline_cr4_features; @@ -38,6 +39,25 @@ void __init reserve_real_mode(void) crash_reserve_low_1M(); } +static void sme_sev_setup_real_mode(struct trampoline_header *th) +{ +#ifdef CONFIG_AMD_MEM_ENCRYPT + if (sme_active()) + th->flags |= TH_FLAGS_SME_ACTIVE; + + if (sev_es_active()) { + /* + * Skip the call to verify_cpu() in secondary_startup_64 as it + * will cause #VC exceptions when the AP can't handle them yet. 
+ */ + th->start = (u64) secondary_startup_64_no_verify; + + if (sev_es_setup_ap_jump_table(real_mode_header)) + panic("Failed to get/update SEV-ES AP Jump Table"); + } +#endif +} + static void __init setup_real_mode(void) { u16 real_mode_seg; @@ -104,13 +124,13 @@ static void __init setup_real_mode(void) *trampoline_cr4_features = mmu_cr4_features; trampoline_header->flags = 0; - if (sme_active()) - trampoline_header->flags |= TH_FLAGS_SME_ACTIVE; trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); trampoline_pgd[0] = trampoline_pgd_entry.pgd; trampoline_pgd[511] = init_top_pgt[511].pgd; #endif + + sme_sev_setup_real_mode(trampoline_header); } /* diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S index af04512c02d9..8c1db5bf5d78 100644 --- a/arch/x86/realmode/rm/header.S +++ b/arch/x86/realmode/rm/header.S @@ -20,6 +20,9 @@ SYM_DATA_START(real_mode_header) /* SMP trampoline */ .long pa_trampoline_start .long pa_trampoline_header +#ifdef CONFIG_AMD_MEM_ENCRYPT + .long pa_sev_es_trampoline_start +#endif #ifdef CONFIG_X86_64 .long pa_trampoline_pgd; #endif diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index 251758ed7443..84c5d1b33d10 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -56,6 +56,7 @@ SYM_CODE_START(trampoline_start) testl %eax, %eax # Check for return code jnz no_longmode +.Lswitch_to_protected: /* * GDT tables in non default location kernel can be beyond 16MB and * lgdt will not be able to load the address as in real mode default @@ -80,6 +81,25 @@ no_longmode: jmp no_longmode SYM_CODE_END(trampoline_start) +#ifdef CONFIG_AMD_MEM_ENCRYPT +/* SEV-ES supports non-zero IP for entry points - no alignment needed */ +SYM_CODE_START(sev_es_trampoline_start) + cli # We should be safe anyway + + LJMPW_RM(1f) +1: + mov %cs, %ax # Code and data in the same place + mov %ax, %ds + mov %ax, %es + mov %ax, %ss + + # Setup stack + movl $rm_stack_end, %esp + + jmp .Lswitch_to_protected +SYM_CODE_END(sev_es_trampoline_start) +#endif /* CONFIG_AMD_MEM_ENCRYPT */ + #include "../kernel/verify_cpu.S" .section ".text32","ax" diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index a42015b305f4..af38469afd14 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -362,6 +362,9 @@ function convert_operands(count,opnd, i,j,imm,mod) END { if (awkchecked != "") exit 1 + + print "#ifndef __BOOT_COMPRESSED\n" + # print escape opcode map's array print "/* Escape opcode map array */" print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \ @@ -388,6 +391,51 @@ END { for (j = 0; j < max_lprefix; j++) if (atable[i,j]) print " ["i"]["j"] = "atable[i,j]"," - print "};" + print "};\n" + + print "#else /* !__BOOT_COMPRESSED */\n" + + print "/* Escape opcode map array */" + print "static const insn_attr_t *inat_escape_tables[INAT_ESC_MAX + 1]" \ + "[INAT_LSTPFX_MAX + 1];" + print "" + + print "/* Group opcode map array */" + print "static const insn_attr_t *inat_group_tables[INAT_GRP_MAX + 1]"\ + "[INAT_LSTPFX_MAX + 1];" + print "" + + print "/* AVX opcode map array */" + print "static const insn_attr_t *inat_avx_tables[X86_VEX_M_MAX + 1]"\ + "[INAT_LSTPFX_MAX + 1];" + print "" + + print "static void inat_init_tables(void)" + print "{" + + # print escape opcode map's array + print "\t/* Print Escape opcode map array */" + for (i = 0; i < geid; i++) + for (j = 0; j < max_lprefix; j++) + if 
(etable[i,j]) + print "\tinat_escape_tables["i"]["j"] = "etable[i,j]";" + print "" + + # print group opcode map's array + print "\t/* Print Group opcode map array */" + for (i = 0; i < ggid; i++) + for (j = 0; j < max_lprefix; j++) + if (gtable[i,j]) + print "\tinat_group_tables["i"]["j"] = "gtable[i,j]";" + print "" + # print AVX opcode map's array + print "\t/* Print AVX opcode map array */" + for (i = 0; i < gaid; i++) + for (j = 0; j < max_lprefix; j++) + if (atable[i,j]) + print "\tinat_avx_tables["i"]["j"] = "atable[i,j]";" + + print "}" + print "#endif" } diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h index ff6bba2c8ab6..b07824500363 100644 --- a/arch/x86/um/asm/checksum.h +++ b/arch/x86/um/asm/checksum.h @@ -20,22 +20,6 @@ */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); -/* - * Note: when you get a NULL pointer exception here this means someone - * passed in an incorrect kernel address to one of these functions. - * - * If you use these functions directly please don't forget the - * access_ok(). - */ - -static __inline__ -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) -{ - memcpy(dst, src, len); - return csum_partial(dst, len, sum); -} - /** * csum_fold - Fold and invert a 32bit checksum. * sum: 32bit unfolded sum diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h index b9ac7c9eb72c..0b13c2947ad1 100644 --- a/arch/x86/um/asm/checksum_32.h +++ b/arch/x86/um/asm/checksum_32.h @@ -35,27 +35,4 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold(sum); } -/* - * Copy and checksum to user - */ -#define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, - void __user *dst, - int len, __wsum sum, int *err_ptr) -{ - if (access_ok(dst, len)) { - if (copy_to_user(dst, src, len)) { - *err_ptr = -EFAULT; - return (__force __wsum)-1; - } - - return csum_partial(src, len, sum); - } - - if (len) - *err_ptr = -EFAULT; - - return (__force __wsum)-1; /* invalid checksum */ -} - #endif diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c index 09a085bde0d4..1401899dee9b 100644 --- a/arch/x86/um/ptrace_64.c +++ b/arch/x86/um/ptrace_64.c @@ -52,14 +52,6 @@ static const int reg_offsets[] = int putreg(struct task_struct *child, int regno, unsigned long value) { -#ifdef TIF_IA32 - /* - * Some code in the 64bit emulation may not be 64bit clean. - * Don't take any chances. 
- */ - if (test_tsk_thread_flag(child, TIF_IA32)) - value &= 0xffffffff; -#endif switch (regno) { case R8: case R9: @@ -137,10 +129,7 @@ int poke_user(struct task_struct *child, long addr, long data) unsigned long getreg(struct task_struct *child, int regno) { unsigned long mask = ~0UL; -#ifdef TIF_IA32 - if (test_tsk_thread_flag(child, TIF_IA32)) - mask = 0xffffffff; -#endif + switch (regno) { case R8: case R9: diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c index c51dd8363d25..bae61554abcc 100644 --- a/arch/x86/um/user-offsets.c +++ b/arch/x86/um/user-offsets.c @@ -2,7 +2,7 @@ #include <stdio.h> #include <stddef.h> #include <signal.h> -#include <sys/poll.h> +#include <poll.h> #include <sys/mman.h> #include <sys/user.h> #define __FRAME_OFFSETS diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 22e741e0b10c..4409306364dc 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -32,7 +32,7 @@ #include <linux/pci.h> #include <linux/gfp.h> #include <linux/edd.h> -#include <linux/frame.h> +#include <linux/objtool.h> #include <xen/xen.h> #include <xen/events.h> @@ -1014,8 +1014,6 @@ void __init xen_setup_vcpu_info_placement(void) } static const struct pv_info xen_info __initconst = { - .shared_kernel_pmd = 0, - .extra_user_64bit_cs = FLAT_USER_CS64, .name = "Xen", }; @@ -1302,7 +1300,7 @@ asmlinkage __visible void __init xen_start_kernel(void) * any NUMA information the kernel tries to get from ACPI will * be meaningless. Prevent it from trying. */ - acpi_numa = -1; + disable_srat(); #endif WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); @@ -1314,10 +1312,6 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_start_info->nr_pages); xen_reserve_special_pages(); - /* keep using Xen gdt for now; no urgent need to change it */ - - pv_info.kernel_rpl = 0; - /* * We used to do this in xen_arch_setup, but that is too late * on AMD were early_cpu_init (run before ->arch_setup()) calls @@ -1376,6 +1370,15 @@ asmlinkage __visible void __init xen_start_kernel(void) x86_init.mpparse.get_smp_config = x86_init_uint_noop; xen_boot_params_init_edd(); + +#ifdef CONFIG_ACPI + /* + * Disable selecting "Firmware First mode" for correctable + * memory errors, as this is the duty of the hypervisor to + * decide. 
+ */ + acpi_disable_cmcff = 1; +#endif } if (!boot_params.screen_info.orig_video_isVGA) diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c index 4988e19598c8..1e681bf62561 100644 --- a/arch/x86/xen/grant-table.c +++ b/arch/x86/xen/grant-table.c @@ -25,6 +25,7 @@ static struct gnttab_vm_area { struct vm_struct *area; pte_t **ptes; + int idx; } gnttab_shared_vm_area, gnttab_status_vm_area; int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, @@ -90,19 +91,31 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) } } +static int gnttab_apply(pte_t *pte, unsigned long addr, void *data) +{ + struct gnttab_vm_area *area = data; + + area->ptes[area->idx++] = pte; + return 0; +} + static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames) { area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL); if (area->ptes == NULL) return -ENOMEM; - - area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes); - if (area->area == NULL) { - kfree(area->ptes); - return -ENOMEM; - } - + area->area = get_vm_area(PAGE_SIZE * nr_frames, VM_IOREMAP); + if (!area->area) + goto out_free_ptes; + if (apply_to_page_range(&init_mm, (unsigned long)area->area->addr, + PAGE_SIZE * nr_frames, gnttab_apply, area)) + goto out_free_vm_area; return 0; +out_free_vm_area: + free_vm_area(area->area); +out_free_ptes: + kfree(area->ptes); + return -ENOMEM; } static void arch_gnttab_vfree(struct gnttab_vm_area *area) diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 3273c985d3dd..cf2ade864c30 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -285,13 +285,6 @@ static void xen_set_pte(pte_t *ptep, pte_t pteval) __xen_set_pte(ptep, pteval); } -static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) -{ - trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval); - __xen_set_pte(ptep, pteval); -} - pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { @@ -1149,7 +1142,7 @@ static void __init xen_pagetable_p2m_free(void) * We could be in __ka space. * We roundup to the PMD, which means that if anybody at this stage is * using the __ka address of xen_start_info or - * xen_start_info->shared_info they are in going to crash. Fortunatly + * xen_start_info->shared_info they are in going to crash. Fortunately * we have already revectored in xen_setup_kernel_pagetable. */ size = roundup(size, PMD_SIZE); @@ -2105,7 +2098,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .release_pmd = xen_release_pmd_init, .set_pte = xen_set_pte_init, - .set_pte_at = xen_set_pte_at, .set_pmd = xen_set_pmd_hyper, .ptep_modify_prot_start = __ptep_modify_prot_start, diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index 33293ce01d8d..19ae3e4fe4e9 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -2,7 +2,7 @@ /* Glue code to lib/swiotlb-xen.c */ -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/pci.h> #include <xen/swiotlb-xen.h> diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index e997e0119c02..b73d775831f1 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -217,20 +217,6 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. 
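The arch_gnttab_valloc() rework in the xen/grant-table.c hunk above drops alloc_vm_area() in favour of get_vm_area() plus apply_to_page_range(), with a small callback recording each PTE pointer. A minimal sketch of that callback pattern, outside the patch context, might look as follows; the pte_collector structure and the collect_pte()/reserve_frames() names are hypothetical stand-ins for the gnttab-specific code.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper state: where to record each PTE pointer. */
struct pte_collector {
	pte_t **ptes;
	int idx;
};

/* Invoked once per PTE covering the range handed to apply_to_page_range(). */
static int collect_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct pte_collector *c = data;

	c->ptes[c->idx++] = pte;
	return 0;	/* a non-zero return would abort the walk */
}

/* Reserve nr_frames pages of kernel VA and remember their PTE slots. */
static struct vm_struct *reserve_frames(unsigned int nr_frames, pte_t **ptes)
{
	struct pte_collector c = { .ptes = ptes, .idx = 0 };
	struct vm_struct *area;

	area = get_vm_area(PAGE_SIZE * nr_frames, VM_IOREMAP);
	if (!area)
		return NULL;

	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				PAGE_SIZE * nr_frames, collect_pte, &c)) {
		free_vm_area(area);
		return NULL;
	}
	return area;
}

Because apply_to_page_range() populates any missing page-table levels and propagates a non-zero callback return as its own error, the hunk above can simply free the VM area and fall back to -ENOMEM on failure.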
-config SECCOMP - bool - prompt "Enable seccomp to safely compute untrusted bytecode" - help - This kernel feature is useful for number crunching applications - that may need to compute untrusted bytecode during their - execution. By using pipes or other transports made available to - the process as file descriptors supporting the read/write - syscalls, it's possible to isolate those applications in - their own address space using seccomp. Once seccomp is - enabled via prctl(PR_SET_SECCOMP), it cannot be disabled - and the task is only allowed to execute a few safe syscalls - defined by each seccomp mode. - config FAST_SYSCALL_XTENSA bool "Enable fast atomic syscalls" default n @@ -537,7 +523,7 @@ config MEMMAP_CACHEATTR 2: cache bypass, 4: WB cached, f: illegal. - For ful MMU: + For full MMU: bit 0: executable, bit 1: writable, bits 2..3: diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index 243a5fe79d3c..44ec1d0b2a35 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -37,32 +37,27 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); * better 64-bit) boundary */ -asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len); +#define _HAVE_ARCH_CSUM_AND_COPY /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. */ static inline -__wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { - return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len); } #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER static inline __wsum csum_and_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) + int len) { - if (access_ok(src, len)) - return csum_partial_copy_generic((__force const void *)src, dst, - len, sum, err_ptr, NULL); - if (len) - *err_ptr = -EFAULT; - return sum; + if (!access_ok(src, len)) + return 0; + return csum_partial_copy_generic((__force const void *)src, dst, len); } /* @@ -243,15 +238,10 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, */ #define HAVE_CSUM_COPY_USER static __inline__ __wsum csum_and_copy_to_user(const void *src, - void __user *dst, int len, - __wsum sum, int *err_ptr) + void __user *dst, int len) { - if (access_ok(dst, len)) - return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr); - - if (len) - *err_ptr = -EFAULT; - - return (__force __wsum)-1; /* invalid checksum */ + if (!access_ok(dst, len)) + return 0; + return csum_partial_copy_generic(src, (__force void *)dst, len); } #endif diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 17c4384f8495..94955caa4488 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -11,8 +11,7 @@ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> */ -#include <linux/dma-contiguous.h> -#include <linux/dma-noncoherent.h> +#include <linux/dma-map-ops.h> #include <linux/dma-direct.h> #include <linux/gfp.h> #include <linux/highmem.h> diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 6276e3c2d3fc..b070f272995d 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ 
b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -410,3 +410,4 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S index 4cb9ca58d9ad..cf1bed1a5bd6 100644 --- a/arch/xtensa/lib/checksum.S +++ b/arch/xtensa/lib/checksum.S @@ -175,19 +175,14 @@ ENDPROC(csum_partial) */ /* -unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, - int sum, int *src_err_ptr, int *dst_err_ptr) +unsigned int csum_partial_copy_generic (const char *src, char *dst, int len) a2 = src a3 = dst a4 = len a5 = sum - a6 = src_err_ptr - a7 = dst_err_ptr a8 = temp a9 = temp a10 = temp - a11 = original len for exception handling - a12 = original dst for exception handling This function is optimized for 4-byte aligned addresses. Other alignments work, but not nearly as efficiently. @@ -196,8 +191,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, ENTRY(csum_partial_copy_generic) abi_entry_default - mov a12, a3 - mov a11, a4 + movi a5, -1 or a10, a2, a3 /* We optimize the following alignment tests for the 4-byte @@ -228,26 +222,26 @@ ENTRY(csum_partial_copy_generic) #endif EX(10f) l32i a9, a2, 0 EX(10f) l32i a8, a2, 4 -EX(11f) s32i a9, a3, 0 -EX(11f) s32i a8, a3, 4 +EX(10f) s32i a9, a3, 0 +EX(10f) s32i a8, a3, 4 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 8 EX(10f) l32i a8, a2, 12 -EX(11f) s32i a9, a3, 8 -EX(11f) s32i a8, a3, 12 +EX(10f) s32i a9, a3, 8 +EX(10f) s32i a8, a3, 12 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 16 EX(10f) l32i a8, a2, 20 -EX(11f) s32i a9, a3, 16 -EX(11f) s32i a8, a3, 20 +EX(10f) s32i a9, a3, 16 +EX(10f) s32i a8, a3, 20 ONES_ADD(a5, a9) ONES_ADD(a5, a8) EX(10f) l32i a9, a2, 24 EX(10f) l32i a8, a2, 28 -EX(11f) s32i a9, a3, 24 -EX(11f) s32i a8, a3, 28 +EX(10f) s32i a9, a3, 24 +EX(10f) s32i a8, a3, 28 ONES_ADD(a5, a9) ONES_ADD(a5, a8) addi a2, a2, 32 @@ -267,7 +261,7 @@ EX(11f) s32i a8, a3, 28 .Loop6: #endif EX(10f) l32i a9, a2, 0 -EX(11f) s32i a9, a3, 0 +EX(10f) s32i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 4 addi a3, a3, 4 @@ -298,7 +292,7 @@ EX(11f) s32i a9, a3, 0 .Loop7: #endif EX(10f) l16ui a9, a2, 0 -EX(11f) s16i a9, a3, 0 +EX(10f) s16i a9, a3, 0 ONES_ADD(a5, a9) addi a2, a2, 2 addi a3, a3, 2 @@ -309,7 +303,7 @@ EX(11f) s16i a9, a3, 0 /* This section processes a possible trailing odd byte. 
*/ _bbci.l a4, 0, 8f /* 1-byte chunk */ EX(10f) l8ui a9, a2, 0 -EX(11f) s8i a9, a3, 0 +EX(10f) s8i a9, a3, 0 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* shift byte to bits 8..15 */ #endif @@ -334,8 +328,8 @@ EX(11f) s8i a9, a3, 0 #endif EX(10f) l8ui a9, a2, 0 EX(10f) l8ui a8, a2, 1 -EX(11f) s8i a9, a3, 0 -EX(11f) s8i a8, a3, 1 +EX(10f) s8i a9, a3, 0 +EX(10f) s8i a8, a3, 1 #ifdef __XTENSA_EB__ slli a9, a9, 8 /* combine into a single 16-bit value */ #else /* for checksum computation */ @@ -356,38 +350,7 @@ ENDPROC(csum_partial_copy_generic) # Exception handler: .section .fixup, "ax" -/* - a6 = src_err_ptr - a7 = dst_err_ptr - a11 = original len for exception handling - a12 = original dst for exception handling -*/ - 10: - _movi a2, -EFAULT - s32i a2, a6, 0 /* src_err_ptr */ - - # clear the complete destination - computing the rest - # is too much work - movi a2, 0 -#if XCHAL_HAVE_LOOPS - loopgtz a11, 2f -#else - beqz a11, 2f - add a11, a11, a12 /* a11 = ending address */ -.Leloop: -#endif - s8i a2, a12, 0 - addi a12, a12, 1 -#if !XCHAL_HAVE_LOOPS - blt a12, a11, .Leloop -#endif -2: - abi_ret_default - -11: - movi a2, -EFAULT - s32i a2, a7, 0 /* dst_err_ptr */ movi a2, 0 abi_ret_default diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index a05b306cf371..c6fc83efee0c 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -26,7 +26,7 @@ #include <linux/nodemask.h> #include <linux/mm.h> #include <linux/of_fdt.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <asm/bootparam.h> #include <asm/page.h> @@ -79,67 +79,32 @@ void __init zones_init(void) free_area_init(max_zone_pfn); } -#ifdef CONFIG_HIGHMEM -static void __init free_area_high(unsigned long pfn, unsigned long end) -{ - for (; pfn < end; pfn++) - free_highmem_page(pfn_to_page(pfn)); -} - static void __init free_highpages(void) { +#ifdef CONFIG_HIGHMEM unsigned long max_low = max_low_pfn; - struct memblock_region *mem, *res; + phys_addr_t range_start, range_end; + u64 i; - reset_all_zones_managed_pages(); /* set highmem page free */ - for_each_memblock(memory, mem) { - unsigned long start = memblock_region_memory_base_pfn(mem); - unsigned long end = memblock_region_memory_end_pfn(mem); + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, + &range_start, &range_end, NULL) { + unsigned long start = PHYS_PFN(range_start); + unsigned long end = PHYS_PFN(range_end); /* Ignore complete lowmem entries */ if (end <= max_low) continue; - if (memblock_is_nomap(mem)) - continue; - /* Truncate partial highmem entries */ if (start < max_low) start = max_low; - /* Find and exclude any reserved regions */ - for_each_memblock(reserved, res) { - unsigned long res_start, res_end; - - res_start = memblock_region_reserved_base_pfn(res); - res_end = memblock_region_reserved_end_pfn(res); - - if (res_end < start) - continue; - if (res_start < start) - res_start = start; - if (res_start > end) - res_start = end; - if (res_end > end) - res_end = end; - if (res_start != start) - free_area_high(start, res_start); - start = res_end; - if (start == end) - break; - } - - /* And now free anything which remains */ - if (start < end) - free_area_high(start, end); + for (; start < end; start++) + free_highmem_page(pfn_to_page(start)); } -} -#else -static void __init free_highpages(void) -{ -} #endif +} /* * Initialize memory pages. |
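The xtensa checksum.h and checksum.S hunks above move csum_and_copy_from_user()/csum_and_copy_to_user() to the three-argument form: the sum seed and error pointers are gone, an access_ok() failure makes the helpers return 0, and the assembly now seeds the running sum with -1 (movi a5, -1), presumably so that 0 is never produced by a successful copy. A caller sketch under those assumptions follows; recv_csum_from_user() and its buffer names are hypothetical and only illustrate the new error convention.

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <net/checksum.h>

/* Hypothetical caller: copy user data and fold its checksum in one pass. */
static int recv_csum_from_user(void *kbuf, const void __user *ubuf, int len,
			       __sum16 *folded)
{
	__wsum sum;

	sum = csum_and_copy_from_user(ubuf, kbuf, len);
	if (!sum)	/* assumption per the hunks above: 0 signals a fault */
		return -EFAULT;

	*folded = csum_fold(sum);
	return 0;
}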