Diffstat (limited to 'arch')
259 files changed, 3289 insertions, 1337 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index 3d81c5f1ba2d..d1b4ffd6e085 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -444,6 +444,13 @@ config HAVE_HARDLOCKUP_DETECTOR_ARCH It uses the same command line parameters, and sysctl interface, as the generic hardlockup detectors. +config UNWIND_USER + bool + +config HAVE_UNWIND_USER_FP + bool + select UNWIND_USER + config HAVE_PERF_REGS bool help diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 109a4cddcd13..80367f2cf821 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -7,6 +7,7 @@ config ALPHA select ARCH_HAS_DMA_OPS if PCI select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO + select ARCH_MODULE_NEEDS_WEAK_PER_CPU if SMP select ARCH_NO_PREEMPT select ARCH_NO_SG_CHAIN select ARCH_USE_CMPXCHG_LOCKREF diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h index 6923249f2d49..4383d66341dc 100644 --- a/arch/alpha/include/asm/percpu.h +++ b/arch/alpha/include/asm/percpu.h @@ -9,10 +9,9 @@ * way above 4G. * * Always use weak definitions for percpu variables in modules. + * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU + * in the Kconfig. */ -#if defined(MODULE) && defined(CONFIG_SMP) -#define ARCH_NEEDS_WEAK_PER_CPU -#endif #include <asm-generic/percpu.h> diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index b1bfbd11980d..d38f4d6759e4 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -17,6 +17,7 @@ #include <linux/vmalloc.h> #include <linux/mc146818rtc.h> #include <linux/rtc.h> +#include <linux/string.h> #include <linux/module.h> #include <linux/memblock.h> @@ -79,10 +80,12 @@ mk_resource_name(int pe, int port, char *str) { char tmp[80]; char *name; - - sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); - name = memblock_alloc_or_panic(strlen(tmp) + 1, SMP_CACHE_BYTES); - strcpy(name, tmp); + size_t sz; + + sz = scnprintf(tmp, sizeof(tmp), "PCI %s PE %d PORT %d", str, pe, port); + sz += 1; /* NUL terminator */ + name = memblock_alloc_or_panic(sz, SMP_CACHE_BYTES); + strscpy(name, tmp, sz); return name; } diff --git a/arch/arm/boot/dts/broadcom/bcm7445.dtsi b/arch/arm/boot/dts/broadcom/bcm7445.dtsi index 5ac2042515b8..c6307c7437e3 100644 --- a/arch/arm/boot/dts/broadcom/bcm7445.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm7445.dtsi @@ -237,7 +237,8 @@ ranges = <0x0 0x0 0x80000>; memc-ddr@2000 { - compatible = "brcm,brcmstb-memc-ddr"; + compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x", + "brcm,brcmstb-memc-ddr"; reg = <0x2000 0x800>; }; @@ -259,7 +260,8 @@ ranges = <0x0 0x80000 0x80000>; memc-ddr@2000 { - compatible = "brcm,brcmstb-memc-ddr"; + compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x", + "brcm,brcmstb-memc-ddr"; reg = <0x2000 0x800>; }; @@ -281,7 +283,8 @@ ranges = <0x0 0x100000 0x80000>; memc-ddr@2000 { - compatible = "brcm,brcmstb-memc-ddr"; + compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x", + "brcm,brcmstb-memc-ddr"; reg = <0x2000 0x800>; }; diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index d7e2ea27ce59..3389a70e4d49 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -617,8 +617,8 @@ static int sa1111_setup_gpios(struct sa1111 *sachip) sachip->gc.direction_input = sa1111_gpio_direction_input; sachip->gc.direction_output = sa1111_gpio_direction_output; sachip->gc.get = sa1111_gpio_get; - sachip->gc.set_rv = sa1111_gpio_set; - sachip->gc.set_multiple_rv = sa1111_gpio_set_multiple; + sachip->gc.set = sa1111_gpio_set; + sachip->gc.set_multiple = 
sa1111_gpio_set_multiple; sachip->gc.to_irq = sa1111_gpio_to_irq; sachip->gc.base = -1; sachip->gc.ngpio = 18; diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c index 2d3ee76c8e17..dddb73c96826 100644 --- a/arch/arm/common/scoop.c +++ b/arch/arm/common/scoop.c @@ -218,7 +218,7 @@ static int scoop_probe(struct platform_device *pdev) devptr->gpio.label = dev_name(&pdev->dev); devptr->gpio.base = inf->gpio_base; devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */ - devptr->gpio.set_rv = scoop_gpio_set; + devptr->gpio.set = scoop_gpio_set; devptr->gpio.get = scoop_gpio_get; devptr->gpio.direction_input = scoop_gpio_direction_input; devptr->gpio.direction_output = scoop_gpio_direction_output; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index e8810d300457..f2822eeefb95 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -794,10 +794,12 @@ CONFIG_SND=m CONFIG_SND_HDA_TEGRA=m CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_REALTEK=m CONFIG_SND_HDA_CODEC_REALTEK_LIB=m CONFIG_SND_HDA_CODEC_ALC269=m CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_HDMI_GENERIC=m +CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=m CONFIG_SND_HDA_CODEC_HDMI_TEGRA=m CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index 3a9bda2bf422..ba863b445417 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -225,7 +225,12 @@ CONFIG_SND_HDA_TEGRA=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_PATCH_LOADER=y CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_REALTEK_LIB=y +CONFIG_SND_HDA_CODEC_ALC269=y CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y +CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y +CONFIG_SND_HDA_CODEC_HDMI_TEGRA=y # CONFIG_SND_ARM is not set # CONFIG_SND_SPI is not set # CONFIG_SND_USB is not set diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index c60104dc1585..df5afe601e4a 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -206,7 +206,7 @@ static int ctr_encrypt(struct skcipher_request *req) while (walk.nbytes > 0) { const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - int bytes = walk.nbytes; + unsigned int bytes = walk.nbytes; if (unlikely(bytes < AES_BLOCK_SIZE)) src = dst = memcpy(buf + sizeof(buf) - bytes, diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h deleted file mode 100644 index f8500e5d6ea8..000000000000 --- a/arch/arm/include/asm/cti.h +++ /dev/null @@ -1,160 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASMARM_CTI_H -#define __ASMARM_CTI_H - -#include <asm/io.h> -#include <asm/hardware/coresight.h> - -/* The registers' definition is from section 3.2 of - * Embedded Cross Trigger Revision: r0p0 - */ -#define CTICONTROL 0x000 -#define CTISTATUS 0x004 -#define CTILOCK 0x008 -#define CTIPROTECTION 0x00C -#define CTIINTACK 0x010 -#define CTIAPPSET 0x014 -#define CTIAPPCLEAR 0x018 -#define CTIAPPPULSE 0x01c -#define CTIINEN 0x020 -#define CTIOUTEN 0x0A0 -#define CTITRIGINSTATUS 0x130 -#define CTITRIGOUTSTATUS 0x134 -#define CTICHINSTATUS 0x138 -#define CTICHOUTSTATUS 0x13c -#define CTIPERIPHID0 0xFE0 -#define CTIPERIPHID1 0xFE4 -#define CTIPERIPHID2 0xFE8 -#define CTIPERIPHID3 0xFEC -#define CTIPCELLID0 0xFF0 -#define CTIPCELLID1 0xFF4 -#define CTIPCELLID2 0xFF8 -#define CTIPCELLID3 0xFFC - -/* The below are 
from section 3.6.4 of - * CoreSight v1.0 Architecture Specification - */ -#define LOCKACCESS 0xFB0 -#define LOCKSTATUS 0xFB4 - -/** - * struct cti - cross trigger interface struct - * @base: mapped virtual address for the cti base - * @irq: irq number for the cti - * @trig_out_for_irq: triger out number which will cause - * the @irq happen - * - * cti struct used to operate cti registers. - */ -struct cti { - void __iomem *base; - int irq; - int trig_out_for_irq; -}; - -/** - * cti_init - initialize the cti instance - * @cti: cti instance - * @base: mapped virtual address for the cti base - * @irq: irq number for the cti - * @trig_out: triger out number which will cause - * the @irq happen - * - * called by machine code to pass the board dependent - * @base, @irq and @trig_out to cti. - */ -static inline void cti_init(struct cti *cti, - void __iomem *base, int irq, int trig_out) -{ - cti->base = base; - cti->irq = irq; - cti->trig_out_for_irq = trig_out; -} - -/** - * cti_map_trigger - use the @chan to map @trig_in to @trig_out - * @cti: cti instance - * @trig_in: trigger in number - * @trig_out: trigger out number - * @channel: channel number - * - * This function maps one trigger in of @trig_in to one trigger - * out of @trig_out using the channel @chan. - */ -static inline void cti_map_trigger(struct cti *cti, - int trig_in, int trig_out, int chan) -{ - void __iomem *base = cti->base; - unsigned long val; - - val = __raw_readl(base + CTIINEN + trig_in * 4); - val |= BIT(chan); - __raw_writel(val, base + CTIINEN + trig_in * 4); - - val = __raw_readl(base + CTIOUTEN + trig_out * 4); - val |= BIT(chan); - __raw_writel(val, base + CTIOUTEN + trig_out * 4); -} - -/** - * cti_enable - enable the cti module - * @cti: cti instance - * - * enable the cti module - */ -static inline void cti_enable(struct cti *cti) -{ - __raw_writel(0x1, cti->base + CTICONTROL); -} - -/** - * cti_disable - disable the cti module - * @cti: cti instance - * - * enable the cti module - */ -static inline void cti_disable(struct cti *cti) -{ - __raw_writel(0, cti->base + CTICONTROL); -} - -/** - * cti_irq_ack - clear the cti irq - * @cti: cti instance - * - * clear the cti irq - */ -static inline void cti_irq_ack(struct cti *cti) -{ - void __iomem *base = cti->base; - unsigned long val; - - val = __raw_readl(base + CTIINTACK); - val |= BIT(cti->trig_out_for_irq); - __raw_writel(val, base + CTIINTACK); -} - -/** - * cti_unlock - unlock cti module - * @cti: cti instance - * - * unlock the cti module, or else any writes to the cti - * module is not allowed. - */ -static inline void cti_unlock(struct cti *cti) -{ - __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS); -} - -/** - * cti_lock - lock cti module - * @cti: cti instance - * - * lock the cti module, so any writes to the cti - * module will be not allowed. 
- */ -static inline void cti_lock(struct cti *cti) -{ - __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS); -} -#endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a41c93988d2c..0bfd66c7ada0 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -1004,7 +1004,7 @@ static void __init reserve_crashkernel(void) total_mem = get_total_mem(); ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base, - NULL, NULL); + NULL, NULL, NULL); /* invalid value specified or crashkernel=0 */ if (ret || !crash_size) return; diff --git a/arch/arm/mach-s3c/gpio-samsung.c b/arch/arm/mach-s3c/gpio-samsung.c index 206a492fbaf5..81e198e5a6d3 100644 --- a/arch/arm/mach-s3c/gpio-samsung.c +++ b/arch/arm/mach-s3c/gpio-samsung.c @@ -517,7 +517,7 @@ static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip) if (!gc->direction_output) gc->direction_output = samsung_gpiolib_2bit_output; if (!gc->set) - gc->set_rv = samsung_gpiolib_set; + gc->set = samsung_gpiolib_set; if (!gc->get) gc->get = samsung_gpiolib_get; diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index bad8aa661e9d..2b833aa0212b 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -80,7 +80,7 @@ void ASSABET_BCR_frob(unsigned int mask, unsigned int val) { unsigned long m = mask, v = val; - assabet_bcr_gc->set_multiple_rv(assabet_bcr_gc, &m, &v); + assabet_bcr_gc->set_multiple(assabet_bcr_gc, &m, &v); } EXPORT_SYMBOL(ASSABET_BCR_frob); diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 6516598c8a71..88fe79f0a4ed 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -126,7 +126,7 @@ void neponset_ncr_frob(unsigned int mask, unsigned int val) unsigned long m = mask, v = val; if (nep) - n->gpio[0]->set_multiple_rv(n->gpio[0], &m, &v); + n->gpio[0]->set_multiple(n->gpio[0], &m, &v); else WARN(1, "nep unset\n"); } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index ab01b51de559..46169fe42c61 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -268,7 +268,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) int sig, code; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; - unsigned long vm_flags = VM_ACCESS_FLAGS; + vm_flags_t vm_flags = VM_ACCESS_FLAGS; if (kprobe_page_fault(regs, fsr)) return 0; diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 6f09f65e3d95..49e29b7894a3 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -540,7 +540,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio, ochip->chip.direction_input = orion_gpio_direction_input; ochip->chip.get = orion_gpio_get; ochip->chip.direction_output = orion_gpio_direction_output; - ochip->chip.set_rv = orion_gpio_set; + ochip->chip.set = orion_gpio_set; ochip->chip.to_irq = orion_gpio_to_irq; ochip->chip.base = gpio_base; ochip->chip.ngpio = ngpio; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3c117b1fa198..e9bbfacc35a6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -42,7 +42,6 @@ config ARM64 select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PTDUMP - select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_HW_PTE_YOUNG select ARCH_HAS_SETUP_DMA_OPS diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 90d6b028fbbb..a88f5ad9328c 100644 --- a/arch/arm64/Kconfig.platforms +++ 
b/arch/arm64/Kconfig.platforms @@ -333,7 +333,6 @@ config ARCH_STM32 bool "STMicroelectronics STM32 SoC Family" select GPIOLIB select PINCTRL - select PINCTRL_STM32MP257 select ARM_SMC_MBOX select ARM_SCMI_PROTOCOL select REGULATOR diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index dd065b1bf94a..8877953ce292 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -1430,6 +1430,31 @@ status = "disabled"; }; + ufshci: ufshci@11270000 { + compatible = "mediatek,mt8195-ufshci"; + reg = <0 0x11270000 0 0x2300>; + interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH 0>; + phys = <&ufsphy>; + clocks = <&infracfg_ao CLK_INFRA_AO_AES_UFSFDE>, + <&infracfg_ao CLK_INFRA_AO_AES>, + <&infracfg_ao CLK_INFRA_AO_UFS_TICK>, + <&infracfg_ao CLK_INFRA_AO_UNIPRO_SYS>, + <&infracfg_ao CLK_INFRA_AO_UNIPRO_TICK>, + <&infracfg_ao CLK_INFRA_AO_UFS_MP_SAP_B>, + <&infracfg_ao CLK_INFRA_AO_UFS_TX_SYMBOL>, + <&infracfg_ao CLK_INFRA_AO_PERI_UFS_MEM_SUB>; + clock-names = "ufs", "ufs_aes", "ufs_tick", + "unipro_sysclk", "unipro_tick", + "unipro_mp_bclk", "ufs_tx_symbol", + "ufs_mem_sub"; + freq-table-hz = <0 0>, <0 0>, <0 0>, + <0 0>, <0 0>, <0 0>, + <0 0>, <0 0>; + + mediatek,ufs-disable-mcq; + status = "disabled"; + }; + lvts_mcu: thermal-sensor@11278000 { compatible = "mediatek,mt8195-lvts-mcu"; reg = <0 0x11278000 0 0x1000>; diff --git a/arch/arm64/boot/dts/nvidia/tegra264.dtsi b/arch/arm64/boot/dts/nvidia/tegra264.dtsi index 62c87a387b14..e02659efa233 100644 --- a/arch/arm64/boot/dts/nvidia/tegra264.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra264.dtsi @@ -11,7 +11,6 @@ interrupt-parent = <&gic>; #address-cells = <2>; #size-cells = <2>; - numa-node-id = <0>; reserved-memory { #address-cells = <2>; @@ -341,7 +340,6 @@ status = "okay"; enable-method = "psci"; - numa-node-id = <0>; i-cache-size = <65536>; i-cache-line-size = <64>; @@ -358,7 +356,6 @@ status = "okay"; enable-method = "psci"; - numa-node-id = <0>; i-cache-size = <65536>; i-cache-line-size = <64>; diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h index 6e73809f6492..a5f13801b784 100644 --- a/arch/arm64/include/asm/asm-bug.h +++ b/arch/arm64/include/asm/asm-bug.h @@ -21,16 +21,21 @@ #endif #ifdef CONFIG_GENERIC_BUG - -#define __BUG_ENTRY(flags) \ +#define __BUG_ENTRY_START \ .pushsection __bug_table,"aw"; \ .align 2; \ 14470: .long 14471f - .; \ -_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ - .short flags; \ + +#define __BUG_ENTRY_END \ .align 2; \ .popsection; \ 14471: + +#define __BUG_ENTRY(flags) \ + __BUG_ENTRY_START \ +_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ + .short flags; \ + __BUG_ENTRY_END #else #define __BUG_ENTRY(flags) #endif @@ -41,4 +46,24 @@ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ #define ASM_BUG() ASM_BUG_FLAGS(0) +#ifdef CONFIG_DEBUG_BUGVERBOSE +#define __BUG_LOCATION_STRING(file, line) \ + ".long " file "- .;" \ + ".short " line ";" +#else +#define __BUG_LOCATION_STRING(file, line) +#endif + +#define __BUG_ENTRY_STRING(file, line, flags) \ + __stringify(__BUG_ENTRY_START) \ + __BUG_LOCATION_STRING(file, line) \ + ".short " flags ";" \ + __stringify(__BUG_ENTRY_END) + +#define ARCH_WARN_ASM(file, line, flags, size) \ + __BUG_ENTRY_STRING(file, line, flags) \ + __stringify(brk BUG_BRK_IMM) + +#define ARCH_WARN_REACHABLE + #endif /* __ASM_ASM_BUG_H */ diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h new file mode 100644 index 000000000000..ab90f0351b7a --- /dev/null +++ 
b/arch/arm64/include/asm/cfi.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM64_CFI_H +#define _ASM_ARM64_CFI_H + +#define __bpfcall + +#endif /* _ASM_ARM64_CFI_H */ diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h index 21df8bbd2668..8770c7ee759f 100644 --- a/arch/arm64/include/asm/mman.h +++ b/arch/arm64/include/asm/mman.h @@ -11,10 +11,10 @@ #include <linux/shmem_fs.h> #include <linux/types.h> -static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, +static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { - unsigned long ret = 0; + vm_flags_t ret = 0; if (system_supports_bti() && (prot & PROT_BTI)) ret |= VM_ARM64_BTI; @@ -34,8 +34,8 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) -static inline unsigned long arch_calc_vm_flag_bits(struct file *file, - unsigned long flags) +static inline vm_flags_t arch_calc_vm_flag_bits(struct file *file, + unsigned long flags) { /* * Only allow MTE on anonymous mappings as these are guaranteed to be @@ -68,7 +68,7 @@ static inline bool arch_validate_prot(unsigned long prot, } #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr) -static inline bool arch_validate_flags(unsigned long vm_flags) +static inline bool arch_validate_flags(vm_flags_t vm_flags) { if (system_supports_mte()) { /* diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 7830d031742e..85dceb1c66f4 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -17,7 +17,6 @@ #define PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 2) /* only for swp ptes */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) -#define PTE_DEVMAP (_AT(pteval_t, 1) << 57) /* * PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 192d86e1cc76..abd2dee416b3 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -190,7 +190,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) #define pte_user(pte) (!!(pte_val(pte) & PTE_USER)) #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) -#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP)) #define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \ PTE_ATTRINDX(MT_NORMAL_TAGGED)) @@ -372,11 +371,6 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) return __pmd(pmd_val(pmd) | PMD_SECT_CONT); } -static inline pte_t pte_mkdevmap(pte_t pte) -{ - return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); -} - #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP static inline int pte_uffd_wp(pte_t pte) { @@ -653,14 +647,6 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) return __pmd((pmd_val(pmd) & ~mask) | val); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) -#endif -static inline pmd_t pmd_mkdevmap(pmd_t pmd) -{ - return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP))); -} - #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP #define pmd_special(pte) (!!((pmd_val(pte) & PTE_SPECIAL))) static inline pmd_t pmd_mkspecial(pmd_t pmd) @@ -1302,16 +1288,6 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, return __ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); } - -static inline 
int pud_devmap(pud_t pud) -{ - return 0; -} - -static inline int pgd_devmap(pgd_t pgd) -{ - return 0; -} #endif #ifdef CONFIG_PAGE_TABLE_CHECK @@ -1643,6 +1619,14 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, */ #define arch_wants_old_prefaulted_pte cpu_has_hw_af +/* + * Request exec memory is read into pagecache in at least 64K folios. This size + * can be contpte-mapped when 4K base pages are in use (16 pages into 1 iTLB + * entry), and HPA can coalesce it (4 pages into 1 TLB entry) when 16K base + * pages are in use. + */ +#define exec_folio_order() ilog2(SZ_64K >> PAGE_SHIFT) + static inline bool pud_sect_supported(void) { return PAGE_SIZE == SZ_4K; @@ -1659,6 +1643,16 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t new_pte); +#define modify_prot_start_ptes modify_prot_start_ptes +extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + unsigned int nr); + +#define modify_prot_commit_ptes modify_prot_commit_ptes +extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte, + unsigned int nr); + #ifdef CONFIG_ARM64_CONTPTE /* diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index aa9efee17277..18a5dc0c9a54 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -323,17 +323,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm) } /* - * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure - * all the previously issued TLBIs targeting mm have completed. But since we - * can be executing on a remote CPU, a DSB cannot guarantee this like it can - * for arch_tlbbatch_flush(). Our only option is to flush the entire mm. - */ -static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm) -{ - flush_tlb_mm(mm); -} - -/* * To support TLB batched flush for multiple pages unmapping, we only send * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the * completion at the end in arch_tlbbatch_flush(). 
Since we've already issued diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index fcc783e8e9bb..d816ff44faff 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -555,7 +555,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, const struct fault_info *inf; struct mm_struct *mm = current->mm; vm_fault_t fault; - unsigned long vm_flags; + vm_flags_t vm_flags; unsigned int mm_flags = FAULT_FLAG_DEFAULT; unsigned long addr = untagged_addr(far); struct vm_area_struct *vma; diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 0c8c35dd645e..ea84a61ed508 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -106,7 +106,7 @@ static void __init arch_reserve_crashkernel(void) ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base, - &low_size, &high); + &low_size, NULL, &high); if (ret) return; diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index c86c348857c4..08ee177432c2 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -81,7 +81,7 @@ static int __init adjust_protection_map(void) } arch_initcall(adjust_protection_map); -pgprot_t vm_get_page_prot(unsigned long vm_flags) +pgprot_t vm_get_page_prot(vm_flags_t vm_flags) { ptdesc_t prot; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 00ab1d648db6..34e5d78af076 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -26,6 +26,7 @@ #include <linux/set_memory.h> #include <linux/kfence.h> #include <linux/pkeys.h> +#include <linux/mm_inline.h> #include <asm/barrier.h> #include <asm/cputype.h> @@ -1524,24 +1525,41 @@ static int __init prevent_bootmem_remove_init(void) early_initcall(prevent_bootmem_remove_init); #endif -pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) +pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, unsigned int nr) { + pte_t pte = get_and_clear_ptes(vma->vm_mm, addr, ptep, nr); + if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { /* * Break-before-make (BBM) is required for all user space mappings * when the permission changes from executable to non-executable * in cases where cpu is affected with errata #2645198. 
*/ - if (pte_user_exec(ptep_get(ptep))) - return ptep_clear_flush(vma, addr, ptep); + if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte)) + __flush_tlb_range(vma, addr, nr * PAGE_SIZE, + PAGE_SIZE, true, 3); } - return ptep_get_and_clear(vma->vm_mm, addr, ptep); + + return pte; +} + +pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) +{ + return modify_prot_start_ptes(vma, addr, ptep, 1); +} + +void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte, + unsigned int nr) +{ + set_ptes(vma->vm_mm, addr, ptep, pte, nr); } void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { - set_pte_at(vma->vm_mm, addr, ptep, pte); + modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, 1); } /* diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 68bf1a125502..1e308328c079 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/debugfs.h> -#include <linux/memory_hotplug.h> #include <linux/seq_file.h> #include <asm/ptdump.h> @@ -9,9 +8,7 @@ static int ptdump_show(struct seq_file *m, void *v) { struct ptdump_info *info = m->private; - get_online_mems(); ptdump_walk(m, info); - put_online_mems(); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 97dfd5432809..52ffe115a8c4 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -10,6 +10,7 @@ #include <linux/arm-smccc.h> #include <linux/bitfield.h> #include <linux/bpf.h> +#include <linux/cfi.h> #include <linux/filter.h> #include <linux/memory.h> #include <linux/printk.h> @@ -114,6 +115,14 @@ static inline void emit(const u32 insn, struct jit_ctx *ctx) ctx->idx++; } +static inline void emit_u32_data(const u32 data, struct jit_ctx *ctx) +{ + if (ctx->image != NULL && ctx->write) + ctx->image[ctx->idx] = data; + + ctx->idx++; +} + static inline void emit_a64_mov_i(const int is64, const int reg, const s32 val, struct jit_ctx *ctx) { @@ -174,6 +183,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx) emit(insn, ctx); } +static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx) +{ + if (IS_ENABLED(CONFIG_CFI_CLANG)) + emit_u32_data(hash, ctx); +} + /* * Kernel addresses in the vmalloc space use at most 48 bits, and the * remaining bits are guaranteed to be 0x1. So we can compose the address @@ -503,7 +518,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf) const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; const u8 priv_sp = bpf2a64[PRIVATE_SP]; void __percpu *priv_stack_ptr; - const int idx0 = ctx->idx; int cur_offset; /* @@ -529,6 +543,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf) * */ + emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx); + const int idx0 = ctx->idx; + /* bpf function may be invoked by 3 instruction types: * 1. bl, attached via freplace to bpf prog via short jump * 2. 
br, attached via freplace to bpf prog via long jump @@ -2146,9 +2163,9 @@ skip_init_ctx: jit_data->ro_header = ro_header; } - prog->bpf_func = (void *)ctx.ro_image; + prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset(); prog->jited = 1; - prog->jited_len = prog_size; + prog->jited_len = prog_size - cfi_get_offset(); if (!prog->is_func || extra_pass) { int i; @@ -2527,6 +2544,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, /* return address locates above FP */ retaddr_off = stack_size + 8; + if (flags & BPF_TRAMP_F_INDIRECT) { + /* + * Indirect call for bpf_struct_ops + */ + emit_kcfi(cfi_get_func_hash(func_addr), ctx); + } /* bpf trampoline may be invoked by 3 instruction types: * 1. bl, attached to bpf prog or kernel function via short jump * 2. br, attached to bpf prog or kernel function via long jump @@ -3045,6 +3068,7 @@ void bpf_jit_free(struct bpf_prog *prog) sizeof(jit_data->header->size)); kfree(jit_data); } + prog->bpf_func -= cfi_get_offset(); hdr = bpf_jit_binary_pack_hdr(prog); bpf_jit_binary_pack_free(hdr, NULL); priv_stack_ptr = prog->aux->priv_stack_ptr; diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 03ee9c2fd3d5..f0abc38c40ac 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -24,7 +24,6 @@ config LOONGARCH select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PREEMPT_LAZY - select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_DIRECT_MAP diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts index a34734a6c3ce..018ed904352a 100644 --- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts @@ -41,6 +41,15 @@ }; }; +&apbdma3 { + status = "okay"; +}; + +&mmc0 { + status = "okay"; + bus-width = <4>; +}; + &gmac0 { status = "okay"; diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi index 760c60eebb89..588ebc3bded4 100644 --- a/arch/loongarch/boot/dts/loongson-2k0500.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi @@ -104,7 +104,7 @@ status = "disabled"; }; - dma-controller@1fe10c20 { + apbdma2: dma-controller@1fe10c20 { compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma"; reg = <0 0x1fe10c20 0 0x8>; interrupt-parent = <&eiointc>; @@ -114,7 +114,7 @@ status = "disabled"; }; - dma-controller@1fe10c30 { + apbdma3: dma-controller@1fe10c30 { compatible = "loongson,ls2k0500-apbdma", "loongson,ls2k1000-apbdma"; reg = <0 0x1fe10c30 0 0x8>; interrupt-parent = <&eiointc>; @@ -437,6 +437,30 @@ status = "disabled"; }; + mmc0: mmc@1ff64000 { + compatible = "loongson,ls2k0500-mmc"; + reg = <0 0x1ff64000 0 0x2000>, + <0 0x1fe10100 0 0x4>; + interrupt-parent = <&eiointc>; + interrupts = <57>; + dmas = <&apbdma3 0>; + dma-names = "rx-tx"; + clocks = <&clk LOONGSON2_APB_CLK>; + status = "disabled"; + }; + + mmc@1ff66000 { + compatible = "loongson,ls2k0500-mmc"; + reg = <0 0x1ff66000 0 0x2000>, + <0 0x1fe10100 0 0x4>; + interrupt-parent = <&eiointc>; + interrupts = <58>; + dmas = <&apbdma2 0>; + dma-names = "rx-tx"; + clocks = <&clk LOONGSON2_APB_CLK>; + status = "disabled"; + }; + pmc: power-management@1ff6c000 { compatible = "loongson,ls2k0500-pmc", "syscon"; reg = <0x0 0x1ff6c000 0x0 0x58>; diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts index 78ea995abf1c..d9a452ada5d7 100644 --- 
a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts @@ -48,6 +48,19 @@ }; }; +&apbdma1 { + status = "okay"; +}; + +&mmc { + status = "okay"; + + pinctrl-0 = <&sdio_pins_default>; + pinctrl-names = "default"; + bus-width = <4>; + cd-gpios = <&gpio0 22 GPIO_ACTIVE_LOW>; +}; + &gmac0 { status = "okay"; diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi index 1da3beb00f0e..d8e01e2534dd 100644 --- a/arch/loongarch/boot/dts/loongson-2k1000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi @@ -187,14 +187,14 @@ <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, - <>, - <26 IRQ_TYPE_LEVEL_HIGH>, + <0 IRQ_TYPE_NONE>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, + <26 IRQ_TYPE_NONE>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, <26 IRQ_TYPE_LEVEL_HIGH>, @@ -209,13 +209,13 @@ <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, - <>, + <0 IRQ_TYPE_NONE>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, - <>, - <>, + <0 IRQ_TYPE_NONE>, + <0 IRQ_TYPE_NONE>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, <27 IRQ_TYPE_LEVEL_HIGH>, @@ -256,7 +256,7 @@ status = "disabled"; }; - dma-controller@1fe00c10 { + apbdma1: dma-controller@1fe00c10 { compatible = "loongson,ls2k1000-apbdma"; reg = <0x0 0x1fe00c10 0x0 0x8>; interrupt-parent = <&liointc1>; @@ -405,6 +405,18 @@ status = "disabled"; }; + mmc: mmc@1fe2c000 { + compatible = "loongson,ls2k1000-mmc"; + reg = <0 0x1fe2c000 0 0x68>, + <0 0x1fe00438 0 0x8>; + interrupt-parent = <&liointc0>; + interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_APB_CLK>; + dmas = <&apbdma1 0>; + dma-names = "rx-tx"; + status = "disabled"; + }; + spi0: spi@1fff0220 { compatible = "loongson,ls2k1000-spi"; reg = <0x0 0x1fff0220 0x0 0x10>; diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts index ea9e6985d0e9..3c6b12220386 100644 --- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts @@ -39,6 +39,16 @@ }; }; +&emmc { + status = "okay"; + + bus-width = <8>; + cap-mmc-highspeed; + mmc-hs200-1_8v; + no-sd; + no-sdio; +}; + &sata { status = "okay"; }; diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi index 9e0411f2754c..00cc485b753b 100644 --- a/arch/loongarch/boot/dts/loongson-2k2000.dtsi +++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi @@ -259,6 +259,24 @@ status = "disabled"; }; + emmc: mmc@79990000 { + compatible = "loongson,ls2k2000-mmc"; + reg = <0x0 0x79990000 0x0 0x1000>; + interrupt-parent = <&pic>; + interrupts = <51 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_EMMC_CLK>; + status = "disabled"; + }; + + mmc@79991000 { + compatible = "loongson,ls2k2000-mmc"; + reg = <0x0 0x79991000 0x0 0x1000>; + interrupt-parent = <&pic>; + interrupts = <50 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LOONGSON2_EMMC_CLK>; + status = "disabled"; + }; + pcie@1a000000 { compatible = "loongson,ls2k-pci"; reg = <0x0 0x1a000000 0x0 0x02000000>, diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 68e337aed2bb..34eaee0384c9 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ 
-783,8 +783,23 @@ CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_PATCH_LOADER=y CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_REALTEK_LIB=y +CONFIG_SND_HDA_CODEC_ALC260=y +CONFIG_SND_HDA_CODEC_ALC262=y +CONFIG_SND_HDA_CODEC_ALC268=y +CONFIG_SND_HDA_CODEC_ALC269=y +CONFIG_SND_HDA_CODEC_ALC662=y +CONFIG_SND_HDA_CODEC_ALC680=y +CONFIG_SND_HDA_CODEC_ALC861=y +CONFIG_SND_HDA_CODEC_ALC861VD=y +CONFIG_SND_HDA_CODEC_ALC880=y +CONFIG_SND_HDA_CODEC_ALC882=y CONFIG_SND_HDA_CODEC_SIGMATEL=y CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y +CONFIG_SND_HDA_CODEC_HDMI_INTEL=y +CONFIG_SND_HDA_CODEC_HDMI_ATI=y +CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y CONFIG_SND_HDA_CODEC_CONEXANT=y CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index 4dc4b3e04225..ab68b594f889 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -10,20 +10,6 @@ uint64_t pmd_to_entrylo(unsigned long pmd_val); -#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, - unsigned long len) -{ - unsigned long task_size = STACK_TOP; - - if (len > task_size) - return -ENOMEM; - if (task_size - len < addr) - return -EINVAL; - return 0; -} - #define __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 3089785ca97e..277d2140676b 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -497,6 +497,7 @@ void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs); int larch_insn_read(void *addr, u32 *insnp); int larch_insn_write(void *addr, u32 insn); int larch_insn_patch_text(void *addr, u32 insn); +int larch_insn_text_copy(void *dst, void *src, size_t len); u32 larch_insn_gen_nop(void); u32 larch_insn_gen_b(unsigned long pc, unsigned long dest); @@ -510,6 +511,8 @@ u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj); u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm); u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm); u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); +u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); +u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); static inline bool signed_imm_check(long val, unsigned int bit) diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index a0994d226eff..09dfd7eb406e 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -451,6 +451,13 @@ #define LOONGARCH_CSR_KS6 0x36 #define LOONGARCH_CSR_KS7 0x37 #define LOONGARCH_CSR_KS8 0x38 +#define LOONGARCH_CSR_KS9 0x39 +#define LOONGARCH_CSR_KS10 0x3a +#define LOONGARCH_CSR_KS11 0x3b +#define LOONGARCH_CSR_KS12 0x3c +#define LOONGARCH_CSR_KS13 0x3d +#define LOONGARCH_CSR_KS14 0x3e +#define LOONGARCH_CSR_KS15 0x3f /* Exception allocated KS0, KS1 and KS2 statically */ #define EXCEPTION_KS0 LOONGARCH_CSR_KS0 diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 7bbfb04a54cc..2fc3789220ac 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ 
b/arch/loongarch/include/asm/pgtable-bits.h @@ -22,7 +22,6 @@ #define _PAGE_PFN_SHIFT 12 #define _PAGE_SWP_EXCLUSIVE_SHIFT 23 #define _PAGE_PFN_END_SHIFT 48 -#define _PAGE_DEVMAP_SHIFT 59 #define _PAGE_PRESENT_INVALID_SHIFT 60 #define _PAGE_NO_READ_SHIFT 61 #define _PAGE_NO_EXEC_SHIFT 62 @@ -36,7 +35,6 @@ #define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) #define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT) #define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT) -#define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT) /* We borrow bit 23 to store the exclusive marker in swap PTEs. */ #define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT) @@ -76,8 +74,8 @@ #define __READABLE (_PAGE_VALID) #define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE) -#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) -#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) +#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) +#define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \ _PAGE_USER | _CACHE_CC) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index f2aeff544cee..bd128696e96d 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -409,9 +409,6 @@ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ -static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); } -static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; } - #define pte_accessible pte_accessible static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) { @@ -540,17 +537,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd) return pmd; } -static inline int pmd_devmap(pmd_t pmd) -{ - return !!(pmd_val(pmd) & _PAGE_DEVMAP); -} - -static inline pmd_t pmd_mkdevmap(pmd_t pmd) -{ - pmd_val(pmd) |= _PAGE_DEVMAP; - return pmd; -} - static inline struct page *pmd_page(pmd_t pmd) { if (pmd_trans_huge(pmd)) @@ -606,11 +592,6 @@ static inline long pmd_protnone(pmd_t pmd) #define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0) #define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0) -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define pud_devmap(pud) (0) -#define pgd_devmap(pgd) (0) -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - /* * We provide our own get_unmapped area to cope with the virtual aliasing * constraints placed on us by the cache architecture. 
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 27144de5c5fe..c0a5dc9aeae2 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -39,16 +39,19 @@ void __init init_environ(void) static int __init init_cpu_fullname(void) { - struct device_node *root; int cpu, ret; - char *model; + char *cpuname; + const char *model; + struct device_node *root; /* Parsing cpuname from DTS model property */ root = of_find_node_by_path("/"); - ret = of_property_read_string(root, "model", (const char **)&model); + ret = of_property_read_string(root, "model", &model); + if (ret == 0) { + cpuname = kstrdup(model, GFP_KERNEL); + loongson_sysconf.cpuname = strsep(&cpuname, " "); + } of_node_put(root); - if (ret == 0) - loongson_sysconf.cpuname = strsep(&model, " "); if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) { for (cpu = 0; cpu < NR_CPUS; cpu++) diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index 14d7d700bcb9..72ecfed29d55 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -4,6 +4,8 @@ */ #include <linux/sizes.h> #include <linux/uaccess.h> +#include <linux/set_memory.h> +#include <linux/stop_machine.h> #include <asm/cacheflush.h> #include <asm/inst.h> @@ -218,6 +220,50 @@ int larch_insn_patch_text(void *addr, u32 insn) return ret; } +struct insn_copy { + void *dst; + void *src; + size_t len; + unsigned int cpu; +}; + +static int text_copy_cb(void *data) +{ + int ret = 0; + struct insn_copy *copy = data; + + if (smp_processor_id() == copy->cpu) { + ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len); + if (ret) + pr_err("%s: operation failed\n", __func__); + } + + flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len); + + return ret; +} + +int larch_insn_text_copy(void *dst, void *src, size_t len) +{ + int ret = 0; + size_t start, end; + struct insn_copy copy = { + .dst = dst, + .src = src, + .len = len, + .cpu = smp_processor_id(), + }; + + start = round_down((size_t)dst, PAGE_SIZE); + end = round_up((size_t)dst + len, PAGE_SIZE); + + set_memory_rw(start, (end - start) / PAGE_SIZE); + ret = stop_machine(text_copy_cb, ©, cpu_online_mask); + set_memory_rox(start, (end - start) / PAGE_SIZE); + + return ret; +} + u32 larch_insn_gen_nop(void) { return INSN_NOP; @@ -323,6 +369,34 @@ u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) return insn.word; } +u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) +{ + union loongarch_instruction insn; + + if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) { + pr_warn("The generated beq instruction is out of range.\n"); + return INSN_BREAK; + } + + emit_beq(&insn, rj, rd, imm >> 2); + + return insn.word; +} + +u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) +{ + union loongarch_instruction insn; + + if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) { + pr_warn("The generated bne instruction is out of range.\n"); + return INSN_BREAK; + } + + emit_bne(&insn, rj, rd, imm >> 2); + + return insn.word; +} + u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) { union loongarch_instruction insn; diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S index 84e6de2fd973..8b5140ac9ea1 100644 --- a/arch/loongarch/kernel/relocate_kernel.S +++ b/arch/loongarch/kernel/relocate_kernel.S @@ -109,4 +109,4 @@ SYM_CODE_END(kexec_smp_wait) relocate_new_kernel_end: 
.section ".data" -SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel) +SYM_DATA(relocate_new_kernel_size, .quad relocate_new_kernel_end - relocate_new_kernel) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index b99fbb388fe0..075b79b2c1d3 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -191,6 +191,16 @@ static int __init early_parse_mem(char *p) return -EINVAL; } + start = 0; + size = memparse(p, &p); + if (*p == '@') /* Every mem=... should contain '@' */ + start = memparse(p + 1, &p); + else { /* Only one mem=... is allowed if no '@' */ + usermem = 1; + memblock_enforce_memory_limit(size); + return 0; + } + /* * If a user specifies memory size, we * blow away any automatically generated @@ -201,14 +211,6 @@ static int __init early_parse_mem(char *p) memblock_remove(memblock_start_of_DRAM(), memblock_end_of_DRAM() - memblock_start_of_DRAM()); } - start = 0; - size = memparse(p, &p); - if (*p == '@') - start = memparse(p + 1, &p); - else { - pr_err("Invalid format!\n"); - return -EINVAL; - } if (!IS_ENABLED(CONFIG_NUMA)) memblock_add(start, size); @@ -265,7 +267,7 @@ static void __init arch_reserve_crashkernel(void) return; ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), - &crash_size, &crash_base, &low_size, &high); + &crash_size, &crash_base, &low_size, NULL, &high); if (ret) return; diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c index 0005be49b056..0d5fa64a2225 100644 --- a/arch/loongarch/kernel/unwind_orc.c +++ b/arch/loongarch/kernel/unwind_orc.c @@ -508,7 +508,7 @@ bool unwind_next_frame(struct unwind_state *state) state->pc = bt_address(pc); if (!state->pc) { - pr_err("cannot find unwind pc at %pK\n", (void *)pc); + pr_err("cannot find unwind pc at %p\n", (void *)pc); goto err; } diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c index 99165903908a..f5e910b68229 100644 --- a/arch/loongarch/mm/pageattr.c +++ b/arch/loongarch/mm/pageattr.c @@ -118,7 +118,7 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgp return 0; mmap_write_lock(&init_mm); - ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks); + ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL, &masks); mmap_write_unlock(&init_mm); flush_tlb_kernel_range(start, end); diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index fa1500d4aa3e..abfdb6bb5c38 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -4,13 +4,20 @@ * * Copyright (C) 2022 Loongson Technology Corporation Limited */ +#include <linux/memory.h> #include "bpf_jit.h" -#define REG_TCC LOONGARCH_GPR_A6 -#define TCC_SAVED LOONGARCH_GPR_S5 +#define LOONGARCH_MAX_REG_ARGS 8 + +#define LOONGARCH_LONG_JUMP_NINSNS 5 +#define LOONGARCH_LONG_JUMP_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4) -#define SAVE_RA BIT(0) -#define SAVE_TCC BIT(1) +#define LOONGARCH_FENTRY_NINSNS 2 +#define LOONGARCH_FENTRY_NBYTES (LOONGARCH_FENTRY_NINSNS * 4) +#define LOONGARCH_BPF_FENTRY_NBYTES (LOONGARCH_LONG_JUMP_NINSNS * 4) + +#define REG_TCC LOONGARCH_GPR_A6 +#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (round_up(stack, 16) - 80) static const int regmap[] = { /* return value from in-kernel function, and exit value for eBPF program */ @@ -32,32 +39,57 @@ static const int regmap[] = { [BPF_REG_AX] = LOONGARCH_GPR_T0, }; -static void mark_call(struct jit_ctx *ctx) +static void 
prepare_bpf_tail_call_cnt(struct jit_ctx *ctx, int *store_offset) { - ctx->flags |= SAVE_RA; -} + const struct bpf_prog *prog = ctx->prog; + const bool is_main_prog = !bpf_is_subprog(prog); -static void mark_tail_call(struct jit_ctx *ctx) -{ - ctx->flags |= SAVE_TCC; -} + if (is_main_prog) { + /* + * LOONGARCH_GPR_T3 = MAX_TAIL_CALL_CNT + * if (REG_TCC > T3 ) + * std REG_TCC -> LOONGARCH_GPR_SP + store_offset + * else + * std REG_TCC -> LOONGARCH_GPR_SP + store_offset + * REG_TCC = LOONGARCH_GPR_SP + store_offset + * + * std REG_TCC -> LOONGARCH_GPR_SP + store_offset + * + * The purpose of this code is to first push the TCC into stack, + * and then push the address of TCC into stack. + * In cases where bpf2bpf and tailcall are used in combination, + * the value in REG_TCC may be a count or an address, + * these two cases need to be judged and handled separately. + */ + emit_insn(ctx, addid, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT); + *store_offset -= sizeof(long); -static bool seen_call(struct jit_ctx *ctx) -{ - return (ctx->flags & SAVE_RA); -} + emit_cond_jmp(ctx, BPF_JGT, REG_TCC, LOONGARCH_GPR_T3, 4); -static bool seen_tail_call(struct jit_ctx *ctx) -{ - return (ctx->flags & SAVE_TCC); -} + /* + * If REG_TCC < MAX_TAIL_CALL_CNT, the value in REG_TCC is a count, + * push tcc into stack + */ + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); -static u8 tail_call_reg(struct jit_ctx *ctx) -{ - if (seen_call(ctx)) - return TCC_SAVED; + /* Push the address of TCC into the REG_TCC */ + emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_SP, *store_offset); + + emit_uncond_jmp(ctx, 2); + + /* + * If REG_TCC > MAX_TAIL_CALL_CNT, the value in REG_TCC is an address, + * push tcc_ptr into stack + */ + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); + } else { + *store_offset -= sizeof(long); + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); + } - return REG_TCC; + /* Push tcc_ptr into stack */ + *store_offset -= sizeof(long); + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_SP, *store_offset); } /* @@ -80,6 +112,10 @@ static u8 tail_call_reg(struct jit_ctx *ctx) * | $s4 | * +-------------------------+ * | $s5 | + * +-------------------------+ + * | tcc | + * +-------------------------+ + * | tcc_ptr | * +-------------------------+ <--BPF_REG_FP * | prog->aux->stack_depth | * | (optional) | @@ -88,22 +124,32 @@ static u8 tail_call_reg(struct jit_ctx *ctx) */ static void build_prologue(struct jit_ctx *ctx) { - int stack_adjust = 0, store_offset, bpf_stack_adjust; + int i, stack_adjust = 0, store_offset, bpf_stack_adjust; + const struct bpf_prog *prog = ctx->prog; + const bool is_main_prog = !bpf_is_subprog(prog); bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); - /* To store ra, fp, s0, s1, s2, s3, s4 and s5. */ + /* To store ra, fp, s0, s1, s2, s3, s4, s5 */ stack_adjust += sizeof(long) * 8; + /* To store tcc and tcc_ptr */ + stack_adjust += sizeof(long) * 2; + stack_adjust = round_up(stack_adjust, 16); stack_adjust += bpf_stack_adjust; + /* Reserve space for the move_imm + jirl instruction */ + for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++) + emit_insn(ctx, nop); + /* - * First instruction initializes the tail call count (TCC). - * On tail call we skip this instruction, and the TCC is - * passed in REG_TCC from the caller. + * First instruction initializes the tail call count (TCC) + * register to zero. On tail call we skip this instruction, + * and the TCC is passed in REG_TCC from the caller. 
*/ - emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT); + if (is_main_prog) + emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, 0); emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust); @@ -131,20 +177,13 @@ static void build_prologue(struct jit_ctx *ctx) store_offset -= sizeof(long); emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset); + prepare_bpf_tail_call_cnt(ctx, &store_offset); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust); if (bpf_stack_adjust) emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust); - /* - * Program contains calls and tail calls, so REG_TCC need - * to be saved across calls. - */ - if (seen_tail_call(ctx) && seen_call(ctx)) - move_reg(ctx, TCC_SAVED, REG_TCC); - else - emit_insn(ctx, nop); - ctx->stack_size = stack_adjust; } @@ -177,6 +216,16 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) load_offset -= sizeof(long); emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset); + /* + * When push into the stack, follow the order of tcc then tcc_ptr. + * When pop from the stack, first pop tcc_ptr then followed by tcc. + */ + load_offset -= 2 * sizeof(long); + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset); + + load_offset += sizeof(long); + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, load_offset); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust); if (!is_tail_call) { @@ -189,7 +238,7 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) * Call the next bpf prog and skip the first instruction * of TCC initialization. */ - emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1); + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 6); } } @@ -208,12 +257,10 @@ bool bpf_jit_supports_far_kfunc_call(void) return true; } -/* initialized on the first pass of build_body() */ -static int out_offset = -1; -static int emit_bpf_tail_call(struct jit_ctx *ctx) +static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn) { - int off; - u8 tcc = tail_call_reg(ctx); + int off, tc_ninsn = 0; + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size); u8 a1 = LOONGARCH_GPR_A1; u8 a2 = LOONGARCH_GPR_A2; u8 t1 = LOONGARCH_GPR_T1; @@ -222,7 +269,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) +#define jmp_offset (tc_ninsn - (cur_offset)) /* * a0: &ctx @@ -232,6 +279,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) * if (index >= array->map.max_entries) * goto out; */ + tc_ninsn = insn ? 
ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0]; off = offsetof(struct bpf_array, map.max_entries); emit_insn(ctx, ldwu, t1, a1, off); /* bgeu $a2, $t1, jmp_offset */ @@ -239,11 +287,15 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) goto toofar; /* - * if (--TCC < 0) - * goto out; + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) + * goto out; */ - emit_insn(ctx, addid, REG_TCC, tcc, -1); - if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off); + emit_insn(ctx, ldd, t3, REG_TCC, 0); + emit_insn(ctx, addid, t3, t3, 1); + emit_insn(ctx, std, t3, REG_TCC, 0); + emit_insn(ctx, addid, t2, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT); + if (emit_tailcall_jmp(ctx, BPF_JSGT, t3, t2, jmp_offset) < 0) goto toofar; /* @@ -263,15 +315,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_insn(ctx, ldd, t3, t2, off); __build_epilogue(ctx, true); - /* out: */ - if (out_offset == -1) - out_offset = cur_offset; - if (cur_offset != out_offset) { - pr_err_once("tail_call out_offset = %d, expected %d!\n", - cur_offset, out_offset); - return -1; - } - return 0; toofar: @@ -463,7 +506,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext u64 func_addr; bool func_addr_fixed, sign_extend; int i = insn - ctx->prog->insnsi; - int ret, jmp_offset; + int ret, jmp_offset, tcc_ptr_off; const u8 code = insn->code; const u8 cond = BPF_OP(code); const u8 t1 = LOONGARCH_GPR_T1; @@ -899,12 +942,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext /* function call */ case BPF_JMP | BPF_CALL: - mark_call(ctx); ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &func_addr, &func_addr_fixed); if (ret < 0) return ret; + if (insn->src_reg == BPF_PSEUDO_CALL) { + tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(ctx->stack_size); + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_SP, tcc_ptr_off); + } + move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0); @@ -915,8 +962,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext /* tail call */ case BPF_JMP | BPF_TAIL_CALL: - mark_tail_call(ctx); - if (emit_bpf_tail_call(ctx) < 0) + if (emit_bpf_tail_call(ctx, i) < 0) return -EINVAL; break; @@ -1180,12 +1226,528 @@ static int validate_code(struct jit_ctx *ctx) return -1; } + return 0; +} + +static int validate_ctx(struct jit_ctx *ctx) +{ + if (validate_code(ctx)) + return -1; + if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries)) return -1; return 0; } +static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 target) +{ + if (!target) { + pr_err("bpf_jit: jump target address is error\n"); + return -EFAULT; + } + + move_imm(ctx, LOONGARCH_GPR_T1, target, false); + emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0); + + return 0; +} + +static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call) +{ + int i; + struct jit_ctx ctx; + + ctx.idx = 0; + ctx.image = (union loongarch_instruction *)insns; + + if (!target) { + for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++) + emit_insn((&ctx), nop); + return 0; + } + + return emit_jump_and_link(&ctx, is_call ? 
LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target); +} + +static int emit_call(struct jit_ctx *ctx, u64 addr) +{ + return emit_jump_and_link(ctx, LOONGARCH_GPR_RA, addr); +} + +void *bpf_arch_text_copy(void *dst, void *src, size_t len) +{ + int ret; + + mutex_lock(&text_mutex); + ret = larch_insn_text_copy(dst, src, len); + mutex_unlock(&text_mutex); + + return ret ? ERR_PTR(-EINVAL) : dst; +} + +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, + void *old_addr, void *new_addr) +{ + int ret; + bool is_call = (poke_type == BPF_MOD_CALL); + u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; + u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; + + if (!is_kernel_text((unsigned long)ip) && + !is_bpf_text_address((unsigned long)ip)) + return -ENOTSUPP; + + ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call); + if (ret) + return ret; + + if (memcmp(ip, old_insns, LOONGARCH_LONG_JUMP_NBYTES)) + return -EFAULT; + + ret = emit_jump_or_nops(new_addr, ip, new_insns, is_call); + if (ret) + return ret; + + mutex_lock(&text_mutex); + if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES)) + ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES); + mutex_unlock(&text_mutex); + + return ret; +} + +int bpf_arch_text_invalidate(void *dst, size_t len) +{ + int i; + int ret = 0; + u32 *inst; + + inst = kvmalloc(len, GFP_KERNEL); + if (!inst) + return -ENOMEM; + + for (i = 0; i < (len / sizeof(u32)); i++) + inst[i] = INSN_BREAK; + + mutex_lock(&text_mutex); + if (larch_insn_text_copy(dst, inst, len)) + ret = -EINVAL; + mutex_unlock(&text_mutex); + + kvfree(inst); + + return ret; +} + +static void store_args(struct jit_ctx *ctx, int nargs, int args_off) +{ + int i; + + for (i = 0; i < nargs; i++) { + emit_insn(ctx, std, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off); + args_off -= 8; + } +} + +static void restore_args(struct jit_ctx *ctx, int nargs, int args_off) +{ + int i; + + for (i = 0; i < nargs; i++) { + emit_insn(ctx, ldd, LOONGARCH_GPR_A0 + i, LOONGARCH_GPR_FP, -args_off); + args_off -= 8; + } +} + +static int invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, + int args_off, int retval_off, int run_ctx_off, bool save_ret) +{ + int ret; + u32 *branch; + struct bpf_prog *p = l->link.prog; + int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); + + if (l->cookie) { + move_imm(ctx, LOONGARCH_GPR_T1, l->cookie, false); + emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off); + } else { + emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -run_ctx_off + cookie_off); + } + + /* arg1: prog */ + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false); + /* arg2: &run_ctx */ + emit_insn(ctx, addid, LOONGARCH_GPR_A1, LOONGARCH_GPR_FP, -run_ctx_off); + ret = emit_call(ctx, (const u64)bpf_trampoline_enter(p)); + if (ret) + return ret; + + /* store prog start time */ + move_reg(ctx, LOONGARCH_GPR_S1, LOONGARCH_GPR_A0); + + /* + * if (__bpf_prog_enter(prog) == 0) + * goto skip_exec_of_prog; + */ + branch = (u32 *)ctx->image + ctx->idx; + /* nop reserved for conditional jump */ + emit_insn(ctx, nop); + + /* arg1: &args_off */ + emit_insn(ctx, addid, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -args_off); + if (!p->jited) + move_imm(ctx, LOONGARCH_GPR_A1, (const s64)p->insnsi, false); + ret = emit_call(ctx, (const u64)p->bpf_func); + if (ret) + return ret; + + if (save_ret) { + emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); + emit_insn(ctx, std, regmap[BPF_REG_0], 
LOONGARCH_GPR_FP, -(retval_off - 8)); + } + + /* update branch with beqz */ + if (ctx->image) { + int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branch; + *branch = larch_insn_gen_beq(LOONGARCH_GPR_A0, LOONGARCH_GPR_ZERO, offset); + } + + /* arg1: prog */ + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)p, false); + /* arg2: prog start time */ + move_reg(ctx, LOONGARCH_GPR_A1, LOONGARCH_GPR_S1); + /* arg3: &run_ctx */ + emit_insn(ctx, addid, LOONGARCH_GPR_A2, LOONGARCH_GPR_FP, -run_ctx_off); + ret = emit_call(ctx, (const u64)bpf_trampoline_exit(p)); + + return ret; +} + +static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, + int args_off, int retval_off, int run_ctx_off, u32 **branches) +{ + int i; + + emit_insn(ctx, std, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_FP, -retval_off); + for (i = 0; i < tl->nr_links; i++) { + invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, run_ctx_off, true); + emit_insn(ctx, ldd, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -retval_off); + branches[i] = (u32 *)ctx->image + ctx->idx; + emit_insn(ctx, nop); + } +} + +void *arch_alloc_bpf_trampoline(unsigned int size) +{ + return bpf_prog_pack_alloc(size, jit_fill_hole); +} + +void arch_free_bpf_trampoline(void *image, unsigned int size) +{ + bpf_prog_pack_free(image, size); +} + +static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, + const struct btf_func_model *m, struct bpf_tramp_links *tlinks, + void *func_addr, u32 flags) +{ + int i, ret, save_ret; + int stack_size = 0, nargs = 0; + int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off; + bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT; + void *orig_call = func_addr; + struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; + struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; + struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; + u32 **branches = NULL; + + if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) + return -ENOTSUPP; + + /* + * FP + 8 [ RA to parent func ] return address to parent + * function + * FP + 0 [ FP of parent func ] frame pointer of parent + * function + * FP - 8 [ T0 to traced func ] return address of traced + * function + * FP - 16 [ FP of traced func ] frame pointer of traced + * function + * + * FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or + * BPF_TRAMP_F_RET_FENTRY_RET + * [ argN ] + * [ ... 
] + * FP - args_off [ arg1 ] + * + * FP - nargs_off [ regs count ] + * + * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG + * + * FP - run_ctx_off [ bpf_tramp_run_ctx ] + * + * FP - sreg_off [ callee saved reg ] + * + * FP - tcc_ptr_off [ tail_call_cnt_ptr ] + */ + + if (m->nr_args > LOONGARCH_MAX_REG_ARGS) + return -ENOTSUPP; + + if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) + return -ENOTSUPP; + + stack_size = 0; + + /* Room of trampoline frame to store return address and frame pointer */ + stack_size += 16; + + save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); + if (save_ret) { + /* Save BPF R0 and A0 */ + stack_size += 16; + retval_off = stack_size; + } + + /* Room of trampoline frame to store args */ + nargs = m->nr_args; + stack_size += nargs * 8; + args_off = stack_size; + + /* Room of trampoline frame to store args number */ + stack_size += 8; + nargs_off = stack_size; + + /* Room of trampoline frame to store ip address */ + if (flags & BPF_TRAMP_F_IP_ARG) { + stack_size += 8; + ip_off = stack_size; + } + + /* Room of trampoline frame to store struct bpf_tramp_run_ctx */ + stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8); + run_ctx_off = stack_size; + + stack_size += 8; + sreg_off = stack_size; + + /* Room of trampoline frame to store tail_call_cnt_ptr */ + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { + stack_size += 8; + tcc_ptr_off = stack_size; + } + + stack_size = round_up(stack_size, 16); + + if (is_struct_ops) { + /* + * For the trampoline called directly, just handle + * the frame of trampoline. + */ + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size); + emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size); + } else { + /* + * For the trampoline called from function entry, + * the frame of traced function and the frame of + * trampoline need to be considered. 
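Note on the frame layout documented at the top of __arch_prepare_bpf_trampoline above: every "FP - xxx_off" slot in the diagram is a running total that grows while stack_size is accumulated, and the finished frame is rounded up to 16 bytes. The stand-alone sketch below models that bookkeeping so the offsets can be checked by hand; the flag bits, the run_ctx size and all names are simplified stand-ins for illustration, not the kernel's BPF_TRAMP_F_* definitions.

#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel's BPF_TRAMP_F_* values. */
#define F_CALL_ORIG      (1u << 0)
#define F_RET_FENTRY_RET (1u << 1)
#define F_IP_ARG         (1u << 2)
#define F_TAIL_CALL_CTX  (1u << 3)

#define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

/* Model of how the trampoline stack offsets grow below FP. */
static int model_frame(int nargs, unsigned int flags, int run_ctx_bytes)
{
    int stack_size = 16;    /* saved return address + FP of the traced frame */
    int retval_off = 0, args_off, nargs_off, ip_off = 0;
    int run_ctx_off, sreg_off, tcc_ptr_off = 0;

    if (flags & (F_CALL_ORIG | F_RET_FENTRY_RET)) {
        stack_size += 16;                   /* BPF R0 and A0 */
        retval_off = stack_size;
    }
    stack_size += nargs * 8;
    args_off = stack_size;
    stack_size += 8;
    nargs_off = stack_size;
    if (flags & F_IP_ARG) {
        stack_size += 8;
        ip_off = stack_size;
    }
    stack_size += ROUND_UP(run_ctx_bytes, 8);
    run_ctx_off = stack_size;
    stack_size += 8;
    sreg_off = stack_size;
    if (flags & F_TAIL_CALL_CTX) {
        stack_size += 8;
        tcc_ptr_off = stack_size;
    }

    printf("retval=-%d args=-%d nargs=-%d ip=-%d run_ctx=-%d sreg=-%d tcc_ptr=-%d\n",
           retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off);
    return ROUND_UP(stack_size, 16);        /* final 16-byte aligned frame size */
}

int main(void)
{
    /* e.g. three arguments, original call plus tail-call context, 24-byte run_ctx */
    printf("stack_size=%d\n", model_frame(3, F_CALL_ORIG | F_TAIL_CALL_CTX, 24));
    return 0;
}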
+ */ + /* RA and FP for parent function */ + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -16); + emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8); + emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 16); + + /* RA and FP for traced function */ + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_size); + emit_insn(ctx, std, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size); + } + + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + emit_insn(ctx, std, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); + + /* callee saved register S1 to pass start time */ + emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); + + /* store ip address of the traced function */ + if (flags & BPF_TRAMP_F_IP_ARG) { + move_imm(ctx, LOONGARCH_GPR_T1, (const s64)func_addr, false); + emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -ip_off); + } + + /* store nargs number */ + move_imm(ctx, LOONGARCH_GPR_T1, nargs, false); + emit_insn(ctx, std, LOONGARCH_GPR_T1, LOONGARCH_GPR_FP, -nargs_off); + + store_args(ctx, nargs, args_off); + + /* To traced function */ + /* Ftrace jump skips 2 NOP instructions */ + if (is_kernel_text((unsigned long)orig_call)) + orig_call += LOONGARCH_FENTRY_NBYTES; + /* Direct jump skips 5 NOP instructions */ + else if (is_bpf_text_address((unsigned long)orig_call)) + orig_call += LOONGARCH_BPF_FENTRY_NBYTES; + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); + ret = emit_call(ctx, (const u64)__bpf_tramp_enter); + if (ret) + return ret; + } + + for (i = 0; i < fentry->nr_links; i++) { + ret = invoke_bpf_prog(ctx, fentry->links[i], args_off, retval_off, + run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET); + if (ret) + return ret; + } + if (fmod_ret->nr_links) { + branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *), GFP_KERNEL); + if (!branches) + return -ENOMEM; + + invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, run_ctx_off, branches); + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + restore_args(ctx, m->nr_args, args_off); + + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); + + ret = emit_call(ctx, (const u64)orig_call); + if (ret) + goto out; + emit_insn(ctx, std, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); + emit_insn(ctx, std, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); + im->ip_after_call = ctx->ro_image + ctx->idx; + /* Reserve space for the move_imm + jirl instruction */ + for (i = 0; i < LOONGARCH_LONG_JUMP_NINSNS; i++) + emit_insn(ctx, nop); + } + + for (i = 0; ctx->image && i < fmod_ret->nr_links; i++) { + int offset = (void *)(&ctx->image[ctx->idx]) - (void *)branches[i]; + *branches[i] = larch_insn_gen_bne(LOONGARCH_GPR_T1, LOONGARCH_GPR_ZERO, offset); + } + + for (i = 0; i < fexit->nr_links; i++) { + ret = invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, run_ctx_off, false); + if (ret) + goto out; + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + im->ip_epilogue = ctx->ro_image + ctx->idx; + move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); + ret = emit_call(ctx, (const u64)__bpf_tramp_exit); + if (ret) + goto out; + } + + if (flags & BPF_TRAMP_F_RESTORE_REGS) + restore_args(ctx, m->nr_args, args_off); + + if (save_ret) { + emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, 
-retval_off); + emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); + } + + emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); + + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + emit_insn(ctx, ldd, REG_TCC, LOONGARCH_GPR_FP, -tcc_ptr_off); + + if (is_struct_ops) { + /* trampoline called directly */ + emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size); + + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0); + } else { + /* trampoline called from function entry */ + emit_insn(ctx, ldd, LOONGARCH_GPR_T0, LOONGARCH_GPR_SP, stack_size - 8); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_size - 16); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_size); + + emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, 8); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0); + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16); + + if (flags & BPF_TRAMP_F_SKIP_FRAME) + /* return to parent function */ + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0); + else + /* return to traced function */ + emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0); + } + + ret = ctx->idx; +out: + kfree(branches); + + return ret; +} + +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, + void *ro_image_end, const struct btf_func_model *m, + u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) +{ + int ret, size; + void *image, *tmp; + struct jit_ctx ctx; + + size = ro_image_end - ro_image; + image = kvmalloc(size, GFP_KERNEL); + if (!image) + return -ENOMEM; + + ctx.image = (union loongarch_instruction *)image; + ctx.ro_image = (union loongarch_instruction *)ro_image; + ctx.idx = 0; + + jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image)); + ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags); + if (ret > 0 && validate_code(&ctx) < 0) { + ret = -EINVAL; + goto out; + } + + tmp = bpf_arch_text_copy(ro_image, image, size); + if (IS_ERR(tmp)) { + ret = PTR_ERR(tmp); + goto out; + } + + bpf_flush_icache(ro_image, ro_image_end); +out: + kvfree(image); + return ret < 0 ? ret : size; +} + +int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, void *func_addr) +{ + int ret; + struct jit_ctx ctx; + struct bpf_tramp_image im; + + ctx.image = NULL; + ctx.idx = 0; + + ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags); + + /* Page align */ + return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE); +} + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { bool tmp_blinded = false, extra_pass = false; @@ -1288,7 +1850,7 @@ skip_init_ctx: build_epilogue(&ctx); /* 3. Extra pass to validate JITed code */ - if (validate_code(&ctx)) { + if (validate_ctx(&ctx)) { bpf_jit_binary_free(header); prog = orig_prog; goto out_offset; @@ -1342,7 +1904,6 @@ out: if (tmp_blinded) bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); - out_offset = -1; return prog; @@ -1354,6 +1915,16 @@ out_free: goto out_offset; } +bool bpf_jit_bypass_spec_v1(void) +{ + return true; +} + +bool bpf_jit_bypass_spec_v4(void) +{ + return true; +} + /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. 
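arch_bpf_trampoline_size() and arch_prepare_bpf_trampoline() above share one generator: the sizing call runs __arch_prepare_bpf_trampoline() with ctx.image left NULL so instructions are only counted, and the real build then emits into a scratch buffer that bpf_arch_text_copy() moves into the read-only image. The stand-alone sketch below shows that NULL-buffer sizing pattern in miniature; the types, names and the dummy instruction word are invented for illustration, not the LoongArch JIT's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy emitter: counts instructions when buf is NULL, writes them otherwise. */
struct emit_ctx {
    uint32_t *buf;          /* NULL => sizing pass */
    unsigned int idx;
};

static void emit(struct emit_ctx *ctx, uint32_t insn)
{
    if (ctx->buf)
        ctx->buf[ctx->idx] = insn;
    ctx->idx++;
}

/* Stand-in for the real generator: emit a fixed dummy sequence. */
static unsigned int build_trampoline(struct emit_ctx *ctx)
{
    for (int i = 0; i < 7; i++)
        emit(ctx, 0x03400000u);             /* placeholder 32-bit opcode */
    return ctx->idx;                        /* number of instructions emitted */
}

int main(void)
{
    struct emit_ctx size_pass = { .buf = NULL, .idx = 0 };
    unsigned int ninsns = build_trampoline(&size_pass);

    /* Second pass emits into a buffer sized from the first pass. */
    struct emit_ctx emit_pass = { .buf = calloc(ninsns, sizeof(uint32_t)), .idx = 0 };
    build_trampoline(&emit_pass);

    printf("trampoline: %u instructions, %zu bytes\n",
           ninsns, ninsns * sizeof(uint32_t));
    free(emit_pass.buf);
    return 0;
}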
*/ bool bpf_jit_supports_subprog_tailcalls(void) { diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index f9c569f53949..5697158fd164 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -18,6 +18,7 @@ struct jit_ctx { u32 *offset; int num_exentries; union loongarch_instruction *image; + union loongarch_instruction *ro_image; u32 stack_size; }; @@ -308,3 +309,8 @@ static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch return -EINVAL; } + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index ccd2c5e135c6..d8316f993482 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -36,7 +36,7 @@ endif # VDSO linker flags. ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ - $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T + $(filter -E%,$(KBUILD_CFLAGS)) -shared --build-id -T # # Shared build commands. diff --git a/arch/m68k/coldfire/gpio.c b/arch/m68k/coldfire/gpio.c index 30e5a4ed799d..e2f7af1facb2 100644 --- a/arch/m68k/coldfire/gpio.c +++ b/arch/m68k/coldfire/gpio.c @@ -160,7 +160,7 @@ static struct gpio_chip mcfgpio_chip = { .direction_input = mcfgpio_direction_input, .direction_output = mcfgpio_direction_output, .get = mcfgpio_get_value, - .set_rv = mcfgpio_set_value, + .set = mcfgpio_set_value, .to_irq = mcfgpio_to_irq, .base = 0, .ngpio = MCFGPIO_PIN_MAX, diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f4a4f82eed98..caf508f6e9ec 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -562,6 +562,7 @@ config MIPS_MALTA select MIPS_L1_CACHE_SHIFT_6 select MIPS_MSC select PCI_GT64XXX_PCI0 + select RTC_MC146818_LIB select SMP_UP if SMP select SWAP_IO_SPACE select SYS_HAS_CPU_MIPS32_R1 @@ -1836,6 +1837,7 @@ config CPU_LOONGSON2EF select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_HUGEPAGES + select RTC_MC146818_LIB config CPU_LOONGSON32 bool diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c index 194034eba75f..e79e26ffac99 100644 --- a/arch/mips/alchemy/common/gpiolib.c +++ b/arch/mips/alchemy/common/gpiolib.c @@ -101,7 +101,7 @@ struct gpio_chip alchemy_gpio_chip[] = { .direction_input = gpio1_direction_input, .direction_output = gpio1_direction_output, .get = gpio1_get, - .set_rv = gpio1_set, + .set = gpio1_set, .to_irq = gpio1_to_irq, .base = ALCHEMY_GPIO1_BASE, .ngpio = ALCHEMY_GPIO1_NUM, @@ -111,7 +111,7 @@ struct gpio_chip alchemy_gpio_chip[] = { .direction_input = gpio2_direction_input, .direction_output = gpio2_direction_output, .get = gpio2_get, - .set_rv = gpio2_set, + .set = gpio2_set, .to_irq = gpio2_to_irq, .base = ALCHEMY_GPIO2_BASE, .ngpio = ALCHEMY_GPIO2_NUM, @@ -151,7 +151,7 @@ static struct gpio_chip au1300_gpiochip = { .direction_input = alchemy_gpic_dir_input, .direction_output = alchemy_gpic_dir_output, .get = alchemy_gpic_get, - .set_rv = alchemy_gpic_set, + .set = alchemy_gpic_set, .to_irq = alchemy_gpic_gpio_to_irq, .base = AU1300_GPIO_BASE, .ngpio = AU1300_GPIO_NUM, diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c index e7a53cd0dec5..ff45a6989c3a 100644 --- a/arch/mips/bcm63xx/gpio.c +++ b/arch/mips/bcm63xx/gpio.c @@ -131,7 +131,7 @@ static struct gpio_chip bcm63xx_gpio_chip = { .direction_input = bcm63xx_gpio_direction_input, .direction_output = bcm63xx_gpio_direction_output, .get = bcm63xx_gpio_get, 
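The .set_rv -> .set changes in these GPIO hunks (coldfire and alchemy above, bcm63xx just below, txx9 further down) only rename the callback field: the drivers' setters already use the int-returning prototype, so errors can be propagated instead of silently dropped. A stripped-down sketch of that callback shape follows; the structure is a toy stand-in for illustration, not the kernel's struct gpio_chip.

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for a GPIO chip callback table (illustration only). */
struct toy_gpio_chip {
    int ngpio;
    /* setter reports success or a negative errno instead of returning void */
    int (*set)(struct toy_gpio_chip *chip, unsigned int offset, int value);
};

static int toy_gpio_set(struct toy_gpio_chip *chip, unsigned int offset, int value)
{
    if (offset >= (unsigned int)chip->ngpio)
        return -EINVAL;                     /* failure is now visible to callers */
    printf("gpio %u <- %d\n", offset, value);
    return 0;
}

static struct toy_gpio_chip chip = {
    .ngpio = 8,
    .set   = toy_gpio_set,                  /* was the transitional .set_rv slot */
};

int main(void)
{
    return chip.set(&chip, 3, 1) ? 1 : 0;
}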
- .set_rv = bcm63xx_gpio_set, + .set = bcm63xx_gpio_set, .base = 0, }; diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 196c44fa72d9..8473c4671702 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -54,10 +54,10 @@ UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS) # Compressed vmlinux images # -extra-y += vmlinux.bin.bz2 -extra-y += vmlinux.bin.gz -extra-y += vmlinux.bin.lzma -extra-y += vmlinux.bin.lzo +targets += vmlinux.bin.bz2 +targets += vmlinux.bin.gz +targets += vmlinux.bin.lzma +targets += vmlinux.bin.lzo $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE $(call if_changed,bzip2) diff --git a/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts b/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts index 6898b2d8267d..9fc1a1b0a81b 100644 --- a/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts +++ b/arch/mips/boot/dts/mobileye/eyeq5-epm5.dts @@ -21,3 +21,11 @@ <0x8 0x02000000 0x0 0x7E000000>; }; }; + +&i2c2 { + temperature-sensor@48 { + compatible = "ti,tmp112"; + reg = <0x48>; + label = "U60"; + }; +}; diff --git a/arch/mips/boot/dts/mobileye/eyeq5.dtsi b/arch/mips/boot/dts/mobileye/eyeq5.dtsi index a84e6e720619..36a73e8a63a1 100644 --- a/arch/mips/boot/dts/mobileye/eyeq5.dtsi +++ b/arch/mips/boot/dts/mobileye/eyeq5.dtsi @@ -110,6 +110,81 @@ ranges; compatible = "simple-bus"; + i2c0: i2c@300000 { + compatible = "mobileye,eyeq5-i2c", "arm,primecell"; + reg = <0 0x300000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 1 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <400000>; /* Fast mode */ + #address-cells = <1>; + #size-cells = <0>; + clocks = <&olb 35>, <&olb EQ5C_PER_I2C>; + clock-names = "i2cclk", "apb_pclk"; + resets = <&olb 0 13>; + i2c-transfer-timeout-us = <10000>; + mobileye,olb = <&olb 0>; + }; + + i2c1: i2c@400000 { + compatible = "mobileye,eyeq5-i2c", "arm,primecell"; + reg = <0 0x400000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 2 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <400000>; /* Fast mode */ + #address-cells = <1>; + #size-cells = <0>; + clocks = <&olb 35>, <&olb EQ5C_PER_I2C>; + clock-names = "i2cclk", "apb_pclk"; + resets = <&olb 0 14>; + i2c-transfer-timeout-us = <10000>; + mobileye,olb = <&olb 1>; + }; + + i2c2: i2c@500000 { + compatible = "mobileye,eyeq5-i2c", "arm,primecell"; + reg = <0 0x500000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <400000>; /* Fast mode */ + #address-cells = <1>; + #size-cells = <0>; + clocks = <&olb 35>, <&olb EQ5C_PER_I2C>; + clock-names = "i2cclk", "apb_pclk"; + resets = <&olb 0 15>; + i2c-transfer-timeout-us = <10000>; + mobileye,olb = <&olb 2>; + }; + + i2c3: i2c@600000 { + compatible = "mobileye,eyeq5-i2c", "arm,primecell"; + reg = <0 0x600000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <400000>; /* Fast mode */ + #address-cells = <1>; + #size-cells = <0>; + clocks = <&olb 35>, <&olb EQ5C_PER_I2C>; + clock-names = "i2cclk", "apb_pclk"; + resets = <&olb 0 16>; + i2c-transfer-timeout-us = <10000>; + mobileye,olb = <&olb 3>; + }; + + i2c4: i2c@700000 { + compatible = "mobileye,eyeq5-i2c", "arm,primecell"; + reg = <0 0x700000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 5 IRQ_TYPE_LEVEL_HIGH>; + clock-frequency = <400000>; /* Fast mode */ + #address-cells = <1>; + #size-cells = <0>; + clocks = <&olb 35>, <&olb EQ5C_PER_I2C>; + clock-names = "i2cclk", "apb_pclk"; + resets = <&olb 0 17>; + i2c-transfer-timeout-us = <10000>; + 
mobileye,olb = <&olb 4>; + }; + uart0: serial@800000 { compatible = "arm,pl011", "arm,primecell"; reg = <0 0x800000 0x0 0x1000>; @@ -178,6 +253,58 @@ clocks = <&olb EQ5C_CPU_CORE0>; }; }; + + emmc: mmc@2200000 { + compatible = "mobileye,eyeq-sd4hc", "cdns,sd4hc"; + reg = <0 0x2200000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 10 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&olb EQ5C_PER_EMMC>; + bus-width = <8>; + max-frequency = <200000000>; + mmc-ddr-1_8v; + sd-uhs-ddr50; + mmc-hs200-1_8v; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + + cdns,phy-input-delay-legacy = <4>; + cdns,phy-input-delay-mmc-highspeed = <2>; + cdns,phy-input-delay-mmc-ddr = <3>; + cdns,phy-dll-delay-sdclk = <32>; + cdns,phy-dll-delay-sdclk-hsmmc = <32>; + cdns,phy-dll-delay-strobe = <32>; + }; + + gpio0: gpio@1400000 { + compatible = "mobileye,eyeq5-gpio"; + reg = <0x0 0x1400000 0x0 0x1000>; + gpio-bank = <0>; + ngpios = <29>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 14 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&olb 0 0 29>; + interrupt-controller; + #interrupt-cells = <2>; + resets = <&olb 0 26>; + }; + + gpio1: gpio@1500000 { + compatible = "mobileye,eyeq5-gpio"; + reg = <0x0 0x1500000 0x0 0x1000>; + gpio-bank = <1>; + ngpios = <23>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 14 IRQ_TYPE_LEVEL_HIGH>; + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&olb 0 29 23>; + interrupt-controller; + #interrupt-cells = <2>; + resets = <&olb 0 26>; + }; }; }; diff --git a/arch/mips/boot/dts/mobileye/eyeq6h.dtsi b/arch/mips/boot/dts/mobileye/eyeq6h.dtsi index dabd5ed778b7..5ae939d25ea8 100644 --- a/arch/mips/boot/dts/mobileye/eyeq6h.dtsi +++ b/arch/mips/boot/dts/mobileye/eyeq6h.dtsi @@ -109,6 +109,28 @@ clock-names = "ref"; }; + emmc: mmc@d8010000 { + compatible = "mobileye,eyeq-sd4hc", "cdns,sd4hc"; + reg = <0 0xd8010000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SHARED 91 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&olb_south EQ6HC_SOUTH_DIV_EMMC>; + bus-width = <8>; + max-frequency = <200000000>; + mmc-ddr-1_8v; + sd-uhs-ddr50; + mmc-hs200-1_8v; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + + cdns,phy-input-delay-legacy = <4>; + cdns,phy-input-delay-mmc-highspeed = <2>; + cdns,phy-input-delay-mmc-ddr = <3>; + cdns,phy-dll-delay-sdclk = <32>; + cdns,phy-dll-delay-sdclk-hsmmc = <32>; + cdns,phy-dll-delay-strobe = <32>; + }; + olb_south: system-controller@d8013000 { compatible = "mobileye,eyeq6h-south-olb", "syscon"; reg = <0x0 0xd8013000 0x0 0x1000>; diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi index 61dcfa5b6ca7..c1ca03a27b6c 100644 --- a/arch/mips/boot/dts/qca/ar9132.dtsi +++ b/arch/mips/boot/dts/qca/ar9132.dtsi @@ -156,6 +156,15 @@ #address-cells = <1>; #size-cells = <0>; }; + + wifi: wifi@180c0000 { + compatible = "qca,ar9130-wifi"; + reg = <0x180c0000 0x230000>; + + interrupts = <2>; + + status = "disabled"; + }; }; usb_phy: usb-phy { diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts index f894fe17816b..a7901bb040ce 100644 --- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts +++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts @@ -108,3 +108,7 @@ }; }; }; + +&wifi { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi index 768ac0f869b1..6eb84a26a20f 100644 --- a/arch/mips/boot/dts/qca/ar9331.dtsi +++ b/arch/mips/boot/dts/qca/ar9331.dtsi @@ -285,6 +285,15 @@ 
status = "disabled"; }; + + wifi: wifi@18100000 { + compatible = "qca,ar9330-wifi"; + reg = <0x18100000 0x20000>; + + interrupts = <2>; + + status = "disabled"; + }; }; usb_phy: usb-phy { diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts index c857cd22f7db..08e728b8ced8 100644 --- a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts +++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts @@ -97,3 +97,7 @@ &phy_port4 { status = "okay"; }; + +&wifi { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts index 7affa58d4fa6..37a74aabe4b4 100644 --- a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts +++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts @@ -98,3 +98,7 @@ reg = <0>; }; }; + +&wifi { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts index 8904aa917a6e..1450419024cb 100644 --- a/arch/mips/boot/dts/qca/ar9331_omega.dts +++ b/arch/mips/boot/dts/qca/ar9331_omega.dts @@ -74,3 +74,7 @@ reg = <0>; }; }; + +&wifi { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts index dc65ebd60bbc..5786a827c000 100644 --- a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts +++ b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts @@ -106,3 +106,7 @@ &phy_port4 { status = "okay"; }; + +&wifi { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts index 10b9759228b7..a7108c803eb3 100644 --- a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts +++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts @@ -114,3 +114,7 @@ reg = <0>; }; }; + +&wifi { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts index 7743d014631a..0bfb1dde9764 100644 --- a/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts +++ b/arch/mips/boot/dts/ralink/gardena_smart_gateway_mt7688.dts @@ -56,7 +56,7 @@ led-power-green { label = "smartgw:power:green"; gpios = <&gpio 19 GPIO_ACTIVE_HIGH>; - default-state = "off"; + linux,default-trigger = "timer"; }; led-power-red { diff --git a/arch/mips/boot/dts/ralink/mt7628a.dtsi b/arch/mips/boot/dts/ralink/mt7628a.dtsi index 0212700c4fb4..5d7a6cfa9e2b 100644 --- a/arch/mips/boot/dts/ralink/mt7628a.dtsi +++ b/arch/mips/boot/dts/ralink/mt7628a.dtsi @@ -33,7 +33,7 @@ #size-cells = <1>; sysc: syscon@0 { - compatible = "ralink,mt7628-sysc", "syscon"; + compatible = "ralink,mt7628-sysc", "ralink,mt7688-sysc", "syscon"; reg = <0x0 0x60>; #clock-cells = <1>; #reset-cells = <1>; @@ -134,13 +134,8 @@ watchdog: watchdog@100 { compatible = "mediatek,mt7621-wdt"; - reg = <0x100 0x30>; - - resets = <&sysc 8>; - reset-names = "wdt"; - - interrupt-parent = <&intc>; - interrupts = <24>; + reg = <0x100 0x100>; + mediatek,sysctl = <&sysc>; status = "disabled"; }; diff --git a/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts b/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts index 6789bf374044..6f6a05d4088e 100644 --- a/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts +++ b/arch/mips/boot/dts/realtek/cameo-rtl9302c-2x-rtl8224-2xge.dts @@ -71,3 +71,99 @@ }; }; }; + +&mdio0 { + /* External RTL8224 */ + phy0: ethernet-phy@0 { + reg = <0>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; + phy1: ethernet-phy@1 { + 
reg = <1>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; + phy2: ethernet-phy@2 { + reg = <2>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; + phy3: ethernet-phy@3 { + reg = <3>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; +}; + +&mdio1 { + /* External RTL8224 */ + phy4: ethernet-phy@0 { + reg = <0>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; + phy5: ethernet-phy@1 { + reg = <1>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; + phy6: ethernet-phy@2 { + reg = <2>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; + phy7: ethernet-phy@3 { + reg = <3>; + compatible = "ethernet-phy-ieee802.3-c45"; + }; +}; + +&switch0 { + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + phy-handle = <&phy0>; + phy-mode = "usxgmii"; + }; + port@1 { + reg = <1>; + phy-handle = <&phy1>; + phy-mode = "usxgmii"; + }; + port@2 { + reg = <2>; + phy-handle = <&phy2>; + phy-mode = "usxgmii"; + }; + port@3 { + reg = <3>; + phy-handle = <&phy3>; + phy-mode = "usxgmii"; + }; + port@16 { + reg = <16>; + phy-handle = <&phy4>; + phy-mode = "usxgmii"; + }; + port@17 { + reg = <17>; + phy-handle = <&phy5>; + phy-mode = "usxgmii"; + }; + port@18 { + reg = <18>; + phy-handle = <&phy6>; + phy-mode = "usxgmii"; + }; + port@19 { + reg = <19>; + phy-handle = <&phy7>; + phy-mode = "usxgmii"; + }; + port@24{ + reg = <24>; + phy-mode = "10gbase-r"; + }; + port@25{ + reg = <25>; + phy-mode = "10gbase-r"; + }; + }; +}; diff --git a/arch/mips/boot/dts/realtek/rtl930x.dtsi b/arch/mips/boot/dts/realtek/rtl930x.dtsi index 101bab72a95f..24e262e2dc2a 100644 --- a/arch/mips/boot/dts/realtek/rtl930x.dtsi +++ b/arch/mips/boot/dts/realtek/rtl930x.dtsi @@ -48,6 +48,10 @@ #address-cells = <1>; #size-cells = <1>; + interrupt-parent = <&intc>; + interrupts = <23>, <24>; + interrupt-names = "switch", "nic"; + reboot@c { compatible = "syscon-reboot"; reg = <0x0c 0x4>; @@ -138,6 +142,33 @@ clocks = <&lx_clk>; }; + watchdog0: watchdog@3260 { + compatible = "realtek,rtl9300-wdt"; + reg = <0x3260 0xc>; + + realtek,reset-mode = "soc"; + + clocks = <&lx_clk>; + timeout-sec = <30>; + + interrupt-parent = <&intc>; + interrupt-names = "phase1", "phase2"; + interrupts = <5>, <6>; + }; + + gpio0: gpio@3300 { + compatible = "realtek,rtl9300-gpio", "realtek,otto-gpio"; + reg = <0x3300 0x1c>, <0x3338 0x8>; + gpio-controller; + #gpio-cells = <2>; + ngpios = <24>; + + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&intc>; + interrupts = <13>; + }; + snand: spi@1a400 { compatible = "realtek,rtl9301-snand"; reg = <0x1a400 0x44>; diff --git a/arch/mips/configs/eyeq5_defconfig b/arch/mips/configs/eyeq5_defconfig index ff7af5dc6d9d..6688f56aba1c 100644 --- a/arch/mips/configs/eyeq5_defconfig +++ b/arch/mips/configs/eyeq5_defconfig @@ -19,20 +19,18 @@ CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_EYEQ=y -CONFIG_MACH_EYEQ5=y CONFIG_FIT_IMAGE_FDT_EPM5=y -CONFIG_PAGE_SIZE_16KB=y CONFIG_MIPS_CPS=y CONFIG_CPU_HAS_MSA=y CONFIG_NR_CPUS=16 CONFIG_MIPS_RAW_APPENDED_DTB=y CONFIG_JUMP_LABEL=y +CONFIG_PAGE_SIZE_16KB=y CONFIG_COMPAT_32BIT_TIME=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_TRIM_UNUSED_KSYMS=y # CONFIG_COMPAT_BRK is not set -CONFIG_SPARSEMEM_MANUAL=y CONFIG_USERFAULTFD=y CONFIG_NET=y CONFIG_PACKET=y @@ -64,8 +62,14 @@ CONFIG_CAN_M_CAN=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_NOMADIK=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_PINCTRL=y +CONFIG_GPIOLIB=y 
+CONFIG_GPIO_NOMADIK=y +CONFIG_SENSORS_LM75=y CONFIG_MFD_SYSCON=y CONFIG_HID_A4TECH=y CONFIG_HID_BELKIN=y @@ -79,6 +83,8 @@ CONFIG_HID_MICROSOFT=y CONFIG_HID_MONTEREY=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_CADENCE=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_RESET_CONTROLLER=y # CONFIG_NVMEM is not set diff --git a/arch/mips/configs/eyeq6_defconfig b/arch/mips/configs/eyeq6_defconfig index 0afbb45a78e8..0a00a201937b 100644 --- a/arch/mips/configs/eyeq6_defconfig +++ b/arch/mips/configs/eyeq6_defconfig @@ -82,6 +82,8 @@ CONFIG_HID_MICROSOFT=y CONFIG_HID_MONTEREY=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_CADENCE=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_RESET_CONTROLLER=y # CONFIG_NVMEM is not set diff --git a/arch/mips/configs/loongson2k_defconfig b/arch/mips/configs/loongson2k_defconfig index 6aea6a5b1b66..0cc665d3ea34 100644 --- a/arch/mips/configs/loongson2k_defconfig +++ b/arch/mips/configs/loongson2k_defconfig @@ -256,6 +256,17 @@ CONFIG_SND_HDA_INTEL=y CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_PATCH_LOADER=y CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_REALTEK_LIB=y +CONFIG_SND_HDA_CODEC_ALC260=y +CONFIG_SND_HDA_CODEC_ALC262=y +CONFIG_SND_HDA_CODEC_ALC268=y +CONFIG_SND_HDA_CODEC_ALC269=y +CONFIG_SND_HDA_CODEC_ALC662=y +CONFIG_SND_HDA_CODEC_ALC680=y +CONFIG_SND_HDA_CODEC_ALC861=y +CONFIG_SND_HDA_CODEC_ALC861VD=y +CONFIG_SND_HDA_CODEC_ALC880=y +CONFIG_SND_HDA_CODEC_ALC882=y CONFIG_SND_HDA_CODEC_ANALOG=y CONFIG_SND_HDA_CODEC_SIGMATEL=y CONFIG_SND_HDA_CODEC_VIA=y diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index b00344e0d899..240efff37d98 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -291,11 +291,24 @@ CONFIG_SND_SEQ_DUMMY=m # CONFIG_SND_ISA is not set CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_REALTEK=m CONFIG_SND_HDA_CODEC_REALTEK_LIB=m +CONFIG_SND_HDA_CODEC_ALC260=m +CONFIG_SND_HDA_CODEC_ALC262=m +CONFIG_SND_HDA_CODEC_ALC268=m CONFIG_SND_HDA_CODEC_ALC269=m +CONFIG_SND_HDA_CODEC_ALC662=m +CONFIG_SND_HDA_CODEC_ALC680=m +CONFIG_SND_HDA_CODEC_ALC861=m +CONFIG_SND_HDA_CODEC_ALC861VD=m +CONFIG_SND_HDA_CODEC_ALC880=m +CONFIG_SND_HDA_CODEC_ALC882=m CONFIG_SND_HDA_CODEC_SIGMATEL=m CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_HDMI_GENERIC=m +CONFIG_SND_HDA_CODEC_HDMI_INTEL=m +CONFIG_SND_HDA_CODEC_HDMI_ATI=m +CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=m CONFIG_SND_HDA_CODEC_CONEXANT=m # CONFIG_SND_USB is not set CONFIG_HIDRAW=y diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a600670d00e9..fd60837ce50b 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -123,6 +123,7 @@ extern struct cpuinfo_mips cpu_data[]; extern void cpu_probe(void); extern void cpu_report(void); +extern void cpu_disable_mmid(void); extern const char *__cpu_name[]; #define cpu_name_string() __cpu_name[raw_smp_processor_id()] diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index fbc71ddcf0f6..8c460ce01ffe 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -11,20 +11,6 @@ #include <asm/page.h> -#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, - unsigned long len) -{ - unsigned long task_size = STACK_TOP; - - if (len > task_size) - return -ENOMEM; - if (task_size - len < addr) - return 
-EINVAL; - return 0; -} - #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, diff --git a/arch/mips/include/asm/mach-generic/mc146818rtc.h b/arch/mips/include/asm/mach-generic/mc146818rtc.h index 9c72e540ff56..249279b0494d 100644 --- a/arch/mips/include/asm/mach-generic/mc146818rtc.h +++ b/arch/mips/include/asm/mach-generic/mc146818rtc.h @@ -29,8 +29,4 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr) #define RTC_ALWAYS_BCD 0 -#ifndef mc146818_decode_year -#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900) -#endif - #endif /* __ASM_MACH_GENERIC_MC146818RTC_H */ diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h index ce4e4c6e09e2..50d487a4c95e 100644 --- a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h @@ -5,7 +5,7 @@ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> * 2004-2007 Stanislaw Skowronek <skylark@unaligned.org> * 2009 Johannes Dickgreber <tanzy@gmx.de> - * 2015 Joshua Kinard <kumba@gentoo.org> + * 2015 Joshua Kinard <linux@kumba.dev> * */ #ifndef __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H diff --git a/arch/mips/include/asm/mach-ip30/spaces.h b/arch/mips/include/asm/mach-ip30/spaces.h index c8a302dfbe05..d381b93d6ad3 100644 --- a/arch/mips/include/asm/mach-ip30/spaces.h +++ b/arch/mips/include/asm/mach-ip30/spaces.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2016 Joshua Kinard <kumba@gentoo.org> + * Copyright (C) 2016 Joshua Kinard <linux@kumba.dev> * */ #ifndef _ASM_MACH_IP30_SPACES_H diff --git a/arch/mips/include/asm/mach-jazz/mc146818rtc.h b/arch/mips/include/asm/mach-jazz/mc146818rtc.h index 987f727afe25..639bff8ebca3 100644 --- a/arch/mips/include/asm/mach-jazz/mc146818rtc.h +++ b/arch/mips/include/asm/mach-jazz/mc146818rtc.h @@ -33,6 +33,4 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr) #define RTC_ALWAYS_BCD 0 -#define mc146818_decode_year(year) ((year) + 1980) - #endif /* __ASM_MACH_JAZZ_MC146818RTC_H */ diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h index c2e0acb755cd..dd9f621d0204 100644 --- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h @@ -99,5 +99,8 @@ extern __iomem void *ltq_cgu_membase; extern void ltq_pmu_enable(unsigned int module); extern void ltq_pmu_disable(unsigned int module); +/* VMMC */ +extern unsigned int *ltq_get_cp1_base(void); + #endif /* CONFIG_SOC_TYPE_XWAY */ #endif /* _LTQ_XWAY_H__ */ diff --git a/arch/mips/include/asm/mach-malta/mc146818rtc.h b/arch/mips/include/asm/mach-malta/mc146818rtc.h index e8cc7fdf7415..7da2c0ea55da 100644 --- a/arch/mips/include/asm/mach-malta/mc146818rtc.h +++ b/arch/mips/include/asm/mach-malta/mc146818rtc.h @@ -31,6 +31,4 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr) #define RTC_ALWAYS_BCD 0 -#define mc146818_decode_year(year) ((year) < 70 ? 
(year) + 2000 : (year) + 1900) - #endif /* __ASM_MACH_MALTA_MC146818RTC_H */ diff --git a/arch/mips/include/asm/mach-rm/mc146818rtc.h b/arch/mips/include/asm/mach-rm/mc146818rtc.h deleted file mode 100644 index a074f4f84f75..000000000000 --- a/arch/mips/include/asm/mach-rm/mc146818rtc.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2004 by Ralf Baechle - * - * RTC routines for PC style attached Dallas chip with ARC epoch. - */ -#ifndef __ASM_MACH_RM_MC146818RTC_H -#define __ASM_MACH_RM_MC146818RTC_H - -#ifdef CONFIG_CPU_BIG_ENDIAN -#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900) -#else -#define mc146818_decode_year(year) ((year) + 1980) -#endif - -#include <asm/mach-generic/mc146818rtc.h> - -#endif /* __ASM_MACH_RM_MC146818RTC_H */ diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h index cbf5cec345f1..ac52a30b4161 100644 --- a/arch/mips/include/asm/mc146818-time.h +++ b/arch/mips/include/asm/mc146818-time.h @@ -8,112 +8,21 @@ #ifndef __ASM_MC146818_TIME_H #define __ASM_MC146818_TIME_H -#include <linux/bcd.h> #include <linux/mc146818rtc.h> #include <linux/time.h> -/* - * For check timing call set_rtc_mmss() 500ms; used in timer interrupt. - */ -#define USEC_AFTER 500000 -#define USEC_BEFORE 500000 - -/* - * In order to set the CMOS clock precisely, set_rtc_mmss has to be - * called 500 ms after the second nowtime has started, because when - * nowtime is written into the registers of the CMOS clock, it will - * jump to the next second precisely 500 ms later. Check the Motorola - * MC146818A or Dallas DS12887 data sheet for details. - * - * BUG: This routine does not handle hour overflow properly; it just - * sets the minutes. Usually you'll only notice that after reboot! - */ -static inline int mc146818_set_rtc_mmss(unsigned long nowtime) -{ - int real_seconds, real_minutes, cmos_minutes; - unsigned char save_control, save_freq_select; - int retval = 0; - unsigned long flags; - - spin_lock_irqsave(&rtc_lock, flags); - save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ - CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - cmos_minutes = CMOS_READ(RTC_MINUTES); - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - cmos_minutes = bcd2bin(cmos_minutes); - - /* - * since we're only adjusting minutes and seconds, - * don't interfere with hour overflow. 
This avoids - * messing with unknown time zones but requires your - * RTC not to be off by more than 15 minutes - */ - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; - if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) - real_minutes += 30; /* correct for half hour time zone */ - real_minutes %= 60; - - if (abs(real_minutes - cmos_minutes) < 30) { - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - real_seconds = bin2bcd(real_seconds); - real_minutes = bin2bcd(real_minutes); - } - CMOS_WRITE(real_seconds, RTC_SECONDS); - CMOS_WRITE(real_minutes, RTC_MINUTES); - } else { - printk_once(KERN_NOTICE - "set_rtc_mmss: can't update from %d to %d\n", - cmos_minutes, real_minutes); - retval = -1; - } - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... -- Markus Kuhn - */ - CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - spin_unlock_irqrestore(&rtc_lock, flags); - - return retval; -} - +#ifdef CONFIG_RTC_MC146818_LIB static inline time64_t mc146818_get_cmos_time(void) { - unsigned int year, mon, day, hour, min, sec; - unsigned long flags; - - spin_lock_irqsave(&rtc_lock, flags); - - do { - sec = CMOS_READ(RTC_SECONDS); - min = CMOS_READ(RTC_MINUTES); - hour = CMOS_READ(RTC_HOURS); - day = CMOS_READ(RTC_DAY_OF_MONTH); - mon = CMOS_READ(RTC_MONTH); - year = CMOS_READ(RTC_YEAR); - } while (sec != CMOS_READ(RTC_SECONDS)); + struct rtc_time tm; - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - sec = bcd2bin(sec); - min = bcd2bin(min); - hour = bcd2bin(hour); - day = bcd2bin(day); - mon = bcd2bin(mon); - year = bcd2bin(year); + if (mc146818_get_time(&tm, 1000)) { + pr_err("Unable to read current time from RTC\n"); + return 0; } - spin_unlock_irqrestore(&rtc_lock, flags); - year = mc146818_decode_year(year); - return mktime64(year, mon, day, hour, min, sec); + return rtc_tm_to_time64(&tm); } +#endif /* CONFIG_RTC_MC146818_LIB */ #endif /* __ASM_MC146818_TIME_H */ diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h index 917009b80e69..1fffd47a4564 100644 --- a/arch/mips/include/asm/mips-cps.h +++ b/arch/mips/include/asm/mips-cps.h @@ -258,6 +258,8 @@ static inline bool mips_cps_multicluster_cpus(void) /** * mips_cps_first_online_in_cluster() - Detect if CPU is first online in cluster + * @first_cpu: The first other online CPU in cluster, or nr_cpu_ids if + * the function returns true. * * Determine whether the local CPU is the first to be brought online in its * cluster - that is, whether there are any other online CPUs in the local @@ -265,6 +267,6 @@ static inline bool mips_cps_multicluster_cpus(void) * * Returns true if this CPU is first online, else false. 
*/ -extern unsigned int mips_cps_first_online_in_cluster(void); +extern unsigned int mips_cps_first_online_in_cluster(int *first_cpu); #endif /* __MIPS_ASM_MIPS_CPS_H__ */ diff --git a/arch/mips/include/asm/sgi/heart.h b/arch/mips/include/asm/sgi/heart.h index 0d03751955c4..c224c2e3575a 100644 --- a/arch/mips/include/asm/sgi/heart.h +++ b/arch/mips/include/asm/sgi/heart.h @@ -4,7 +4,7 @@ * * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org> * 2009 Johannes Dickgreber <tanzy@gmx.de> - * 2007-2015 Joshua Kinard <kumba@gentoo.org> + * 2007-2015 Joshua Kinard <linux@kumba.dev> */ #ifndef __ASM_SGI_HEART_H #define __ASM_SGI_HEART_H diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h index 10d3ebd890cb..88cfae5d22c8 100644 --- a/arch/mips/include/asm/smp-cps.h +++ b/arch/mips/include/asm/smp-cps.h @@ -24,6 +24,7 @@ struct core_boot_config { struct cluster_boot_config { unsigned long *core_power; + struct cpumask cpumask; struct core_boot_config *core_config; }; diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h index 61fd4d0aeda4..c0769dc4b853 100644 --- a/arch/mips/include/asm/vpe.h +++ b/arch/mips/include/asm/vpe.h @@ -119,4 +119,12 @@ void cleanup_tc(struct tc *tc); int __init vpe_module_init(void); void __exit vpe_module_exit(void); + +#ifdef CONFIG_MIPS_VPE_LOADER_MT +void *vpe_alloc(void); +int vpe_start(void *vpe, unsigned long start); +int vpe_stop(void *vpe); +int vpe_free(void *vpe); +#endif /* CONFIG_MIPS_VPE_LOADER_MT */ + #endif /* _ASM_VPE_H */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index af7412549e6e..04dc9ab55524 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -9,6 +9,7 @@ */ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/mmu_context.h> #include <linux/ptrace.h> #include <linux/smp.h> #include <linux/stddef.h> @@ -37,6 +38,8 @@ unsigned int elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); +static bool mmid_disabled_quirk; + static inline unsigned long cpu_get_msa_id(void) { unsigned long status, msa_id; @@ -645,7 +648,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); if (cpu_has_mips_r6) { - if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid) + if (!mmid_disabled_quirk && (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)) config5 |= MIPS_CONF5_MI; else config5 &= ~MIPS_CONF5_MI; @@ -708,7 +711,6 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) max_mmid_width); asid_mask = GENMASK(max_mmid_width - 1, 0); } - set_cpu_asid_mask(c, asid_mask); } } @@ -2046,3 +2048,39 @@ void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe) cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP; cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF; } + +void cpu_disable_mmid(void) +{ + int i; + unsigned long asid_mask; + unsigned int cpu = smp_processor_id(); + struct cpuinfo_mips *c = ¤t_cpu_data; + unsigned int config4 = read_c0_config4(); + unsigned int config5 = read_c0_config5(); + + /* Setup the initial ASID mask based on config4 */ + asid_mask = MIPS_ENTRYHI_ASID; + if (config4 & MIPS_CONF4_AE) + asid_mask |= MIPS_ENTRYHI_ASIDX; + set_cpu_asid_mask(c, asid_mask); + + /* Disable MMID in the C0 and update cpuinfo_mips accordingly */ + config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); + config5 &= ~MIPS_CONF5_MI; + write_c0_config5(config5); + /* Ensure the write to config5 above takes effect */ + back_to_back_c0_hazard(); + c->options &= 
~MIPS_CPU_MMID; + + /* Setup asid cache value cleared in per_cpu_trap_init() */ + cpu_data[cpu].asid_cache = asid_first_version(cpu); + + /* Reinit context for each CPU */ + for_each_possible_cpu(i) + set_cpu_context(i, &init_mm, 0); + + /* Ensure that now MMID will be seen as disable */ + mmid_disabled_quirk = true; + + pr_info("MMID support disabled due to hardware support issue\n"); +} diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c index 027fb57d0d79..96ac40d20c23 100644 --- a/arch/mips/kernel/gpio_txx9.c +++ b/arch/mips/kernel/gpio_txx9.c @@ -70,7 +70,7 @@ static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset, static struct gpio_chip txx9_gpio_chip = { .get = txx9_gpio_get, - .set_rv = txx9_gpio_set, + .set = txx9_gpio_set, .direction_input = txx9_gpio_dir_in, .direction_output = txx9_gpio_dir_out, .label = "TXx9", diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 43cb1e20baed..7c9c5dc38823 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -10,6 +10,7 @@ #include <linux/spinlock.h> #include <asm/mips-cps.h> +#include <asm/smp-cps.h> #include <asm/mipsregs.h> void __iomem *mips_gcr_base; @@ -248,6 +249,11 @@ void mips_cm_update_property(void) return; pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken"); mips_cm_is_l2_hci_broken = true; + + /* Disable MMID only if it was configured */ + if (cpu_has_mmid) + cpu_disable_mmid(); + of_node_put(cm_node); } @@ -529,39 +535,23 @@ void mips_cm_error_report(void) write_gcr_error_cause(cm_error); } -unsigned int mips_cps_first_online_in_cluster(void) +unsigned int mips_cps_first_online_in_cluster(int *first_cpu) { - unsigned int local_cl; - int i; - - local_cl = cpu_cluster(¤t_cpu_data); + unsigned int local_cl = cpu_cluster(¤t_cpu_data); + struct cpumask *local_cl_mask; /* - * We rely upon knowledge that CPUs are numbered sequentially by - * cluster - ie. CPUs 0..X will be in cluster 0, CPUs X+1..Y in cluster - * 1, CPUs Y+1..Z in cluster 2 etc. This means that CPUs in the same - * cluster will immediately precede or follow one another. - * - * First we scan backwards, until we find an online CPU in the cluster - * or we move on to another cluster. + * mips_cps_cluster_bootcfg is allocated in cps_prepare_cpus. If it is + * not yet done, then we are so early that only one CPU is running, so + * it is the first online CPU in the cluster. 
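The rewritten mips_cps_first_online_in_cluster() continuing just below replaces the old forward/backward CPU scan with a single mask operation: intersect the cluster's cpumask with cpu_online_mask, drop the current CPU, and report whether anything is left (cpumask_any_and_but() returns a value >= nr_cpu_ids when the result is empty). The stand-alone model below shows the same check with plain bitmasks; the helper and constants are illustrative stand-ins, only the described semantics follow the patch.

#include <stdio.h>

#define NR_CPUS 32      /* toy limit, plays the role of nr_cpu_ids */

/*
 * Toy model: cpumasks as 32-bit masks. Returns any other online CPU in the
 * cluster, or NR_CPUS if the current CPU is the only one online there.
 */
static unsigned int first_other_online_in_cluster(unsigned int self,
                                                  unsigned int cluster_mask,
                                                  unsigned int online_mask)
{
    unsigned int candidates = (cluster_mask & online_mask) & ~(1u << self);

    if (!candidates)
        return NR_CPUS;                             /* behaves like >= nr_cpu_ids */
    return (unsigned int)__builtin_ctz(candidates); /* lowest remaining CPU (GCC/Clang builtin) */
}

int main(void)
{
    unsigned int cluster = 0x0000000f;      /* CPUs 0-3 share this cluster */
    unsigned int online  = 0x00000001;      /* only CPU 0 has been brought up */
    unsigned int other   = first_other_online_in_cluster(0, cluster, online);

    printf("CPU 0 first online in its cluster: %s\n",
           other >= NR_CPUS ? "yes" : "no");
    return 0;
}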
*/ - for (i = smp_processor_id() - 1; i >= 0; i--) { - if (cpu_cluster(&cpu_data[i]) != local_cl) - break; - if (!cpu_online(i)) - continue; - return false; - } - - /* Then do the same for higher numbered CPUs */ - for (i = smp_processor_id() + 1; i < nr_cpu_ids; i++) { - if (cpu_cluster(&cpu_data[i]) != local_cl) - break; - if (!cpu_online(i)) - continue; - return false; - } - - /* We found no online CPUs in the local cluster */ - return true; + if (IS_ENABLED(CONFIG_MIPS_CPS) && mips_cps_cluster_bootcfg) + local_cl_mask = &mips_cps_cluster_bootcfg[local_cl].cpumask; + else + return true; + + *first_cpu = cpumask_any_and_but(local_cl_mask, + cpu_online_mask, + smp_processor_id()); + return (*first_cpu >= nr_cpu_ids); } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index b630604c577f..02aa6a04a21d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -690,18 +690,20 @@ unsigned long mips_stack_top(void) } /* Space for the VDSO, data page & GIC user page */ - top -= PAGE_ALIGN(current->thread.abi->vdso->size); - top -= PAGE_SIZE; - top -= mips_gic_present() ? PAGE_SIZE : 0; + if (current->thread.abi) { + top -= PAGE_ALIGN(current->thread.abi->vdso->size); + top -= PAGE_SIZE; + top -= mips_gic_present() ? PAGE_SIZE : 0; + + /* Space to randomize the VDSO base */ + if (current->flags & PF_RANDOMIZE) + top -= VDSO_RANDOMIZE_SIZE; + } /* Space for cache colour alignment */ if (cpu_has_dc_aliases) top -= shm_align_mask + 1; - /* Space to randomize the VDSO base */ - if (current->flags & PF_RANDOMIZE) - top -= VDSO_RANDOMIZE_SIZE; - return top; } diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c index cda7983e7c18..7f1c136ad850 100644 --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -138,7 +138,7 @@ static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new, apply_r_mips_hi16_rel(loc_orig, loc_new, offset); break; default: - pr_err("Unhandled relocation type %d at 0x%pK\n", type, + pr_err("Unhandled relocation type %d at 0x%p\n", type, loc_orig); return -ENOEXEC; } @@ -439,10 +439,10 @@ static void show_kernel_relocation(const char *level) { if (__kaslr_offset > 0) { printk(level); - pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset); - pr_cont(" .text @ 0x%pK\n", _text); - pr_cont(" .data @ 0x%pK\n", _sdata); - pr_cont(" .bss @ 0x%pK\n", __bss_start); + pr_cont("Kernel relocated by 0x%p\n", (void *)__kaslr_offset); + pr_cont(" .text @ 0x%p\n", _text); + pr_cont(" .data @ 0x%p\n", _sdata); + pr_cont(" .bss @ 0x%p\n", __bss_start); } } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index fbfe0771317e..11b9b6b63e19 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -458,7 +458,7 @@ static void __init mips_parse_crashkernel(void) total_mem = memblock_phys_mem_size(); ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base, - NULL, NULL); + NULL, NULL, NULL); if (ret != 0 || crash_size <= 0) return; diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 7b0e69af4097..22d4f9ff3ae2 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -281,9 +281,20 @@ static void __init cps_smp_setup(void) #endif /* CONFIG_MIPS_MT_FPAFF */ } +unsigned long calibrate_delay_is_known(void) +{ + int first_cpu_cluster = 0; + + /* The calibration has to be done on the primary CPU of the cluster */ + if (mips_cps_first_online_in_cluster(&first_cpu_cluster)) + return 0; + + return 
cpu_data[first_cpu_cluster].udelay_val; +} + static void __init cps_prepare_cpus(unsigned int max_cpus) { - unsigned int nclusters, ncores, core_vpes, c, cl, cca; + unsigned int nclusters, ncores, core_vpes, nvpe = 0, c, cl, cca; bool cca_unsuitable, cores_limited; struct cluster_boot_config *cluster_bootcfg; struct core_boot_config *core_bootcfg; @@ -356,10 +367,13 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) /* Allocate VPE boot configuration structs */ for (c = 0; c < ncores; c++) { + int v; core_vpes = core_vpe_count(cl, c); core_bootcfg[c].vpe_config = kcalloc(core_vpes, sizeof(*core_bootcfg[c].vpe_config), GFP_KERNEL); + for (v = 0; v < core_vpes; v++) + cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask); if (!core_bootcfg[c].vpe_config) goto err_out; } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index cef3c423a41a..a75587018f44 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -315,7 +315,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) * we allocate is out of range, just give up now. */ if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) { - kvm_err("CP0_EBase.WG required for guest exception base %pK\n", + kvm_err("CP0_EBase.WG required for guest exception base %p\n", gebase); err = -ENOMEM; goto out_free_gebase; diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c index 7b98def106e4..2a38c4267685 100644 --- a/arch/mips/lantiq/falcon/prom.c +++ b/arch/mips/lantiq/falcon/prom.c @@ -36,14 +36,14 @@ #define BOOT_NVEC (BOOT_REG_BASE | 0x04) #define BOOT_EVEC (BOOT_REG_BASE | 0x08) -void __init ltq_soc_nmi_setup(void) +static void __init ltq_soc_nmi_setup(void) { extern void (*nmi_handler)(void); ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC); } -void __init ltq_soc_ejtag_setup(void) +static void __init ltq_soc_ejtag_setup(void) { extern void (*ejtag_debug_handler)(void); diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c index 1187729d8cbb..577e6e6309a6 100644 --- a/arch/mips/lantiq/falcon/sysctrl.c +++ b/arch/mips/lantiq/falcon/sysctrl.c @@ -14,6 +14,7 @@ #include <lantiq_soc.h> #include "../clk.h" +#include "../prom.h" /* infrastructure control register */ #define SYS1_INFRAC 0x00bc @@ -72,11 +73,6 @@ static void __iomem *sysctl_membase[3], *status_membase; void __iomem *ltq_sys1_membase, *ltq_ebu_membase; -void falcon_trigger_hrst(int level) -{ - sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC); -} - static inline void sysctl_wait(struct clk *clk, unsigned int test, unsigned int reg) { @@ -214,19 +210,16 @@ void __init ltq_soc_init(void) of_node_put(np_syseth); of_node_put(np_sysgpe); - if ((request_mem_region(res_status.start, resource_size(&res_status), - res_status.name) < 0) || - (request_mem_region(res_ebu.start, resource_size(&res_ebu), - res_ebu.name) < 0) || - (request_mem_region(res_sys[0].start, - resource_size(&res_sys[0]), - res_sys[0].name) < 0) || - (request_mem_region(res_sys[1].start, - resource_size(&res_sys[1]), - res_sys[1].name) < 0) || - (request_mem_region(res_sys[2].start, - resource_size(&res_sys[2]), - res_sys[2].name) < 0)) + if ((!request_mem_region(res_status.start, resource_size(&res_status), + res_status.name)) || + (!request_mem_region(res_ebu.start, resource_size(&res_ebu), + res_ebu.name)) || + (!request_mem_region(res_sys[0].start, resource_size(&res_sys[0]), + res_sys[0].name)) || + (!request_mem_region(res_sys[1].start, resource_size(&res_sys[1]), + res_sys[1].name)) || + (!request_mem_region(res_sys[2].start, 
resource_size(&res_sys[2]), + res_sys[2].name))) pr_err("Failed to request core resources"); status_membase = ioremap(res_status.start, diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index a112573b6e37..961c55933a6d 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -16,6 +16,7 @@ #include <asm/bootinfo.h> #include <asm/irq_cpu.h> +#include <asm/time.h> #include <lantiq_soc.h> #include <irq.h> @@ -335,7 +336,8 @@ static const struct irq_domain_ops irq_domain_ops = { .map = icu_map, }; -int __init icu_of_init(struct device_node *node, struct device_node *parent) +static int __init +icu_of_init(struct device_node *node, struct device_node *parent) { struct device_node *eiu_node; struct resource res; diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c index 47ad21430fe2..39fb3ecdd6b7 100644 --- a/arch/mips/lantiq/xway/clk.c +++ b/arch/mips/lantiq/xway/clk.c @@ -74,7 +74,7 @@ unsigned long ltq_danube_pp32_hz(void) return clk; } -unsigned long ltq_ar9_sys_hz(void) +static unsigned long ltq_ar9_sys_hz(void) { if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2) return CLOCK_393M; diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c index 4a808f8c5beb..b79c462fd48a 100644 --- a/arch/mips/lantiq/xway/dcdc.c +++ b/arch/mips/lantiq/xway/dcdc.c @@ -46,7 +46,7 @@ static struct platform_driver dcdc_driver = { }, }; -int __init dcdc_init(void) +static int __init dcdc_init(void) { int ret = platform_driver_register(&dcdc_driver); diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 934ac72937e5..4693eba6c296 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -289,7 +289,7 @@ static struct platform_driver dma_driver = { }, }; -int __init +static int __init dma_init(void) { return platform_driver_register(&dma_driver); diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c index 8d52001301de..484c9e3000c1 100644 --- a/arch/mips/lantiq/xway/gptu.c +++ b/arch/mips/lantiq/xway/gptu.c @@ -194,7 +194,7 @@ static struct platform_driver dma_driver = { }, }; -int __init gptu_init(void) +static int __init gptu_init(void) { int ret = platform_driver_register(&dma_driver); diff --git a/arch/mips/loongson64/setup.c b/arch/mips/loongson64/setup.c index 257038e18779..b3e590eae952 100644 --- a/arch/mips/loongson64/setup.c +++ b/arch/mips/loongson64/setup.c @@ -3,7 +3,6 @@ * Copyright (C) 2007 Lemote Inc. 
& Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com */ -#include <linux/export.h> #include <linux/init.h> #include <asm/bootinfo.h> diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c index f9b8c85e9843..a6b1bf82057a 100644 --- a/arch/mips/mm/physaddr.c +++ b/arch/mips/mm/physaddr.c @@ -30,7 +30,7 @@ static inline bool __debug_virt_addr_valid(unsigned long x) phys_addr_t __virt_to_phys(volatile const void *x) { WARN(!__debug_virt_addr_valid((unsigned long)x), - "virt_to_phys used for non-linear address: %pK (%pS)\n", + "virt_to_phys used for non-linear address: %p (%pS)\n", x, x); return __virt_to_phys_nodebug(x); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 76f3b9c0a9f0..347126dc010d 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -508,6 +508,60 @@ static int __init set_ntlb(char *str) __setup("ntlb=", set_ntlb); +/* Initialise all TLB entries with unique values */ +static void r4k_tlb_uniquify(void) +{ + int entry = num_wired_entries(); + + htw_stop(); + write_c0_entrylo0(0); + write_c0_entrylo1(0); + + while (entry < current_cpu_data.tlbsize) { + unsigned long asid_mask = cpu_asid_mask(¤t_cpu_data); + unsigned long asid = 0; + int idx; + + /* Skip wired MMID to make ginvt_mmid work */ + if (cpu_has_mmid) + asid = MMID_KERNEL_WIRED + 1; + + /* Check for match before using UNIQUE_ENTRYHI */ + do { + if (cpu_has_mmid) { + write_c0_memorymapid(asid); + write_c0_entryhi(UNIQUE_ENTRYHI(entry)); + } else { + write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid); + } + mtc0_tlbw_hazard(); + tlb_probe(); + tlb_probe_hazard(); + idx = read_c0_index(); + /* No match or match is on current entry */ + if (idx < 0 || idx == entry) + break; + /* + * If we hit a match, we need to try again with + * a different ASID. + */ + asid++; + } while (asid < asid_mask); + + if (idx >= 0 && idx != entry) + panic("Unable to uniquify TLB entry %d", idx); + + write_c0_index(entry); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + entry++; + } + + tlbw_use_hazard(); + htw_start(); + flush_micro_tlb(); +} + /* * Configure TLB (for init or after a CPU has been powered off). */ @@ -547,7 +601,7 @@ static void r4k_tlb_configure(void) temp_tlb_entry = current_cpu_data.tlbsize - 1; /* From this point on the ARC firmware is dead. */ - local_flush_tlb_all(); + r4k_tlb_uniquify(); /* Did I tell you that ARC SUCKS? 
*/ } diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 68a8cefed420..0e85839b8225 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -234,7 +234,7 @@ static struct platform_driver ltq_pci_driver = { }, }; -int __init pcibios_init(void) +static int __init pcibios_init(void) { int ret = platform_driver_register(<q_pci_driver); if (ret) diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index 1cada09fa5db..006e2bbab87e 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -264,7 +264,7 @@ static struct platform_driver rt288x_pci_driver = { }, }; -int __init pcibios_init(void) +static int __init pcibios_init(void) { int ret = platform_driver_register(&rt288x_pci_driver); diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index af5bbbea949b..955b36e89358 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -15,6 +15,7 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> +#include <asm/time.h> #include "common.h" diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index 0e47cd59b6cb..9aa5ef374465 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c @@ -164,7 +164,7 @@ static struct rb532_gpio_chip rb532_gpio_chip[] = { .direction_input = rb532_gpio_direction_input, .direction_output = rb532_gpio_direction_output, .get = rb532_gpio_get, - .set_rv = rb532_gpio_set, + .set = rb532_gpio_set, .to_irq = rb532_gpio_to_irq, .base = 0, .ngpio = 32, diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 288d4d17eddd..20ef663af16e 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -165,7 +165,7 @@ static void hub_domain_free(struct irq_domain *domain, return; irqd = irq_domain_get_irq_data(domain, virq); - if (irqd && irqd->chip_data) + if (irqd) kfree(irqd->chip_data); } diff --git a/arch/mips/sgi-ip30/ip30-power.c b/arch/mips/sgi-ip30/ip30-power.c index 120b3f3d5108..66851e17c5a7 100644 --- a/arch/mips/sgi-ip30/ip30-power.c +++ b/arch/mips/sgi-ip30/ip30-power.c @@ -3,7 +3,7 @@ * ip30-power.c: Software powerdown and reset handling for IP30 architecture. * * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org> - * 2014 Joshua Kinard <kumba@gentoo.org> + * 2014 Joshua Kinard <linux@kumba.dev> * 2009 Johannes Dickgreber <tanzy@gmx.de> */ diff --git a/arch/mips/sgi-ip30/ip30-setup.c b/arch/mips/sgi-ip30/ip30-setup.c index e8547636a748..3fcb3ec9f802 100644 --- a/arch/mips/sgi-ip30/ip30-setup.c +++ b/arch/mips/sgi-ip30/ip30-setup.c @@ -3,7 +3,7 @@ * SGI IP30 miscellaneous setup bits. * * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org> - * 2007 Joshua Kinard <kumba@gentoo.org> + * 2007 Joshua Kinard <linux@kumba.dev> * 2009 Johannes Dickgreber <tanzy@gmx.de> */ diff --git a/arch/mips/sgi-ip30/ip30-smp.c b/arch/mips/sgi-ip30/ip30-smp.c index 4bfe654602b1..1e8210f2a9f8 100644 --- a/arch/mips/sgi-ip30/ip30-smp.c +++ b/arch/mips/sgi-ip30/ip30-smp.c @@ -5,7 +5,7 @@ * and smp-bmips.c. 
* * Copyright (C) 2005-2007 Stanislaw Skowronek <skylark@unaligned.org> - * 2006-2007, 2014-2015 Joshua Kinard <kumba@gentoo.org> + * 2006-2007, 2014-2015 Joshua Kinard <linux@kumba.dev> * 2009 Johannes Dickgreber <tanzy@gmx.de> */ diff --git a/arch/mips/sgi-ip30/ip30-timer.c b/arch/mips/sgi-ip30/ip30-timer.c index d13e105478ae..7652f72f0daf 100644 --- a/arch/mips/sgi-ip30/ip30-timer.c +++ b/arch/mips/sgi-ip30/ip30-timer.c @@ -5,7 +5,7 @@ * * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org> * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de> - * Copyright (C) 2011 Joshua Kinard <kumba@gentoo.org> + * Copyright (C) 2011 Joshua Kinard <linux@kumba.dev> */ #include <linux/clocksource.h> diff --git a/arch/mips/sgi-ip30/ip30-xtalk.c b/arch/mips/sgi-ip30/ip30-xtalk.c index 7ceb2b23ea1c..d798ee8c998c 100644 --- a/arch/mips/sgi-ip30/ip30-xtalk.c +++ b/arch/mips/sgi-ip30/ip30-xtalk.c @@ -3,7 +3,7 @@ * ip30-xtalk.c - Very basic Crosstalk (XIO) detection support. * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org> * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de> - * Copyright (C) 2007, 2014-2016 Joshua Kinard <kumba@gentoo.org> + * Copyright (C) 2007, 2014-2016 Joshua Kinard <linux@kumba.dev> */ #include <linux/init.h> diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index 0586ca7668b4..5dc867ea2c69 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -655,7 +655,7 @@ void __init txx9_iocled_init(unsigned long baseaddr, if (!iocled->mmioaddr) goto out_free; iocled->chip.get = txx9_iocled_get; - iocled->chip.set_rv = txx9_iocled_set; + iocled->chip.set = txx9_iocled_set; iocled->chip.direction_input = txx9_iocled_dir_in; iocled->chip.direction_output = txx9_iocled_dir_out; iocled->chip.label = "iocled"; @@ -776,7 +776,7 @@ struct txx9_sramc_dev { }; static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct txx9_sramc_dev *dev = bin_attr->private; @@ -791,7 +791,7 @@ static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj, } static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct txx9_sramc_dev *dev = bin_attr->private; diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h index eb720110f3a2..e7826a681bc4 100644 --- a/arch/openrisc/include/asm/mmu.h +++ b/arch/openrisc/include/asm/mmu.h @@ -15,7 +15,7 @@ #ifndef __ASM_OPENRISC_MMU_H #define __ASM_OPENRISC_MMU_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ typedef unsigned long mm_context_t; #endif diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h index c589e96035e1..85797f94d1d7 100644 --- a/arch/openrisc/include/asm/page.h +++ b/arch/openrisc/include/asm/page.h @@ -25,7 +25,7 @@ */ #include <asm/setup.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define clear_page(page) memset((page), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) @@ -55,10 +55,10 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) @@ -73,7 +73,7 @@ 
static inline unsigned long virt_to_pfn(const void *kaddr) #define virt_addr_valid(kaddr) (pfn_valid(virt_to_pfn(kaddr))) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index 5bd6463bd514..d33702831505 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -23,7 +23,7 @@ #include <asm-generic/pgtable-nopmd.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/mmu.h> #include <asm/fixmap.h> @@ -430,5 +430,5 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) typedef pte_t *pte_addr_t; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_OPENRISC_PGTABLE_H */ diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h index e05d1b59e24e..3ff893a67c13 100644 --- a/arch/openrisc/include/asm/processor.h +++ b/arch/openrisc/include/asm/processor.h @@ -39,7 +39,7 @@ */ #define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct task_struct; @@ -78,5 +78,5 @@ void show_registers(struct pt_regs *regs); #define cpu_relax() barrier() -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_OPENRISC_PROCESSOR_H */ diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h index e5a282b67075..28facf2f3e00 100644 --- a/arch/openrisc/include/asm/ptrace.h +++ b/arch/openrisc/include/asm/ptrace.h @@ -27,7 +27,7 @@ * they share a cacheline (not done yet, though... future optimization). */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * This struct describes how the registers are laid out on the kernel stack * during a syscall or other kernel entry. @@ -147,7 +147,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs, return *(unsigned long *)((unsigned long)regs + offset); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* * Offsets used by 'ptrace' system call interface. diff --git a/arch/openrisc/include/asm/setup.h b/arch/openrisc/include/asm/setup.h index 9acbc5deda69..dce9f4d3b378 100644 --- a/arch/openrisc/include/asm/setup.h +++ b/arch/openrisc/include/asm/setup.h @@ -8,7 +8,7 @@ #include <linux/init.h> #include <asm-generic/setup.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ void __init or1k_early_setup(void *fdt); #endif diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 4af3049c34c2..e338fff7efb0 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -17,7 +17,7 @@ #ifdef __KERNEL__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/types.h> #include <asm/processor.h> #endif @@ -38,7 +38,7 @@ * - if the contents of this structure are changed, the assembly constants * must also be changed */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct thread_info { struct task_struct *task; /* main task structure */ @@ -58,7 +58,7 @@ struct thread_info { * * preempt_count needs to be 1 initially, until the scheduler is functional. 
*/ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ @@ -75,7 +75,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define get_thread_info(ti) get_task_struct((ti)->task) #define put_thread_info(ti) put_task_struct((ti)->task) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ /* * thread information flags diff --git a/arch/openrisc/include/uapi/asm/ptrace.h b/arch/openrisc/include/uapi/asm/ptrace.h index a77cc9915ca8..1f12a60d5a06 100644 --- a/arch/openrisc/include/uapi/asm/ptrace.h +++ b/arch/openrisc/include/uapi/asm/ptrace.h @@ -20,7 +20,7 @@ #ifndef _UAPI__ASM_OPENRISC_PTRACE_H #define _UAPI__ASM_OPENRISC_PTRACE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * This is the layout of the regset returned by the GETREGSET ptrace call */ diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 3a7b5baaa450..af932a4ad306 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -72,7 +72,7 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size) * them and setting the cache-inhibit bit. */ mmap_write_lock(&init_mm); - error = walk_page_range_novma(&init_mm, va, va + size, + error = walk_kernel_page_table_range(va, va + size, &set_nocache_walk_ops, NULL, NULL); mmap_write_unlock(&init_mm); @@ -87,7 +87,7 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size) mmap_write_lock(&init_mm); /* walk_page_range shouldn't be able to fail here */ - WARN_ON(walk_page_range_novma(&init_mm, va, va + size, + WARN_ON(walk_kernel_page_table_range(va, va + size, &clear_nocache_walk_ops, NULL, NULL)); mmap_write_unlock(&init_mm); } diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 21b8166a6883..48ae3c79557a 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -39,7 +39,9 @@ endif export LD_BFD -# Set default 32 bits cross compilers for vdso +# Set default 32 bits cross compilers for vdso. +# This means that for 64BIT, both the 64-bit tools and the 32-bit tools +# need to be in the path. CC_ARCHES_32 = hppa hppa2.0 hppa1.1 CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux CROSS32_COMPILE := $(call cc-cross-prefix, \ @@ -139,7 +141,7 @@ palo lifimage: vmlinuz fi @if test ! 
-f "$(PALOCONF)"; then \ cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \ - echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \ + echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \ echo 'You should check it and re-run "make palo".'; \ echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \ false; \ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 1a86a4370b29..2c139a4dbf4b 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -276,7 +276,7 @@ extern unsigned long *empty_zero_page; #define pte_none(x) (pte_val(x) == 0) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) #define pte_user(x) (pte_val(x) & _PAGE_USER) -#define pte_clear(mm, addr, xp) set_pte(xp, __pte(0)) +#define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0)) #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) @@ -392,6 +392,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, } } #define set_ptes set_ptes +#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) /* Used for deferring calls to flush_dcache_page() */ @@ -456,7 +457,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned if (!pte_young(pte)) { return 0; } - set_pte(ptep, pte_mkold(pte)); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; } @@ -466,7 +467,7 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *pt struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - set_pte(ptep, pte_wrprotect(*ptep)); + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h index 51f40eaf7780..1013eeba31e5 100644 --- a/arch/parisc/include/asm/special_insns.h +++ b/arch/parisc/include/asm/special_insns.h @@ -32,6 +32,34 @@ pa; \ }) +/** + * prober_user() - Probe user read access + * @sr: Space register. + * @va: Virtual address. + * + * Return: Non-zero if address is accessible. + * + * Due to the way _PAGE_READ is handled in TLB entries, we need + * a special check to determine whether a user address is accessible. + * The ldb instruction does the initial access check. If it is + * successful, the probe instruction checks user access rights.
+ */ +#define prober_user(sr, va) ({ \ + unsigned long read_allowed; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tldb 0(%%sr%1,%2),%%r0\n" \ + "\tproberi (%%sr%1,%2),%3,%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ + "or %%r0,%%r0,%%r0") \ + : "=&r" (read_allowed) \ + : "i" (sr), "r" (va), "i" (PRIV_USER) \ + : "memory" \ + ); \ + read_allowed; \ +}) + #define CR_EIEM 15 /* External Interrupt Enable Mask */ #define CR_CR16 16 /* CR16 Interval Timer */ #define CR_EIRR 23 /* External Interrupt Request Register */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 88d0ae5769dd..6c531d2c847e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -42,9 +42,24 @@ __gu_err; \ }) -#define __get_user(val, ptr) \ -({ \ - __get_user_internal(SR_USER, val, ptr); \ +#define __probe_user_internal(sr, error, ptr) \ +({ \ + __asm__("\tproberi (%%sr%1,%2),%3,%0\n" \ + "\tcmpiclr,= 1,%0,%0\n" \ + "\tldi %4,%0\n" \ + : "=r"(error) \ + : "i"(sr), "r"(ptr), "i"(PRIV_USER), \ + "i"(-EFAULT)); \ +}) + +#define __get_user(val, ptr) \ +({ \ + register long __gu_err; \ + \ + __gu_err = __get_user_internal(SR_USER, val, ptr); \ + if (likely(!__gu_err)) \ + __probe_user_internal(SR_USER, __gu_err, ptr); \ + __gu_err; \ }) #define __get_user_asm(sr, val, ldx, ptr) \ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index db531e58d70e..37ca484cc495 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -429,7 +429,7 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) return ptep; } -static inline bool pte_needs_flush(pte_t pte) +static inline bool pte_needs_cache_flush(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)) == (_PAGE_PRESENT | _PAGE_ACCESSED); @@ -630,7 +630,7 @@ static void flush_cache_page_if_present(struct vm_area_struct *vma, ptep = get_ptep(vma->vm_mm, vmaddr); if (ptep) { pte = ptep_get(ptep); - needs_flush = pte_needs_flush(pte); + needs_flush = pte_needs_cache_flush(pte); pte_unmap(ptep); } if (needs_flush) @@ -841,7 +841,7 @@ void flush_cache_vmap(unsigned long start, unsigned long end) } vm = find_vm_area((void *)start); - if (WARN_ON_ONCE(!vm)) { + if (!vm) { flush_cache_all(); return; } diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ea57bcc21dc5..f4bf61a34701 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -499,6 +499,12 @@ * this happens is quite subtle, read below */ .macro make_insert_tlb spc,pte,prot,tmp space_to_prot \spc \prot /* create prot id from space */ + +#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT + /* need to drop DMB bit, as it's used as SPECIAL flag */ + depi 0,_PAGE_SPECIAL_BIT,1,\pte +#endif + /* The following is the real subtlety. This is depositing * T <-> _PAGE_REFTRAP * D <-> _PAGE_DIRTY @@ -511,17 +517,18 @@ * Finally, _PAGE_READ goes in the top bit of PL1 (so we * trigger an access rights trap in user space if the user * tries to read an unreadable page */ -#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT - /* need to drop DMB bit, as it's used as SPECIAL flag */ - depi 0,_PAGE_SPECIAL_BIT,1,\pte -#endif depd \pte,8,7,\prot /* PAGE_USER indicates the page can be read with user privileges, * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 - * contains _PAGE_READ) */ + * contains _PAGE_READ). While the kernel can't directly write + * user pages which have _PAGE_WRITE zero, it can read pages + * which have _PAGE_READ zero (PL <= PL1). 
Thus, the kernel + * exception fault handler doesn't trigger when reading pages + * that aren't user read accessible */ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 depdi 7,11,3,\prot + /* If we're a gateway page, drop PL2 back to zero for promotion * to kernel privilege (so we can execute the page as kernel). * Any privilege promotion page always denys read and write */ diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 0fa81bf1466b..f58c4bccfbce 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -613,6 +613,9 @@ lws_compare_and_swap32: lws_compare_and_swap: /* Trigger memory reference interruptions without writing to memory */ 1: ldw 0(%r26), %r28 + proberi (%r26), PRIV_USER, %r28 + comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */ + nop 2: stbys,e %r0, 0(%r26) /* Calculate 8-bit hash index from virtual address */ @@ -767,6 +770,9 @@ cas2_lock_start: copy %r26, %r28 depi_safe 0, 31, 2, %r28 10: ldw 0(%r28), %r1 + proberi (%r28), PRIV_USER, %r1 + comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */ + nop 11: stbys,e %r0, 0(%r28) /* Calculate 8-bit hash index from virtual address */ @@ -951,41 +957,47 @@ atomic_xchg_begin: /* 8-bit exchange */ 1: ldb 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 b atomic_xchg_start 2: stbys,e %r0, 0(%r20) - nop - nop - nop /* 16-bit exchange */ 3: ldh 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 b atomic_xchg_start 4: stbys,e %r0, 0(%r20) - nop - nop - nop /* 32-bit exchange */ 5: ldw 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop b atomic_xchg_start 6: stbys,e %r0, 0(%r23) nop nop - nop - nop - nop /* 64-bit exchange */ #ifdef CONFIG_64BIT 7: ldd 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop 8: stdby,e %r0, 0(%r23) #else 7: ldw 0(%r24), %r20 8: ldw 4(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 9: stbys,e %r0, 0(%r20) diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 5fc0c852c84c..69d65ffab312 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/compiler.h> #include <linux/uaccess.h> +#include <linux/mm.h> #define get_user_space() mfsp(SR_USER) #define get_kernel_space() SR_KERNEL @@ -32,9 +33,25 @@ EXPORT_SYMBOL(raw_copy_to_user); unsigned long raw_copy_from_user(void *dst, const void __user *src, unsigned long len) { + unsigned long start = (unsigned long) src; + unsigned long end = start + len; + unsigned long newlen = len; + mtsp(get_user_space(), SR_TEMP1); mtsp(get_kernel_space(), SR_TEMP2); - return pa_memcpy(dst, (void __force *)src, len); + + /* Check region is user accessible */ + if (start) + while (start < end) { + if (!prober_user(SR_TEMP1, start)) { + newlen = (start - (unsigned long) src); + break; + } + start += PAGE_SIZE; + /* align to page boundary which may have different permissions */ + start = PAGE_ALIGN_DOWN(start); + } + return len - newlen + pa_memcpy(dst, (void __force *)src, newlen); } EXPORT_SYMBOL(raw_copy_from_user); diff --git a/arch/parisc/mm/fault.c 
b/arch/parisc/mm/fault.c index c39de84e98b0..f1785640b049 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -363,6 +363,10 @@ bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: + if (!user_mode(regs) && fixup_exception(regs)) { + return; + } + if (user_mode(regs)) { int signo, si_code; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 514d0e92b336..93402a1d9c9f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -147,7 +147,6 @@ config PPC select ARCH_HAS_PMEM_API select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PTDUMP - select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 select ARCH_HAS_SET_MEMORY diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index aa90a048f319..7132392fa7cd 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -168,12 +168,6 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, extern int hash__has_transparent_hugepage(void); #endif -static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) -{ - BUG(); - return pmd; -} - #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 0bf6fd0bf42a..0fb5b7da9478 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -259,7 +259,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, */ static inline int hash__pmd_trans_huge(pmd_t pmd) { - return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) == + return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) == (_PAGE_PTE | H_PAGE_THP_HUGE)); } @@ -281,11 +281,6 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, extern int hash__has_transparent_hugepage(void); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) -{ - return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)); -} - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index a2ddcbb3fcb9..c19800365315 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -88,7 +88,6 @@ #define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */ #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */ -#define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */ /* * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE @@ -109,7 +108,7 @@ */ #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \ - _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) + _PAGE_SOFT_DIRTY) /* * user access blocked by key */ @@ -123,7 +122,7 @@ */ #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ - _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) + _PAGE_SOFT_DIRTY) /* * We define 2 sets of base prot bits, one for basic pages (ie, @@ -609,24 +608,6 @@ static inline pte_t pte_mkhuge(pte_t pte) return pte; } -static inline pte_t pte_mkdevmap(pte_t pte) -{ - return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP)); -} - -/* - * This is potentially called with a pmd as the 
argument, in which case it's not - * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set. - * That's because the bit we use for _PAGE_DEVMAP is not reserved for software - * use in page directory entries (ie. non-ptes). - */ -static inline int pte_devmap(pte_t pte) -{ - __be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE); - - return (pte_raw(pte) & mask) == mask; -} - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { /* FIXME!! check whether this need to be a conditional */ @@ -1379,36 +1360,6 @@ static inline bool arch_needs_pgtable_deposit(void) } extern void serialize_against_pte_lookup(struct mm_struct *mm); - -static inline pmd_t pmd_mkdevmap(pmd_t pmd) -{ - if (radix_enabled()) - return radix__pmd_mkdevmap(pmd); - return hash__pmd_mkdevmap(pmd); -} - -static inline pud_t pud_mkdevmap(pud_t pud) -{ - if (radix_enabled()) - return radix__pud_mkdevmap(pud); - BUG(); - return pud; -} - -static inline int pmd_devmap(pmd_t pmd) -{ - return pte_devmap(pmd_pte(pmd)); -} - -static inline int pud_devmap(pud_t pud) -{ - return pte_devmap(pud_pte(pud)); -} - -static inline int pgd_devmap(pgd_t pgd) -{ - return 0; -} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h index 5b178139f3c0..ff911b4251d9 100644 --- a/arch/powerpc/include/asm/book3s/64/pkeys.h +++ b/arch/powerpc/include/asm/book3s/64/pkeys.h @@ -5,7 +5,7 @@ #include <asm/book3s/64/hash-pkey.h> -static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) +static inline u64 vmflag_to_pte_pkey_bits(vm_flags_t vm_flags) { if (!mmu_has_feature(MMU_FTR_PKEY)) return 0x0UL; diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 8f55ff74bb68..df23a8267e4d 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -264,7 +264,7 @@ static inline int radix__p4d_bad(p4d_t p4d) static inline int radix__pmd_trans_huge(pmd_t pmd) { - return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE; + return (pmd_val(pmd) & _PAGE_PTE) == _PAGE_PTE; } static inline pmd_t radix__pmd_mkhuge(pmd_t pmd) @@ -274,7 +274,7 @@ static inline pmd_t radix__pmd_mkhuge(pmd_t pmd) static inline int radix__pud_trans_huge(pud_t pud) { - return (pud_val(pud) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE; + return (pud_val(pud) & _PAGE_PTE) == _PAGE_PTE; } static inline pud_t radix__pud_mkhuge(pud_t pud) @@ -315,16 +315,6 @@ static inline int radix__has_transparent_pud_hugepage(void) } #endif -static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) -{ - return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); -} - -static inline pud_t radix__pud_mkdevmap(pud_t pud) -{ - return __pud(pud_val(pud) | (_PAGE_PTE | _PAGE_DEVMAP)); -} - struct vmem_altmap; struct dev_pagemap; extern int __meminit radix__vmemmap_create_mapping(unsigned long start, diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 42a51a993d94..912f78a956a1 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -14,7 +14,7 @@ #include <asm/cpu_has_feature.h> #include <asm/firmware.h> -static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, +static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { #ifdef CONFIG_PPC_MEM_KEYS diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h index 59a2c7dbc78f..28e752138996 100644 
--- a/arch/powerpc/include/asm/pkeys.h +++ b/arch/powerpc/include/asm/pkeys.h @@ -30,9 +30,9 @@ extern u32 reserved_allocation_mask; /* bits set for reserved keys */ #endif -static inline u64 pkey_to_vmflag_bits(u16 pkey) +static inline vm_flags_t pkey_to_vmflag_bits(u16 pkey) { - return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS); + return (((vm_flags_t)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS); } static inline int vma_pkey(struct vm_area_struct *vma) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 4312bcb913a4..8053b24afc39 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -425,6 +425,7 @@ #define PPC_RAW_SC() (0x44000002) #define PPC_RAW_SYNC() (0x7c0004ac) #define PPC_RAW_ISYNC() (0x4c00012c) +#define PPC_RAW_LWSYNC() (0x7c2004ac) /* * Define what the VSX XX1 form instructions will look like, then add diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 13578f4db254..bb836f02101c 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1139,6 +1139,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe) return ret; } +EXPORT_SYMBOL_GPL(eeh_unfreeze_pe); static struct pci_device_id eeh_reset_ids[] = { diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 10ce6b3bd3b7..48ad0116f359 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -257,13 +257,12 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn, struct pci_driver *driver; enum pci_ers_result new_result; - pci_lock_rescan_remove(); pdev = edev->pdev; if (pdev) get_device(&pdev->dev); - pci_unlock_rescan_remove(); if (!pdev) { eeh_edev_info(edev, "no device"); + *result = PCI_ERS_RESULT_DISCONNECT; return; } device_lock(&pdev->dev); @@ -304,8 +303,9 @@ static void eeh_pe_report(const char *name, struct eeh_pe *root, struct eeh_dev *edev, *tmp; pr_info("EEH: Beginning: '%s'\n", name); - eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp) - eeh_pe_report_edev(edev, fn, result); + eeh_for_each_pe(root, pe) + eeh_pe_for_each_dev(pe, edev, tmp) + eeh_pe_report_edev(edev, fn, result); if (result) pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n", name, pci_ers_result_name(*result)); @@ -383,6 +383,8 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) if (!edev) return; + pci_lock_rescan_remove(); + /* * The content in the config space isn't saved because * the blocked config space on some adapters. 
We have @@ -393,14 +395,19 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) if (list_is_last(&edev->entry, &edev->pe->edevs)) eeh_pe_restore_bars(edev->pe); + pci_unlock_rescan_remove(); return; } pdev = eeh_dev_to_pci_dev(edev); - if (!pdev) + if (!pdev) { + pci_unlock_rescan_remove(); return; + } pci_restore_state(pdev); + + pci_unlock_rescan_remove(); } /** @@ -647,9 +654,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) { eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data); } else { - pci_lock_rescan_remove(); pci_hp_remove_devices(bus); - pci_unlock_rescan_remove(); } /* @@ -665,8 +670,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, if (rc) return rc; - pci_lock_rescan_remove(); - /* Restore PE */ eeh_ops->configure_bridge(pe); eeh_pe_restore_bars(pe); @@ -674,7 +677,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, /* Clear frozen state */ rc = eeh_clear_pe_frozen_state(pe, false); if (rc) { - pci_unlock_rescan_remove(); return rc; } @@ -709,7 +711,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, pe->tstamp = tstamp; pe->freeze_count = cnt; - pci_unlock_rescan_remove(); return 0; } @@ -843,10 +844,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe) {LIST_HEAD_INIT(rmv_data.removed_vf_list), 0}; int devices = 0; + pci_lock_rescan_remove(); + bus = eeh_pe_bus_get(pe); if (!bus) { pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->addr); + pci_unlock_rescan_remove(); return; } @@ -1094,10 +1098,15 @@ recover_failed: eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); - pci_lock_rescan_remove(); - pci_hp_remove_devices(bus); - pci_unlock_rescan_remove(); + bus = eeh_pe_bus_get(pe); + if (bus) + pci_hp_remove_devices(bus); + else + pr_err("%s: PCI bus for PHB#%x-PE#%x disappeared\n", + __func__, pe->phb->global_number, pe->addr); + /* The passed PE should no longer be used */ + pci_unlock_rescan_remove(); return; } @@ -1114,6 +1123,8 @@ out: eeh_clear_slot_attention(edev->pdev); eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true); + + pci_unlock_rescan_remove(); } /** @@ -1132,6 +1143,7 @@ void eeh_handle_special_event(void) unsigned long flags; int rc; + pci_lock_rescan_remove(); do { rc = eeh_ops->next_error(&pe); @@ -1171,10 +1183,12 @@ void eeh_handle_special_event(void) break; case EEH_NEXT_ERR_NONE: + pci_unlock_rescan_remove(); return; default: pr_warn("%s: Invalid value %d from next_error()\n", __func__, rc); + pci_unlock_rescan_remove(); return; } @@ -1186,7 +1200,9 @@ void eeh_handle_special_event(void) if (rc == EEH_NEXT_ERR_FROZEN_PE || rc == EEH_NEXT_ERR_FENCED_PHB) { eeh_pe_state_mark(pe, EEH_PE_RECOVERING); + pci_unlock_rescan_remove(); eeh_handle_normal_event(pe); + pci_lock_rescan_remove(); } else { eeh_for_each_pe(pe, tmp_pe) eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev) @@ -1199,7 +1215,6 @@ void eeh_handle_special_event(void) eeh_report_failure, NULL); eeh_set_channel_state(pe, pci_channel_io_perm_failure); - pci_lock_rescan_remove(); list_for_each_entry(hose, &hose_list, list_node) { phb_pe = eeh_phb_pe_get(hose); if (!phb_pe || @@ -1218,7 +1233,6 @@ void eeh_handle_special_event(void) } pci_hp_remove_devices(bus); } - pci_unlock_rescan_remove(); } /* @@ -1228,4 +1242,6 @@ void eeh_handle_special_event(void) if (rc == EEH_NEXT_ERR_DEAD_IOC) break; } while (rc != EEH_NEXT_ERR_NONE); + + pci_unlock_rescan_remove(); } 
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index d283d281d28e..e740101fadf3 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -671,10 +671,12 @@ static void eeh_bridge_check_link(struct eeh_dev *edev) eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val); /* Check link */ - if (!edev->pdev->link_active_reporting) { - eeh_edev_dbg(edev, "No link reporting capability\n"); - msleep(1000); - return; + if (edev->pdev) { + if (!edev->pdev->link_active_reporting) { + eeh_edev_dbg(edev, "No link reporting capability\n"); + msleep(1000); + return; + } } /* Wait the link is up until timeout (5s) */ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 8a050f30e6d9..5782e743fd27 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -333,7 +333,7 @@ static __init u64 fadump_calculate_reserve_size(void) * memory at a predefined offset. */ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), - &size, &base, NULL, NULL); + &size, &base, NULL, NULL, NULL); if (ret == 0 && size > 0) { unsigned long max_size; diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index 9ea74973d78d..6f444d0822d8 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c @@ -141,6 +141,9 @@ void pci_hp_add_devices(struct pci_bus *bus) struct pci_controller *phb; struct device_node *dn = pci_bus_to_OF_node(bus); + if (!dn) + return; + phb = pci_bus_to_host(bus); mode = PCI_PROBE_NORMAL; diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index 00e9c267b912..d1a2d755381c 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -110,7 +110,7 @@ void __init arch_reserve_crashkernel(void) /* use common parsing */ ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size, - &crash_base, NULL, NULL); + &crash_base, NULL, NULL, NULL); if (ret) return; diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 3a6592a31a10..03f8c34fa0a2 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -393,7 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm, { unsigned long gfn = memslot->base_gfn; unsigned long end, start = gfn_to_hva(kvm, gfn); - unsigned long vm_flags; + vm_flags_t vm_flags; int ret = 0; struct vm_area_struct *vma; int merge_flag = (merge) ? 
MADV_MERGEABLE : MADV_UNMERGEABLE; diff --git a/arch/powerpc/mm/book3s64/hash_hugepage.c b/arch/powerpc/mm/book3s64/hash_hugepage.c index 15d6f3ea7178..cdfd4fe75edb 100644 --- a/arch/powerpc/mm/book3s64/hash_hugepage.c +++ b/arch/powerpc/mm/book3s64/hash_hugepage.c @@ -54,7 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, /* * Make sure this is thp or devmap entry */ - if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))) + if (!(old_pmd & H_PAGE_THP_HUGE)) return 0; rflags = htab_convert_pte_flags(new_pmd, flags); diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 988948d69bc1..82d31177630b 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -195,7 +195,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr unsigned long old; #ifdef CONFIG_DEBUG_VM - WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); + WARN_ON(!hash__pmd_trans_huge(*pmdp)); assert_spin_locked(pmd_lockptr(mm, pmdp)); #endif @@ -227,7 +227,6 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(pmd_trans_huge(*pmdp)); - VM_BUG_ON(pmd_devmap(*pmdp)); pmd = *pmdp; pmd_clear(pmdp); diff --git a/arch/powerpc/mm/book3s64/hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c index 83c3361b358b..2bcbbf9d85ac 100644 --- a/arch/powerpc/mm/book3s64/hugetlbpage.c +++ b/arch/powerpc/mm/book3s64/hugetlbpage.c @@ -74,7 +74,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); /* Make sure this is a hugetlb entry */ - if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)) + if (old_pte & H_PAGE_THP_HUGE) return 0; rflags = htab_convert_pte_flags(new_pte, flags); diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 0db01e10a3f8..c9431ae7f78a 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -62,7 +62,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, { int changed; #ifdef CONFIG_DEBUG_VM - WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); + WARN_ON(!pmd_trans_huge(*pmdp)); assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp)); #endif changed = !pmd_same(*(pmdp), entry); @@ -82,7 +82,6 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, { int changed; #ifdef CONFIG_DEBUG_VM - WARN_ON(!pud_devmap(*pudp)); assert_spin_locked(pud_lockptr(vma->vm_mm, pudp)); #endif changed = !pud_same(*(pudp), entry); @@ -204,8 +203,8 @@ pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, { pmd_t pmd; VM_BUG_ON(addr & ~HPAGE_PMD_MASK); - VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) && - !pmd_devmap(*pmdp)) || !pmd_present(*pmdp)); + VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp)) || + !pmd_present(*pmdp)); pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); /* * if it not a fullmm flush, then we can possibly end up converting @@ -223,8 +222,7 @@ pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma, pud_t pud; VM_BUG_ON(addr & ~HPAGE_PMD_MASK); - VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) || - !pud_present(*pudp)); + VM_BUG_ON(!pud_present(*pudp)); pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp); /* * if it not a fullmm flush, then we can possibly end up converting @@ -644,7 +642,7 @@ unsigned long memremap_compat_align(void) 
EXPORT_SYMBOL_GPL(memremap_compat_align); #endif -pgprot_t vm_get_page_prot(unsigned long vm_flags) +pgprot_t vm_get_page_prot(vm_flags_t vm_flags) { unsigned long prot; diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index d865ec915f9d..be523e5fe9c5 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1433,7 +1433,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add unsigned long old; #ifdef CONFIG_DEBUG_VM - WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); + WARN_ON(!radix__pmd_trans_huge(*pmdp)); assert_spin_locked(pmd_lockptr(mm, pmdp)); #endif @@ -1450,7 +1450,7 @@ unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long add unsigned long old; #ifdef CONFIG_DEBUG_VM - WARN_ON(!pud_devmap(*pudp)); + WARN_ON(!pud_trans_huge(*pudp)); assert_spin_locked(pud_lockptr(mm, pudp)); #endif @@ -1468,7 +1468,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(radix__pmd_trans_huge(*pmdp)); - VM_BUG_ON(pmd_devmap(*pmdp)); /* * khugepaged calls this for normal pmd */ diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 5c8d1bb98b3e..5e4897daaaea 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -178,7 +178,7 @@ static void __init get_crash_kernel(void *fdt, unsigned long size) int ret; ret = parse_crashkernel(boot_command_line, size, &crash_size, - &crash_base, NULL, NULL); + &crash_base, NULL, NULL, NULL); if (ret != 0 || crash_size == 0) return; if (crash_base == 0) diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 61df5aed7989..dfaa9fd86f7e 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -509,7 +509,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, return NULL; #endif - if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) { + if (pmd_trans_huge(pmd)) { if (is_thp) *is_thp = true; ret_pte = (pte_t *)pmdp; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index a25a6ffe7d7c..025524378443 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -409,6 +409,71 @@ asm ( " blr ;" ); +static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image) +{ + u32 code = insn.code; + u32 dst_reg = bpf_to_ppc(insn.dst_reg); + u32 src_reg = bpf_to_ppc(insn.src_reg); + u32 size = BPF_SIZE(code); + u32 tmp1_reg = bpf_to_ppc(TMP_REG_1); + u32 tmp2_reg = bpf_to_ppc(TMP_REG_2); + s16 off = insn.off; + s32 imm = insn.imm; + + switch (imm) { + case BPF_LOAD_ACQ: + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp1_reg, off)); + EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg)); + } else { + EMIT(PPC_RAW_LD(dst_reg, src_reg, off)); + } + break; + } + EMIT(PPC_RAW_LWSYNC()); + break; + case BPF_STORE_REL: + EMIT(PPC_RAW_LWSYNC()); + switch (size) { + case BPF_B: + EMIT(PPC_RAW_STB(src_reg, dst_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_STH(src_reg, dst_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_STW(src_reg, dst_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp2_reg, off)); + 
EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg)); + } else { + EMIT(PPC_RAW_STD(src_reg, dst_reg, off)); + } + break; + } + break; + default: + pr_err_ratelimited("unexpected atomic load/store op code %02x\n", + imm); + return -EINVAL; + } + + return 0; +} + /* Assemble the body code between the prologue & epilogue */ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx, u32 *addrs, int pass, bool extra_pass) @@ -898,8 +963,25 @@ emit_clear: /* * BPF_STX ATOMIC (atomic ops) */ + case BPF_STX | BPF_ATOMIC | BPF_B: + case BPF_STX | BPF_ATOMIC | BPF_H: case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: + if (bpf_atomic_is_load_store(&insn[i])) { + ret = emit_atomic_ld_st(insn[i], ctx, image); + if (ret) + return ret; + + if (size != BPF_DW && insn_is_zext(&insn[i + 1])) + addrs[++i] = ctx->idx * 4; + break; + } else if (size == BPF_B || size == BPF_H) { + pr_err_ratelimited( + "eBPF filter atomic op code %02x (@%d) unsupported\n", + code, i); + return -EOPNOTSUPP; + } + save_reg = tmp2_reg; ret_reg = src_reg; diff --git a/arch/powerpc/platforms/44x/gpio.c b/arch/powerpc/platforms/44x/gpio.c index d540e261d85a..08ab76582568 100644 --- a/arch/powerpc/platforms/44x/gpio.c +++ b/arch/powerpc/platforms/44x/gpio.c @@ -180,7 +180,7 @@ static int __init ppc4xx_add_gpiochips(void) gc->direction_input = ppc4xx_gpio_dir_in; gc->direction_output = ppc4xx_gpio_dir_out; gc->get = ppc4xx_gpio_get; - gc->set_rv = ppc4xx_gpio_set; + gc->set = ppc4xx_gpio_set; ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc); if (ret) diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index bda707d848a6..7748b6641a3c 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -336,7 +336,7 @@ static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in; gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out; gpt->gc.get = mpc52xx_gpt_gpio_get; - gpt->gc.set_rv = mpc52xx_gpt_gpio_set; + gpt->gc.set = mpc52xx_gpt_gpio_set; gpt->gc.base = -1; gpt->gc.parent = gpt->dev; diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 6e37dfc6c5c9..cb7b9498f291 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -126,7 +126,7 @@ static int mcu_gpiochip_add(struct mcu *mcu) gc->can_sleep = 1; gc->ngpio = MCU_NUM_GPIO; gc->base = -1; - gc->set_rv = mcu_gpio_set; + gc->set = mcu_gpio_set; gc->direction_output = mcu_gpio_dir_out; gc->parent = dev; diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c index 7462c221115c..7433be7d66ee 100644 --- a/arch/powerpc/platforms/8xx/cpm1.c +++ b/arch/powerpc/platforms/8xx/cpm1.c @@ -499,7 +499,7 @@ int cpm1_gpiochip_add16(struct device *dev) gc->direction_input = cpm1_gpio16_dir_in; gc->direction_output = cpm1_gpio16_dir_out; gc->get = cpm1_gpio16_get; - gc->set_rv = cpm1_gpio16_set; + gc->set = cpm1_gpio16_set; gc->to_irq = cpm1_gpio16_to_irq; gc->parent = dev; gc->owner = THIS_MODULE; @@ -622,7 +622,7 @@ int cpm1_gpiochip_add32(struct device *dev) gc->direction_input = cpm1_gpio32_dir_in; gc->direction_output = cpm1_gpio32_dir_out; gc->get = cpm1_gpio32_get; - gc->set_rv = cpm1_gpio32_set; + gc->set = cpm1_gpio32_set; gc->parent = dev; gc->owner = THIS_MODULE; diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c 
index 5f4037c1d7fe..5e0a718d1be7 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -532,7 +532,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info, spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_insert(b_dev_info, newpage); - balloon_page_delete(page); b_dev_info->isolated_pages--; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); @@ -542,6 +541,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info, */ plpar_page_set_active(page); + balloon_page_finalize(page); /* balloon page list reference */ put_page(page); diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 52e2623a741d..aeb8633a3d00 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -29,7 +29,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) nid = of_node_to_nid(dn); if (likely((nid) >= 0)) { if (!node_online(nid)) { - if (__register_one_node(nid)) { + if (register_one_node(nid)) { pr_err("PCI: Failed to register node %d\n", nid); } else { update_numa_distance(dn); diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index e22fc638dbc7..f469f6a9f6e0 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -210,7 +210,7 @@ int cpm2_gpiochip_add32(struct device *dev) gc->direction_input = cpm2_gpio32_dir_in; gc->direction_output = cpm2_gpio32_dir_out; gc->get = cpm2_gpio32_get; - gc->set_rv = cpm2_gpio32_set; + gc->set = cpm2_gpio32_set; gc->parent = dev; gc->owner = THIS_MODULE; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index cbd6b505b2ff..a4b233a0659e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -43,7 +43,6 @@ config RISCV select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PREPARE_SYNC_CORE_CMD select ARCH_HAS_PTDUMP if MMU - select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP if MMU select ARCH_HAS_SET_MEMORY if MMU @@ -94,6 +93,7 @@ config RISCV select CLINT_TIMER if RISCV_M_MODE select CLONE_BACKWARDS select COMMON_CLK + select CPU_NO_EFFICIENT_FFS if !RISCV_ISA_ZBB select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND select DYNAMIC_FTRACE if FUNCTION_TRACER select EDAC_SUPPORT diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h index 1aaea81fb141..4c03e20ad11f 100644 --- a/arch/riscv/include/asm/bug.h +++ b/arch/riscv/include/asm/bug.h @@ -31,40 +31,45 @@ typedef u32 bug_insn_t; #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS #define __BUG_ENTRY_ADDR RISCV_INT " 1b - ." -#define __BUG_ENTRY_FILE RISCV_INT " %0 - ." +#define __BUG_ENTRY_FILE(file) RISCV_INT " " file " - ." 
#else #define __BUG_ENTRY_ADDR RISCV_PTR " 1b" -#define __BUG_ENTRY_FILE RISCV_PTR " %0" +#define __BUG_ENTRY_FILE(file) RISCV_PTR " " file #endif #ifdef CONFIG_DEBUG_BUGVERBOSE -#define __BUG_ENTRY \ +#define __BUG_ENTRY(file, line, flags) \ __BUG_ENTRY_ADDR "\n\t" \ - __BUG_ENTRY_FILE "\n\t" \ - RISCV_SHORT " %1\n\t" \ - RISCV_SHORT " %2" + __BUG_ENTRY_FILE(file) "\n\t" \ + RISCV_SHORT " " line "\n\t" \ + RISCV_SHORT " " flags #else -#define __BUG_ENTRY \ - __BUG_ENTRY_ADDR "\n\t" \ - RISCV_SHORT " %2" +#define __BUG_ENTRY(file, line, flags) \ + __BUG_ENTRY_ADDR "\n\t" \ + RISCV_SHORT " " flags #endif #ifdef CONFIG_GENERIC_BUG -#define __BUG_FLAGS(flags) \ -do { \ - __asm__ __volatile__ ( \ + +#define ARCH_WARN_ASM(file, line, flags, size) \ "1:\n\t" \ "ebreak\n" \ ".pushsection __bug_table,\"aw\"\n\t" \ "2:\n\t" \ - __BUG_ENTRY "\n\t" \ - ".org 2b + %3\n\t" \ + __BUG_ENTRY(file, line, flags) "\n\t" \ + ".org 2b + " size "\n\t" \ ".popsection" \ + +#define __BUG_FLAGS(flags) \ +do { \ + __asm__ __volatile__ ( \ + ARCH_WARN_ASM("%0", "%1", "%2", "%3") \ : \ : "i" (__FILE__), "i" (__LINE__), \ "i" (flags), \ "i" (sizeof(struct bug_entry))); \ } while (0) + #else /* CONFIG_GENERIC_BUG */ #define __BUG_FLAGS(flags) do { \ __asm__ __volatile__ ("ebreak\n"); \ @@ -78,6 +83,8 @@ do { \ #define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags)) +#define ARCH_WARN_REACHABLE + #define HAVE_ARCH_BUG #include <asm-generic/bug.h> diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h index fb9696d7a3f2..4508aaa7a2fd 100644 --- a/arch/riscv/include/asm/cfi.h +++ b/arch/riscv/include/asm/cfi.h @@ -14,27 +14,11 @@ struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); #define __bpfcall -static inline int cfi_get_offset(void) -{ - return 4; -} - -#define cfi_get_offset cfi_get_offset -extern u32 cfi_bpf_hash; -extern u32 cfi_bpf_subprog_hash; -extern u32 cfi_get_func_hash(void *func); #else static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { return BUG_TRAP_TYPE_NONE; } - -#define cfi_bpf_hash 0U -#define cfi_bpf_subprog_hash 0U -static inline u32 cfi_get_func_hash(void *func) -{ - return 0; -} #endif /* CONFIG_CFI_CLANG */ #endif /* _ASM_RISCV_CFI_H */ diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index 7de05db7d3bd..1018d2216901 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -397,24 +397,8 @@ static inline struct page *pgd_page(pgd_t pgd) p4d_t *p4d_offset(pgd_t *pgd, unsigned long address); #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline int pte_devmap(pte_t pte); static inline pte_t pmd_pte(pmd_t pmd); static inline pte_t pud_pte(pud_t pud); - -static inline int pmd_devmap(pmd_t pmd) -{ - return pte_devmap(pmd_pte(pmd)); -} - -static inline int pud_devmap(pud_t pud) -{ - return pte_devmap(pud_pte(pud)); -} - -static inline int pgd_devmap(pgd_t pgd) -{ - return 0; -} #endif #endif /* _ASM_RISCV_PGTABLE_64_H */ diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index a8f5205cea54..179bd4afece4 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -19,7 +19,6 @@ #define _PAGE_SOFT (3 << 8) /* Reserved for software */ #define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */ -#define _PAGE_DEVMAP (1 << 9) /* RSW, devmap */ #define _PAGE_TABLE _PAGE_PRESENT /* diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 
5bd5aae60d53..91697fbf1f90 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -409,13 +409,6 @@ static inline int pte_special(pte_t pte) return pte_val(pte) & _PAGE_SPECIAL; } -#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP -static inline int pte_devmap(pte_t pte) -{ - return pte_val(pte) & _PAGE_DEVMAP; -} -#endif - /* static inline pte_t pte_rdprotect(pte_t pte) */ static inline pte_t pte_wrprotect(pte_t pte) @@ -457,11 +450,6 @@ static inline pte_t pte_mkspecial(pte_t pte) return __pte(pte_val(pte) | _PAGE_SPECIAL); } -static inline pte_t pte_mkdevmap(pte_t pte) -{ - return __pte(pte_val(pte) | _PAGE_DEVMAP); -} - static inline pte_t pte_mkhuge(pte_t pte) { return pte; @@ -790,11 +778,6 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd) return pte_pmd(pte_mkdirty(pmd_pte(pmd))); } -static inline pmd_t pmd_mkdevmap(pmd_t pmd) -{ - return pte_pmd(pte_mkdevmap(pmd_pte(pmd))); -} - #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP static inline bool pmd_special(pmd_t pmd) { @@ -946,11 +929,6 @@ static inline pud_t pud_mkhuge(pud_t pud) return pud; } -static inline pud_t pud_mkdevmap(pud_t pud) -{ - return pte_pud(pte_mkdevmap(pud_pte(pud))); -} - static inline int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, pud_t entry, int dirty) diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 1a20dd746a49..eed0abc40514 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -63,7 +63,6 @@ void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start, bool arch_tlbbatch_should_defer(struct mm_struct *mm); void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm, unsigned long start, unsigned long end); -void arch_flush_tlb_batched_pending(struct mm_struct *mm); void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); extern unsigned long tlb_flush_all_threshold; diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c index 64bdd3e1ab8c..6ec9dbd7292e 100644 --- a/arch/riscv/kernel/cfi.c +++ b/arch/riscv/kernel/cfi.c @@ -75,56 +75,3 @@ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) return report_cfi_failure(regs, regs->epc, &target, type); } - -#ifdef CONFIG_CFI_CLANG -struct bpf_insn; - -/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */ -extern unsigned int __bpf_prog_runX(const void *ctx, - const struct bpf_insn *insn); - -/* - * Force a reference to the external symbol so the compiler generates - * __kcfi_typid. 
- */ -__ADDRESSABLE(__bpf_prog_runX); - -/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */ -asm ( -" .pushsection .data..ro_after_init,\"aw\",@progbits \n" -" .type cfi_bpf_hash,@object \n" -" .globl cfi_bpf_hash \n" -" .p2align 2, 0x0 \n" -"cfi_bpf_hash: \n" -" .word __kcfi_typeid___bpf_prog_runX \n" -" .size cfi_bpf_hash, 4 \n" -" .popsection \n" -); - -/* Must match bpf_callback_t */ -extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64); - -__ADDRESSABLE(__bpf_callback_fn); - -/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */ -asm ( -" .pushsection .data..ro_after_init,\"aw\",@progbits \n" -" .type cfi_bpf_subprog_hash,@object \n" -" .globl cfi_bpf_subprog_hash \n" -" .p2align 2, 0x0 \n" -"cfi_bpf_subprog_hash: \n" -" .word __kcfi_typeid___bpf_callback_fn \n" -" .size cfi_bpf_subprog_hash, 4 \n" -" .popsection \n" -); - -u32 cfi_get_func_hash(void *func) -{ - u32 hash; - - if (get_kernel_nofault(hash, func - cfi_get_offset())) - return 0; - - return hash; -} -#endif diff --git a/arch/riscv/kernel/kexec_elf.c b/arch/riscv/kernel/kexec_elf.c index f4755d49b89e..56444c7bd34e 100644 --- a/arch/riscv/kernel/kexec_elf.c +++ b/arch/riscv/kernel/kexec_elf.c @@ -95,6 +95,7 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len, kbuf.buf_align = PMD_SIZE; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE); + kbuf.cma = NULL; kbuf.top_down = false; ret = arch_kexec_locate_mem_hole(&kbuf); if (!ret) { diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 14888e5ea19a..f90cce7a3ace 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -21,6 +21,8 @@ #include <linux/efi.h> #include <linux/crash_dump.h> #include <linux/panic_notifier.h> +#include <linux/jump_label.h> +#include <linux/gcd.h> #include <asm/acpi.h> #include <asm/alternative.h> @@ -362,6 +364,9 @@ void __init setup_arch(char **cmdline_p) riscv_user_isa_enable(); riscv_spinlock_init(); + + if (!IS_ENABLED(CONFIG_RISCV_ISA_ZBB) || !riscv_isa_extension_available(NULL, ZBB)) + static_branch_disable(&efficient_ffs_key); } bool arch_cpu_is_hotpluggable(int cpu) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 8d0374d7ce8e..15683ae13fa5 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -1408,7 +1408,7 @@ static void __init arch_reserve_crashkernel(void) ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base, - &low_size, &high); + &low_size, NULL, &high); if (ret) return; diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index d815448758a1..3f76db3d2769 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -299,7 +299,7 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, if (ret) goto unlock; - ret = walk_page_range_novma(&init_mm, lm_start, lm_end, + ret = walk_kernel_page_table_range(lm_start, lm_end, &pageattr_ops, NULL, &masks); if (ret) goto unlock; @@ -317,13 +317,13 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, if (ret) goto unlock; - ret = walk_page_range_novma(&init_mm, lm_start, lm_end, + ret = walk_kernel_page_table_range(lm_start, lm_end, &pageattr_ops, NULL, &masks); if (ret) goto unlock; } - ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, + ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL, &masks); unlock: @@ -335,7 +335,7 @@ unlock: */ flush_tlb_all(); #else - ret = 
walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, + ret = walk_kernel_page_table_range(start, end, &pageattr_ops, NULL, &masks); mmap_write_unlock(&init_mm); diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index 32922550a50a..3b51690cc876 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -6,7 +6,6 @@ #include <linux/efi.h> #include <linux/init.h> #include <linux/debugfs.h> -#include <linux/memory_hotplug.h> #include <linux/seq_file.h> #include <linux/ptdump.h> @@ -413,9 +412,7 @@ bool ptdump_check_wx(void) static int ptdump_show(struct seq_file *m, void *v) { - get_online_mems(); ptdump_walk(m, m->private); - put_online_mems(); return 0; } diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index e737ba7949b1..8404530ec00f 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -234,11 +234,6 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); } -void arch_flush_tlb_batched_pending(struct mm_struct *mm) -{ - flush_tlb_mm(mm); -} - void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) { __flush_tlb_range(NULL, &batch->cpumask, diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index afc08b538f99..bf680c26a33c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -74,6 +74,7 @@ config S390 select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VIRTUAL @@ -102,6 +103,7 @@ config S390 select ARCH_HAS_UBSAN select ARCH_HAS_VDSO_TIME_DATA select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_HAVE_TRACE_MMIO_ACCESS select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH select ARCH_INLINE_READ_LOCK_IRQ @@ -131,6 +133,7 @@ config S390 select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE + select ARCH_MODULE_NEEDS_WEAK_PER_CPU select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC @@ -149,6 +152,7 @@ config S390 select ARCH_WANT_KERNEL_PMD_MKWRITE select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP + select ARCH_WANTS_THP_SWAP select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS2 select DCACHE_WORD_ACCESS if !KMSAN diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 305e6c791071..93684a775716 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -384,7 +384,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size) kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE); boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax); boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start, - kernel_size + kernel_size); + kernel_start + kernel_size); } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) { kernel_start = round_down(vmax - kernel_size, THREAD_SIZE); boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start, diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index a8e84e80552d..6b33429f1c4d 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -816,6 +816,7 @@ CONFIG_PKEY_EP11=m CONFIG_PKEY_PCKMO=m CONFIG_PKEY_UV=m CONFIG_CRYPTO_PAES_S390=m 
+CONFIG_CRYPTO_PHMAC_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_CRYPTO_KRB5=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index a209b6ae4791..b75eb2775850 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -803,6 +803,7 @@ CONFIG_PKEY_EP11=m CONFIG_PKEY_PCKMO=m CONFIG_PKEY_UV=m CONFIG_CRYPTO_PAES_S390=m +CONFIG_CRYPTO_PHMAC_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_CRYPTO_KRB5=m diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index 1e5a1038d491..998f4b656b18 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile @@ -11,4 +11,5 @@ obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o obj-$(CONFIG_S390_PRNG) += prng.o obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o +obj-$(CONFIG_CRYPTO_PHMAC_S390) += phmac_s390.o obj-y += arch_random.o diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c index 93a1098d9f8d..58444da9b004 100644 --- a/arch/s390/crypto/hmac_s390.c +++ b/arch/s390/crypto/hmac_s390.c @@ -290,6 +290,7 @@ static int s390_hmac_export(struct shash_desc *desc, void *out) struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); unsigned int bs = crypto_shash_blocksize(desc->tfm); unsigned int ds = bs / 2; + u64 lo = ctx->buflen[0]; union { u8 *u8; u64 *u64; @@ -301,9 +302,10 @@ static int s390_hmac_export(struct shash_desc *desc, void *out) else memcpy(p.u8, ctx->param, ds); p.u8 += ds; - put_unaligned(ctx->buflen[0], p.u64++); + lo += bs; + put_unaligned(lo, p.u64++); if (ds == SHA512_DIGEST_SIZE) - put_unaligned(ctx->buflen[1], p.u64); + put_unaligned(ctx->buflen[1] + (lo < bs), p.u64); return err; } @@ -316,14 +318,16 @@ static int s390_hmac_import(struct shash_desc *desc, const void *in) const u8 *u8; const u64 *u64; } p = { .u8 = in }; + u64 lo; int err; err = s390_hmac_sha2_init(desc); memcpy(ctx->param, p.u8, ds); p.u8 += ds; - ctx->buflen[0] = get_unaligned(p.u64++); + lo = get_unaligned(p.u64++); + ctx->buflen[0] = lo - bs; if (ds == SHA512_DIGEST_SIZE) - ctx->buflen[1] = get_unaligned(p.u64); + ctx->buflen[1] = get_unaligned(p.u64) - (lo < bs); if (ctx->buflen[0] | ctx->buflen[1]) ctx->gr0.ikp = 1; return err; diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 8a340c16acb4..a624a43a2b54 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -1633,7 +1633,7 @@ static int __init paes_s390_init(void) /* with this pseudo devie alloc and start a crypto engine */ paes_crypto_engine = crypto_engine_alloc_init_and_set(paes_dev.this_device, - true, NULL, false, MAX_QLEN); + true, false, MAX_QLEN); if (!paes_crypto_engine) { rc = -ENOMEM; goto out_err; diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c new file mode 100644 index 000000000000..7ecfdc4fba2d --- /dev/null +++ b/arch/s390/crypto/phmac_s390.c @@ -0,0 +1,1048 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright IBM Corp. 2025 + * + * s390 specific HMAC support for protected keys. 
+ */ + +#define KMSG_COMPONENT "phmac_s390" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <asm/cpacf.h> +#include <asm/pkey.h> +#include <crypto/engine.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> +#include <crypto/sha2.h> +#include <linux/atomic.h> +#include <linux/cpufeature.h> +#include <linux/delay.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +static struct crypto_engine *phmac_crypto_engine; +#define MAX_QLEN 10 + +/* + * A simple hash walk helper + */ + +struct hash_walk_helper { + struct crypto_hash_walk walk; + const u8 *walkaddr; + int walkbytes; +}; + +/* + * Prepare hash walk helper. + * Set up the base hash walk, fill walkaddr and walkbytes. + * Returns 0 on success or negative value on error. + */ +static inline int hwh_prepare(struct ahash_request *req, + struct hash_walk_helper *hwh) +{ + hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk); + if (hwh->walkbytes < 0) + return hwh->walkbytes; + hwh->walkaddr = hwh->walk.data; + return 0; +} + +/* + * Advance hash walk helper by n bytes. + * Progress the walkbytes and walkaddr fields by n bytes. + * If walkbytes is then 0, pull next hunk from hash walk + * and update walkbytes and walkaddr. + * If n is negative, unmap hash walk and return error. + * Returns 0 on success or negative value on error. + */ +static inline int hwh_advance(struct hash_walk_helper *hwh, int n) +{ + if (n < 0) + return crypto_hash_walk_done(&hwh->walk, n); + + hwh->walkbytes -= n; + hwh->walkaddr += n; + if (hwh->walkbytes > 0) + return 0; + + hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0); + if (hwh->walkbytes < 0) + return hwh->walkbytes; + + hwh->walkaddr = hwh->walk.data; + return 0; +} + +/* + * KMAC param block layout for sha2 function codes: + * The layout of the param block for the KMAC instruction depends on the + * blocksize of the used hashing sha2-algorithm function codes. The param block + * contains the hash chaining value (cv), the input message bit-length (imbl) + * and the hmac-secret (key). To prevent code duplication, the sizes of all + * these are calculated based on the blocksize. 
+ * + * param-block: + * +-------+ + * | cv | + * +-------+ + * | imbl | + * +-------+ + * | key | + * +-------+ + * + * sizes: + * part | sh2-alg | calculation | size | type + * -----+---------+-------------+------+-------- + * cv | 224/256 | blocksize/2 | 32 | u64[8] + * | 384/512 | | 64 | u128[8] + * imbl | 224/256 | blocksize/8 | 8 | u64 + * | 384/512 | | 16 | u128 + * key | 224/256 | blocksize | 96 | u8[96] + * | 384/512 | | 160 | u8[160] + */ + +#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE +#define MAX_IMBL_SIZE sizeof(u128) +#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE + +#define SHA2_CV_SIZE(bs) ((bs) >> 1) +#define SHA2_IMBL_SIZE(bs) ((bs) >> 3) + +#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs)) +#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs)) + +#define PHMAC_MAX_KEYSIZE 256 +#define PHMAC_SHA256_PK_SIZE (SHA256_BLOCK_SIZE + 32) +#define PHMAC_SHA512_PK_SIZE (SHA512_BLOCK_SIZE + 32) +#define PHMAC_MAX_PK_SIZE PHMAC_SHA512_PK_SIZE + +/* phmac protected key struct */ +struct phmac_protkey { + u32 type; + u32 len; + u8 protkey[PHMAC_MAX_PK_SIZE]; +}; + +#define PK_STATE_NO_KEY 0 +#define PK_STATE_CONVERT_IN_PROGRESS 1 +#define PK_STATE_VALID 2 + +/* phmac tfm context */ +struct phmac_tfm_ctx { + /* source key material used to derive a protected key from */ + u8 keybuf[PHMAC_MAX_KEYSIZE]; + unsigned int keylen; + + /* cpacf function code to use with this protected key type */ + long fc; + + /* nr of requests enqueued via crypto engine which use this tfm ctx */ + atomic_t via_engine_ctr; + + /* spinlock to atomic read/update all the following fields */ + spinlock_t pk_lock; + + /* see PK_STATE* defines above, < 0 holds convert failure rc */ + int pk_state; + /* if state is valid, pk holds the protected key */ + struct phmac_protkey pk; +}; + +union kmac_gr0 { + unsigned long reg; + struct { + unsigned long : 48; + unsigned long ikp : 1; + unsigned long iimp : 1; + unsigned long ccup : 1; + unsigned long : 6; + unsigned long fc : 7; + }; +}; + +struct kmac_sha2_ctx { + u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE]; + union kmac_gr0 gr0; + u8 buf[MAX_BLOCK_SIZE]; + u64 buflen[2]; +}; + +/* phmac request context */ +struct phmac_req_ctx { + struct hash_walk_helper hwh; + struct kmac_sha2_ctx kmac_ctx; + bool final; +}; + +/* + * Pkey 'token' struct used to derive a protected key value from a clear key. 
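To make the size table above concrete: a tiny stand-alone sketch (illustration only, not part of this patch) that evaluates the same arithmetic as the SHA2_CV_SIZE/SHA2_IMBL_SIZE/SHA2_KEY_OFFSET macros for the two SHA-2 block sizes.

#include <stdio.h>

/* Mirrors the offset macros from phmac_s390.c above. */
static unsigned int cv_size(unsigned int bs)    { return bs >> 1; }
static unsigned int imbl_size(unsigned int bs)  { return bs >> 3; }
static unsigned int key_offset(unsigned int bs) { return cv_size(bs) + imbl_size(bs); }

int main(void)
{
        unsigned int bs[] = { 64, 128 };   /* SHA-224/256 and SHA-384/512 block sizes */

        for (int i = 0; i < 2; i++)
                printf("bs=%3u: cv=%u imbl=%u key offset=%u key size=%u\n",
                       bs[i], cv_size(bs[i]), imbl_size(bs[i]),
                       key_offset(bs[i]), bs[i] + 32);
        return 0;
}

For a 64-byte block the protected key thus starts at offset 40 and is 96 bytes long; for a 128-byte block it starts at offset 80 and is 160 bytes, matching the table and the PHMAC_SHA256_PK_SIZE/PHMAC_SHA512_PK_SIZE defines.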
+ */ +struct hmac_clrkey_token { + u8 type; + u8 res0[3]; + u8 version; + u8 res1[3]; + u32 keytype; + u32 len; + u8 key[]; +} __packed; + +static int hash_key(const u8 *in, unsigned int inlen, + u8 *digest, unsigned int digestsize) +{ + unsigned long func; + union { + struct sha256_paramblock { + u32 h[8]; + u64 mbl; + } sha256; + struct sha512_paramblock { + u64 h[8]; + u128 mbl; + } sha512; + } __packed param; + +#define PARAM_INIT(x, y, z) \ + param.sha##x.h[0] = SHA##y ## _H0; \ + param.sha##x.h[1] = SHA##y ## _H1; \ + param.sha##x.h[2] = SHA##y ## _H2; \ + param.sha##x.h[3] = SHA##y ## _H3; \ + param.sha##x.h[4] = SHA##y ## _H4; \ + param.sha##x.h[5] = SHA##y ## _H5; \ + param.sha##x.h[6] = SHA##y ## _H6; \ + param.sha##x.h[7] = SHA##y ## _H7; \ + param.sha##x.mbl = (z) + + switch (digestsize) { + case SHA224_DIGEST_SIZE: + func = CPACF_KLMD_SHA_256; + PARAM_INIT(256, 224, inlen * 8); + break; + case SHA256_DIGEST_SIZE: + func = CPACF_KLMD_SHA_256; + PARAM_INIT(256, 256, inlen * 8); + break; + case SHA384_DIGEST_SIZE: + func = CPACF_KLMD_SHA_512; + PARAM_INIT(512, 384, inlen * 8); + break; + case SHA512_DIGEST_SIZE: + func = CPACF_KLMD_SHA_512; + PARAM_INIT(512, 512, inlen * 8); + break; + default: + return -EINVAL; + } + +#undef PARAM_INIT + + cpacf_klmd(func, ¶m, in, inlen); + + memcpy(digest, ¶m, digestsize); + + return 0; +} + +/* + * make_clrkey_token() - wrap the clear key into a pkey clearkey token. + */ +static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen, + unsigned int digestsize, u8 *dest) +{ + struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest; + unsigned int blocksize; + int rc; + + token->type = 0x00; + token->version = 0x02; + switch (digestsize) { + case SHA224_DIGEST_SIZE: + case SHA256_DIGEST_SIZE: + token->keytype = PKEY_KEYTYPE_HMAC_512; + blocksize = 64; + break; + case SHA384_DIGEST_SIZE: + case SHA512_DIGEST_SIZE: + token->keytype = PKEY_KEYTYPE_HMAC_1024; + blocksize = 128; + break; + default: + return -EINVAL; + } + token->len = blocksize; + + if (clrkeylen > blocksize) { + rc = hash_key(clrkey, clrkeylen, token->key, digestsize); + if (rc) + return rc; + } else { + memcpy(token->key, clrkey, clrkeylen); + } + + return 0; +} + +/* + * phmac_tfm_ctx_setkey() - Set key value into tfm context, maybe construct + * a clear key token digestible by pkey from a clear key value. + */ +static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx, + const u8 *key, unsigned int keylen) +{ + if (keylen > sizeof(tfm_ctx->keybuf)) + return -EINVAL; + + memcpy(tfm_ctx->keybuf, key, keylen); + tfm_ctx->keylen = keylen; + + return 0; +} + +/* + * Convert the raw key material into a protected key via PKEY api. + * This function may sleep - don't call in non-sleeping context. + */ +static inline int convert_key(const u8 *key, unsigned int keylen, + struct phmac_protkey *pk) +{ + int rc, i; + + pk->len = sizeof(pk->protkey); + + /* + * In case of a busy card retry with increasing delay + * of 200, 400, 800 and 1600 ms - in total 3 s. + */ + for (rc = -EIO, i = 0; rc && i < 5; i++) { + if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) { + rc = -EINTR; + goto out; + } + rc = pkey_key2protkey(key, keylen, + pk->protkey, &pk->len, &pk->type, + PKEY_XFLAG_NOMEMALLOC); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * (Re-)Convert the raw key material from the tfm ctx into a protected + * key via convert_key() function. Update the pk_state, pk_type, pk_len + * and the protected key in the tfm context. 
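The retry loop in convert_key() above sleeps only after a busy response and doubles the delay each time. A stand-alone sketch of that backoff (try_convert() is a stand-in; the delays are only printed here, not slept):

#include <stdio.h>

/* Pretend the card stays busy for the first few attempts. */
static int try_convert(int attempt)
{
        return attempt < 4 ? -16 /* EBUSY */ : 0;
}

int main(void)
{
        int rc = -5 /* EIO */, total = 0;

        for (int i = 0; rc && i < 5; i++) {
                if (rc == -16) {
                        int delay = (1 << i) * 100;     /* 200, 400, 800, 1600 ms */

                        total += delay;
                        printf("attempt %d: wait %d ms\n", i + 1, delay);
                }
                rc = try_convert(i);
        }
        printf("total backoff %d ms, rc=%d\n", total, rc);
        return 0;
}

The four possible waits add up to the 3 s mentioned in the comment.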
+ * Please note this function may be invoked concurrently with the very + * same tfm context. The pk_lock spinlock in the context ensures an + * atomic update of the pk and the pk state but does not guarantee any + * order of update. So a fresh converted valid protected key may get + * updated with an 'old' expired key value. As the cpacf instructions + * detect this, refuse to operate with an invalid key and the calling + * code triggers a (re-)conversion this does no harm. This may lead to + * unnecessary additional conversion but never to invalid data on the + * hash operation. + */ +static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx) +{ + struct phmac_protkey pk; + int rc; + + spin_lock_bh(&tfm_ctx->pk_lock); + tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; + spin_unlock_bh(&tfm_ctx->pk_lock); + + rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk); + + /* update context */ + spin_lock_bh(&tfm_ctx->pk_lock); + if (rc) { + tfm_ctx->pk_state = rc; + } else { + tfm_ctx->pk_state = PK_STATE_VALID; + tfm_ctx->pk = pk; + } + spin_unlock_bh(&tfm_ctx->pk_lock); + + memzero_explicit(&pk, sizeof(pk)); + pr_debug("rc=%d\n", rc); + return rc; +} + +/* + * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize + */ +static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo, + u64 buflen_hi, unsigned int blocksize) +{ + u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize); + + switch (blocksize) { + case SHA256_BLOCK_SIZE: + *(u64 *)imbl = buflen_lo * BITS_PER_BYTE; + break; + case SHA512_BLOCK_SIZE: + *(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3; + break; + default: + break; + } +} + +static int phmac_kmac_update(struct ahash_request *req, bool maysleep) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + unsigned int bs = crypto_ahash_blocksize(tfm); + unsigned int offset, k, n; + int rc = 0; + + /* + * The walk is always mapped when this function is called. + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + + while (hwh->walkbytes > 0) { + /* check sha2 context buffer */ + offset = ctx->buflen[0] % bs; + if (offset + hwh->walkbytes < bs) + goto store; + + if (offset) { + /* fill ctx buffer up to blocksize and process this block */ + n = bs - offset; + memcpy(ctx->buf + offset, hwh->walkaddr, n); + ctx->gr0.iimp = 1; + for (;;) { + k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs); + if (likely(k == bs)) + break; + if (unlikely(k > 0)) { + /* + * Can't deal with hunks smaller than blocksize. + * And kmac should always return the nr of + * processed bytes as 0 or a multiple of the + * blocksize. 
+ */ + rc = -EIO; + goto out; + } + /* protected key is invalid and needs re-conversion */ + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + } + ctx->buflen[0] += n; + if (ctx->buflen[0] < n) + ctx->buflen[1]++; + rc = hwh_advance(hwh, n); + if (unlikely(rc)) + goto out; + offset = 0; + } + + /* process as many blocks as possible from the walk */ + while (hwh->walkbytes >= bs) { + n = (hwh->walkbytes / bs) * bs; + ctx->gr0.iimp = 1; + k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n); + if (likely(k > 0)) { + ctx->buflen[0] += k; + if (ctx->buflen[0] < k) + ctx->buflen[1]++; + rc = hwh_advance(hwh, k); + if (unlikely(rc)) + goto out; + } + if (unlikely(k < n)) { + /* protected key is invalid and needs re-conversion */ + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + } + } + +store: + /* store incomplete block in context buffer */ + if (hwh->walkbytes) { + memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes); + ctx->buflen[0] += hwh->walkbytes; + if (ctx->buflen[0] < hwh->walkbytes) + ctx->buflen[1]++; + rc = hwh_advance(hwh, hwh->walkbytes); + if (unlikely(rc)) + goto out; + } + + } /* end of while (hwh->walkbytes > 0) */ + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_kmac_final(struct ahash_request *req, bool maysleep) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + unsigned int ds = crypto_ahash_digestsize(tfm); + unsigned int bs = crypto_ahash_blocksize(tfm); + unsigned int k, n; + int rc = 0; + + n = ctx->buflen[0] % bs; + ctx->gr0.iimp = 0; + kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs); + for (;;) { + k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n); + if (likely(k == n)) + break; + if (unlikely(k > 0)) { + /* Can't deal with hunks smaller than blocksize. */ + rc = -EIO; + goto out; + } + /* protected key is invalid and needs re-conversion */ + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + } + + memcpy(req->result, ctx->param, ds); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + unsigned int bs = crypto_ahash_blocksize(tfm); + int rc = 0; + + /* zero request context (includes the kmac sha2 context) */ + memset(req_ctx, 0, sizeof(*req_ctx)); + + /* + * setkey() should have set a valid fc into the tfm context. + * Copy this function code into the gr0 field of the kmac context. 
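phmac_kmac_update() and phmac_kmac_final() above follow the usual block-buffering scheme: update() feeds only whole blocks to KMAC (with iimp set) and stashes the remainder in ctx->buf, final() then supplies the total bit length and flushes the tail. A kernel-independent sketch of that bookkeeping (process_blocks() stands in for the KMAC call):

#include <stdio.h>
#include <string.h>

#define BS 64   /* block size */

struct buffered_hash {
        unsigned char buf[BS];
        unsigned long long total;       /* bytes seen so far */
        void (*process_blocks)(const unsigned char *p, unsigned int len);
};

/* Feed data; only multiples of BS ever reach process_blocks(). */
static void bh_update(struct buffered_hash *h, const unsigned char *p, unsigned int len)
{
        unsigned int fill = h->total % BS;

        h->total += len;
        if (fill && fill + len >= BS) {         /* complete the buffered block first */
                memcpy(h->buf + fill, p, BS - fill);
                h->process_blocks(h->buf, BS);
                p += BS - fill;
                len -= BS - fill;
                fill = 0;
        }
        if (len >= BS) {                        /* then as many full blocks as possible */
                h->process_blocks(p, len - len % BS);
                p += len - len % BS;
                len %= BS;
        }
        memcpy(h->buf + fill, p, len);          /* keep the tail for final() */
}

static void count_blocks(const unsigned char *p, unsigned int len)
{
        (void)p;
        printf("%u full block(s) processed\n", len / BS);
}

int main(void)
{
        struct buffered_hash h = { .process_blocks = count_blocks };
        unsigned char msg[200] = { 0 };

        bh_update(&h, msg, 70);         /* 1 full block, 6 bytes buffered */
        bh_update(&h, msg, 130);        /* 2 more full blocks, 8 bytes buffered */
        return 0;
}

A final step would then process the remaining total % BS bytes with the message bit length (8 * total) written into the param block, which is what kmac_sha2_set_imbl() provides.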
+ */ + if (!tfm_ctx->fc) { + rc = -ENOKEY; + goto out; + } + kmac_ctx->gr0.fc = tfm_ctx->fc; + + /* + * Copy the pk from tfm ctx into kmac ctx. The protected key + * may be outdated but update() and final() will handle this. + */ + spin_lock_bh(&tfm_ctx->pk_lock); + memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs), + tfm_ctx->pk.protkey, tfm_ctx->pk.len); + spin_unlock_bh(&tfm_ctx->pk_lock); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_update(struct ahash_request *req) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + int rc; + + /* prep the walk in the request context */ + rc = hwh_prepare(req, hwh); + if (rc) + goto out; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_update(req, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&tfm_ctx->via_engine_ctr); + rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&tfm_ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) { + hwh_advance(hwh, rc); + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_final(struct ahash_request *req) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + int rc = 0; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_final(req, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. 
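The pattern above tries the operation inline and only hands the request to the crypto engine when the protected key has expired or other requests are already queued, with via_engine_ctr keeping ordering. A minimal user-space sketch of that gate using C11 atomics (run_sync() and enqueue() are stand-ins, errno values hard-coded):

#include <stdatomic.h>
#include <stdio.h>

#define EKEYEXPIRED 127
#define EINPROGRESS 115

static atomic_int via_engine_ctr;

/* Stand-ins for the CPACF fast path and the crypto-engine queue. */
static int run_sync(int req) { return req ? -EKEYEXPIRED : 0; }
static int enqueue(int req)  { (void)req; return -EINPROGRESS; }

static int submit(int req)
{
        int rc = 0;

        if (atomic_load(&via_engine_ctr) == 0) {
                rc = run_sync(req);
                if (rc == 0)
                        return 0;                       /* handled inline */
                if (rc != -EKEYEXPIRED)
                        return rc;                      /* hard error, don't queue */
        }

        atomic_fetch_add(&via_engine_ctr, 1);           /* serialize via the engine */
        rc = enqueue(req);
        if (rc != -EINPROGRESS)
                atomic_fetch_sub(&via_engine_ctr, 1);   /* queueing failed, undo */
        return rc;
}

int main(void)
{
        printf("%d\n", submit(0));      /* 0: handled inline */
        printf("%d\n", submit(1));      /* -115: key expired, queued on the engine */
        return 0;
}

In the driver the counter is decremented again in phmac_do_one_request() once the engine has finished the request.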
+ */ + if (rc == 0 || rc == -EKEYEXPIRED) { + req->nbytes = 0; + req_ctx->final = true; + atomic_inc(&tfm_ctx->via_engine_ctr); + rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&tfm_ctx->via_engine_ctr); + } + +out: + if (rc != -EINPROGRESS) + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_finup(struct ahash_request *req) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + int rc; + + /* prep the walk in the request context */ + rc = hwh_prepare(req, hwh); + if (rc) + goto out; + + /* Try synchronous operations if no active engine usage */ + if (!atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_update(req, false); + if (rc == 0) + req->nbytes = 0; + } + if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) { + rc = phmac_kmac_final(req, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + req_ctx->final = true; + atomic_inc(&tfm_ctx->via_engine_ctr); + rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&tfm_ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + hwh_advance(hwh, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_digest(struct ahash_request *req) +{ + int rc; + + rc = phmac_init(req); + if (rc) + goto out; + + rc = phmac_finup(req); + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_setkey(struct crypto_ahash *tfm, + const u8 *key, unsigned int keylen) +{ + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + unsigned int ds = crypto_ahash_digestsize(tfm); + unsigned int bs = crypto_ahash_blocksize(tfm); + unsigned int tmpkeylen; + u8 *tmpkey = NULL; + int rc = 0; + + if (!crypto_ahash_tested(tfm)) { + /* + * selftest running: key is a raw hmac clear key and needs + * to get embedded into a 'clear key token' in order to have + * it correctly processed by the pkey module. 
+ */ + tmpkeylen = sizeof(struct hmac_clrkey_token) + bs; + tmpkey = kzalloc(tmpkeylen, GFP_KERNEL); + if (!tmpkey) { + rc = -ENOMEM; + goto out; + } + rc = make_clrkey_token(key, keylen, ds, tmpkey); + if (rc) + goto out; + keylen = tmpkeylen; + key = tmpkey; + } + + /* copy raw key into tfm context */ + rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen); + if (rc) + goto out; + + /* convert raw key into protected key */ + rc = phmac_convert_key(tfm_ctx); + if (rc) + goto out; + + /* set function code in tfm context, check for valid pk type */ + switch (ds) { + case SHA224_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224; + break; + case SHA256_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256; + break; + case SHA384_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384; + break; + case SHA512_DIGEST_SIZE: + if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024) + rc = -EINVAL; + else + tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512; + break; + default: + tfm_ctx->fc = 0; + rc = -EINVAL; + } + +out: + kfree(tmpkey); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int phmac_export(struct ahash_request *req, void *out) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + + memcpy(out, ctx, sizeof(*ctx)); + + return 0; +} + +static int phmac_import(struct ahash_request *req, const void *in) +{ + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx; + + memset(req_ctx, 0, sizeof(*req_ctx)); + memcpy(ctx, in, sizeof(*ctx)); + + return 0; +} + +static int phmac_init_tfm(struct crypto_ahash *tfm) +{ + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + + memset(tfm_ctx, 0, sizeof(*tfm_ctx)); + spin_lock_init(&tfm_ctx->pk_lock); + + crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx)); + + return 0; +} + +static void phmac_exit_tfm(struct crypto_ahash *tfm) +{ + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + + memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf)); + memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk)); +} + +static int phmac_do_one_request(struct crypto_engine *engine, void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm); + struct phmac_req_ctx *req_ctx = ahash_request_ctx(req); + struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx; + struct hash_walk_helper *hwh = &req_ctx->hwh; + int rc = -EINVAL; + + /* + * Three kinds of requests come in here: + * update when req->nbytes > 0 and req_ctx->final is false + * final when req->nbytes = 0 and req_ctx->final is true + * finup when req->nbytes > 0 and req_ctx->final is true + * For update and finup the hwh walk needs to be prepared and + * up to date but the actual nr of bytes in req->nbytes may be + * any non zero number. For final there is no hwh walk needed. + */ + + if (req->nbytes) { + rc = phmac_kmac_update(req, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell scheduler to voluntarily give up the CPU here. 
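Returning -ENOSPC from the engine callback, as explained above, just asks the engine to retry the request once the key conversion has had a chance to finish, and cond_resched() keeps that retry from monopolising the CPU. A toy version of the same retry-and-yield idea (nothing here is kernel API):

#include <sched.h>
#include <stdio.h>

#define AGAIN 1         /* transient: key is being re-converted, try again later */

/* Hypothetical handler that succeeds once the key has been refreshed. */
static int handle(int *key_valid)
{
        if (!*key_valid) {
                *key_valid = 1;         /* pretend the conversion completed */
                return AGAIN;
        }
        return 0;
}

int main(void)
{
        int key_valid = 0, rc;

        do {
                rc = handle(&key_valid);
                if (rc == AGAIN)
                        sched_yield();  /* like cond_resched(): give up the CPU */
        } while (rc == AGAIN);
        printf("done, rc=%d\n", rc);
        return 0;
}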
+ */ + pr_debug("rescheduling request\n"); + cond_resched(); + return -ENOSPC; + } else if (rc) { + hwh_advance(hwh, rc); + goto out; + } + req->nbytes = 0; + } + + if (req_ctx->final) { + rc = phmac_kmac_final(req, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell scheduler to voluntarily give up the CPU here. + */ + pr_debug("rescheduling request\n"); + cond_resched(); + return -ENOSPC; + } + } + +out: + if (rc || req_ctx->final) + memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&tfm_ctx->via_engine_ctr); + crypto_finalize_hash_request(engine, req, rc); + local_bh_enable(); + return rc; +} + +#define S390_ASYNC_PHMAC_ALG(x) \ +{ \ + .base = { \ + .init = phmac_init, \ + .update = phmac_update, \ + .final = phmac_final, \ + .finup = phmac_finup, \ + .digest = phmac_digest, \ + .setkey = phmac_setkey, \ + .import = phmac_import, \ + .export = phmac_export, \ + .init_tfm = phmac_init_tfm, \ + .exit_tfm = phmac_exit_tfm, \ + .halg = { \ + .digestsize = SHA##x##_DIGEST_SIZE, \ + .statesize = sizeof(struct kmac_sha2_ctx), \ + .base = { \ + .cra_name = "phmac(sha" #x ")", \ + .cra_driver_name = "phmac_s390_sha" #x, \ + .cra_blocksize = SHA##x##_BLOCK_SIZE, \ + .cra_priority = 400, \ + .cra_flags = CRYPTO_ALG_ASYNC | \ + CRYPTO_ALG_NO_FALLBACK, \ + .cra_ctxsize = sizeof(struct phmac_tfm_ctx), \ + .cra_module = THIS_MODULE, \ + }, \ + }, \ + }, \ + .op = { \ + .do_one_request = phmac_do_one_request, \ + }, \ +} + +static struct phmac_alg { + unsigned int fc; + struct ahash_engine_alg alg; + bool registered; +} phmac_algs[] = { + { + .fc = CPACF_KMAC_PHMAC_SHA_224, + .alg = S390_ASYNC_PHMAC_ALG(224), + }, { + .fc = CPACF_KMAC_PHMAC_SHA_256, + .alg = S390_ASYNC_PHMAC_ALG(256), + }, { + .fc = CPACF_KMAC_PHMAC_SHA_384, + .alg = S390_ASYNC_PHMAC_ALG(384), + }, { + .fc = CPACF_KMAC_PHMAC_SHA_512, + .alg = S390_ASYNC_PHMAC_ALG(512), + } +}; + +static struct miscdevice phmac_dev = { + .name = "phmac", + .minor = MISC_DYNAMIC_MINOR, +}; + +static void s390_phmac_exit(void) +{ + struct phmac_alg *phmac; + int i; + + if (phmac_crypto_engine) { + crypto_engine_stop(phmac_crypto_engine); + crypto_engine_exit(phmac_crypto_engine); + } + + for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) { + phmac = &phmac_algs[i]; + if (phmac->registered) + crypto_engine_unregister_ahash(&phmac->alg); + } + + misc_deregister(&phmac_dev); +} + +static int __init s390_phmac_init(void) +{ + struct phmac_alg *phmac; + int i, rc; + + /* for selftest cpacf klmd subfunction is needed */ + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256)) + return -ENODEV; + if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512)) + return -ENODEV; + + /* register a simple phmac pseudo misc device */ + rc = misc_register(&phmac_dev); + if (rc) + return rc; + + /* with this pseudo device alloc and start a crypto engine */ + phmac_crypto_engine = + crypto_engine_alloc_init_and_set(phmac_dev.this_device, + true, false, MAX_QLEN); + if (!phmac_crypto_engine) { + rc = -ENOMEM; + goto out_err; + } + rc = crypto_engine_start(phmac_crypto_engine); + if (rc) { + crypto_engine_exit(phmac_crypto_engine); + phmac_crypto_engine = NULL; + goto out_err; + } + + for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) { + phmac = &phmac_algs[i]; + if 
(!cpacf_query_func(CPACF_KMAC, phmac->fc)) + continue; + rc = crypto_engine_register_ahash(&phmac->alg); + if (rc) + goto out_err; + phmac->registered = true; + pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name); + } + + return 0; + +out_err: + s390_phmac_exit(); + return rc; +} + +module_init(s390_phmac_init); +module_exit(s390_phmac_exit); + +MODULE_ALIAS_CRYPTO("phmac(sha224)"); +MODULE_ALIAS_CRYPTO("phmac(sha256)"); +MODULE_ALIAS_CRYPTO("phmac(sha384)"); +MODULE_ALIAS_CRYPTO("phmac(sha512)"); + +MODULE_DESCRIPTION("S390 HMAC driver for protected keys"); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h index d757ccbce2b4..cadb4b13622a 100644 --- a/arch/s390/crypto/sha.h +++ b/arch/s390/crypto/sha.h @@ -27,6 +27,9 @@ struct s390_sha_ctx { u64 state[SHA512_DIGEST_SIZE / sizeof(u64)]; u64 count_hi; } sha512; + struct { + __le64 state[SHA3_STATE_SIZE / sizeof(u64)]; + } sha3; }; int func; /* KIMD function to use */ bool first_message_part; diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c index 4a7731ac6bcd..03bb4f4bab70 100644 --- a/arch/s390/crypto/sha3_256_s390.c +++ b/arch/s390/crypto/sha3_256_s390.c @@ -35,23 +35,33 @@ static int sha3_256_init(struct shash_desc *desc) static int sha3_256_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - struct sha3_state *octx = out; + union { + u8 *u8; + u64 *u64; + } p = { .u8 = out }; + int i; if (sctx->first_message_part) { - memset(sctx->state, 0, sizeof(sctx->state)); - sctx->first_message_part = 0; + memset(out, 0, SHA3_STATE_SIZE); + return 0; } - memcpy(octx->st, sctx->state, sizeof(octx->st)); + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + put_unaligned(le64_to_cpu(sctx->sha3.state[i]), p.u64++); return 0; } static int sha3_256_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - const struct sha3_state *ictx = in; - + union { + const u8 *u8; + const u64 *u64; + } p = { .u8 = in }; + int i; + + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + sctx->sha3.state[i] = cpu_to_le64(get_unaligned(p.u64++)); sctx->count = 0; - memcpy(sctx->state, ictx->st, sizeof(ictx->st)); sctx->first_message_part = 0; sctx->func = CPACF_KIMD_SHA3_256; diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c index 018f02fff444..a5c9690eecb1 100644 --- a/arch/s390/crypto/sha3_512_s390.c +++ b/arch/s390/crypto/sha3_512_s390.c @@ -34,24 +34,33 @@ static int sha3_512_init(struct shash_desc *desc) static int sha3_512_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - struct sha3_state *octx = out; - + union { + u8 *u8; + u64 *u64; + } p = { .u8 = out }; + int i; if (sctx->first_message_part) { - memset(sctx->state, 0, sizeof(sctx->state)); - sctx->first_message_part = 0; + memset(out, 0, SHA3_STATE_SIZE); + return 0; } - memcpy(octx->st, sctx->state, sizeof(octx->st)); + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + put_unaligned(le64_to_cpu(sctx->sha3.state[i]), p.u64++); return 0; } static int sha3_512_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - const struct sha3_state *ictx = in; - + union { + const u8 *u8; + const u64 *u64; + } p = { .u8 = in }; + int i; + + for (i = 0; i < SHA3_STATE_SIZE / 8; i++) + sctx->sha3.state[i] = cpu_to_le64(get_unaligned(p.u64++)); sctx->count = 0; - memcpy(sctx->state, ictx->st, sizeof(ictx->st)); sctx->first_message_part = 0; sctx->func = 
CPACF_KIMD_SHA3_512; diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index 395b02d6a133..352108727d7e 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -103,7 +103,7 @@ struct ap_tapq_hwinfo { unsigned int accel : 1; /* A */ unsigned int ep11 : 1; /* X */ unsigned int apxa : 1; /* APXA */ - unsigned int : 1; + unsigned int slcf : 1; /* Cmd filtering avail. */ unsigned int class : 8; unsigned int bs : 2; /* SE bind/assoc */ unsigned int : 14; diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index 54cb97603ec0..4bc5317fbb12 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -129,6 +129,10 @@ #define CPACF_KMAC_HMAC_SHA_256 0x71 #define CPACF_KMAC_HMAC_SHA_384 0x72 #define CPACF_KMAC_HMAC_SHA_512 0x73 +#define CPACF_KMAC_PHMAC_SHA_224 0x78 +#define CPACF_KMAC_PHMAC_SHA_256 0x79 +#define CPACF_KMAC_PHMAC_SHA_384 0x7a +#define CPACF_KMAC_PHMAC_SHA_512 0x7b /* * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT) diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 84f6b8357b45..96af7d964014 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -16,10 +16,9 @@ * For 64 bit module code, the module may be more than 4G above the * per cpu area, use weak definitions to force the compiler to * generate external references. + * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU + * in the Kconfig. */ -#if defined(MODULE) -#define ARCH_NEEDS_WEAK_PER_CPU -#endif /* * We use a compare-and-swap loop since that uses less cpu cycles than diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 6d8bc27a366e..c1a7a92f0575 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -963,6 +963,12 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); } +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +#define pmd_swp_soft_dirty(pmd) pmd_soft_dirty(pmd) +#define pmd_swp_mksoft_dirty(pmd) pmd_mksoft_dirty(pmd) +#define pmd_swp_clear_soft_dirty(pmd) pmd_clear_soft_dirty(pmd) +#endif + /* * query functions pte_write/pte_dirty/pte_young only work if * pte_present() is true. Undefined behaviour if not.. @@ -1979,6 +1985,45 @@ static inline unsigned long __swp_offset_rste(swp_entry_t entry) #define __rste_to_swp_entry(rste) ((swp_entry_t) { rste }) +/* + * s390 has different layout for PTE and region / segment table entries (RSTE). + * This is also true for swap entries, and their swap type and offset encoding. + * For hugetlbfs PTE_MARKER support, s390 has internal __swp_type_rste() and + * __swp_offset_rste() helpers to correctly handle RSTE swap entries. + * + * But common swap code does not know about this difference, and only uses + * __swp_type(), __swp_offset() and __swp_entry() helpers for conversion between + * arch-dependent and arch-independent representation of swp_entry_t for all + * pagetable levels. On s390, those helpers only work for PTE swap entries. + * + * Therefore, implement __pmd_to_swp_entry() to build a fake PTE swap entry + * and return the arch-dependent representation of that. Correspondingly, + * implement __swp_entry_to_pmd() to convert that into a proper PMD swap + * entry again. With this, the arch-dependent swp_entry_t representation will + * always look like a PTE swap entry in common code. 
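In other words, both directions pass through the common (type, offset) pair: decode with the RSTE helpers, re-encode in PTE format, and reverse the steps on the way back, which is what the two helpers that follow implement. A stand-alone sketch of such a lossless round trip with made-up bit layouts (not the real s390 encodings):

#include <assert.h>
#include <stdint.h>

/* Two different swap-entry encodings of the same (type, offset) pair. */
static uint64_t pte_encode(unsigned int type, uint64_t off) { return off << 7 | type; }
static unsigned int pte_type(uint64_t e)                    { return e & 0x7f; }
static uint64_t pte_offset(uint64_t e)                      { return e >> 7; }

static uint64_t rste_encode(unsigned int type, uint64_t off) { return (uint64_t)type << 52 | off; }
static unsigned int rste_type(uint64_t e)                    { return e >> 52; }
static uint64_t rste_offset(uint64_t e)                      { return e & ((1ULL << 52) - 1); }

/* Conceptually what __pmd_to_swp_entry() / __swp_entry_to_pmd() do. */
static uint64_t rste_to_common(uint64_t rste) { return pte_encode(rste_type(rste), rste_offset(rste)); }
static uint64_t common_to_rste(uint64_t pte)  { return rste_encode(pte_type(pte), pte_offset(pte)); }

int main(void)
{
        uint64_t rste = rste_encode(3, 0x12345);

        assert(common_to_rste(rste_to_common(rste)) == rste);  /* lossless round trip */
        return 0;
}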
+ * + * This is somewhat similar to fake PTEs in hugetlbfs code for s390, but only + * requires conversion of the swap type and offset, and not all the possible + * PTE bits. + */ +static inline swp_entry_t __pmd_to_swp_entry(pmd_t pmd) +{ + swp_entry_t arch_entry; + pte_t pte; + + arch_entry = __rste_to_swp_entry(pmd_val(pmd)); + pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry)); + return __pte_to_swp_entry(pte); +} + +static inline pmd_t __swp_entry_to_pmd(swp_entry_t arch_entry) +{ + pmd_t pmd; + + pmd = __pmd(mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry))); + return pmd; +} + extern int vmem_add_mapping(unsigned long start, unsigned long size); extern void vmem_remove_mapping(unsigned long start, unsigned long size); extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc); diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 2a41be2f7925..c62100dc62c8 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1677,7 +1677,7 @@ EXPORT_SYMBOL(debug_dflt_header_fn); /* * prints debug data sprintf-formatted: - * debug_sprinf_event/exception calls must be used together with this view + * debug_sprintf_event/exception calls must be used together with this view */ #define DEBUG_SPRINTF_MAX_ARGS 10 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index f244c5560e7f..7b529868789f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -605,7 +605,7 @@ static void __init reserve_crashkernel(void) int rc; rc = parse_crashkernel(boot_command_line, ident_map_size, - &crash_size, &crash_base, NULL, NULL); + &crash_size, &crash_base, NULL, NULL, NULL); crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); @@ -719,6 +719,11 @@ static void __init memblock_add_physmem_info(void) memblock_set_node(0, ULONG_MAX, &memblock.memory, 0); } +static void __init setup_high_memory(void) +{ + high_memory = __va(ident_map_size); +} + /* * Reserve memory used for lowcore. 
*/ @@ -951,6 +956,7 @@ void __init setup_arch(char **cmdline_p) free_physmem_info(); setup_memory_end(); + setup_high_memory(); memblock_dump_all(); setup_memory(); diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index ac604b176660..9af2aae0a515 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -247,11 +247,9 @@ static int ptdump_show(struct seq_file *m, void *v) .marker = markers, }; - get_online_mems(); mutex_lock(&cpa_mutex); ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); mutex_unlock(&cpa_mutex); - put_online_mems(); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 448dd6ed1069..f48ef361bc83 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -64,13 +64,12 @@ void *vmem_crst_alloc(unsigned long val) pte_t __ref *vmem_pte_alloc(void) { - unsigned long size = PTRS_PER_PTE * sizeof(pte_t); pte_t *pte; if (slab_is_available()) - pte = (pte_t *) page_table_alloc(&init_mm); + pte = (pte_t *)page_table_alloc(&init_mm); else - pte = (pte_t *) memblock_alloc(size, size); + pte = (pte_t *)memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pte) return NULL; memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE); diff --git a/arch/sh/Makefile b/arch/sh/Makefile index cab2f9c011a8..7b420424b6d7 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -103,16 +103,16 @@ UTS_MACHINE := sh LDFLAGS_vmlinux += -e _stext ifdef CONFIG_CPU_LITTLE_ENDIAN -ld-bfd := elf32-sh-linux -LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd) +ld_bfd := elf32-sh-linux +LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld_bfd) KBUILD_LDFLAGS += -EL else -ld-bfd := elf32-shbig-linux -LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd) +ld_bfd := elf32-shbig-linux +LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld_bfd) KBUILD_LDFLAGS += -EB endif -export ld-bfd +export ld_bfd # Mach groups machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index 8bc319ff54bf..58df491778b2 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -27,7 +27,7 @@ endif ccflags-remove-$(CONFIG_MCOUNT) += -pg -LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \ +LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(IMAGE_OFFSET) -e startup \ -T $(obj)/../../kernel/vmlinux.lds KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING @@ -51,7 +51,7 @@ $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE OBJCOPYFLAGS += -R .empty_zero_page -LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T +LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE $(call if_changed,ld) diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile index c7c8be58400c..17b03df0a8de 100644 --- a/arch/sh/boot/romimage/Makefile +++ b/arch/sh/boot/romimage/Makefile @@ -13,7 +13,7 @@ mmcif-obj-$(CONFIG_CPU_SUBTYPE_SH7724) := $(obj)/mmcif-sh7724.o load-$(CONFIG_ROMIMAGE_MMCIF) := $(mmcif-load-y) obj-$(CONFIG_ROMIMAGE_MMCIF) := $(mmcif-obj-y) -LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(load-y) -e romstart \ +LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(load-y) -e romstart \ -T $(obj)/../../kernel/vmlinux.lds $(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE @@ -24,7 +24,7 @@ OBJCOPYFLAGS += -j .empty_zero_page $(obj)/zeropage.bin: vmlinux FORCE $(call if_changed,objcopy) -LDFLAGS_piggy.o 
:= -r --format binary --oformat $(ld-bfd) -T +LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE $(call if_changed,ld) diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 8321b31d2e19..37073ca1e0ad 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -146,7 +146,7 @@ void __init reserve_crashkernel(void) return; ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), - &crash_size, &crash_base, NULL, NULL); + &crash_size, &crash_base, NULL, NULL, NULL); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index beda7f177c54..7b595092cbfb 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -96,6 +96,7 @@ config SPARC64 select HAVE_ARCH_AUDITSYSCALL select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC + select ARCH_SUPPORTS_HUGETLBFS select HAVE_NMI select HAVE_REGS_AND_STACK_ACCESS_API select ARCH_USE_QUEUED_RWLOCKS diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index e7a9cdd498dc..d3bc16fbcbbd 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -50,11 +50,6 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, return changed; } -#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE -void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, - unsigned long end, unsigned long floor, - unsigned long ceiling); - #include <asm-generic/hugetlb.h> #endif /* _ASM_SPARC64_HUGETLB_H */ diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h index af9c10c83dc5..3e4bac33be81 100644 --- a/arch/sparc/include/asm/mman.h +++ b/arch/sparc/include/asm/mman.h @@ -28,7 +28,7 @@ static inline void ipi_set_tstate_mcde(void *arg) } #define arch_calc_vm_prot_bits(prot, pkey) sparc_calc_vm_prot_bits(prot) -static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot) +static inline vm_flags_t sparc_calc_vm_prot_bits(unsigned long prot) { if (adi_capable() && (prot & PROT_ADI)) { struct pt_regs *regs; @@ -58,7 +58,7 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr) /* arch_validate_flags() - Ensure combination of flags is valid for a * VMA. 
*/ -static inline bool arch_validate_flags(unsigned long vm_flags) +static inline bool arch_validate_flags(vm_flags_t vm_flags) { /* If ADI is being enabled on this VMA, check for ADI * capability on the platform and ensure VMA is suitable diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 80504148d8a5..4b9431311e05 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -295,122 +295,3 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, return entry; } - -static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, - unsigned long addr) -{ - pgtable_t token = pmd_pgtable(*pmd); - - pmd_clear(pmd); - pte_free_tlb(tlb, token, addr); - mm_dec_nr_ptes(tlb->mm); -} - -static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pmd_t *pmd; - unsigned long next; - unsigned long start; - - start = addr; - pmd = pmd_offset(pud, addr); - do { - next = pmd_addr_end(addr, end); - if (pmd_none(*pmd)) - continue; - if (is_hugetlb_pmd(*pmd)) - pmd_clear(pmd); - else - hugetlb_free_pte_range(tlb, pmd, addr); - } while (pmd++, addr = next, addr != end); - - start &= PUD_MASK; - if (start < floor) - return; - if (ceiling) { - ceiling &= PUD_MASK; - if (!ceiling) - return; - } - if (end - 1 > ceiling - 1) - return; - - pmd = pmd_offset(pud, start); - pud_clear(pud); - pmd_free_tlb(tlb, pmd, start); - mm_dec_nr_pmds(tlb->mm); -} - -static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pud_t *pud; - unsigned long next; - unsigned long start; - - start = addr; - pud = pud_offset(p4d, addr); - do { - next = pud_addr_end(addr, end); - if (pud_none_or_clear_bad(pud)) - continue; - if (is_hugetlb_pud(*pud)) - pud_clear(pud); - else - hugetlb_free_pmd_range(tlb, pud, addr, next, floor, - ceiling); - } while (pud++, addr = next, addr != end); - - start &= PGDIR_MASK; - if (start < floor) - return; - if (ceiling) { - ceiling &= PGDIR_MASK; - if (!ceiling) - return; - } - if (end - 1 > ceiling - 1) - return; - - pud = pud_offset(p4d, start); - p4d_clear(p4d); - pud_free_tlb(tlb, pud, start); - mm_dec_nr_puds(tlb->mm); -} - -void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pgd_t *pgd; - p4d_t *p4d; - unsigned long next; - - addr &= PMD_MASK; - if (addr < floor) { - addr += PMD_SIZE; - if (!addr) - return; - } - if (ceiling) { - ceiling &= PMD_MASK; - if (!ceiling) - return; - } - if (end - 1 > ceiling - 1) - end -= PMD_SIZE; - if (addr > end - 1) - return; - - pgd = pgd_offset(tlb->mm, addr); - p4d = p4d_offset(pgd, addr); - do { - next = p4d_addr_end(addr, end); - if (p4d_none_or_clear_bad(p4d)) - continue; - hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling); - } while (p4d++, addr = next, addr != end); -} diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 25ae4c897aae..7ed58bf3aaca 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -3201,7 +3201,7 @@ void copy_highpage(struct page *to, struct page *from) } EXPORT_SYMBOL(copy_highpage); -pgprot_t vm_get_page_prot(unsigned long vm_flags) +pgprot_t vm_get_page_prot(vm_flags_t vm_flags) { unsigned long prot = pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 
08e511657f05..58d890fe2100 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -99,7 +99,6 @@ config X86 select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PMEM_API if X86_64 select ARCH_HAS_PREEMPT_LAZY - select ARCH_HAS_PTE_DEVMAP if X86_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_HW_PTE_YOUNG select ARCH_HAS_NONLEAF_PMD_YOUNG if PGTABLE_LEVELS > 2 @@ -124,6 +123,7 @@ config X86 select ARCH_SUPPORTS_ACPI select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_PAGE_TABLE_CHECK if X86_64 select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096 diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index f1b6d40154e3..f1adfba1a76e 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -104,10 +104,12 @@ static void crypto_aegis128_aesni_process_ad( } } -static __always_inline void +static __always_inline int crypto_aegis128_aesni_process_crypt(struct aegis_state *state, struct skcipher_walk *walk, bool enc) { + int err = 0; + while (walk->nbytes >= AEGIS128_BLOCK_SIZE) { if (enc) aegis128_aesni_enc(state, walk->src.virt.addr, @@ -119,7 +121,10 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state, walk->dst.virt.addr, round_down(walk->nbytes, AEGIS128_BLOCK_SIZE)); - skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE); + kernel_fpu_end(); + err = skcipher_walk_done(walk, + walk->nbytes % AEGIS128_BLOCK_SIZE); + kernel_fpu_begin(); } if (walk->nbytes) { @@ -131,8 +136,11 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state, aegis128_aesni_dec_tail(state, walk->src.virt.addr, walk->dst.virt.addr, walk->nbytes); - skcipher_walk_done(walk, 0); + kernel_fpu_end(); + err = skcipher_walk_done(walk, 0); + kernel_fpu_begin(); } + return err; } static struct aegis_ctx *crypto_aegis128_aesni_ctx(struct crypto_aead *aead) @@ -165,7 +173,7 @@ static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm, return 0; } -static __always_inline void +static __always_inline int crypto_aegis128_aesni_crypt(struct aead_request *req, struct aegis_block *tag_xor, unsigned int cryptlen, bool enc) @@ -174,20 +182,24 @@ crypto_aegis128_aesni_crypt(struct aead_request *req, struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm); struct skcipher_walk walk; struct aegis_state state; + int err; if (enc) - skcipher_walk_aead_encrypt(&walk, req, true); + err = skcipher_walk_aead_encrypt(&walk, req, false); else - skcipher_walk_aead_decrypt(&walk, req, true); + err = skcipher_walk_aead_decrypt(&walk, req, false); + if (err) + return err; kernel_fpu_begin(); aegis128_aesni_init(&state, &ctx->key, req->iv); crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_aesni_process_crypt(&state, &walk, enc); - aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen); - + err = crypto_aegis128_aesni_process_crypt(&state, &walk, enc); + if (err == 0) + aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); + return err; } static int crypto_aegis128_aesni_encrypt(struct aead_request *req) @@ -196,8 +208,11 @@ static int crypto_aegis128_aesni_encrypt(struct aead_request *req) struct aegis_block tag = {}; unsigned int authsize = crypto_aead_authsize(tfm); unsigned int cryptlen = req->cryptlen; + int err; - crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true); + err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true); + 
if (err) + return err; scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, authsize, 1); @@ -212,11 +227,14 @@ static int crypto_aegis128_aesni_decrypt(struct aead_request *req) struct aegis_block tag; unsigned int authsize = crypto_aead_authsize(tfm); unsigned int cryptlen = req->cryptlen - authsize; + int err; scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, authsize, 0); - crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false); + err = crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false); + if (err) + return err; return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0; } diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c index b4bddcd58457..007b250f774c 100644 --- a/arch/x86/crypto/aria_aesni_avx2_glue.c +++ b/arch/x86/crypto/aria_aesni_avx2_glue.c @@ -9,6 +9,7 @@ #include <crypto/aria.h> #include <linux/crypto.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/types.h> diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c index ab9b38d05332..4c88ef4eba82 100644 --- a/arch/x86/crypto/aria_aesni_avx_glue.c +++ b/arch/x86/crypto/aria_aesni_avx_glue.c @@ -9,6 +9,7 @@ #include <crypto/aria.h> #include <linux/crypto.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/types.h> diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index a7d162388142..5c321f255eb7 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -8,6 +8,7 @@ #include <crypto/algapi.h> #include <linux/crypto.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/module.h> #include <linux/types.h> diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c index 3bd37d664121..cbede120e5f2 100644 --- a/arch/x86/crypto/camellia_glue.c +++ b/arch/x86/crypto/camellia_glue.c @@ -10,6 +10,7 @@ #include <linux/unaligned.h> #include <linux/crypto.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c index dcfc0de333de..d587f05c3c8c 100644 --- a/arch/x86/crypto/curve25519-x86_64.c +++ b/arch/x86/crypto/curve25519-x86_64.c @@ -7,6 +7,7 @@ #include <crypto/curve25519.h> #include <crypto/internal/kpp.h> +#include <linux/export.h> #include <linux/types.h> #include <linux/jump_label.h> #include <linux/kernel.h> diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index e640abc1cb8a..9c8b3a335d5c 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -12,6 +12,7 @@ #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> +#include <linux/export.h> #include <crypto/algapi.h> #include <crypto/serpent.h> diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c index 72867fc49ce8..88caf418a06f 100644 --- a/arch/x86/crypto/sm4_aesni_avx_glue.c +++ b/arch/x86/crypto/sm4_aesni_avx_glue.c @@ -11,6 +11,7 @@ #include <asm/fpu/api.h> #include <linux/module.h> #include <linux/crypto.h> +#include <linux/export.h> #include <linux/kernel.h> #include <crypto/internal/skcipher.h> #include <crypto/sm4.h> diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c index 4c67184dc573..8e9906d36902 100644 --- a/arch/x86/crypto/twofish_glue.c 
+++ b/arch/x86/crypto/twofish_glue.c @@ -40,6 +40,7 @@ #include <crypto/algapi.h> #include <crypto/twofish.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c index 1a1ecfa7f72a..8ad77725bf60 100644 --- a/arch/x86/crypto/twofish_glue_3way.c +++ b/arch/x86/crypto/twofish_glue_3way.c @@ -9,6 +9,7 @@ #include <crypto/algapi.h> #include <crypto/twofish.h> #include <linux/crypto.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index f0e9acf72547..20fcb8507ad1 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -32,45 +32,42 @@ #ifdef CONFIG_GENERIC_BUG #ifdef CONFIG_X86_32 -# define __BUG_REL(val) ".long " __stringify(val) +# define __BUG_REL(val) ".long " val #else -# define __BUG_REL(val) ".long " __stringify(val) " - ." +# define __BUG_REL(val) ".long " val " - ." #endif #ifdef CONFIG_DEBUG_BUGVERBOSE +#define __BUG_ENTRY(file, line, flags) \ + "2:\t" __BUG_REL("1b") "\t# bug_entry::bug_addr\n" \ + "\t" __BUG_REL(file) "\t# bug_entry::file\n" \ + "\t.word " line "\t# bug_entry::line\n" \ + "\t.word " flags "\t# bug_entry::flags\n" +#else +#define __BUG_ENTRY(file, line, flags) \ + "2:\t" __BUG_REL("1b") "\t# bug_entry::bug_addr\n" \ + "\t.word " flags "\t# bug_entry::flags\n" +#endif + +#define _BUG_FLAGS_ASM(ins, file, line, flags, size, extra) \ + "1:\t" ins "\n" \ + ".pushsection __bug_table,\"aw\"\n" \ + __BUG_ENTRY(file, line, flags) \ + "\t.org 2b + " size "\n" \ + ".popsection\n" \ + extra #define _BUG_FLAGS(ins, flags, extra) \ do { \ - asm_inline volatile("1:\t" ins "\n" \ - ".pushsection __bug_table,\"aw\"\n" \ - "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ - "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \ - "\t.word %c1" "\t# bug_entry::line\n" \ - "\t.word %c2" "\t# bug_entry::flags\n" \ - "\t.org 2b+%c3\n" \ - ".popsection\n" \ - extra \ + asm_inline volatile(_BUG_FLAGS_ASM(ins, "%c0", \ + "%c1", "%c2", "%c3", extra) \ : : "i" (__FILE__), "i" (__LINE__), \ "i" (flags), \ "i" (sizeof(struct bug_entry))); \ } while (0) -#else /* !CONFIG_DEBUG_BUGVERBOSE */ - -#define _BUG_FLAGS(ins, flags, extra) \ -do { \ - asm_inline volatile("1:\t" ins "\n" \ - ".pushsection __bug_table,\"aw\"\n" \ - "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ - "\t.word %c0" "\t# bug_entry::flags\n" \ - "\t.org 2b+%c1\n" \ - ".popsection\n" \ - extra \ - : : "i" (flags), \ - "i" (sizeof(struct bug_entry))); \ -} while (0) - -#endif /* CONFIG_DEBUG_BUGVERBOSE */ +#define ARCH_WARN_ASM(file, line, flags, size) \ + _BUG_FLAGS_ASM(ASM_UD2, file, line, flags, size, "") #else @@ -92,11 +89,14 @@ do { \ * were to trigger, we'd rather wreck the machine in an attempt to get the * message out than not know about it. 
*/ + +#define ARCH_WARN_REACHABLE ANNOTATE_REACHABLE(1b) + #define __WARN_FLAGS(flags) \ do { \ __auto_type __flags = BUGFLAG_WARNING|(flags); \ instrumentation_begin(); \ - _BUG_FLAGS(ASM_UD2, __flags, ANNOTATE_REACHABLE(1b)); \ + _BUG_FLAGS(ASM_UD2, __flags, ARCH_WARN_REACHABLE); \ instrumentation_end(); \ } while (0) diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 3e51ba459154..1751f1eb95ef 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -116,8 +116,6 @@ struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); #define __bpfcall -extern u32 cfi_bpf_hash; -extern u32 cfi_bpf_subprog_hash; static inline int cfi_get_offset(void) { @@ -135,6 +133,8 @@ static inline int cfi_get_offset(void) #define cfi_get_offset cfi_get_offset extern u32 cfi_get_func_hash(void *func); +#define cfi_get_func_hash cfi_get_func_hash + extern int cfi_get_func_arity(void *func); #ifdef CONFIG_FINEIBT @@ -153,12 +153,6 @@ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { return BUG_TRAP_TYPE_NONE; } -#define cfi_bpf_hash 0U -#define cfi_bpf_subprog_hash 0U -static inline u32 cfi_get_func_hash(void *func) -{ - return 0; -} static inline int cfi_get_func_arity(void *func) { return 0; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 97954c936c54..e33df3da6980 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -301,16 +301,15 @@ static inline bool pmd_leaf(pmd_t pte) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */ static inline int pmd_trans_huge(pmd_t pmd) { - return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; + return (pmd_val(pmd) & _PAGE_PSE) == _PAGE_PSE; } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static inline int pud_trans_huge(pud_t pud) { - return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; + return (pud_val(pud) & _PAGE_PSE) == _PAGE_PSE; } #endif @@ -320,24 +319,6 @@ static inline int has_transparent_hugepage(void) return boot_cpu_has(X86_FEATURE_PSE); } -#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP -static inline int pmd_devmap(pmd_t pmd) -{ - return !!(pmd_val(pmd) & _PAGE_DEVMAP); -} - -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD -static inline int pud_devmap(pud_t pud) -{ - return !!(pud_val(pud) & _PAGE_DEVMAP); -} -#else -static inline int pud_devmap(pud_t pud) -{ - return 0; -} -#endif - #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP static inline bool pmd_special(pmd_t pmd) { @@ -361,12 +342,6 @@ static inline pud_t pud_mkspecial(pud_t pud) return pud_set_flags(pud, _PAGE_SPECIAL); } #endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */ - -static inline int pgd_devmap(pgd_t pgd) -{ - return 0; -} -#endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline pte_t pte_set_flags(pte_t pte, pteval_t set) @@ -527,11 +502,6 @@ static inline pte_t pte_mkspecial(pte_t pte) return pte_set_flags(pte, _PAGE_SPECIAL); } -static inline pte_t pte_mkdevmap(pte_t pte) -{ - return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP); -} - /* See comments above mksaveddirty_shift() */ static inline pmd_t pmd_mksaveddirty(pmd_t pmd) { @@ -603,11 +573,6 @@ static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_DIRTY); } -static inline pmd_t pmd_mkdevmap(pmd_t pmd) -{ - return pmd_set_flags(pmd, _PAGE_DEVMAP); -} - static inline pmd_t pmd_mkhuge(pmd_t pmd) { return pmd_set_flags(pmd, _PAGE_PSE); @@ -673,11 +638,6 @@ static inline 
pud_t pud_mkdirty(pud_t pud) return pud_mksaveddirty(pud); } -static inline pud_t pud_mkdevmap(pud_t pud) -{ - return pud_set_flags(pud, _PAGE_DEVMAP); -} - static inline pud_t pud_mkhuge(pud_t pud) { return pud_set_flags(pud, _PAGE_PSE); @@ -1008,13 +968,6 @@ static inline int pte_present(pte_t a) return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } -#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP -static inline int pte_devmap(pte_t a) -{ - return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; -} -#endif - #define pte_accessible pte_accessible static inline bool pte_accessible(struct mm_struct *mm, pte_t a) { diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index a5731fb1e9dd..2ec250ba467e 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -34,7 +34,6 @@ #define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ #define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */ -#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 #ifdef CONFIG_X86_64 #define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */ @@ -121,11 +120,9 @@ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) -#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) #define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4) #else #define _PAGE_NX (_AT(pteval_t, 0)) -#define _PAGE_DEVMAP (_AT(pteval_t, 0)) #define _PAGE_SOFTW4 (_AT(pteval_t, 0)) #endif @@ -154,7 +151,7 @@ #define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | \ _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \ - _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP) + _PAGE_CC | _PAGE_UFFD_WP) #define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT) #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index e9b81876ebe4..00daedfefc1b 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -356,11 +356,6 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); } -static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm) -{ - flush_tlb_mm(mm); -} - extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); static inline bool pte_flags_need_flush(unsigned long oldflags, diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index ea1d984166cd..7bde68247b5f 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -120,7 +120,7 @@ struct its_array its_pages; static void *__its_alloc(struct its_array *pages) { - void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE); + void *page __free(execmem) = execmem_alloc_rw(EXECMEM_MODULE_TEXT, PAGE_SIZE); if (!page) return NULL; @@ -237,7 +237,6 @@ static void *its_alloc(void) if (!page) return NULL; - execmem_make_temp_rw(page, PAGE_SIZE); if (pages == &its_pages) set_memory_x((unsigned long)page, 1); @@ -1184,43 +1183,6 @@ bool cfi_bhi __ro_after_init = false; #endif #ifdef CONFIG_CFI_CLANG -struct bpf_insn; - -/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */ -extern unsigned int __bpf_prog_runX(const void *ctx, - const struct bpf_insn *insn); - -KCFI_REFERENCE(__bpf_prog_runX); - -/* u32 __ro_after_init cfi_bpf_hash = 
__kcfi_typeid___bpf_prog_runX; */ -asm ( -" .pushsection .data..ro_after_init,\"aw\",@progbits \n" -" .type cfi_bpf_hash,@object \n" -" .globl cfi_bpf_hash \n" -" .p2align 2, 0x0 \n" -"cfi_bpf_hash: \n" -" .long __kcfi_typeid___bpf_prog_runX \n" -" .size cfi_bpf_hash, 4 \n" -" .popsection \n" -); - -/* Must match bpf_callback_t */ -extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64); - -KCFI_REFERENCE(__bpf_callback_fn); - -/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */ -asm ( -" .pushsection .data..ro_after_init,\"aw\",@progbits \n" -" .type cfi_bpf_subprog_hash,@object \n" -" .globl cfi_bpf_subprog_hash \n" -" .p2align 2, 0x0 \n" -"cfi_bpf_subprog_hash: \n" -" .long __kcfi_typeid___bpf_callback_fn \n" -" .size cfi_bpf_subprog_hash, 4 \n" -" .popsection \n" -); - u32 cfi_get_func_hash(void *func) { u32 hash; diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c index 279148e72459..308dbbae6c6e 100644 --- a/arch/x86/kernel/cpu/sgx/encl.c +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -279,7 +279,7 @@ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl, static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl, unsigned long addr, - unsigned long vm_flags) + vm_flags_t vm_flags) { unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS; struct sgx_encl_page *entry; @@ -520,9 +520,9 @@ static void sgx_vma_open(struct vm_area_struct *vma) * Return: 0 on success, -EACCES otherwise */ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, - unsigned long end, unsigned long vm_flags) + unsigned long end, vm_flags_t vm_flags) { - unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS; + vm_flags_t vm_prot_bits = vm_flags & VM_ACCESS_FLAGS; struct sgx_encl_page *page; unsigned long count = 0; int ret = 0; @@ -605,7 +605,7 @@ static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *pag */ static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl, unsigned long addr, - unsigned long vm_flags) + vm_flags_t vm_flags) { struct sgx_encl_page *entry; diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h index f94ff14c9486..8ff47f6652b9 100644 --- a/arch/x86/kernel/cpu/sgx/encl.h +++ b/arch/x86/kernel/cpu/sgx/encl.h @@ -101,7 +101,7 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr, } int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, - unsigned long end, unsigned long vm_flags); + unsigned long end, vm_flags_t vm_flags); bool current_is_ksgxd(void); void sgx_encl_release(struct kref *ref); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index bcb534688dfe..c6b12bed173d 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -163,10 +163,10 @@ static struct crash_mem *fill_up_crash_elf_data(void) return NULL; /* - * Exclusion of crash region and/or crashk_low_res may cause - * another range split. So add extra two slots here. + * Exclusion of crash region, crashk_low_res and/or crashk_cma_ranges + * may cause range splits. So add extra slots here. 
*/ - nr_ranges += 2; + nr_ranges += 2 + crashk_cma_cnt; cmem = vzalloc(struct_size(cmem, ranges, nr_ranges)); if (!cmem) return NULL; @@ -184,6 +184,7 @@ static struct crash_mem *fill_up_crash_elf_data(void) static int elf_header_exclude_ranges(struct crash_mem *cmem) { int ret = 0; + int i; /* Exclude the low 1M because it is always reserved */ ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1); @@ -198,8 +199,17 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem) if (crashk_low_res.end) ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); + if (ret) + return ret; - return ret; + for (i = 0; i < crashk_cma_cnt; ++i) { + ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start, + crashk_cma_ranges[i].end); + if (ret) + return ret; + } + + return 0; } static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg) @@ -374,6 +384,14 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) add_e820_entry(params, &ei); } + for (i = 0; i < crashk_cma_cnt; ++i) { + ei.addr = crashk_cma_ranges[i].start; + ei.size = crashk_cma_ranges[i].end - + crashk_cma_ranges[i].start + 1; + ei.type = E820_TYPE_RAM; + add_e820_entry(params, &ei); + } + out: vfree(cmem); return ret; diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 252e82bcfd2f..4450acec9390 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -263,7 +263,7 @@ void arch_ftrace_update_code(int command) static inline void *alloc_tramp(unsigned long size) { - return execmem_alloc(EXECMEM_FTRACE, size); + return execmem_alloc_rw(EXECMEM_FTRACE, size); } static inline void tramp_free(void *tramp) { diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 47cb8eb138ba..6079d15dab8c 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -481,24 +481,6 @@ static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p, return len; } -/* Make page to RO mode when allocate it */ -void *alloc_insn_page(void) -{ - void *page; - - page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE); - if (!page) - return NULL; - - /* - * TODO: Once additional kernel code protection mechanisms are set, ensure - * that the page was not maliciously altered and it is still zeroed. 
- */ - set_memory_rox((unsigned long)page, 1); - - return page; -} - /* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */ static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0792f31961ac..1b2edd07a3e1 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -603,7 +603,7 @@ static void __init memblock_x86_reserve_range_setup_data(void) static void __init arch_reserve_crashkernel(void) { - unsigned long long crash_base, crash_size, low_size = 0; + unsigned long long crash_base, crash_size, low_size = 0, cma_size = 0; bool high = false; int ret; @@ -612,7 +612,7 @@ static void __init arch_reserve_crashkernel(void) ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base, - &low_size, &high); + &low_size, &cma_size, &high); if (ret) return; @@ -622,6 +622,7 @@ static void __init arch_reserve_crashkernel(void) } reserve_crashkernel_generic(crash_size, crash_base, low_size, high); + reserve_crashkernel_cma(cma_size); } static struct resource standard_io_resources[] = { diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index d1b79b418c05..850972deac8e 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -641,7 +641,7 @@ static void kvm_pit_reset(struct kvm_pit *pit) kvm_pit_reset_reinject(pit); } -static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) +static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask) { struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); @@ -763,7 +763,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) pit_state->irq_ack_notifier.gsi = 0; pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; - pit->mask_notifier.func = pit_mask_notifer; + pit->mask_notifier.func = pit_mask_notifier; kvm_pit_reset(pit); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 7456df985d96..bb57e93b4caf 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -1063,13 +1063,9 @@ unsigned long arch_max_swapfile_size(void) static struct execmem_info execmem_info __ro_after_init; #ifdef CONFIG_ARCH_HAS_EXECMEM_ROX -void execmem_fill_trapping_insns(void *ptr, size_t size, bool writeable) +void execmem_fill_trapping_insns(void *ptr, size_t size) { - /* fill memory with INT3 instructions */ - if (writeable) - memset(ptr, INT3_INSN_OPCODE, size); - else - text_poke_set(ptr, INT3_INSN_OPCODE, size); + memset(ptr, INT3_INSN_OPCODE, size); } #endif @@ -1102,7 +1098,21 @@ struct execmem_info __init *execmem_arch_setup(void) .pgprot = pgprot, .alignment = MODULE_ALIGN, }, - [EXECMEM_KPROBES ... 
EXECMEM_BPF] = { + [EXECMEM_KPROBES] = { + .flags = flags, + .start = start, + .end = MODULES_END, + .pgprot = PAGE_KERNEL_ROX, + .alignment = MODULE_ALIGN, + }, + [EXECMEM_FTRACE] = { + .flags = flags, + .start = start, + .end = MODULES_END, + .pgprot = pgprot, + .alignment = MODULE_ALIGN, + }, + [EXECMEM_BPF] = { .flags = EXECMEM_KASAN_SHADOW, .start = start, .end = MODULES_END, diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 2e7923844afe..c09284302dd3 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -36,7 +36,6 @@ #include <linux/debugfs.h> #include <linux/ioport.h> #include <linux/kernel.h> -#include <linux/pfn_t.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/mm.h> diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c index c84bd9540b16..dc1afd5c839d 100644 --- a/arch/x86/mm/pgprot.c +++ b/arch/x86/mm/pgprot.c @@ -32,7 +32,7 @@ void add_encrypt_protection_map(void) protection_map[i] = pgprot_encrypted(protection_map[i]); } -pgprot_t vm_get_page_prot(unsigned long vm_flags) +pgprot_t vm_get_page_prot(vm_flags_t vm_flags) { unsigned long val = pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]); diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h index 6333bd1eb9d2..a459ffbaf7ab 100644 --- a/arch/xtensa/include/asm/bootparam.h +++ b/arch/xtensa/include/asm/bootparam.h @@ -27,7 +27,7 @@ #define BP_TAG_FIRST 0x7B0B /* first tag with a version number */ #define BP_TAG_LAST 0x7E0B /* last tag */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* All records are aligned to 4 bytes */ diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 95e33a913962..b6db4838b175 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -11,7 +11,7 @@ #ifndef _XTENSA_CMPXCHG_H #define _XTENSA_CMPXCHG_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/bits.h> #include <linux/stringify.h> @@ -220,6 +220,6 @@ __arch_xchg(unsigned long x, volatile void * ptr, int size) } } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _XTENSA_CMPXCHG_H */ diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h index 3b1a0d5d2169..e0447bcc52c5 100644 --- a/arch/xtensa/include/asm/coprocessor.h +++ b/arch/xtensa/include/asm/coprocessor.h @@ -16,7 +16,7 @@ #include <asm/core.h> #include <asm/types.h> -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ # include <variant/tie-asm.h> .macro xchal_sa_start a b @@ -69,7 +69,7 @@ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured. @@ -87,7 +87,7 @@ #define XTENSA_HAVE_IO_PORTS \ XCHAL_CP_PORT_MASK -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * Additional registers. 
@@ -151,5 +151,5 @@ void local_coprocessors_flush_release_all(void); #endif /* XTENSA_HAVE_COPROCESSORS */ -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _XTENSA_COPROCESSOR_H */ diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h index df275d554788..7b483538f066 100644 --- a/arch/xtensa/include/asm/current.h +++ b/arch/xtensa/include/asm/current.h @@ -13,7 +13,7 @@ #include <asm/thread_info.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/thread_info.h> diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h index 0ea4f84cd558..f676d209d110 100644 --- a/arch/xtensa/include/asm/ftrace.h +++ b/arch/xtensa/include/asm/ftrace.h @@ -12,20 +12,20 @@ #include <asm/processor.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern unsigned long return_address(unsigned level); #define ftrace_return_address(n) return_address(n) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #ifdef CONFIG_FUNCTION_TRACER #define MCOUNT_ADDR ((unsigned long)(_mcount)) #define MCOUNT_INSN_SIZE 3 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern void _mcount(void); #define mcount _mcount -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* CONFIG_FUNCTION_TRACER */ #endif /* _XTENSA_FTRACE_H */ diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h index 574795a20d6f..101bcb87e15b 100644 --- a/arch/xtensa/include/asm/initialize_mmu.h +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -34,7 +34,7 @@ #define CA_WRITEBACK (0x4) #endif -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define XTENSA_HWVERSION_RC_2009_0 230000 @@ -240,6 +240,6 @@ .endm -#endif /*__ASSEMBLY__*/ +#endif /*__ASSEMBLER__*/ #endif /* _XTENSA_INITIALIZE_MMU_H */ diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h index 46c8596259d2..38e3e2a9b0fb 100644 --- a/arch/xtensa/include/asm/jump_label.h +++ b/arch/xtensa/include/asm/jump_label.h @@ -4,7 +4,7 @@ #ifndef _ASM_XTENSA_JUMP_LABEL_H #define _ASM_XTENSA_JUMP_LABEL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -61,5 +61,5 @@ struct jump_entry { jump_label_t key; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h index 8d2b4248466f..0da91b64fab9 100644 --- a/arch/xtensa/include/asm/kasan.h +++ b/arch/xtensa/include/asm/kasan.h @@ -2,7 +2,7 @@ #ifndef __ASM_KASAN_H #define __ASM_KASAN_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #ifdef CONFIG_KASAN diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index 6fc05cba61a2..6949724625a0 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h @@ -80,7 +80,7 @@ #if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF) #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern unsigned long xtensa_kio_paddr; static inline unsigned long xtensa_get_kio_paddr(void) diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 644413792bf3..20655174b111 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -80,7 +80,7 @@ #endif -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define __pgprot(x) (x) @@ -172,7 +172,7 @@ static inline unsigned long ___pa(unsigned long va) #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) #define virt_addr_valid(kaddr) 
pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #include <asm-generic/memory_model.h> #endif /* _XTENSA_PAGE_H */ diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index d62aa1c316fc..d6eb695f2b26 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -203,7 +203,7 @@ * What follows is the closest we can get by reasonable means.. * See linux/mm/mmap.c for protection_map[] array that uses these definitions. */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) @@ -366,10 +366,10 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return pte; } -#endif /* !defined (__ASSEMBLY__) */ +#endif /* !defined (__ASSEMBLER__) */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ /* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long), * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long), @@ -408,7 +408,7 @@ void update_mmu_tlb_range(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr); #define update_mmu_tlb_range update_mmu_tlb_range -#endif /* !defined (__ASSEMBLY__) */ +#endif /* !defined (__ASSEMBLER__) */ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_GET_AND_CLEAR diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 47b5df86ab5c..60a211356335 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -105,7 +105,7 @@ #error Unsupported xtensa ABI #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #if defined(__XTENSA_WINDOWED_ABI__) @@ -263,5 +263,5 @@ static inline unsigned long get_er(unsigned long addr) #endif /* XCHAL_HAVE_EXTERN_REGS */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _XTENSA_PROCESSOR_H */ diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index 4871e5a4d6fb..d0568ff6d349 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h @@ -41,7 +41,7 @@ #define NO_SYSCALL (-1) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/coprocessor.h> #include <asm/core.h> @@ -106,11 +106,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) int do_syscall_trace_enter(struct pt_regs *regs); void do_syscall_trace_leave(struct pt_regs *regs); -#else /* __ASSEMBLY__ */ +#else /* __ASSEMBLER__ */ # include <asm/asm-offsets.h> #define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE) -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _XTENSA_PTRACE_H */ diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h index de169b4eaeef..d301e68573cc 100644 --- a/arch/xtensa/include/asm/signal.h +++ b/arch/xtensa/include/asm/signal.h @@ -14,10 +14,10 @@ #include <uapi/asm/signal.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define __ARCH_HAS_SA_RESTORER #include <asm/sigcontext.h> -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _XTENSA_SIGNAL_H */ diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index e0dffcc43b9e..5b74dfc35ef9 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -16,7 +16,7 @@ #define CURRENT_SHIFT KERNEL_STACK_SHIFT -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ # include <asm/processor.h> #endif @@ -28,7 +28,7 @@ * must also be changed */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #if 
XTENSA_HAVE_COPROCESSORS @@ -80,7 +80,7 @@ struct thread_info { * macros/functions for gaining access to the thread information structure */ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define INIT_THREAD_INFO(tsk) \ { \ @@ -99,7 +99,7 @@ static __always_inline struct thread_info *current_thread_info(void) return ti; } -#else /* !__ASSEMBLY__ */ +#else /* !__ASSEMBLER__ */ /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg,sp) \ diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h index 573df8cea200..3edaebeef423 100644 --- a/arch/xtensa/include/asm/tlbflush.h +++ b/arch/xtensa/include/asm/tlbflush.h @@ -20,7 +20,7 @@ #define ITLB_HIT_BIT 3 #define DTLB_HIT_BIT 4 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* TLB flushing: * @@ -201,5 +201,5 @@ static inline unsigned long read_itlb_translation (int way) return tmp; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _XTENSA_TLBFLUSH_H */ diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h index 9115e86ebc75..6e89ea301438 100644 --- a/arch/xtensa/include/uapi/asm/ptrace.h +++ b/arch/xtensa/include/uapi/asm/ptrace.h @@ -42,7 +42,7 @@ #define PTRACE_GETFDPIC_EXEC 0 #define PTRACE_GETFDPIC_INTERP 1 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct user_pt_regs { __u32 pc; diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h index b8c824dd4b74..8060f1914400 100644 --- a/arch/xtensa/include/uapi/asm/signal.h +++ b/arch/xtensa/include/uapi/asm/signal.h @@ -19,7 +19,7 @@ #define _NSIG_BPW 32 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -77,7 +77,7 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm-generic/signal-defs.h> @@ -106,5 +106,5 @@ typedef struct sigaltstack { __kernel_size_t ss_size; } stack_t; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _UAPI_XTENSA_SIGNAL_H */ diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h index 12db8ac38750..2e9217a06ebf 100644 --- a/arch/xtensa/include/uapi/asm/types.h +++ b/arch/xtensa/include/uapi/asm/types.h @@ -14,7 +14,7 @@ #include <asm-generic/int-ll64.h> -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ # define __XTENSA_UL(x) (x) # define __XTENSA_UL_CONST(x) x #else @@ -23,7 +23,7 @@ # define __XTENSA_UL_CONST(x) ___XTENSA_UL_CONST(x) #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #endif |
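A minimal sketch of the vm_flags_t conversion carried through the sparc, x86 and SGX hunks above: helpers that take or return VMA flags now use the dedicated vm_flags_t type instead of a bare unsigned long. The body below is a simplified composite (the real sparc and x86 versions additionally handle ADI and memory-encryption protection bits), so treat it as an illustration of the signature change only.

pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	/* Index the architecture's protection_map with the access bits only. */
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}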
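The aegis128-aesni hunks above switch the skcipher walk to non-atomic mode and start honouring the return value of skcipher_walk_done(), stepping out of the FPU section around it. A trimmed sketch of that pattern (encrypt path only, tail handling omitted; not a drop-in replacement for the real function):

static __always_inline int
crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
				    struct skcipher_walk *walk)
{
	int err = 0;

	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
		aegis128_aesni_enc(state, walk->src.virt.addr,
				   walk->dst.virt.addr,
				   round_down(walk->nbytes, AEGIS128_BLOCK_SIZE));
		/* skcipher_walk_done() may fault or sleep, so leave the FPU section first. */
		kernel_fpu_end();
		err = skcipher_walk_done(walk,
					 walk->nbytes % AEGIS128_BLOCK_SIZE);
		kernel_fpu_begin();
	}
	return err;
}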
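parse_crashkernel() gains an extra out-parameter for the CMA portion in the hunks above (architectures that do not use it, such as sh, simply pass NULL). A sketch of the resulting call flow on x86, with error handling trimmed:

unsigned long long crash_base, crash_size, low_size = 0, cma_size = 0;
bool high = false;

if (!parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
		       &crash_size, &crash_base, &low_size, &cma_size, &high)) {
	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
	reserve_crashkernel_cma(cma_size);	/* new: reserve the CMA portion separately */
}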
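The execmem hunks above replace the allocate-then-remap dance with execmem_alloc_rw(), which hands back writable text memory directly, and execmem_fill_trapping_insns() loses its writeable argument. A rough sketch of the resulting caller-side pattern (illustrative only, minimal error handling):

void *page = execmem_alloc_rw(EXECMEM_MODULE_TEXT, PAGE_SIZE);
if (!page)
	return NULL;
/* The area is writable here; on x86 the trap fill is now a plain memset. */
memset(page, INT3_INSN_OPCODE, PAGE_SIZE);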
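The xtensa header hunks above swap the kbuild-defined __ASSEMBLY__ guard for the compiler-provided __ASSEMBLER__ macro; the guard structure itself is unchanged, as in this sketch:

#ifndef __ASSEMBLER__
/* C-only declarations (structs, prototypes, inline helpers) */
#else
/* assembler-only macros */
#endif /* __ASSEMBLER__ */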