-rw-r--r--  .mailmap | 1
-rw-r--r--  MAINTAINERS | 39
-rw-r--r--  arch/arm/Kconfig.debug | 1
-rwxr-xr-x  arch/arm/boot/deflate_xip_data.sh | 6
-rw-r--r--  arch/arm/boot/dts/aspeed-g4.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/aspeed-g5.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/imx7d-sdb.dts | 2
-rw-r--r--  arch/arm/boot/dts/rk3288.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts | 63
-rw-r--r--  arch/arm/include/asm/vdso.h | 2
-rw-r--r--  arch/arm/kernel/vdso.c | 12
-rw-r--r--  arch/arm/mach-davinci/board-omapl138-hawk.c | 4
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 3
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 7
-rw-r--r--  arch/arm/plat-omap/include/plat/sram.h | 11
-rw-r--r--  arch/arm/plat-omap/sram.c | 36
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 8
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h | 3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 3
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 18
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 2
-rw-r--r--  arch/powerpc/kernel/irq.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 11
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 35
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 16
-rw-r--r--  arch/powerpc/mm/mmu_context_book3s64.c | 1
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 1
-rw-r--r--  arch/powerpc/mm/tlb-radix.c | 169
-rw-r--r--  arch/x86/kernel/kvm.c | 4
-rw-r--r--  arch/x86/kvm/vmx.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 5
-rw-r--r--  drivers/infiniband/core/addr.c | 25
-rw-r--r--  drivers/infiniband/core/device.c | 3
-rw-r--r--  drivers/infiniband/core/ucma.c | 47
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 3
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 3
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 45
-rw-r--r--  drivers/md/dm-mpath.c | 2
-rw-r--r--  drivers/md/dm.c | 8
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 2
-rw-r--r--  drivers/mtd/nand/atmel/pmecc.c | 2
-rw-r--r--  drivers/scsi/hosts.c | 1
-rw-r--r--  drivers/scsi/hpsa.c | 73
-rw-r--r--  drivers/scsi/hpsa.h | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 6
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 39
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 12
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/virtio_scsi.c | 129
-rw-r--r--  include/rdma/ib_addr.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 3
-rw-r--r--  ipc/shm.c | 12
-rw-r--r--  mm/kmemleak.c | 12
-rw-r--r--  mm/memcontrol.c | 6
-rw-r--r--  mm/page_owner.c | 6
-rw-r--r--  mm/slab.c | 1
-rw-r--r--  mm/vmstat.c | 2
-rw-r--r--  sound/core/oss/pcm_oss.c | 4
-rw-r--r--  sound/core/pcm_native.c | 2
-rw-r--r--  sound/usb/quirks.c | 1
72 files changed, 636 insertions, 393 deletions
diff --git a/.mailmap b/.mailmap
index e18cab73e209..a2ce89a456c2 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,7 @@ Frank Zago <fzago@systemfabricworks.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
+Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
Henrik Rydberg <rydberg@bitmath.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 73c0cdabf755..6e950b8b4a41 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1060,41 +1060,42 @@ ARM PORT
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.armlinux.org.uk/
-S: Maintained
+S: Odd Fixes
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git
F: arch/arm/
+X: arch/arm/boot/dts/
ARM PRIMECELL AACI PL041 DRIVER
M: Russell King <linux@armlinux.org.uk>
-S: Maintained
+S: Odd Fixes
F: sound/arm/aaci.*
ARM PRIMECELL BUS SUPPORT
M: Russell King <linux@armlinux.org.uk>
-S: Maintained
+S: Odd Fixes
F: drivers/amba/
F: include/linux/amba/bus.h
ARM PRIMECELL CLCD PL110 DRIVER
M: Russell King <linux@armlinux.org.uk>
-S: Maintained
+S: Odd Fixes
F: drivers/video/fbdev/amba-clcd.*
ARM PRIMECELL KMI PL050 DRIVER
M: Russell King <linux@armlinux.org.uk>
-S: Maintained
+S: Odd Fixes
F: drivers/input/serio/ambakmi.*
F: include/linux/amba/kmi.h
ARM PRIMECELL MMCI PL180/1 DRIVER
M: Russell King <linux@armlinux.org.uk>
-S: Maintained
+S: Odd Fixes
F: drivers/mmc/host/mmci.*
F: include/linux/amba/mmci.h
ARM PRIMECELL UART PL010 AND PL011 DRIVERS
M: Russell King <linux@armlinux.org.uk>
-S: Maintained
+S: Odd Fixes
F: drivers/tty/serial/amba-pl01*.c
F: include/linux/amba/serial.h
@@ -1152,7 +1153,7 @@ S: Maintained
F: drivers/clk/sunxi/
ARM/Allwinner sunXi SoC support
-M: Maxime Ripard <maxime.ripard@free-electrons.com>
+M: Maxime Ripard <maxime.ripard@bootlin.com>
M: Chen-Yu Tsai <wens@csie.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -4626,7 +4627,7 @@ F: include/uapi/drm/drm*
F: include/linux/vga*
DRM DRIVERS FOR ALLWINNER A10
-M: Maxime Ripard <maxime.ripard@free-electrons.com>
+M: Maxime Ripard <maxime.ripard@bootlin.com>
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/sun4i/
@@ -8434,7 +8435,7 @@ S: Orphan
F: drivers/net/wireless/marvell/libertas/
MARVELL MACCHIATOBIN SUPPORT
-M: Russell King <rmk@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
@@ -8447,7 +8448,7 @@ F: drivers/net/ethernet/marvell/mv643xx_eth.*
F: include/linux/mv643xx.h
MARVELL MV88X3310 PHY DRIVER
-M: Russell King <rmk@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/marvell10g.c
@@ -12875,6 +12876,19 @@ S: Maintained
F: drivers/net/ethernet/socionext/netsec.c
F: Documentation/devicetree/bindings/net/socionext-netsec.txt
+SOLIDRUN CLEARFOG SUPPORT
+M: Russell King <linux@armlinux.org.uk>
+S: Maintained
+F: arch/arm/boot/dts/armada-388-clearfog*
+F: arch/arm/boot/dts/armada-38x-solidrun-*
+
+SOLIDRUN CUBOX-I/HUMMINGBOARD SUPPORT
+M: Russell King <linux@armlinux.org.uk>
+S: Maintained
+F: arch/arm/boot/dts/imx6*-cubox-i*
+F: arch/arm/boot/dts/imx6*-hummingboard*
+F: arch/arm/boot/dts/imx6*-sr-*
+
SONIC NETWORK DRIVER
M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
L: netdev@vger.kernel.org
@@ -13644,7 +13658,8 @@ S: Supported
F: drivers/i2c/busses/i2c-tegra.c
TEGRA IOMMU DRIVERS
-M: Hiroshi Doyu <hdoyu@nvidia.com>
+M: Thierry Reding <thierry.reding@gmail.com>
+L: linux-tegra@vger.kernel.org
S: Supported
F: drivers/iommu/tegra*
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 78a647080ebc..199ebc1c4538 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -22,6 +22,7 @@ config ARM_PTDUMP_DEBUGFS
config DEBUG_WX
bool "Warn on W+X mappings at boot"
+ depends on MMU
select ARM_PTDUMP_CORE
---help---
Generate a warning if any W+X mappings are found at boot.
diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh
index 1189598a25eb..5e7d758ebdd6 100755
--- a/arch/arm/boot/deflate_xip_data.sh
+++ b/arch/arm/boot/deflate_xip_data.sh
@@ -30,7 +30,7 @@ esac
sym_val() {
# extract hex value for symbol in $1
- local val=$($NM "$VMLINUX" | sed -n "/ $1$/{s/ .*$//p;q}")
+ local val=$($NM "$VMLINUX" 2>/dev/null | sed -n "/ $1\$/{s/ .*$//p;q}")
[ "$val" ] || { echo "can't find $1 in $VMLINUX" 1>&2; exit 1; }
# convert from hex to decimal
echo $((0x$val))
@@ -48,12 +48,12 @@ data_end=$(($_edata_loc - $base_offset))
file_end=$(stat -c "%s" "$XIPIMAGE")
if [ "$file_end" != "$data_end" ]; then
printf "end of xipImage doesn't match with _edata_loc (%#x vs %#x)\n" \
- $(($file_end + $base_offset)) $_edata_loc 2>&1
+ $(($file_end + $base_offset)) $_edata_loc 1>&2
exit 1;
fi
# be ready to clean up
-trap 'rm -f "$XIPIMAGE.tmp"' 0 1 2 3
+trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3
# substitute the data section by a compressed version
$DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp"
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index b0d8431a3700..ae2b8c952e80 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -42,6 +42,11 @@
};
};
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x40000000 0>;
+ };
+
ahb {
compatible = "simple-bus";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 40de3b66c33f..2477ebc11d9d 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -42,6 +42,11 @@
};
};
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0>;
+ };
+
ahb {
compatible = "simple-bus";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index a7a5dc7b2700..e7d2db839d70 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -82,7 +82,7 @@
enable-active-high;
};
- reg_usb_otg2_vbus: regulator-usb-otg1-vbus {
+ reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
compatible = "regulator-fixed";
regulator-name = "usb_otg2_vbus";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 6102e4e7f35c..354aff45c1af 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -927,6 +927,7 @@
i2s: i2s@ff890000 {
compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
reg = <0x0 0xff890000 0x0 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1176,6 +1177,7 @@
compatible = "rockchip,rk3288-dw-hdmi";
reg = <0x0 0xff980000 0x0 0x20000>;
reg-io-width = <4>;
+ #sound-dai-cells = <0>;
rockchip,grf = <&grf>;
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>;
diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
index 51e6f1d21c32..b2758dd8ce43 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
@@ -42,7 +42,6 @@
/dts-v1/;
#include "sun6i-a31s.dtsi"
-#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
/ {
@@ -99,6 +98,7 @@
pinctrl-0 = <&gmac_pins_rgmii_a>, <&gmac_phy_reset_pin_bpi_m2>;
phy = <&phy1>;
phy-mode = "rgmii";
+ phy-supply = <&reg_dldo1>;
snps,reset-gpio = <&pio 0 21 GPIO_ACTIVE_HIGH>; /* PA21 */
snps,reset-active-low;
snps,reset-delays-us = <0 10000 30000>;
@@ -118,7 +118,7 @@
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_bpi_m2>;
- vmmc-supply = <&reg_vcc3v0>;
+ vmmc-supply = <&reg_dcdc1>;
bus-width = <4>;
cd-gpios = <&pio 0 4 GPIO_ACTIVE_HIGH>; /* PA4 */
cd-inverted;
@@ -132,7 +132,7 @@
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins_a>;
- vmmc-supply = <&reg_vcc3v0>;
+ vmmc-supply = <&reg_aldo1>;
mmc-pwrseq = <&mmc2_pwrseq>;
bus-width = <4>;
non-removable;
@@ -163,6 +163,8 @@
reg = <0x68>;
interrupt-parent = <&nmi_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ eldoin-supply = <&reg_dcdc1>;
+ x-powers,drive-vbus-en;
};
};
@@ -193,7 +195,28 @@
#include "axp22x.dtsi"
+&reg_aldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-name = "vcc-gmac";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+};
+
&reg_dc5ldo {
+ regulator-always-on;
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1320000>;
regulator-name = "vdd-cpus";
@@ -233,6 +256,40 @@
regulator-name = "vcc-dram";
};
+&reg_dldo1 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-mac";
+};
+
+&reg_dldo2 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "avdd-csi";
+};
+
+&reg_dldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pb";
+};
+
+&reg_eldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vdd-csi";
+ status = "okay";
+};
+
+&reg_ldo_io1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pm-cpus";
+ status = "okay";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
index 9c99e817535e..5b85889f82ee 100644
--- a/arch/arm/include/asm/vdso.h
+++ b/arch/arm/include/asm/vdso.h
@@ -12,8 +12,6 @@ struct mm_struct;
void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
-extern char vdso_start, vdso_end;
-
extern unsigned int vdso_total_pages;
#else /* CONFIG_VDSO */
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index a4d6dc0f2427..f4dd7f9663c1 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -39,6 +39,8 @@
static struct page **vdso_text_pagelist;
+extern char vdso_start[], vdso_end[];
+
/* Total number of pages needed for the data and text portions of the VDSO. */
unsigned int vdso_total_pages __ro_after_init;
@@ -197,13 +199,13 @@ static int __init vdso_init(void)
unsigned int text_pages;
int i;
- if (memcmp(&vdso_start, "\177ELF", 4)) {
+ if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("VDSO is not a valid ELF object!\n");
return -ENOEXEC;
}
- text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
- pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+ text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+ pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
/* Allocate the VDSO text pagelist */
vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -218,7 +220,7 @@ static int __init vdso_init(void)
for (i = 0; i < text_pages; i++) {
struct page *page;
- page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+ page = virt_to_page(vdso_start + i * PAGE_SIZE);
vdso_text_pagelist[i] = page;
}
@@ -229,7 +231,7 @@ static int __init vdso_init(void)
cntvct_ok = cntvct_functional();
- patch_vdso(&vdso_start);
+ patch_vdso(vdso_start);
return 0;
}
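
The vdso.c change above works because vdso_start and vdso_end are linker-script symbols, not real C objects; declaring them as incomplete char arrays keeps the pointer arithmetic (vdso_end - vdso_start) well defined instead of taking the address of a one-byte object. A minimal userspace sketch of the same idiom, using the traditional etext/edata/end symbols that a default Linux/glibc link provides (see end(3)); the symbol names here come from the host toolchain, not from the patch:

/* Build: cc linker_syms.c */
#include <stdio.h>

/* Provided by the linker, not by any .c file -- same idea as vdso_start[]. */
extern char etext[], edata[], end[];

int main(void)
{
    printf("program text ends at  %p\n", (void *)etext);
    printf("initialized data ends %p (%td bytes after etext)\n",
           (void *)edata, edata - etext);
    printf("bss ends at           %p (%td bytes after edata)\n",
           (void *)end, end - edata);
    return 0;
}
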
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index a3e78074be70..62eb7d668890 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -127,8 +127,8 @@ static struct gpiod_lookup_table mmc_gpios_table = {
.dev_id = "da830-mmc.0",
.table = {
/* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
- GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
},
};
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 7e5d7a083707..36cd23c8be9b 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -133,6 +133,9 @@ static void __init u8500_init_machine(void)
if (of_machine_is_compatible("st-ericsson,u8540"))
of_platform_populate(NULL, u8500_local_bus_nodes,
u8540_auxdata_lookup, NULL);
+ else
+ of_platform_populate(NULL, u8500_local_bus_nodes,
+ NULL, NULL);
}
static const char * stericsson_dt_platform_compat[] = {
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index d443e481c3e9..8805a59bae53 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -888,11 +888,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
timer->irq = irq->start;
timer->pdev = pdev;
- /* Skip pm_runtime_enable for OMAP1 */
- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
- pm_runtime_enable(dev);
- pm_runtime_irq_safe(dev);
- }
+ pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
if (!timer->reserved) {
ret = pm_runtime_get_sync(dev);
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index fb061cf0d736..30a07730807a 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -5,13 +5,4 @@ void omap_map_sram(unsigned long start, unsigned long size,
unsigned long skip, int cached);
void omap_sram_reset(void);
-extern void *omap_sram_push_address(unsigned long size);
-
-/* Macro to push a function to the internal SRAM, using the fncpy API */
-#define omap_sram_push(funcp, size) ({ \
- typeof(&(funcp)) _res = NULL; \
- void *_sram_address = omap_sram_push_address(size); \
- if (_sram_address) \
- _res = fncpy(_sram_address, &(funcp), size); \
- _res; \
-})
+extern void *omap_sram_push(void *funcp, unsigned long size);
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a5bc92d7e476..921840acf65c 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -23,6 +23,7 @@
#include <asm/fncpy.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/mach/map.h>
@@ -42,7 +43,7 @@ static void __iomem *omap_sram_ceil;
* Note that fncpy requires the returned address to be aligned
* to an 8-byte boundary.
*/
-void *omap_sram_push_address(unsigned long size)
+static void *omap_sram_push_address(unsigned long size)
{
unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
@@ -60,6 +61,30 @@ void *omap_sram_push_address(unsigned long size)
return (void *)omap_sram_ceil;
}
+void *omap_sram_push(void *funcp, unsigned long size)
+{
+ void *sram;
+ unsigned long base;
+ int pages;
+ void *dst = NULL;
+
+ sram = omap_sram_push_address(size);
+ if (!sram)
+ return NULL;
+
+ base = (unsigned long)sram & PAGE_MASK;
+ pages = PAGE_ALIGN(size) / PAGE_SIZE;
+
+ set_memory_rw(base, pages);
+
+ dst = fncpy(sram, funcp, size);
+
+ set_memory_ro(base, pages);
+ set_memory_x(base, pages);
+
+ return dst;
+}
+
/*
* The SRAM context is lost during off-idle and stack
* needs to be reset.
@@ -75,6 +100,9 @@ void omap_sram_reset(void)
void __init omap_map_sram(unsigned long start, unsigned long size,
unsigned long skip, int cached)
{
+ unsigned long base;
+ int pages;
+
if (size == 0)
return;
@@ -95,4 +123,10 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
*/
memset_io(omap_sram_base + omap_sram_skip, 0,
omap_sram_size - omap_sram_skip);
+
+ base = (unsigned long)omap_sram_base;
+ pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
+
+ set_memory_ro(base, pages);
+ set_memory_x(base, pages);
}
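
For reference, the omap_sram_push() flow above is the classic make-writable, copy, then lock-down-and-mark-executable sequence. A loose userspace analogue under POSIX, using mmap()/mprotect() in place of set_memory_rw()/set_memory_ro()/set_memory_x(); the payload byte is purely illustrative and nothing is ever executed (strict SELinux or PaX-style policies may deny the PROT_EXEC flip):

/* Build: cc wx_demo.c */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    /* Start writable but not executable, like set_memory_rw() above. */
    unsigned char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    /* Stand-in for fncpy(): copy some payload into the region. */
    memcpy(buf, "\xc3", 1);    /* x86 'ret'; we never jump here */

    /* Flip to read-only + executable, like set_memory_ro()/set_memory_x(). */
    if (mprotect(buf, page, PROT_READ | PROT_EXEC) != 0) {
        perror("mprotect");
        return 1;
    }
    printf("region at %p is now R-X; further writes would fault\n", (void *)buf);
    munmap(buf, page);
    return 0;
}
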
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 03c6a3c72f9c..4c375e11ae95 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -648,7 +648,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
*/
static int vfp_dying_cpu(unsigned int cpu)
{
- vfp_force_reload(cpu, current_thread_info());
+ vfp_current_hw_state[cpu] = NULL;
return 0;
}
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 03f195025390..204bdb9857b9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -406,8 +406,9 @@
wlan_pd_n: wlan-pd-n {
compatible = "regulator-fixed";
regulator-name = "wlan_pd_n";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_module_reset_l>;
- /* Note the wlan_module_reset_l pinctrl */
enable-active-high;
gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>;
@@ -983,12 +984,6 @@ ap_i2c_audio: &i2c8 {
pinctrl-0 = <
&ap_pwroff /* AP will auto-assert this when in S3 */
&clk_32k /* This pin is always 32k on gru boards */
-
- /*
- * We want this driven low ASAP; firmware should help us, but
- * we can help ourselves too.
- */
- &wlan_module_reset_l
>;
pcfg_output_low: pcfg-output-low {
@@ -1168,12 +1163,7 @@ ap_i2c_audio: &i2c8 {
};
wlan_module_reset_l: wlan-module-reset-l {
- /*
- * We want this driven low ASAP (As {Soon,Strongly} As
- * Possible), to avoid leakage through the powered-down
- * WiFi.
- */
- rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>;
};
bt_host_wake_l: bt-host-wake-l {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 2605118d4b4c..0b81ca1d07e7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -411,8 +411,8 @@
reg = <0x0 0xfe800000 0x0 0x100000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
dr_mode = "otg";
- phys = <&u2phy0_otg>, <&tcphy0_usb3>;
- phy-names = "usb2-phy", "usb3-phy";
+ phys = <&u2phy0_otg>;
+ phy-names = "usb2-phy";
phy_type = "utmi_wide";
snps,dis_enblslpm_quirk;
snps,dis-u2-freeclk-exists-quirk;
@@ -444,8 +444,8 @@
reg = <0x0 0xfe900000 0x0 0x100000>;
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
dr_mode = "otg";
- phys = <&u2phy1_otg>, <&tcphy1_usb3>;
- phy-names = "usb2-phy", "usb3-phy";
+ phys = <&u2phy1_otg>;
+ phy-names = "usb2-phy";
phy_type = "utmi_wide";
snps,dis_enblslpm_quirk;
snps,dis-u2-freeclk-exists-quirk;
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 0abeb0e2d616..37671feb2bf6 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -87,6 +87,9 @@ typedef struct {
/* Number of bits in the mm_cpumask */
atomic_t active_cpus;
+ /* Number of users of the external (Nest) MMU */
+ atomic_t copros;
+
/* NPU NMMU context */
struct npu_context *npu_context;
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 8eea90f80e45..19b45ba6caf9 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -47,9 +47,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
#endif
extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
-extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
- unsigned long page_size);
-extern void radix__flush_tlb_lpid(unsigned long lpid);
extern void radix__flush_tlb_all(void);
extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
unsigned long address);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index a2c5c95882cf..2e2bacbdf6ed 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -203,6 +203,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x2000000000000000)
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x8000000000000000)
@@ -465,7 +466,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
- CPU_FTR_PKEY)
+ CPU_FTR_PKEY | CPU_FTR_P9_TLBIE_BUG)
#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
(~CPU_FTR_SAO))
#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 051b3d63afe3..3a15b6db9501 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -92,15 +92,23 @@ static inline void dec_mm_active_cpus(struct mm_struct *mm)
static inline void mm_context_add_copro(struct mm_struct *mm)
{
/*
- * On hash, should only be called once over the lifetime of
- * the context, as we can't decrement the active cpus count
- * and flush properly for the time being.
+ * If any copro is in use, increment the active CPU count
+ * in order to force TLB invalidations to be global as to
+ * propagate to the Nest MMU.
*/
- inc_mm_active_cpus(mm);
+ if (atomic_inc_return(&mm->context.copros) == 1)
+ inc_mm_active_cpus(mm);
}
static inline void mm_context_remove_copro(struct mm_struct *mm)
{
+ int c;
+
+ c = atomic_dec_if_positive(&mm->context.copros);
+
+ /* Detect imbalance between add and remove */
+ WARN_ON(c < 0);
+
/*
* Need to broadcast a global flush of the full mm before
* decrementing active_cpus count, as the next TLBI may be
@@ -111,7 +119,7 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
* for the time being. Invalidations will remain global if
* used on hash.
*/
- if (radix_enabled()) {
+ if (c == 0 && radix_enabled()) {
flush_all_mm(mm);
dec_mm_active_cpus(mm);
}
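
The add/remove pair above only escalates on the first copro user and only flushes on the last one, with atomic_dec_if_positive() catching unbalanced removes. A small standalone C11 sketch of that counting pattern; dec_if_positive() below re-implements the kernel helper's semantics, and the printf calls are placeholders for inc_mm_active_cpus()/flush_all_mm(), not kernel APIs:

/* Build: cc -std=c11 copro_demo.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int copros = 0;

/* Like atomic_dec_if_positive(): decrement only if the counter is > 0,
 * and return old - 1 (so -1 signals an unbalanced remove). */
static int dec_if_positive(atomic_int *v)
{
    int old = atomic_load(v);

    while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
        ;    /* 'old' is reloaded on failure; retry while still positive */
    return old - 1;
}

static void add_copro(void)
{
    /* Only the first user escalates invalidations to global. */
    if (atomic_fetch_add(&copros, 1) + 1 == 1)
        printf("first copro user: make TLB invalidations global\n");
}

static void remove_copro(void)
{
    int c = dec_if_positive(&copros);

    if (c < 0)
        printf("WARN: remove without matching add\n");
    else if (c == 0)
        printf("last copro user gone: flush the mm, drop active_cpus\n");
}

int main(void)
{
    add_copro();
    add_copro();     /* second user: no escalation message */
    remove_copro();  /* one user still left */
    remove_copro();  /* last user: triggers the flush path */
    remove_copro();  /* imbalance: warns */
    return 0;
}
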
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 945e2c29ad2d..8ca5d5b74618 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -709,6 +709,9 @@ static __init void cpufeatures_cpu_quirks(void)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
else if ((version & 0xffffefff) == 0x004e0201)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+
+ if ((version & 0xffff0000) == 0x004e0000)
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
}
static void __init cpufeatures_setup_finished(void)
@@ -720,6 +723,9 @@ static void __init cpufeatures_setup_finished(void)
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
}
+ /* Make sure powerpc_base_platform is non-NULL */
+ powerpc_base_platform = cur_cpu_spec->platform;
+
system_registers.lpcr = mfspr(SPRN_LPCR);
system_registers.hfscr = mfspr(SPRN_HFSCR);
system_registers.fscr = mfspr(SPRN_FSCR);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3ac87e53b3da..1ecfd8ffb098 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -706,7 +706,7 @@ EXC_COMMON_BEGIN(bad_addr_slb)
ld r3, PACA_EXSLB+EX_DAR(r13)
std r3, _DAR(r1)
beq cr6, 2f
- li r10, 0x480 /* fix trap number for I-SLB miss */
+ li r10, 0x481 /* fix trap number for I-SLB miss */
std r10, _TRAP(r1)
2: bl save_nvgprs
addi r3, r1, STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f88038847790..061aa0f47bb1 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -476,6 +476,14 @@ void force_external_irq_replay(void)
*/
WARN_ON(!arch_irqs_disabled());
+ /*
+ * Interrupts must always be hard disabled before irq_happened is
+ * modified (to prevent lost update in case of interrupt between
+ * load and store).
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
/* Indicate in the PACA that we have an interrupt to replay */
local_paca->irq_happened |= PACA_IRQ_EE;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 5cb4e4687107..5d9bafe9a371 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -157,6 +157,9 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
asm volatile("ptesync": : :"memory");
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
+ asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
+ : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
asm volatile("ptesync": : :"memory");
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 8888e625a999..e1c083fbe434 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -473,6 +473,17 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
kvm->arch.lpid, 0, 0, 0);
}
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ /*
+ * Need the extra ptesync to make sure we don't
+ * re-order the tlbie
+ */
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
+ "r" (rbvalues[0]), "r" (kvm->arch.lpid));
+ }
+
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
kvm->arch.tlbie_lock = 0;
} else {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index d33264697a31..f86a20270e50 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1557,6 +1557,24 @@ mc_cont:
ptesync
3: stw r5,VCPU_SLB_MAX(r9)
+ /* load host SLB entries */
+BEGIN_MMU_FTR_SECTION
+ b 0f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
+ ld r8,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ li r3, SLBSHADOW_SAVEAREA
+ LDX_BE r5, r8, r3
+ addi r3, r3, 8
+ LDX_BE r6, r8, r3
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r8,r8,16
+ .endr
+0:
+
guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
mr r3, r12
@@ -2018,23 +2036,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r8
isync
48:
- /* load host SLB entries */
-BEGIN_MMU_FTR_SECTION
- b 0f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
- ld r8,PACA_SLBSHADOWPTR(r13)
-
- .rept SLB_NUM_BOLTED
- li r3, SLBSHADOW_SAVEAREA
- LDX_BE r5, r8, r3
- addi r3, r3, 8
- LDX_BE r6, r8, r3
- andis. r7,r5,SLB_ESID_V@h
- beq 1f
- slbmte r6,r5
-1: addi r8,r8,16
- .endr
-0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/* Finish timing, if we have a vcpu */
ld r4, HSTATE_KVM_VCPU(r13)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index a0675e91ad7d..656933c85925 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -201,6 +201,15 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
return va;
}
+static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ /* Need the extra ptesync to ensure we don't reorder tlbie*/
+ asm volatile("ptesync": : :"memory");
+ ___tlbie(vpn, psize, apsize, ssize);
+ }
+}
+
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long rb;
@@ -278,6 +287,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
asm volatile("ptesync": : :"memory");
} else {
__tlbie(vpn, psize, apsize, ssize);
+ fixup_tlbie(vpn, psize, apsize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
@@ -771,7 +781,7 @@ static void native_hpte_clear(void)
*/
static void native_flush_hash_range(unsigned long number, int local)
{
- unsigned long vpn;
+ unsigned long vpn = 0;
unsigned long hash, index, hidx, shift, slot;
struct hash_pte *hptep;
unsigned long hpte_v;
@@ -843,6 +853,10 @@ static void native_flush_hash_range(unsigned long number, int local)
__tlbie(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
+ /*
+ * Just do one more with the last used values.
+ */
+ fixup_tlbie(vpn, psize, psize, ssize);
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 929d9ef7083f..3f980baade4c 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -173,6 +173,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm_iommu_init(mm);
#endif
atomic_set(&mm->context.active_cpus, 0);
+ atomic_set(&mm->context.copros, 0);
return 0;
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 28c980eb4422..adf469f312f2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -481,6 +481,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
"r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
}
+ /* do we need fixup here ?*/
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 71d1b19ad1c0..a07f5372a4bf 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -119,6 +119,49 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb,rs,prs,r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = pid << PPC_BITLSHIFT(31);
+ prs = 1; /* process scoped */
+ r = 1; /* raidx format */
+
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 1, rb, rs, ric, prs, r);
+}
+
+static inline void __tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb,rs,prs,r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = pid << PPC_BITLSHIFT(31);
+ prs = 1; /* process scoped */
+ r = 1; /* raidx format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
+static inline void fixup_tlbie(void)
+{
+ unsigned long pid = 0;
+ unsigned long va = ((1UL << 52) - 1);
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ asm volatile("ptesync": : :"memory");
+ __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
+ }
+}
+
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
@@ -151,24 +194,25 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
asm volatile("ptesync": : :"memory");
- __tlbie_pid(pid, ric);
- asm volatile("eieio; tlbsync; ptesync": : :"memory");
-}
-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
-{
- unsigned long rb,rs,prs,r;
-
- rb = va & ~(PPC_BITMASK(52, 63));
- rb |= ap << PPC_BITLSHIFT(58);
- rs = pid << PPC_BITLSHIFT(31);
- prs = 1; /* process scoped */
- r = 1; /* raidx format */
-
- asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- trace_tlbie(0, 1, rb, rs, ric, prs, r);
+ /*
+ * Workaround the fact that the "ric" argument to __tlbie_pid
+ * must be a compile-time contraint to match the "i" constraint
+ * in the asm statement.
+ */
+ switch (ric) {
+ case RIC_FLUSH_TLB:
+ __tlbie_pid(pid, RIC_FLUSH_TLB);
+ break;
+ case RIC_FLUSH_PWC:
+ __tlbie_pid(pid, RIC_FLUSH_PWC);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ __tlbie_pid(pid, RIC_FLUSH_ALL);
+ }
+ fixup_tlbie();
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
@@ -203,22 +247,6 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
asm volatile("ptesync": : :"memory");
}
-static inline void __tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
-{
- unsigned long rb,rs,prs,r;
-
- rb = va & ~(PPC_BITMASK(52, 63));
- rb |= ap << PPC_BITLSHIFT(58);
- rs = pid << PPC_BITLSHIFT(31);
- prs = 1; /* process scoped */
- r = 1; /* raidx format */
-
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- trace_tlbie(0, 0, rb, rs, ric, prs, r);
-}
-
static inline void __tlbie_va_range(unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
unsigned long psize)
@@ -237,6 +265,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
asm volatile("ptesync": : :"memory");
__tlbie_va(va, pid, ap, ric);
+ fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
@@ -248,6 +277,7 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
if (also_pwc)
__tlbie_pid(pid, RIC_FLUSH_PWC);
__tlbie_va_range(start, end, pid, page_size, psize);
+ fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
@@ -311,6 +341,16 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
+static bool mm_needs_flush_escalation(struct mm_struct *mm)
+{
+ /*
+ * P9 nest MMU has issues with the page walk cache
+ * caching PTEs and not flushing them properly when
+ * RIC = 0 for a PID/LPID invalidate
+ */
+ return atomic_read(&mm->context.copros) != 0;
+}
+
#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
@@ -321,9 +361,12 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
return;
preempt_disable();
- if (!mm_is_thread_local(mm))
- _tlbie_pid(pid, RIC_FLUSH_TLB);
- else
+ if (!mm_is_thread_local(mm)) {
+ if (mm_needs_flush_escalation(mm))
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
+ else
+ _tlbie_pid(pid, RIC_FLUSH_TLB);
+ } else
_tlbiel_pid(pid, RIC_FLUSH_TLB);
preempt_enable();
}
@@ -435,10 +478,14 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
if (full) {
- if (local)
+ if (local) {
_tlbiel_pid(pid, RIC_FLUSH_TLB);
- else
- _tlbie_pid(pid, RIC_FLUSH_TLB);
+ } else {
+ if (mm_needs_flush_escalation(mm))
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
+ else
+ _tlbie_pid(pid, RIC_FLUSH_TLB);
+ }
} else {
bool hflush = false;
unsigned long hstart, hend;
@@ -465,6 +512,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (hflush)
__tlbie_va_range(hstart, hend, pid,
HPAGE_PMD_SIZE, MMU_PAGE_2M);
+ fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
}
@@ -548,6 +596,9 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
}
if (full) {
+ if (!local && mm_needs_flush_escalation(mm))
+ also_pwc = true;
+
if (local)
_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
else
@@ -603,46 +654,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
- unsigned long page_size)
-{
- unsigned long rb,rs,prs,r;
- unsigned long ap;
- unsigned long ric = RIC_FLUSH_TLB;
-
- ap = mmu_get_ap(radix_get_mmu_psize(page_size));
- rb = gpa & ~(PPC_BITMASK(52, 63));
- rb |= ap << PPC_BITLSHIFT(58);
- rs = lpid & ((1UL << 32) - 1);
- prs = 0; /* process scoped */
- r = 1; /* raidx format */
-
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- asm volatile("eieio; tlbsync; ptesync": : :"memory");
- trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
-
-void radix__flush_tlb_lpid(unsigned long lpid)
-{
- unsigned long rb,rs,prs,r;
- unsigned long ric = RIC_FLUSH_ALL;
-
- rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
- rs = lpid & ((1UL << 32) - 1);
- prs = 0; /* partition scoped */
- r = 1; /* raidx format */
-
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- asm volatile("eieio; tlbsync; ptesync": : :"memory");
- trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid);
-
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index bc1a27280c4b..fae86e36e399 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -546,7 +546,7 @@ static void __init kvm_guest_init(void)
}
if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
- !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -635,7 +635,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
int cpu;
if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
- !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu));
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2d87603f9179..657c93409042 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10711,6 +10711,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control, vmcs12_exec_ctrl;
+ if (vmx->nested.dirty_vmcs12) {
+ prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
+ vmx->nested.dirty_vmcs12 = false;
+ }
+
/*
* First, the fields that are shadowed. This must be kept in sync
* with vmx_shadow_fields.h.
@@ -10948,11 +10953,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
vmx_set_efer(vcpu, vcpu->arch.efer);
- if (vmx->nested.dirty_vmcs12) {
- prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
- vmx->nested.dirty_vmcs12 = false;
- }
-
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b21285afa4ea..1bd5f26b3f00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -821,13 +821,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
- goto out;
+ goto out_unlock;
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval)
- goto out;
+ goto out_unlock;
q->properties.sdma_queue_id =
q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
q->properties.sdma_engine_id =
@@ -838,7 +838,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (!mqd) {
retval = -ENOMEM;
- goto out;
+ goto out_deallocate_sdma_queue;
}
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
@@ -848,7 +848,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
- goto out;
+ goto out_deallocate_sdma_queue;
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
@@ -869,7 +869,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
-out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+
+out_deallocate_sdma_queue:
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ deallocate_sdma_queue(dqm, q->sdma_id);
+out_unlock:
mutex_unlock(&dqm->lock);
return retval;
}
@@ -1188,8 +1194,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
dqm->sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ }
if (q->properties.is_active)
dqm->queue_count--;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 0ecbd1f9b606..0c3bc00978f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -188,8 +188,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
- /* TODO: scratch support */
- packet->sh_hidden_private_base_vmid = 0;
+ packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fbffe1948b3b..90b25ce363ca 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2009,9 +2009,9 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.coupled_pm = false,
.has_nvdisplay = false,
.num_primary_formats = ARRAY_SIZE(tegra124_primary_formats),
- .primary_formats = tegra114_primary_formats,
+ .primary_formats = tegra124_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
- .overlay_formats = tegra114_overlay_formats,
+ .overlay_formats = tegra124_overlay_formats,
};
static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2160,7 +2160,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
struct device_link *link;
struct device *partner;
- partner = driver_find_device(dc->dev->driver, NULL, 0,
+ partner = driver_find_device(dc->dev->driver, NULL, NULL,
tegra_dc_match_by_pipe);
if (!partner)
return -EPROBE_DEFER;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b445b3bb0bb1..f273e28c39db 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -888,6 +888,11 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
setup = of_device_get_match_data(&pdev->dev);
+ if (!setup) {
+ dev_err(&pdev->dev, "Can't get device data\n");
+ ret = -ENODEV;
+ goto clk_free;
+ }
i2c_dev->setup = *setup;
ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 9183d148d644..cb1d2ab13c66 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -207,6 +207,22 @@ int rdma_addr_size(struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_addr_size);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr)
+{
+ int ret = rdma_addr_size((struct sockaddr *) addr);
+
+ return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_in6);
+
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
+{
+ int ret = rdma_addr_size((struct sockaddr *) addr);
+
+ return ret <= sizeof(*addr) ? ret : 0;
+}
+EXPORT_SYMBOL(rdma_addr_size_kss);
+
static struct rdma_addr_client self;
void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -586,6 +602,15 @@ static void process_one_req(struct work_struct *_work)
list_del(&req->list);
mutex_unlock(&lock);
+ /*
+ * Although the work will normally have been canceled by the
+ * workqueue, it can still be requeued as long as it is on the
+ * req_list, so it could have been requeued before we grabbed &lock.
+ * We need to cancel it after it is removed from req_list to really be
+ * sure it is safe to free.
+ */
+ cancel_delayed_work(&req->work);
+
req->callback(req->status, (struct sockaddr *)&req->src_addr,
req->addr, req->context);
put_client(req->client);
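
The new rdma_addr_size_in6()/rdma_addr_size_kss() helpers cap the family-derived length at the size of the fixed field embedded in the ucma command, so an address family whose sockaddr is wider than that field is rejected rather than copied past the end of the buffer. A simplified userspace sketch of the check; AF_DEMO_WIDE and struct demo_wide_sockaddr are made-up stand-ins for a wider family (the real concern being families like AF_IB whose sockaddr exceeds sockaddr_in6):

/* Build: cc addr_size_demo.c */
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Hypothetical "wide" family, larger than struct sockaddr_in6. */
#define AF_DEMO_WIDE 250
struct demo_wide_sockaddr {
    unsigned short family;
    unsigned char  blob[60];
};

/* Rough analogue of rdma_addr_size(): bytes implied by the family. */
static size_t addr_size(const struct sockaddr *sa)
{
    switch (sa->sa_family) {
    case AF_INET:      return sizeof(struct sockaddr_in);
    case AF_INET6:     return sizeof(struct sockaddr_in6);
    case AF_DEMO_WIDE: return sizeof(struct demo_wide_sockaddr);
    default:           return 0;    /* unknown family: invalid */
    }
}

/* Analogue of rdma_addr_size_in6(): the command only holds a sockaddr_in6,
 * so any family needing more space than that must be rejected. */
static size_t addr_size_in6(const struct sockaddr_in6 *addr)
{
    size_t ret = addr_size((const struct sockaddr *)addr);

    return ret <= sizeof(*addr) ? ret : 0;
}

int main(void)
{
    struct sockaddr_in6 ok   = { .sin6_family = AF_INET6 };
    struct sockaddr_in6 evil = { .sin6_family = AF_DEMO_WIDE };

    printf("AF_INET6     -> %zu bytes (accepted)\n", addr_size_in6(&ok));
    printf("wide family  -> %zu bytes (rejected, would overrun the field)\n",
           addr_size_in6(&evil));
    return 0;
}
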
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index bb065c9449be..b7459cf524e4 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -290,6 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
{
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
+ rdma_restrack_clean(&device->res);
put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
@@ -600,8 +601,6 @@ void ib_unregister_device(struct ib_device *device)
}
up_read(&lists_rwsem);
- rdma_restrack_clean(&device->res);
-
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index e5a1e7d81326..d933336d7e01 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -632,6 +632,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!rdma_addr_size_in6(&cmd.addr))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -645,22 +648,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_bind cmd;
- struct sockaddr *addr;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- addr = (struct sockaddr *) &cmd.addr;
- if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
+ if (cmd.reserved || !cmd.addr_size ||
+ cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_bind_addr(ctx->cm_id, addr);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
ucma_put_ctx(ctx);
return ret;
}
@@ -670,23 +672,22 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
int in_len, int out_len)
{
struct rdma_ucm_resolve_ip cmd;
- struct sockaddr *src, *dst;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- src = (struct sockaddr *) &cmd.src_addr;
- dst = (struct sockaddr *) &cmd.dst_addr;
- if (!rdma_addr_size(src) || !rdma_addr_size(dst))
+ if (!rdma_addr_size_in6(&cmd.src_addr) ||
+ !rdma_addr_size_in6(&cmd.dst_addr))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
@@ -696,24 +697,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
int in_len, int out_len)
{
struct rdma_ucm_resolve_addr cmd;
- struct sockaddr *src, *dst;
struct ucma_context *ctx;
int ret;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- src = (struct sockaddr *) &cmd.src_addr;
- dst = (struct sockaddr *) &cmd.dst_addr;
- if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
- !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
+ if (cmd.reserved ||
+ (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
+ !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
return -EINVAL;
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
ucma_put_ctx(ctx);
return ret;
}
@@ -1166,6 +1166,11 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (!ctx->cm_id->device) {
+ ret = -EINVAL;
+ goto out;
+ }
+
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
@@ -1307,7 +1312,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
return -EINVAL;
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
@@ -1331,7 +1336,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_notify cmd;
struct ucma_context *ctx;
- int ret;
+ int ret = -EINVAL;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
@@ -1340,7 +1345,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -1426,7 +1433,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.response = cmd.response;
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
- join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+ join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
if (!join_cmd.addr_size)
return -EINVAL;
@@ -1445,7 +1452,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
+ if (!rdma_addr_size_kss(&cmd.addr))
return -EINVAL;
return ucma_process_join(file, &cmd, out_len);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index db2ff352d75f..ec638778661c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4383,7 +4383,7 @@ err_dma_alloc_buf:
eq->l0_dma = 0;
if (mhop_num == 1)
- for (i -= i; i >= 0; i--)
+ for (i -= 1; i >= 0; i--)
dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
eq->buf_dma[i]);
else if (mhop_num == 2) {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index da091de4e69d..7f8bda3a2005 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3448,9 +3448,12 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
if (err)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
- mlx5_ib_destroy_qp(dev->umrc.qp);
- ib_free_cq(dev->umrc.cq);
- ib_dealloc_pd(dev->umrc.pd);
+ if (dev->umrc.qp)
+ mlx5_ib_destroy_qp(dev->umrc.qp);
+ if (dev->umrc.cq)
+ ib_free_cq(dev->umrc.cq);
+ if (dev->umrc.pd)
+ ib_dealloc_pd(dev->umrc.pd);
}
enum {
@@ -3552,12 +3555,15 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
error_4:
mlx5_ib_destroy_qp(qp);
+ dev->umrc.qp = NULL;
error_3:
ib_free_cq(cq);
+ dev->umrc.cq = NULL;
error_2:
ib_dealloc_pd(pd);
+ dev->umrc.pd = NULL;
error_0:
kfree(attr);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index c51c602f06d6..3e0b3f0238d6 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -739,6 +739,9 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
int i;
+ if (!dev->cache.wq)
+ return 0;
+
dev->cache.stopped = 1;
flush_workqueue(dev->cache.wq);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index db4bf97c0e15..0ffb9b93e22d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -833,7 +833,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
if (!dev->num_cnq) {
- DP_ERR(dev, "not enough CNQ resources.\n");
+ DP_ERR(dev, "Failed. At least one CNQ is required.\n");
+ rc = -ENOMEM;
goto init_err;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 875b17272d65..419a158e8fca 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1841,14 +1841,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
static int qedr_update_qp_state(struct qedr_dev *dev,
struct qedr_qp *qp,
+ enum qed_roce_qp_state cur_state,
enum qed_roce_qp_state new_state)
{
int status = 0;
- if (new_state == qp->state)
+ if (new_state == cur_state)
return 0;
- switch (qp->state) {
+ switch (cur_state) {
case QED_ROCE_QP_STATE_RESET:
switch (new_state) {
case QED_ROCE_QP_STATE_INIT:
@@ -1955,6 +1956,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
enum ib_qp_state old_qp_state, new_qp_state;
+ enum qed_roce_qp_state cur_state;
int rc = 0;
DP_DEBUG(dev, QEDR_MSG_QP,
@@ -2086,18 +2088,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
SET_FIELD(qp_params.modify_flags,
QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
- qp_params.ack_timeout = attr->timeout;
- if (attr->timeout) {
- u32 temp;
-
- temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
- /* FW requires [msec] */
- qp_params.ack_timeout = temp;
- } else {
- /* Infinite */
+ /* The received timeout value is an exponent used like this:
+ * "12.7.34 LOCAL ACK TIMEOUT
+ * Value representing the transport (ACK) timeout for use by
+ * the remote, expressed as: 4.096 * 2^timeout [usec]"
+ * The FW expects timeout in msec so we need to divide the usec
+ * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
+ * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
+ * The value of zero means infinite so we use a 'max_t' to make
+ * sure that sub 1 msec values will be configured as 1 msec.
+ */
+ if (attr->timeout)
+ qp_params.ack_timeout =
+ 1 << max_t(int, attr->timeout - 8, 0);
+ else
qp_params.ack_timeout = 0;
- }
}
+
if (attr_mask & IB_QP_RETRY_CNT) {
SET_FIELD(qp_params.modify_flags,
QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
@@ -2170,13 +2177,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->dest_qp_num = attr->dest_qp_num;
}
+ cur_state = qp->state;
+
+	/* Update the QP state before the actual ramrod to prevent a race with
+	 * the fast path. Modifying the QP state to error will cause the device
+	 * to flush the CQEs, and polling those flushed CQEs will be flagged as
+	 * a potential issue if the QP isn't in the error state.
+	 */
+ if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
+ !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
+ qp->state = QED_ROCE_QP_STATE_ERR;
+
if (qp->qp_type != IB_QPT_GSI)
rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
qp->qed_qp, &qp_params);
if (attr_mask & IB_QP_STATE) {
if ((qp->qp_type != IB_QPT_GSI) && (!udata))
- rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
+ rc = qedr_update_qp_state(dev, qp, cur_state,
+ qp_params.new_state);
qp->state = qp_params.new_state;
}
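For reference, the 2^(timeout - 8) approximation described in the comment above can be checked with a small stand-alone helper; the function below is illustrative only (its name is made up for this sketch) and is not part of the patch. With timeout = 14, the exact IBTA value is 4.096 usec * 2^14 ~= 67 msec, while the approximation gives 1 << (14 - 8) = 64 msec.

/* Illustrative sketch of the driver's 2^(timeout - 8) approximation. */
static unsigned int ack_timeout_to_msec(unsigned int timeout)
{
	if (!timeout)
		return 0;		/* zero encodes "infinite" */
	if (timeout <= 8)
		return 1;		/* clamp sub-1-msec results to 1 msec */
	return 1u << (timeout - 8);
}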
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a05a560d3cba..a6b7baf31cdd 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -887,7 +887,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
q = bdev_get_queue(p->path.dev->bdev);
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
- if (attached_handler_name) {
+ if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
if (r) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 45328d8b2859..353ea0ede091 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -466,7 +466,7 @@ static int dm_get_bdev_for_ioctl(struct mapped_device *md,
{
struct dm_target *tgt;
struct dm_table *map;
- int srcu_idx, r;
+ int srcu_idx, r, r2;
retry:
r = -ENOTTY;
@@ -492,9 +492,11 @@ retry:
goto out;
bdgrab(*bdev);
- r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
- if (r < 0)
+ r2 = blkdev_get(*bdev, *mode, _dm_claim_ptr);
+ if (r2 < 0) {
+ r = r2;
goto out;
+ }
dm_put_live_table(md, srcu_idx);
return r;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 7c0b27d132b1..b479bd81120b 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
do {
uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
mask = (1 << (cfi->device_type * 8)) - 1;
+ if (ofs >= map->size)
+ return 0;
result = map_read(map, base + ofs);
bank++;
} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index fcbe4fd6e684..ca0a70389ba9 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -426,7 +426,7 @@ static int get_strength(struct atmel_pmecc_user *user)
static int get_sectorsize(struct atmel_pmecc_user *user)
{
- return user->cache.cfg & PMECC_LOOKUP_TABLE_SIZE_1024 ? 1024 : 512;
+ return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512;
}
static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index dd9464920456..ef22b275d050 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -474,6 +474,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->dma_boundary = 0xffffffff;
shost->use_blk_mq = scsi_use_blk_mq;
+ shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5293e6827ce5..3a9eca163db8 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1045,11 +1045,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
if (unlikely(!h->msix_vectors))
return;
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- c->Header.ReplyQueue =
- raw_smp_processor_id() % h->nreply_queues;
- else
- c->Header.ReplyQueue = reply_queue % h->nreply_queues;
+ c->Header.ReplyQueue = reply_queue;
}
}
@@ -1063,10 +1059,7 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h,
* Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
- else
- cp->ReplyQueue = reply_queue % h->nreply_queues;
+ cp->ReplyQueue = reply_queue;
/*
* Set the bits in the address sent down to include:
* - performant mode bit (bit 0)
@@ -1087,10 +1080,7 @@ static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
/* Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- cp->reply_queue = smp_processor_id() % h->nreply_queues;
- else
- cp->reply_queue = reply_queue % h->nreply_queues;
+ cp->reply_queue = reply_queue;
/* Set the bits in the address sent down to include:
* - performant mode bit not used in ioaccel mode 2
* - pull count (bits 0-3)
@@ -1109,10 +1099,7 @@ static void set_ioaccel2_performant_mode(struct ctlr_info *h,
* Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- cp->reply_queue = smp_processor_id() % h->nreply_queues;
- else
- cp->reply_queue = reply_queue % h->nreply_queues;
+ cp->reply_queue = reply_queue;
/*
* Set the bits in the address sent down to include:
* - performant mode bit not used in ioaccel mode 2
@@ -1157,6 +1144,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
{
dial_down_lockup_detection_during_fw_flash(h, c);
atomic_inc(&h->commands_outstanding);
+
+ reply_queue = h->reply_map[raw_smp_processor_id()];
switch (c->cmd_type) {
case CMD_IOACCEL1:
set_ioaccel1_performant_mode(h, c, reply_queue);
@@ -7376,6 +7365,26 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
h->msix_vectors = 0;
}
+static void hpsa_setup_reply_map(struct ctlr_info *h)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < h->msix_vectors; queue++) {
+ mask = pci_irq_get_affinity(h->pdev, queue);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ h->reply_map[cpu] = queue;
+ }
+ return;
+
+fallback:
+ for_each_possible_cpu(cpu)
+ h->reply_map[cpu] = 0;
+}
+
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode.
*/
@@ -7771,6 +7780,10 @@ static int hpsa_pci_init(struct ctlr_info *h)
err = hpsa_interrupt_mode(h);
if (err)
goto clean1;
+
+ /* setup mapping between CPU and reply queue */
+ hpsa_setup_reply_map(h);
+
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto clean2; /* intmode+region, pci */
@@ -8480,6 +8493,28 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
return wq;
}
+static void hpda_free_ctlr_info(struct ctlr_info *h)
+{
+ kfree(h->reply_map);
+ kfree(h);
+}
+
+static struct ctlr_info *hpda_alloc_ctlr_info(void)
+{
+ struct ctlr_info *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return NULL;
+
+ h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL);
+ if (!h->reply_map) {
+ kfree(h);
+ return NULL;
+ }
+ return h;
+}
+
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int dac, rc;
@@ -8517,7 +8552,7 @@ reinit_after_soft_reset:
* the driver. See comments in hpsa.h for more info.
*/
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
- h = kzalloc(sizeof(*h), GFP_KERNEL);
+ h = hpda_alloc_ctlr_info();
if (!h) {
dev_err(&pdev->dev, "Failed to allocate controller head\n");
return -ENOMEM;
@@ -8916,7 +8951,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
- kfree(h); /* init_one 1 */
+ hpda_free_ctlr_info(h); /* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
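The CPU-to-reply-queue mapping introduced above (and mirrored in megaraid_sas further down) follows one pattern: walk the MSI-X vectors, use pci_irq_get_affinity() to find which CPUs each vector serves, and record that vector as the reply queue for those CPUs, falling back to a trivial map (queue 0 in hpsa, cpu % msix_vectors in megaraid_sas) when no affinity mask is available. A minimal sketch of that pattern is shown below; the names are illustrative and this is not code from the patch.

/* Illustrative sketch: build a per-CPU reply-queue map from IRQ affinity. */
static void example_setup_reply_map(struct pci_dev *pdev,
				    unsigned int *reply_map,
				    unsigned int nr_queues)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < nr_queues; queue++) {
		mask = pci_irq_get_affinity(pdev, queue);
		if (!mask)
			goto fallback;
		for_each_cpu(cpu, mask)
			reply_map[cpu] = queue;
	}
	return;

fallback:
	for_each_possible_cpu(cpu)
		reply_map[cpu] = 0;	/* hpsa-style fallback: route everything to queue 0 */
}

Submission then simply indexes the map with the issuing CPU (reply_queue = h->reply_map[raw_smp_processor_id()]), replacing the old "CPU modulo number of reply queues" heuristic so completions land on a vector whose affinity covers the submitting CPU.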
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 018f980a701c..fb9f5e7f8209 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -158,6 +158,7 @@ struct bmic_controller_parameters {
#pragma pack()
struct ctlr_info {
+ unsigned int *reply_map;
int ctlr;
char devname[8];
char *product_name;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b1b1d3a3b173..daefe8172b04 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3579,11 +3579,9 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
struct ibmvfc_target *tgt)
{
- if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
- sizeof(tgt->ids.port_name)))
+ if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
return 1;
- if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
- sizeof(tgt->ids.node_name)))
+ if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
return 1;
if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
return 1;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 6198559abbd8..dd66c11399a2 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -37,6 +37,7 @@
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
+#include <linux/backing-dev.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -954,6 +955,13 @@ static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
{
+ struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
+ struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_conn *conn = session->leadconn;
+
+ if (conn->datadgst_en)
+ sdev->request_queue->backing_dev_info->capabilities
+ |= BDI_CAP_STABLE_WRITES;
blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ba6503f37756..27fab8235ea5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2128,6 +2128,7 @@ enum MR_PD_TYPE {
struct megasas_instance {
+ unsigned int *reply_map;
__le32 *producer;
dma_addr_t producer_h;
__le32 *consumer;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a71ee67df084..dde0798b8a91 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5165,6 +5165,26 @@ skip_alloc:
instance->use_seqnum_jbod_fp = false;
}
+static void megasas_setup_reply_map(struct megasas_instance *instance)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < instance->msix_vectors; queue++) {
+ mask = pci_irq_get_affinity(instance->pdev, queue);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ instance->reply_map[cpu] = queue;
+ }
+ return;
+
+fallback:
+ for_each_possible_cpu(cpu)
+ instance->reply_map[cpu] = cpu % instance->msix_vectors;
+}
+
/**
* megasas_init_fw - Initializes the FW
* @instance: Adapter soft state
@@ -5343,6 +5363,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
goto fail_setup_irqs;
}
+ megasas_setup_reply_map(instance);
+
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
dev_info(&instance->pdev->dev,
@@ -6123,20 +6145,29 @@ static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
*/
static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
{
+ instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
+ GFP_KERNEL);
+ if (!instance->reply_map)
+ return -ENOMEM;
+
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- return -ENOMEM;
+ goto fail;
break;
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- return -ENOMEM;
+ goto fail;
break;
}
return 0;
+ fail:
+ kfree(instance->reply_map);
+ instance->reply_map = NULL;
+ return -ENOMEM;
}
/*
@@ -6148,6 +6179,7 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
*/
static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
{
+ kfree(instance->reply_map);
if (instance->adapter_type == MFI_SERIES) {
if (instance->producer)
pci_free_consistent(instance->pdev, sizeof(u32),
@@ -6540,7 +6572,6 @@ fail_io_attach:
pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
scsi_host_put(host);
-
fail_alloc_instance:
pci_disable_device(pdev);
@@ -6746,6 +6777,8 @@ megasas_resume(struct pci_dev *pdev)
if (rval < 0)
goto fail_reenable_msix;
+ megasas_setup_reply_map(instance);
+
if (instance->adapter_type != MFI_SERIES) {
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index dc8e850fbfd2..5ec3b74e8aed 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2641,11 +2641,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
- /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
- id by default, not CPU group id, otherwise all MSI-X queues won't
- be utilized */
- cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
- raw_smp_processor_id() % instance->msix_vectors : 0;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
praid_context = &io_request->RaidContext;
@@ -2971,10 +2968,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
}
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->msix_vectors ?
- (raw_smp_processor_id() % instance->msix_vectors) : 0;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
if (!fp_possible) {
/* system pd firmware path */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3541caf3fceb..1fa84d6a0f8b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2484,6 +2484,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
sector_size = old_sector_size;
goto got_data;
}
+ /* Remember that READ CAPACITY(16) succeeded */
+ sdp->try_rc_10_first = 0;
}
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7c28e8d4955a..45d04631888a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
struct virtio_scsi_target_state {
seqcount_t tgt_seq;
- /* Count of outstanding requests. */
- atomic_t reqs;
-
/* Currently active virtqueue for requests sent to this target. */
struct virtio_scsi_vq *req_vq;
};
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
struct virtio_scsi_cmd *cmd = buf;
struct scsi_cmnd *sc = cmd->sc;
struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
dev_dbg(&sc->device->sdev_gendev,
"cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
}
sc->scsi_done(sc);
-
- atomic_dec(&tgt->reqs);
}
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -529,11 +522,20 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
}
#endif
-static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
- struct virtio_scsi_vq *req_vq,
+static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
+ struct scsi_cmnd *sc)
+{
+ u32 tag = blk_mq_unique_tag(sc->request);
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+
+ return &vscsi->req_vqs[hwq];
+}
+
+static int virtscsi_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *sc)
{
- struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
unsigned long flags;
int req_size;
@@ -576,79 +578,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
return 0;
}
-static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
- struct scsi_cmnd *sc)
-{
- struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
-
- atomic_inc(&tgt->reqs);
- return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
-}
-
-static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
- struct scsi_cmnd *sc)
-{
- u32 tag = blk_mq_unique_tag(sc->request);
- u16 hwq = blk_mq_unique_tag_to_hwq(tag);
-
- return &vscsi->req_vqs[hwq];
-}
-
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
- struct virtio_scsi_target_state *tgt)
-{
- struct virtio_scsi_vq *vq;
- unsigned long flags;
- u32 queue_num;
-
- local_irq_save(flags);
- if (atomic_inc_return(&tgt->reqs) > 1) {
- unsigned long seq;
-
- do {
- seq = read_seqcount_begin(&tgt->tgt_seq);
- vq = tgt->req_vq;
- } while (read_seqcount_retry(&tgt->tgt_seq, seq));
- } else {
- /* no writes can be concurrent because of atomic_t */
- write_seqcount_begin(&tgt->tgt_seq);
-
- /* keep previous req_vq if a reader just arrived */
- if (unlikely(atomic_read(&tgt->reqs) > 1)) {
- vq = tgt->req_vq;
- goto unlock;
- }
-
- queue_num = smp_processor_id();
- while (unlikely(queue_num >= vscsi->num_queues))
- queue_num -= vscsi->num_queues;
- tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
- write_seqcount_end(&tgt->tgt_seq);
- }
- local_irq_restore(flags);
-
- return vq;
-}
-
-static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
- struct scsi_cmnd *sc)
-{
- struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
- struct virtio_scsi_vq *req_vq;
-
- if (shost_use_blk_mq(sh))
- req_vq = virtscsi_pick_vq_mq(vscsi, sc);
- else
- req_vq = virtscsi_pick_vq(vscsi, tgt);
-
- return virtscsi_queuecommand(vscsi, req_vq, sc);
-}
-
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
DECLARE_COMPLETION_ONSTACK(comp);
@@ -775,7 +704,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget)
return -ENOMEM;
seqcount_init(&tgt->tgt_seq);
- atomic_set(&tgt->reqs, 0);
tgt->req_vq = &vscsi->req_vqs[0];
starget->hostdata = tgt;
@@ -805,33 +733,13 @@ static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
return BLK_EH_RESET_TIMER;
}
-static struct scsi_host_template virtscsi_host_template_single = {
- .module = THIS_MODULE,
- .name = "Virtio SCSI HBA",
- .proc_name = "virtio_scsi",
- .this_id = -1,
- .cmd_size = sizeof(struct virtio_scsi_cmd),
- .queuecommand = virtscsi_queuecommand_single,
- .change_queue_depth = virtscsi_change_queue_depth,
- .eh_abort_handler = virtscsi_abort,
- .eh_device_reset_handler = virtscsi_device_reset,
- .eh_timed_out = virtscsi_eh_timed_out,
- .slave_alloc = virtscsi_device_alloc,
-
- .dma_boundary = UINT_MAX,
- .use_clustering = ENABLE_CLUSTERING,
- .target_alloc = virtscsi_target_alloc,
- .target_destroy = virtscsi_target_destroy,
- .track_queue_depth = 1,
-};
-
-static struct scsi_host_template virtscsi_host_template_multi = {
+static struct scsi_host_template virtscsi_host_template = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
.this_id = -1,
.cmd_size = sizeof(struct virtio_scsi_cmd),
- .queuecommand = virtscsi_queuecommand_multi,
+ .queuecommand = virtscsi_queuecommand,
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
@@ -844,6 +752,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.target_destroy = virtscsi_target_destroy,
.map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
+ .force_blk_mq = 1,
};
#define virtscsi_config_get(vdev, fld) \
@@ -936,7 +845,6 @@ static int virtscsi_probe(struct virtio_device *vdev)
u32 sg_elems, num_targets;
u32 cmd_per_lun;
u32 num_queues;
- struct scsi_host_template *hostt;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -949,12 +857,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
num_targets = virtscsi_config_get(vdev, max_target) + 1;
- if (num_queues == 1)
- hostt = &virtscsi_host_template_single;
- else
- hostt = &virtscsi_host_template_multi;
-
- shost = scsi_host_alloc(hostt,
+ shost = scsi_host_alloc(&virtscsi_host_template,
sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
if (!shost)
return -ENOMEM;
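With the single-queue template removed, every virtio-scsi command now takes the blk-mq path and the target hardware queue is recovered from the request's unique tag, as virtscsi_pick_vq_mq() does above. A minimal sketch of that decoding, using only standard blk-mq helpers (the function name is illustrative, not part of the patch):

/* Illustrative: recover the blk-mq hardware queue index for a command. */
static u16 example_hwq_for_cmd(struct scsi_cmnd *sc)
{
	u32 unique = blk_mq_unique_tag(sc->request);

	/* upper 16 bits select the hardware queue, lower 16 bits the tag */
	return blk_mq_unique_tag_to_hwq(unique);
}

Setting force_blk_mq in the host template, together with the scsi_host_alloc() change in drivers/scsi/hosts.c above, is what lets the driver rely on this mapping unconditionally.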
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index d656809f1217..415e09960017 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -130,6 +130,8 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
const unsigned char *dst_dev_addr);
int rdma_addr_size(struct sockaddr *addr);
+int rdma_addr_size_in6(struct sockaddr_in6 *addr);
+int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..9c1e4bad6581 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -452,6 +452,9 @@ struct scsi_host_template {
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* True if the low-level driver supports blk-mq only */
+ unsigned force_blk_mq:1;
+
/*
* Countdown for host blocking with no commands outstanding.
*/
diff --git a/ipc/shm.c b/ipc/shm.c
index 4643865e9171..93e0e3a4d009 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -386,6 +386,17 @@ static int shm_fault(struct vm_fault *vmf)
return sfd->vm_ops->fault(vmf);
}
+static int shm_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct file *file = vma->vm_file;
+ struct shm_file_data *sfd = shm_file_data(file);
+
+ if (sfd->vm_ops && sfd->vm_ops->split)
+ return sfd->vm_ops->split(vma, addr);
+
+ return 0;
+}
+
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
@@ -510,6 +521,7 @@ static const struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
.fault = shm_fault,
+ .split = shm_split,
#if defined(CONFIG_NUMA)
.set_policy = shm_set_policy,
.get_policy = shm_get_policy,
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e83987c55a08..46c2290a08f1 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1657,8 +1657,7 @@ static void start_scan_thread(void)
}
/*
- * Stop the automatic memory scanning thread. This function must be called
- * with the scan_mutex held.
+ * Stop the automatic memory scanning thread.
*/
static void stop_scan_thread(void)
{
@@ -1921,12 +1920,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
{
stop_scan_thread();
+ mutex_lock(&scan_mutex);
/*
- * Once the scan thread has stopped, it is safe to no longer track
- * object freeing. Ordering of the scan thread stopping and the memory
- * accesses below is guaranteed by the kthread_stop() function.
+	 * Once the kmemleak_scan thread is known to have stopped, it is safe
+	 * to no longer track object freeing. Ordering of the scan thread
+	 * stopping and the memory accesses below is guaranteed by the
+	 * kthread_stop() function.
*/
kmemleak_free_enabled = 0;
+ mutex_unlock(&scan_mutex);
if (!kmemleak_found_leaks)
__kmemleak_do_cleanup();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 670e99b68aa6..9ec024b862ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -714,9 +714,9 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
* invocations for reference counting, or use mem_cgroup_iter_break()
* to cancel a hierarchy walk before the round-trip is complete.
*
- * Reclaimers can specify a zone and a priority level in @reclaim to
+ * Reclaimers can specify a node and a priority level in @reclaim to
* divide up the memcgs in the hierarchy among all concurrent
- * reclaimers operating on the same zone and priority.
+ * reclaimers operating on the same node and priority.
*/
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -2299,7 +2299,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
}
/**
- * memcg_kmem_charge: charge a kmem page
+ * memcg_kmem_charge_memcg: charge a kmem page
* @page: page to charge
* @gfp: reclaim mode
* @order: allocation order
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 9886c6073828..7172e0a80e13 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -123,13 +123,13 @@ void __reset_page_owner(struct page *page, unsigned int order)
static inline bool check_recursive_alloc(struct stack_trace *trace,
unsigned long ip)
{
- int i, count;
+ int i;
if (!trace->nr_entries)
return false;
- for (i = 0, count = 0; i < trace->nr_entries; i++) {
- if (trace->entries[i] == ip && ++count == 2)
+ for (i = 0; i < trace->nr_entries; i++) {
+ if (trace->entries[i] == ip)
return true;
}
diff --git a/mm/slab.c b/mm/slab.c
index 324446621b3e..9095c3945425 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void)
nr_node_ids * sizeof(struct kmem_cache_node *),
SLAB_HWCACHE_ALIGN, 0, 0);
list_add(&kmem_cache->list, &slab_caches);
+ memcg_link_cache(kmem_cache);
slab_state = PARTIAL;
/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 40b2db6db6b1..33581be705f0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1839,9 +1839,11 @@ static void vmstat_update(struct work_struct *w)
* to occur in the future. Keep on running the
* update worker thread.
*/
+ preempt_disable();
queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
+ preempt_enable();
}
}
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 02298c9c6020..441405081195 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1326,7 +1326,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes)
{
size_t xfer = 0;
- ssize_t tmp;
+ ssize_t tmp = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes)
{
size_t xfer = 0;
- ssize_t tmp;
+ ssize_t tmp = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 77ba50ddcf9e..d18b3982548b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3422,7 +3422,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
area,
substream->runtime->dma_area,
substream->runtime->dma_addr,
- area->vm_end - area->vm_start);
+ substream->runtime->dma_bytes);
#endif /* CONFIG_X86 */
/* mmap with fault handler */
area->vm_ops = &snd_pcm_vm_ops_data_fault;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index ea8f3de92fa4..794224e1d6df 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1171,6 +1171,7 @@ static bool is_teac_dsd_dac(unsigned int id)
switch (id) {
case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
+ case USB_ID(0x0644, 0x804a): /* TEAC UD-301 */
return true;
}
return false;