-rw-r--r--  Documentation/fb/efifb.txt | 6
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/include/asm/tlb.h | 11
-rw-r--r--  arch/ia64/include/asm/tlb.h | 8
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/Makefile | 15
-rw-r--r--  arch/mips/boot/compressed/.gitignore | 2
-rw-r--r--  arch/mips/cavium-octeon/octeon-usb.c | 2
-rw-r--r--  arch/mips/dec/int-handler.S | 34
-rw-r--r--  arch/mips/include/asm/cache.h | 2
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 3
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-l2c-defs.h | 37
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-l2d-defs.h | 60
-rw-r--r--  arch/mips/include/asm/octeon/cvmx.h | 1
-rw-r--r--  arch/mips/kernel/smp.c | 6
-rw-r--r--  arch/mips/mm/uasm-mips.c | 2
-rw-r--r--  arch/mips/pci/pci.c | 7
-rw-r--r--  arch/mips/vdso/gettimeofday.c | 6
-rw-r--r--  arch/powerpc/configs/powernv_defconfig | 3
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 3
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 3
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 60
-rw-r--r--  arch/powerpc/kernel/process.c | 4
-rw-r--r--  arch/powerpc/kernel/smp.c | 6
-rw-r--r--  arch/powerpc/kernel/watchdog.c | 49
-rw-r--r--  arch/powerpc/platforms/powernv/idle.c | 41
-rw-r--r--  arch/s390/include/asm/tlb.h | 17
-rw-r--r--  arch/sh/include/asm/tlb.h | 8
-rw-r--r--  arch/um/include/asm/tlb.h | 13
-rw-r--r--  arch/x86/crypto/sha1_avx2_x86_64_asm.S | 67
-rw-r--r--  arch/x86/crypto/sha1_ssse3_glue.c | 2
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 10
-rw-r--r--  arch/x86/mm/init.c | 3
-rw-r--r--  arch/x86/xen/enlighten_hvm.c | 59
-rw-r--r--  block/bio-integrity.c | 6
-rw-r--r--  block/blk-mq.c | 4
-rw-r--r--  drivers/acpi/spcr.c | 36
-rw-r--r--  drivers/base/firmware_class.c | 49
-rw-r--r--  drivers/block/zram/zram_drv.c | 4
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 10
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 6
-rw-r--r--  drivers/dma-buf/sync_file.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 38
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 181
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 11
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 14
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 41
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 3
-rw-r--r--  drivers/gpu/drm/stm/Kconfig | 1
-rw-r--r--  drivers/iio/accel/bmc150-accel-core.c | 9
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 32
-rw-r--r--  drivers/iio/adc/aspeed_adc.c | 26
-rw-r--r--  drivers/iio/adc/axp288_adc.c | 42
-rw-r--r--  drivers/iio/adc/sun4i-gpadc-iio.c | 3
-rw-r--r--  drivers/iio/adc/vf610_adc.c | 2
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c | 29
-rw-r--r--  drivers/iio/light/tsl2563.c | 2
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c | 2
-rw-r--r--  drivers/iommu/arm-smmu.c | 7
-rw-r--r--  drivers/isdn/mISDN/fsm.c | 5
-rw-r--r--  drivers/isdn/mISDN/fsm.h | 2
-rw-r--r--  drivers/isdn/mISDN/layer1.c | 3
-rw-r--r--  drivers/isdn/mISDN/layer2.c | 15
-rw-r--r--  drivers/isdn/mISDN/tei.c | 20
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/md/raid5-cache.c | 61
-rw-r--r--  drivers/misc/mei/pci-me.c | 6
-rw-r--r--  drivers/misc/mei/pci-txe.c | 6
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 2
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 9
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 19
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 20
-rw-r--r--  drivers/nvme/host/core.c | 35
-rw-r--r--  drivers/nvme/host/pci.c | 18
-rw-r--r--  drivers/nvme/target/fc.c | 212
-rw-r--r--  drivers/pci/pci.c | 42
-rw-r--r--  drivers/pci/probe.c | 43
-rw-r--r--  drivers/pci/quirks.c | 89
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 30
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 3
-rw-r--r--  drivers/staging/iio/resolver/ad2s1210.c | 2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c | 16
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_target.c | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 7
-rw-r--r--  drivers/target/target_core_tpg.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/target/target_core_user.c | 13
-rw-r--r--  drivers/thunderbolt/eeprom.c | 9
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 23
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 37
-rw-r--r--  drivers/usb/core/hcd.c | 4
-rw-r--r--  drivers/usb/core/hub.c | 10
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 33
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 21
-rw-r--r--  drivers/usb/host/pci-quirks.c | 37
-rw-r--r--  drivers/usb/host/pci-quirks.h | 1
-rw-r--r--  drivers/usb/host/xhci-pci.c | 7
-rw-r--r--  drivers/usb/musb/musb_host.c | 1
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 17
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 5
-rw-r--r--  drivers/usb/renesas_usbhs/rcar3.c | 9
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/serial/pl2303.c | 2
-rw-r--r--  drivers/usb/serial/pl2303.h | 1
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 4
-rw-r--r--  drivers/usb/storage/usb.c | 18
-rw-r--r--  drivers/video/fbdev/efifb.c | 8
-rw-r--r--  drivers/video/fbdev/imxfb.c | 10
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/core.c | 1
-rw-r--r--  drivers/xen/events/events_base.c | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 3
-rw-r--r--  fs/fuse/file.c | 9
-rw-r--r--  fs/fuse/fuse_i.h | 1
-rw-r--r--  fs/nfs/Kconfig | 1
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c | 1
-rw-r--r--  fs/nfs/nfs4proc.c | 3
-rw-r--r--  fs/proc/meminfo.c | 8
-rw-r--r--  fs/proc/task_mmu.c | 7
-rw-r--r--  fs/userfaultfd.c | 4
-rw-r--r--  include/asm-generic/tlb.h | 7
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/iio/common/st_sensors.h | 7
-rw-r--r--  include/linux/mm_types.h | 64
-rw-r--r--  include/linux/net.h | 2
-rw-r--r--  include/linux/nvme-fc-driver.h | 7
-rw-r--r--  include/linux/pci.h | 4
-rw-r--r--  include/linux/platform_data/st_sensors_pdata.h | 2
-rw-r--r--  include/linux/sync_file.h | 3
-rw-r--r--  include/net/addrconf.h | 10
-rw-r--r--  include/net/bonding.h | 5
-rw-r--r--  include/net/busy_poll.h | 12
-rw-r--r--  include/net/mac80211.h | 15
-rw-r--r--  include/net/udp.h | 7
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  include/uapi/drm/msm_drm.h | 6
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/power/snapshot.c | 2
-rw-r--r--  kernel/trace/bpf_trace.c | 34
-rw-r--r--  lib/fault-inject.c | 8
-rw-r--r--  lib/test_kmod.c | 16
-rw-r--r--  mm/balloon_compaction.c | 2
-rw-r--r--  mm/debug.c | 6
-rw-r--r--  mm/huge_memory.c | 7
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/ksm.c | 3
-rw-r--r--  mm/memory.c | 42
-rw-r--r--  mm/migrate.c | 6
-rw-r--r--  mm/mprotect.c | 4
-rw-r--r--  mm/page_alloc.c | 11
-rw-r--r--  mm/rmap.c | 52
-rw-r--r--  mm/shmem.c | 12
-rw-r--r--  mm/util.c | 2
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/dccp/proto.c | 5
-rw-r--r--  net/dsa/tag_ksz.c | 13
-rw-r--r--  net/ipv4/fib_semantics.c | 12
-rw-r--r--  net/ipv4/route.c | 3
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/tcp_ulp.c | 14
-rw-r--r--  net/ipv6/route.c | 19
-rw-r--r--  net/ipv6/tcp_ipv6.c | 4
-rw-r--r--  net/key/af_key.c | 48
-rw-r--r--  net/mac80211/agg-rx.c | 22
-rw-r--r--  net/sched/sch_api.c | 3
-rw-r--r--  net/sched/sch_atm.c | 4
-rw-r--r--  net/sched/sch_cbq.c | 4
-rw-r--r--  net/sched/sch_hfsc.c | 12
-rw-r--r--  net/sched/sch_htb.c | 4
-rw-r--r--  net/sched/sch_sfq.c | 5
-rw-r--r--  net/tipc/bearer.c | 2
-rw-r--r--  net/tipc/msg.c | 1
-rw-r--r--  tools/testing/selftests/futex/Makefile | 2
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/kmod/kmod.sh | 0
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/sysctl/sysctl.sh | 0
-rw-r--r--  tools/testing/selftests/timers/freq-step.c | 7
228 files changed, 2154 insertions(+), 904 deletions(-)
diff --git a/Documentation/fb/efifb.txt b/Documentation/fb/efifb.txt
index a59916c29b33..1a85c1bdaf38 100644
--- a/Documentation/fb/efifb.txt
+++ b/Documentation/fb/efifb.txt
@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
Macbook Pro 17", iMac 20" :
video=efifb:i20
+Accepted options:
+
+nowc Don't map the framebuffer write combined. This can be used
+ to work around side-effects and slowdowns on other CPU cores
+ when large amounts of console data are written.
+
--
Edgar Hucek <gimli@dark-green.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 2db0f8cd4002..0e967b3ca1c6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14016,6 +14016,7 @@ F: drivers/block/virtio_blk.c
F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/
+F: mm/balloon_compaction.c
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
diff --git a/Makefile b/Makefile
index 6eba23bcb5ad..90da7bdc3f45 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Fearless Coyote
# *DOCUMENTATION*
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3f2eb76243e3..d5562f9ce600 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
}
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force) {
+ tlb->range_start = start;
+ tlb->range_end = end;
+ }
+
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
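The arch_tlb_gather_mmu()/arch_tlb_finish_mmu() split above (repeated for ia64, s390, sh and um below) pairs with generic tlb_gather_mmu()/tlb_finish_mmu() wrappers elsewhere in this series. A rough sketch of their shape, assuming the mm/memory.c side of the change:

/*
 * Sketch only (assumed shape of the generic wrappers in mm/memory.c):
 * the "force" flag makes the final flush unconditional when another
 * thread batched PTE unmaps on the same range concurrently.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	arch_tlb_gather_mmu(tlb, mm, start, end);
}

void tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end)
{
	/* Force a flush if a concurrent mmu_gather raced on this mm. */
	bool force = mm_tlb_flush_nested(tlb->mm);

	arch_tlb_finish_mmu(tlb, start, end, force);
}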
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index fced197b9626..cbe5ac3699bf 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
* collected.
*/
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force)
+ tlb->need_flush = 1;
/*
* Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
* tlb->end_addr.
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8dd20358464f..48d91d5be4e9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2260,7 +2260,7 @@ config CPU_R4K_CACHE_TLB
config MIPS_MT_SMP
bool "MIPS MT SMP support (1 TC on each available VPE)"
- depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
+ depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select SYNC_R4K
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 04343625b929..bc2708c9ada4 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -243,8 +243,21 @@ include arch/mips/Kbuild.platforms
ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
endif
-entry-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
+
+entry-noisa-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
| grep "\bkernel_entry\b" | cut -f1 -d \ )
+ifdef CONFIG_CPU_MICROMIPS
+ #
+ # Set the ISA bit, since the kernel_entry symbol in the ELF will have it
+ # clear which would lead to images containing addresses which bootloaders may
+ # jump to as MIPS32 code.
+ #
+ entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
+ $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
+ $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
+else
+ entry-y = $(entry-noisa-y)
+endif
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
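The nine nested $(patsubst) calls above simply rewrite the final hex digit of the nm output (0→1, 2→3, ..., e→f); that is, they set bit 0 of kernel_entry, the microMIPS ISA bit. A one-line C equivalent, with a hypothetical helper name for illustration:

/* Hypothetical C equivalent of the patsubst chain above. */
static unsigned long micromips_entry_fixup(unsigned long kernel_entry)
{
	return kernel_entry | 1UL;	/* bit 0 = microMIPS ISA mode */
}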
diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore
new file mode 100644
index 000000000000..ebae133f1d00
--- /dev/null
+++ b/arch/mips/boot/compressed/.gitignore
@@ -0,0 +1,2 @@
+ashldi3.c
+bswapsi.c
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 542be1cd0f32..bfdfaf32d2c4 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -13,9 +13,9 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/of_platform.h>
+#include <linux/io.h>
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-gpio-defs.h>
/* USB Control Register */
union cvm_usbdrd_uctl_ctl {
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 1910223a9c02..cea2bb1621e6 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -147,23 +147,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, cpu_mask_nr_tbl
lui t1, %hi(cpu_mask_nr_tbl)
addiu t1, %lo(cpu_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, cpu_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(cpu_mask_nr_tbl)
- lui AT, %hi(cpu_mask_nr_tbl)
- daddiu t1, t1, %higher(cpu_mask_nr_tbl)
- daddiu AT, AT, %lo(cpu_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
1: lw t2,(t1)
nop
@@ -214,23 +203,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, asic_mask_nr_tbl
lui t1, %hi(asic_mask_nr_tbl)
addiu t1, %lo(asic_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, asic_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(asic_mask_nr_tbl)
- lui AT, %hi(asic_mask_nr_tbl)
- daddiu t1, t1, %higher(asic_mask_nr_tbl)
- daddiu AT, AT, %lo(asic_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
2: lw t2,(t1)
nop
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index fc67947ed658..8b14c2706aa5 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -9,6 +9,8 @@
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
+#include <kmalloc.h>
+
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 8baa9033b181..721b698bfe3c 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -428,6 +428,9 @@
#ifndef cpu_scache_line_size
#define cpu_scache_line_size() cpu_data[0].scache.linesz
#endif
+#ifndef cpu_tcache_line_size
+#define cpu_tcache_line_size() cpu_data[0].tcache.linesz
+#endif
#ifndef cpu_hwrena_impl_bits
#define cpu_hwrena_impl_bits 0
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
index d045973ddb33..3ea84acf1814 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -33,6 +33,10 @@
#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#define CVMX_L2C_ERR_TDTX(block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_ERR_TTGX(block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
@@ -66,9 +70,40 @@
((offset) & 1) * 8)
#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
((offset) & 31) * 8)
-#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+union cvmx_l2c_err_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_err_tdtx_s {
+ __BITFIELD_FIELD(uint64_t dbe:1,
+ __BITFIELD_FIELD(uint64_t sbe:1,
+ __BITFIELD_FIELD(uint64_t vdbe:1,
+ __BITFIELD_FIELD(uint64_t vsbe:1,
+ __BITFIELD_FIELD(uint64_t syn:10,
+ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+ __BITFIELD_FIELD(uint64_t wayidx:18,
+ __BITFIELD_FIELD(uint64_t reserved_2_3:2,
+ __BITFIELD_FIELD(uint64_t type:2,
+ ;)))))))))
+ } s;
+};
+
+union cvmx_l2c_err_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_err_ttgx_s {
+ __BITFIELD_FIELD(uint64_t dbe:1,
+ __BITFIELD_FIELD(uint64_t sbe:1,
+ __BITFIELD_FIELD(uint64_t noway:1,
+ __BITFIELD_FIELD(uint64_t reserved_56_60:5,
+ __BITFIELD_FIELD(uint64_t syn:6,
+ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+ __BITFIELD_FIELD(uint64_t wayidx:15,
+ __BITFIELD_FIELD(uint64_t reserved_2_6:5,
+ __BITFIELD_FIELD(uint64_t type:2,
+ ;)))))))))
+ } s;
+};
+
union cvmx_l2c_cfg {
uint64_t u64;
struct cvmx_l2c_cfg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
new file mode 100644
index 000000000000..a951ad5d65ad
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
@@ -0,0 +1,60 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
+union cvmx_l2d_err {
+ uint64_t u64;
+ struct cvmx_l2d_err_s {
+ __BITFIELD_FIELD(uint64_t reserved_6_63:58,
+ __BITFIELD_FIELD(uint64_t bmhclsel:1,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))
+ } s;
+};
+
+union cvmx_l2d_fus3 {
+ uint64_t u64;
+ struct cvmx_l2d_fus3_s {
+ __BITFIELD_FIELD(uint64_t reserved_40_63:24,
+ __BITFIELD_FIELD(uint64_t ema_ctl:3,
+ __BITFIELD_FIELD(uint64_t reserved_34_36:3,
+ __BITFIELD_FIELD(uint64_t q3fus:34,
+ ;))))
+ } s;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 9742202f2a32..e638735cc3ac 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -62,6 +62,7 @@ enum cvmx_mips_space {
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-l2c-defs.h>
+#include <asm/octeon/cvmx-l2d-defs.h>
#include <asm/octeon/cvmx-l2t-defs.h>
#include <asm/octeon/cvmx-led-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 770d4d1516cb..6bace7695788 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -376,9 +376,6 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
- complete(&cpu_running);
- synchronise_count_slave(cpu);
-
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@@ -386,6 +383,9 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
+ complete(&cpu_running);
+ synchronise_count_slave(cpu);
+
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 3f74f6c1f065..9fea6c6bbf49 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -48,7 +48,7 @@
#include "uasm.c"
-static const struct insn const insn_table[insn_invalid] = {
+static const struct insn insn_table[insn_invalid] = {
[insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
[insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index bd67ac74fe2d..9632436d74d7 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -28,16 +28,15 @@ EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
static int __init pcibios_set_cache_line_size(void)
{
- struct cpuinfo_mips *c = &current_cpu_data;
unsigned int lsize;
/*
* Set PCI cacheline size to that of the highest level in the
* cache hierarchy.
*/
- lsize = c->dcache.linesz;
- lsize = c->scache.linesz ? : lsize;
- lsize = c->tcache.linesz ? : lsize;
+ lsize = cpu_dcache_line_size();
+ lsize = cpu_scache_line_size() ? : lsize;
+ lsize = cpu_tcache_line_size() ? : lsize;
BUG_ON(!lsize);
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c
index 974276e828b2..e2690d7ca4dd 100644
--- a/arch/mips/vdso/gettimeofday.c
+++ b/arch/mips/vdso/gettimeofday.c
@@ -35,7 +35,8 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv,
" syscall\n"
: "=r" (ret), "=r" (error)
: "r" (tv), "r" (tz), "r" (nr)
- : "memory");
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
return error ? -ret : ret;
}
@@ -55,7 +56,8 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid,
" syscall\n"
: "=r" (ret), "=r" (error)
: "r" (clkid), "r" (ts), "r" (nr)
- : "memory");
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
return error ? -ret : ret;
}
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 0695ce047d56..34fc9bbfca9e 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5175028c56ce..c5246d29f385 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1a61aa20dfba..fd5d98a0b95c 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 49d8422767b4..e925c1c99c71 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -223,17 +223,27 @@ system_call_exit:
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- .Lsyscall_exit_work
- /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
- li r7,MSR_FP
+ andi. r0,r8,MSR_FP
+ beq 2f
#ifdef CONFIG_ALTIVEC
- oris r7,r7,MSR_VEC@h
+ andis. r0,r8,MSR_VEC@h
+ bne 3f
#endif
- and r0,r8,r7
- cmpd r0,r7
- bne .Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2: addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Restore RI */
+#endif
+ bl restore_math
+#ifdef CONFIG_PPC_BOOK3S
+ li r11,0
+ mtmsrd r11,1
+#endif
+ ld r8,_MSR(r1)
+ ld r3,RESULT(r1)
+ li r11,-MAX_ERRNO
- cmpld r3,r11
+3: cmpld r3,r11
ld r5,_CCR(r1)
bge- .Lsyscall_error
.Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r5,_CCR(r1)
b .Lsyscall_error_cont
-.Lsyscall_restore_math:
- /*
- * Some initial tests from restore_math to avoid the heavyweight
- * C code entry and MSR manipulations.
- */
- LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
- and. r0,r0,r8
- bne 1f
-
- ld r7,PACACURRENT(r13)
- lbz r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
- lbz r6,THREAD+THREAD_LOAD_VEC(r7)
- add r0,r0,r6
-#endif
- cmpdi r0,0
- beq .Lsyscall_restore_math_cont
-
-1: addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
- li r10,MSR_RI
- mtmsrd r10,1 /* Restore RI */
-#endif
- bl restore_math
-#ifdef CONFIG_PPC_BOOK3S
- li r11,0
- mtmsrd r11,1
-#endif
- /* Restore volatiles, reload MSR from updated one */
- ld r8,_MSR(r1)
- ld r3,RESULT(r1)
- li r11,-MAX_ERRNO
- b .Lsyscall_restore_math_cont
-
/* Traced system call support */
.Lsyscall_dotrace:
bl save_nvgprs
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9f3e2c932dcc..ec480966f9bf 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -511,10 +511,6 @@ void restore_math(struct pt_regs *regs)
{
unsigned long msr;
- /*
- * Syscall exit makes a similar initial check before branching
- * to restore_math. Keep them in synch.
- */
if (!msr_tm_active(regs->msr) &&
!current->thread.load_fp && !loadvec(current->thread))
return;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index cf0e1245b8cc..8d3320562c70 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
hard_irq_disable();
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
raw_local_irq_restore(*flags);
- cpu_relax();
+ spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
raw_local_irq_save(*flags);
hard_irq_disable();
}
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
static void nmi_ipi_lock(void)
{
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
- cpu_relax();
+ spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}
static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
nmi_ipi_lock_start(&flags);
while (nmi_ipi_busy_count) {
nmi_ipi_unlock_end(&flags);
- cpu_relax();
+ spin_until_cond(nmi_ipi_busy_count == 0);
nmi_ipi_lock_start(&flags);
}
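spin_until_cond() busy-waits with SMT thread-priority hints instead of a bare cpu_relax() loop, so a spinning thread yields pipeline resources to its siblings; its definition in arch/powerpc/include/asm/processor.h is roughly:

/* Shape of spin_until_cond() from asm/processor.h (paraphrased): */
#define spin_until_cond(cond)					\
do {								\
	if (unlikely(!(cond))) {				\
		spin_begin();	/* lower SMT thread priority */	\
		do {						\
			spin_cpu_relax();			\
		} while (!(cond));				\
		spin_end();	/* restore thread priority */	\
	}							\
} while (0)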
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index b67f8b03a32d..34721a257a77 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
* This may be called from low level interrupt handlers at some
* point in future.
*/
- local_irq_save(*flags);
- while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
- cpu_relax();
+ raw_local_irq_save(*flags);
+ hard_irq_disable(); /* Make it soft-NMI safe */
+ while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+ raw_local_irq_restore(*flags);
+ spin_until_cond(!test_bit(0, &__wd_smp_lock));
+ raw_local_irq_save(*flags);
+ hard_irq_disable();
+ }
}
static inline void wd_smp_unlock(unsigned long *flags)
{
clear_bit_unlock(0, &__wd_smp_lock);
- local_irq_restore(*flags);
+ raw_local_irq_restore(*flags);
}
static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
nmi_panic(regs, "Hard LOCKUP");
}
-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
- cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
- cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+ cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+ cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
if (cpumask_empty(&wd_smp_cpus_pending)) {
wd_smp_last_reset_tb = tb;
cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
&wd_smp_cpus_stuck);
}
}
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+ set_cpumask_stuck(cpumask_of(cpu), tb);
+}
static void watchdog_smp_panic(int cpu, u64 tb)
{
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
}
smp_flush_nmi_ipi(1000000);
- /* Take the stuck CPU out of the watch group */
- for_each_cpu(c, &wd_smp_cpus_pending)
- set_cpu_stuck(c, tb);
+ /* Take the stuck CPUs out of the watch group */
+ set_cpumask_stuck(&wd_smp_cpus_pending, tb);
-out:
wd_smp_unlock(&flags);
printk_safe_flush();
@@ -152,6 +159,11 @@ out:
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
+
+ return;
+
+out:
+ wd_smp_unlock(&flags);
}
static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
void arch_touch_nmi_watchdog(void)
{
+ unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
int cpu = smp_processor_id();
- watchdog_timer_interrupt(cpu);
+ if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+ watchdog_timer_interrupt(cpu);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
@@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
static int start_wd_on_cpu(unsigned int cpu)
{
+ unsigned long flags;
+
if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
WARN_ON(1);
return 0;
@@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
return 0;
+ wd_smp_lock(&flags);
cpumask_set_cpu(cpu, &wd_cpus_enabled);
if (cpumask_weight(&wd_cpus_enabled) == 1) {
cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
wd_smp_last_reset_tb = get_tb();
}
- smp_wmb();
+ wd_smp_unlock(&flags);
+
start_watchdog_timer_on(cpu);
return 0;
@@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
static int stop_wd_on_cpu(unsigned int cpu)
{
+ unsigned long flags;
+
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
return 0; /* Can happen in CPU unplug case */
stop_watchdog_timer_on(cpu);
+ wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+ wd_smp_unlock(&flags);
+
wd_smp_clear_cpu_pending(cpu, get_tb());
return 0;
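For scale: with a 512 MHz timebase (tb_ticks_per_usec = 512) and a 100 ms watchdog timer period, the threshold above works out to 512 × 100 × 1000 = 51,200,000 timebase ticks, so arch_touch_nmi_watchdog() now returns without calling watchdog_timer_interrupt() unless at least one full timer period has elapsed on this CPU. (The numbers are illustrative; the actual period follows the configured watchdog threshold.)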
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 2abee070373f..a553aeea7af6 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
*/
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;
static int pnv_save_sprs_for_deep_states(void)
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
update_subcore_sibling_mask();
- if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
- pnv_save_sprs_for_deep_states();
+ if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+ int rc = pnv_save_sprs_for_deep_states();
+
+ if (likely(!rc))
+ return;
+
+ /*
+ * The stop-api is unable to restore hypervisor
+ * resources on wakeup from platform idle states which
+ * lose full context. So disable such states.
+ */
+ supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+ pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+ pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+ (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+ /*
+ * Use the default stop state for CPU-Hotplug
+ * if available.
+ */
+ if (default_stop_found) {
+ pnv_deepest_stop_psscr_val =
+ pnv_default_stop_val;
+ pnv_deepest_stop_psscr_mask =
+ pnv_default_stop_mask;
+ pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+ pnv_deepest_stop_psscr_val);
+ } else { /* Fallback to snooze loop for CPU-Hotplug */
+ deepest_stop_found = false;
+ pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+ }
+ }
+ }
}
u32 pnv_get_supported_cpuidle_states(void)
@@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
pnv_deepest_stop_psscr_val;
srr1 = power9_idle_stop(psscr);
- } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+ } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
+ (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
(idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
max_residency_ns = residency_ns[i];
pnv_deepest_stop_psscr_val = psscr_val[i];
pnv_deepest_stop_psscr_mask = psscr_mask[i];
+ pnv_deepest_stop_flag = flags[i];
deepest_stop_found = true;
}
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7317b3108a88..2eb8ff0d6fca 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -47,10 +47,9 @@ struct mmu_table_batch {
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
tlb_flush_mmu_free(tlb);
}
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force) {
+ tlb->start = start;
+ tlb->end = end;
+ }
+
tlb_flush_mmu(tlb);
}
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 46e0d635e36f..51a8bc967e75 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
}
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
- if (tlb->fullmm)
+ if (tlb->fullmm || force)
flush_tlb_mm(tlb->mm);
/* keep the page table cache within bounds */
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 600a2e9bfee2..344d95619d03 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
tlb_flush_mmu_free(tlb);
}
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force) {
+ tlb->start = start;
+ tlb->end = end;
+ tlb->need_flush = 1;
+ }
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 1cd792db15ef..1eab79c9ac48 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -117,11 +117,10 @@
.set T1, REG_T1
.endm
-#define K_BASE %r8
#define HASH_PTR %r9
+#define BLOCKS_CTR %r8
#define BUFFER_PTR %r10
#define BUFFER_PTR2 %r13
-#define BUFFER_END %r11
#define PRECALC_BUF %r14
#define WK_BUF %r15
@@ -205,14 +204,14 @@
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
- vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+ vmovdqu (i * 2)(BUFFER_PTR), W_TMP
.elseif ((i & 7) == 1)
- vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+ vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
.elseif ((i & 7) == 4)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
.elseif ((i & 7) == 7)
vmovdqu WY_TMP, PRECALC_WK(i&~7)
@@ -255,7 +254,7 @@
vpxor WY, WY_TMP, WY_TMP
.elseif ((i & 7) == 7)
vpxor WY_TMP2, WY_TMP, WY
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@@ -291,7 +290,7 @@
vpsrld $30, WY, WY
vpor WY, WY_TMP, WY
.elseif ((i & 7) == 7)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@@ -446,6 +445,16 @@
.endm
+/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
+ * %1 + %2 >= %3 ? %4 : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+ mov \a, RTA
+ add $\d, RTA
+ cmp $\c, \b
+ cmovge RTA, \a
+.endm
+
/*
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
*/
@@ -463,13 +472,16 @@
lea (2*4*80+32)(%rsp), WK_BUF
# Precalc WK for first 2 blocks
- PRECALC_OFFSET = 0
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
.set i, 0
.rept 160
PRECALC i
.set i, i + 1
.endr
- PRECALC_OFFSET = 128
+
+ /* Go to next block if needed */
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
xchg WK_BUF, PRECALC_BUF
.align 32
@@ -479,8 +491,8 @@ _loop:
* we use K_BASE value as a signal of a last block,
* it is set below by: cmovae BUFFER_PTR, K_BASE
*/
- cmp K_BASE, BUFFER_PTR
- jne _begin
+ test BLOCKS_CTR, BLOCKS_CTR
+ jnz _begin
.align 32
jmp _end
.align 32
@@ -512,10 +524,10 @@ _loop0:
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
- cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
-
+ /* Update Counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed*/
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
/*
* rounds
* 60,62,64,66,68
@@ -532,8 +544,8 @@ _loop0:
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
- cmp K_BASE, BUFFER_PTR /* is current block the last one? */
- je _loop
+ test BLOCKS_CTR, BLOCKS_CTR
+ jz _loop
mov TB, B
@@ -575,10 +587,10 @@ _loop2:
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
-
- cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
+ /* update counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed*/
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
jmp _loop3
_loop3:
@@ -641,19 +653,12 @@ _loop3:
avx2_zeroupper
- lea K_XMM_AR(%rip), K_BASE
-
+ /* Setup initial values */
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
- lea 64(BUF), BUFFER_PTR2
-
- shl $6, CNT /* mul by 64 */
- add BUF, CNT
- add $64, CNT
- mov CNT, BUFFER_END
- cmp BUFFER_END, BUFFER_PTR2
- cmovae K_BASE, BUFFER_PTR2
+ mov BUF, BUFFER_PTR2
+ mov CNT, BLOCKS_CTR
xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
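In C terms, ADD_IF_GE advances a buffer pointer only while enough blocks remain, which is what lets the BLOCKS_CTR countdown replace the old BUFFER_END/K_BASE sentinel trick. An illustrative model, not part of the patch:

/*
 * C model of the ADD_IF_GE macro: advance ptr by 'step' only if at
 * least 'threshold' blocks remain, so neither BUFFER_PTR nor
 * BUFFER_PTR2 can run past the caller's input buffer.
 */
static inline const unsigned char *add_if_ge(const unsigned char *ptr,
					     unsigned long blocks_ctr,
					     unsigned long threshold,
					     unsigned long step)
{
	return blocks_ctr >= threshold ? ptr + step : ptr;
}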
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index f960a043cdeb..fc61739150e7 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
static bool avx2_usable(void)
{
- if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 21126155a739..0ead9dbb9130 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -43,6 +43,9 @@ struct hypervisor_x86 {
/* pin current vcpu to specified physical cpu (run rarely) */
void (*pin_vcpu)(int);
+
+ /* called during init_mem_mapping() to setup early mappings. */
+ void (*init_mem_mapping)(void);
};
extern const struct hypervisor_x86 *x86_hyper;
@@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
extern void init_hypervisor_platform(void);
extern bool hypervisor_x2apic_available(void);
extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+ if (x86_hyper && x86_hyper->init_mem_mapping)
+ x86_hyper->init_mem_mapping();
+}
#else
static inline void init_hypervisor_platform(void) { }
static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
#endif /* CONFIG_HYPERVISOR_GUEST */
#endif /* _ASM_X86_HYPERVISOR_H */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 673541eb3b3f..bf3f1065d6ad 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -18,6 +18,7 @@
#include <asm/dma.h> /* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
+#include <asm/hypervisor.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
+ hypervisor_init_mem_mapping();
+
early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 87d791356ea9..de503c225ae1 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -12,6 +12,7 @@
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
+#include <asm/early_ioremap.h>
#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
@@ -21,38 +22,50 @@
#include "mmu.h"
#include "smp.h"
-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
{
struct xen_add_to_physmap xatp;
- u64 pa;
-
- if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
- /*
- * Search for a free page starting at 4kB physical address.
- * Low memory is preferred to avoid an EPT large page split up
- * by the mapping.
- * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
- * the BIOS used for HVM guests is well behaved and won't
- * clobber memory other than the first 4kB.
- */
- for (pa = PAGE_SIZE;
- !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
- memblock_is_reserved(pa);
- pa += PAGE_SIZE)
- ;
-
- memblock_reserve(pa, PAGE_SIZE);
- HYPERVISOR_shared_info = __va(pa);
- }
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+ xatp.gpfn = shared_info_pfn;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
}
+static void __init reserve_shared_info(void)
+{
+ u64 pa;
+
+ /*
+ * Search for a free page starting at 4kB physical address.
+ * Low memory is preferred to avoid an EPT large page split up
+ * by the mapping.
+ * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+ * the BIOS used for HVM guests is well behaved and won't
+ * clobber memory other than the first 4kB.
+ */
+ for (pa = PAGE_SIZE;
+ !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+ memblock_is_reserved(pa);
+ pa += PAGE_SIZE)
+ ;
+
+ shared_info_pfn = PHYS_PFN(pa);
+
+ memblock_reserve(pa, PAGE_SIZE);
+ HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+ early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+ HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
+}
+
static void __init init_hvm_pv_info(void)
{
int major, minor;
@@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
init_hvm_pv_info();
+ reserve_shared_info();
xen_hvm_init_shared_info();
/*
@@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
.init_platform = xen_hvm_guest_init,
.pin_vcpu = xen_pin_vcpu,
.x2apic_available = xen_x2apic_para_available,
+ .init_mem_mapping = xen_hvm_init_mem_mapping,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
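The resulting boot-time ordering, summarized from the hunks above:

/*
 * xen_hvm_guest_init()                reserve_shared_info():
 *                                       memblock_reserve() + early_memremap()
 * ...
 * init_mem_mapping()                  direct map is built, then
 *   -> hypervisor_init_mem_mapping()  xen_hvm_init_mem_mapping():
 *                                       early_memunmap() + switch to __va()
 */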
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 83e92beb3c9f..9b1ea478577b 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
*/
bool __bio_integrity_endio(struct bio *bio)
{
- if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
- struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
+ (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work);
return false;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 211ef367345f..535cbdf32aab 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -684,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
unsigned long msecs)
{
- kblockd_schedule_delayed_work(&q->requeue_work,
- msecs_to_jiffies(msecs));
+ kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+ msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
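The switch matters because kblockd_schedule_delayed_work() is a no-op when the work item is already pending, whereas kblockd_mod_delayed_work_on() re-arms its timer. A sketch of the case being fixed (call sequence assumed for illustration):

/* Without mod_delayed_work semantics, the second kick would be lost: */
blk_mq_delay_kick_requeue_list(q, 10000);	/* queued, fires in 10 s */
blk_mq_kick_requeue_list(q);			/* must requeue now, not in 10 s */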
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..98aa8c808a33 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -17,6 +17,16 @@
#include <linux/serial_core.h>
/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
* Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
goto done;
}
- if (qdf2400_erratum_44_present(&table->header))
- uart = "qdf2400_e44";
+ /*
+ * If the E44 erratum is required, then we need to tell the pl011
+ * driver to implement the work-around.
+ *
+ * The global variable is used by the probe function when it
+ * creates the UARTs, whether or not they're used as a console.
+ *
+ * If the user specifies "traditional" earlycon, the qdf2400_e44
+ * console name matches the EARLYCON_DECLARE() statement, and
+ * SPCR is not used. Parameter "earlycon" is false.
+ *
+ * If the user specifies "SPCR" earlycon, then we need to update
+ * the console name so that it also says "qdf2400_e44". Parameter
+ * "earlycon" is true.
+ *
+ * For consistency, if we change the console name, then we do it
+ * for everyone, not just earlycon.
+ */
+ if (qdf2400_erratum_44_present(&table->header)) {
+ qdf2400_e44_present = true;
+ if (earlycon)
+ uart = "qdf2400_e44";
+ }
+
if (xgene_8250_erratum_present(table))
iotype = "mmio32";
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..bfbe1e154128 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,7 +30,6 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
-#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
* state of the firmware loading.
*/
struct fw_state {
- struct swait_queue_head wq;
+ struct completion completion;
enum fw_status status;
};
static void fw_state_init(struct fw_state *fw_st)
{
- init_swait_queue_head(&fw_st->wq);
+ init_completion(&fw_st->completion);
fw_st->status = FW_STATUS_UNKNOWN;
}
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
{
long ret;
- ret = swait_event_interruptible_timeout(fw_st->wq,
- __fw_state_is_done(READ_ONCE(fw_st->status)),
- timeout);
+ ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
return -ENOENT;
if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
WRITE_ONCE(fw_st->status, status);
if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
- swake_up(&fw_st->wq);
+ complete_all(&fw_st->completion);
}
#define fw_state_start(fw_st) \
__fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st) \
__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st) \
__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st) false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
{
return fw_st->status == status;
}
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
#define fw_state_aborted(fw_st) \
__fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_is_done(fw_st) \
__fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st) \
__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st) \
- __fw_state_check(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait_timeout(fw_st, timeout) \
__fw_state_wait_common(fw_st, timeout)
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 1; /* need to load */
}
+/*
+ * Batched requests need only one wake, we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+ struct firmware_buf *buf;
+
+ /* Loaded directly? */
+ if (!fw || !fw->priv)
+ return;
+
+ buf = fw->priv;
+ if (!fw_state_is_aborted(&buf->fw_st))
+ fw_state_aborted(&buf->fw_st);
+}
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
out:
if (ret < 0) {
+ fw_abort_batch_reqs(fw);
release_firmware(fw);
fw = NULL;
}
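The __fw_state_wait_common() rework above relies on the return-value contract of wait_for_completion_killable_timeout(): positive means completed (jiffies remaining), 0 means timed out, negative means a fatal signal arrived. For illustration:

/* Contract relied on by __fw_state_wait_common() (illustration): */
long ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
if (ret == 0)
	return -ETIMEDOUT;	/* nobody completed us in time */
if (ret < 0)
	return ret;		/* -ERESTARTSYS: killed while waiting */
/* ret > 0: woken by fw_state_done() or fw_state_aborted() */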
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 856d5dc02451..3b1b6340ba13 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
- char compressor[CRYPTO_MAX_ALG_NAME];
+ char compressor[ARRAY_SIZE(zram->compressor)];
size_t sz;
strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
return -EBUSY;
}
- strlcpy(zram->compressor, compressor, sizeof(compressor));
+ strcpy(zram->compressor, compressor);
up_write(&zram->init_lock);
return len;
}
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193..42896a67aeae 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
return -1;
}
+extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
const char *names[CPUIDLE_STATE_MAX];
u32 has_stop_states = 0;
int i, rc;
+ u32 supported_flags = pnv_get_supported_cpuidle_states();
+
/* Currently we have snooze statically defined */
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
for (i = 0; i < dt_idle_states; i++) {
unsigned int exit_latency, target_residency;
bool stops_timebase = false;
+
+ /*
+ * Skip the platform idle state whose flag isn't in
+ * the supported_cpuidle_states flag mask.
+ */
+ if ((flags[i] & supported_flags) != flags[i])
+ continue;
/*
* If an idle state has exit latency beyond
* POWERNV_THRESHOLD_LATENCY_NS then don't use it
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 427cbe012729..dadc4a808df5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto free_buf_src;
+ goto free_buf_dst;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
@@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-free_buf_src:
- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dst:
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d7e219d2669d..66fb40d0ebdb 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
{
struct sync_file *sync_file = file->private_data;
- if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+ if (test_bit(POLL_ENABLED, &sync_file->flags))
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
dma_fence_put(sync_file->fence);
kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
- if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (list_empty(&sync_file->cb.node) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
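
Moving the bit is about ownership: one dma_fence may back several sync_files, so a per-fence POLL_ENABLED bit lets one file's poll state alias another's. A sketch of the corrected layout (fields beyond those shown are omitted):

    /* Sketch: poll bookkeeping lives in the per-file object. */
    struct sync_file_like {
            unsigned long flags;        /* POLL_ENABLED is set here */
            struct dma_fence *fence;    /* may be shared across files */
            struct dma_fence_cb cb;     /* callback owned by this file */
    };
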
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 5c26488e7a2d..0529e500c534 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
/* Shut down GPIO is optional */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2..6463fc2c736f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
if (ret)
return ret;
- if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
- DRM_ERROR("relocation %u outside object", i);
+ if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+ DRM_ERROR("relocation %u outside object\n", i);
return -EINVAL;
}
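
The relaxed comparison admits a relocation whose pointer ends exactly at the end of the object, which the old test wrongly rejected. Numerically:

    /* 4096-byte object, 8-byte entry: the last valid offset is 4088,
     * since 4088 + 8 == 4096 still fits; 4089 and above do not. */
    bool in_bounds = (reloc_offset <= obj_size - sizeof(u64));
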
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d48fd7c918f8..73217c281c9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int height = (i == 0) ? mode_cmd->height :
+ DIV_ROUND_UP(mode_cmd->height, info->vsub);
+ unsigned long size = height * mode_cmd->pitches[i] +
+ mode_cmd->offsets[i];
+
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
exynos_gem[i] = to_exynos_gem(obj);
+
+ if (size > exynos_gem[i]->size) {
+ i++;
+ ret = -EINVAL;
+ goto err;
+ }
}
fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
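
The added check validates each plane against the GEM object backing it: the required bytes are the plane height (chroma planes reduced by the vertical subsampling factor) times the pitch, plus the plane offset. A sketch for a subsampled format such as NV12 (vsub = 2; names are illustrative):

    /* Plane 0 uses the full height; plane i > 0 uses the subsampled
     * height. The buffer must hold the whole plane. */
    unsigned int h = (i == 0) ? height : DIV_ROUND_UP(height, vsub);
    unsigned long need = (unsigned long)h * pitches[i] + offsets[i];

    if (need > gem_size[i])
            return -EINVAL;   /* undersized buffer would be scanned out */
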
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242..1648887d3f55 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_execlist *execlist =
- &vgpu->execlist[workload->ring_id];
+ int ring_id = workload->ring_id;
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
int ret;
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
- if (workload->status || vgpu->resetting)
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ /* If workload->status is not successful, the HW GPU has hit a
+ * GPU hang or something is wrong with i915/GVT, and GVT won't
+ * inject a context switch interrupt to the guest. To the guest
+ * this error is effectively a vGPU hang, so we should emulate
+ * one. If there are pending workloads already submitted from
+ * the guest, we should clean them up the way the HW GPU would.
+ *
+ * If we are in the middle of an engine reset, the pending
+ * workloads won't be submitted to the HW GPU and will be
+ * cleaned up later during the reset, so doing the workload
+ * cleanup here has no impact.
+ */
+ clean_workloads(vgpu, ENGINE_MASK(ring_id));
goto out;
+ }
- if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i;
+ int i, j;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
+ for (i = 0; i < num; i++, block++) {
+ for (j = 0; j < block->size; j += 4)
+ *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+ I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+ block->offset) + j));
+ }
+
memcpy(gvt->firmware.mmio, p, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..2964a4d01a66 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
bool active;
bool pv_notified;
bool failsafe;
- bool resetting;
+ unsigned int resetting_eng;
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+ unsigned int device;
+ i915_reg_t offset;
+ unsigned int size;
+ gvt_mmio_func read;
+ gvt_mmio_func write;
+};
+
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+ struct gvt_mmio_block *mmio_block;
+ unsigned int num_mmio_block;
+
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
unsigned int num_tracked_mmio;
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6..feed9921b3b3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
return 0;
}
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
- unsigned int device;
- i915_reg_t offset;
- unsigned int size;
- gvt_mmio_func read;
- gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
- {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
- pvinfo_mmio_read, pvinfo_mmio_write},
- {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
{
unsigned long device = intel_gvt_get_device_type(gvt);
- struct gvt_mmio_block *block = gvt_mmio_blocks;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
int i;
- for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+ for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_attribute = NULL;
}
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+ {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+ {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+ {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+ pvinfo_mmio_read, pvinfo_mmio_write},
+ {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
* @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
+ gvt->mmio.mmio_block = mmio_blocks;
+ gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
gvt_dbg_mmio("traced %u virtual mmio registers\n",
gvt->mmio.num_tracked_mmio);
return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 4))
+ if (WARN_ON(bytes > 8))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88..22e08eb2d0b7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
i915_gem_request_put(fetch_and_zero(&workload->req));
- if (!workload->status && !vgpu->resetting) {
+ if (!workload->status && !(vgpu->resetting_eng &
+ ENGINE_MASK(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea0..3deadcbd5a24 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
vgpu->id, dmlr, engine_mask);
- vgpu->resetting = true;
+
+ vgpu->resetting_eng = resetting_eng;
intel_vgpu_stop_schedule(vgpu);
/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
- intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+ intel_vgpu_reset_execlist(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
}
}
- vgpu->resetting = false;
+ vgpu->resetting_eng = 0;
gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
gvt_dbg_core("------------------------------------------\n");
}
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add11..77fb39808131 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
return true;
case MUTEX_TRYLOCK_FAILED:
+ *unlock = false;
+ preempt_disable();
do {
cpu_relax();
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
- return true;
+ break;
}
} while (!need_resched());
+ preempt_enable();
+ return *unlock;
- return false;
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
}
BUG();
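
The rewritten helper drops the case label that sat inside the loop and bounds the busy-wait explicitly. The core pattern, roughly:

    /* Sketch: spin on mutex_trylock() with preemption off so the
     * need_resched() bound is honored promptly and the spin stays
     * short; bail out as soon as a reschedule is due. */
    bool locked = false;

    preempt_disable();
    do {
            cpu_relax();
            if (mutex_trylock(&some_mutex)) {   /* hypothetical mutex */
                    locked = true;
                    break;
            }
    } while (!need_resched());
    preempt_enable();
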
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cf..f33d90226704 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
u32 *cs;
int i;
- cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+ cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+ *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..17c4ae7e4e7c 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
}
/* Program the max register to clamp values > 1.0. */
+ i = lut_size - 1;
I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
drm_color_lut_extract(lut[i].red, 16));
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869..593349be8b9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
if (i915.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.max - val;
+ return panel->backlight.max - val + panel->backlight.min;
}
return val;
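
Adding the minimum keeps the inverted value inside the panel's valid range; a quick check of the endpoints:

    /* With min = 10, max = 100: val = 10 -> 100 and val = 100 -> 10.
     * The old "max - val" mapped val = 100 to 0, below the minimum
     * the hardware accepts. */
    u32 inverted = panel_max - val + panel_min;
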
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e..99d39b2aefa6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,7 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
- select QCOM_MDT_LOADER
+ select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc..f9eae03aa1dc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,7 @@
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
const struct firmware *fw;
+ struct device_node *np;
+ struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
void *mem_region = NULL;
int ret;
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+ return -EINVAL;
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np)
+ return -ENODEV;
+
+ np = of_parse_phandle(np, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
/* Request the MDT file for the firmware */
ret = request_firmware(&fw, fwname, dev);
if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
}
/* Allocate memory for the firmware image */
- mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {
ret = -ENOMEM;
goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
out:
+ if (mem_region)
+ memunmap(mem_region);
+
release_firmware(fw);
return ret;
}
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
- return -ENODEV;
-}
-#endif
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
gpu->funcs->flush(gpu);
}
-struct a5xx_hwcg {
+static const struct {
u32 offset;
u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
-static const struct {
- int (*test)(struct adreno_gpu *gpu);
- const struct a5xx_hwcg *regs;
- unsigned int count;
-} a5xx_hwcg_regs[] = {
- { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
- const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
unsigned int i;
- for (i = 0; i < count; i++)
- gpu_write(gpu, regs[i].offset, regs[i].value);
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+ gpu_write(gpu, a5xx_hwcg[i].offset,
+ state ? a5xx_hwcg[i].value : 0);
- gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
- gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
- if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
- _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
- a5xx_hwcg_regs[i].count);
- return;
- }
- }
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
return ret;
}
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
- struct device_node *node;
- int ret;
-
- if (dev->parent)
- return 0;
-
- /* Find the sub-node for the zap shader */
- node = of_get_child_by_name(parent->of_node, "zap-shader");
- if (!node) {
- DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
- return -ENODEV;
- }
-
- dev->parent = parent;
- dev->of_node = node;
- dev_set_name(dev, "adreno_zap_shader");
-
- ret = device_register(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
- goto out;
- }
-
- ret = of_reserved_mem_device_init(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
- device_unregister(dev);
- }
-
-out:
- if (ret)
- dev->parent = NULL;
-
- return ret;
-}
-
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
- ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
- if (!ret)
- ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
- adreno_gpu->info->zapfw);
+ ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
loaded = !ret;
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/* Enable HWCG */
- a5xx_enable_hwcg(gpu);
+ a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- if (a5xx_gpu->zap_dev.parent)
- device_unregister(&a5xx_gpu->zap_dev);
-
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
- 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
- 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
- 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
- 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
- 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
- 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
- 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
- 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
- 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
- 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
- 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
- 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
- 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
- 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
- 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
- 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
- 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
- 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
- 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
- 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
- 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
- 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
- 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
- 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
- ~0
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+ 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+ 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+ 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+ 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+ 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+ 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+ 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+ 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+ 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+ 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+ 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+ 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+ 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+ 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+ 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+ 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+ 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+ 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+ 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+ 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+ 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+ 0xB9A0, 0xB9BF, ~0
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+ /*
+ * Temporarily disable hardware clock gating before going into
+ * adreno_show to avoid issues while reading the registers
+ */
+ a5xx_set_hwcg(gpu, false);
adreno_show(gpu, m);
+ a5xx_set_hwcg(gpu, true);
}
#endif
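
The show path above demonstrates the bracket this patch enables: clock gating is toggled off only for the duration of the register dump. In sketch form (the reader function is hypothetical):

    /* Sketch: reads of some registers are unreliable while hardware
     * clock gating is active, so bracket the dump. */
    a5xx_set_hwcg(gpu, false);
    dump_gpu_registers(gpu);     /* hypothetical reader */
    a5xx_set_hwcg(gpu, true);
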
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6638bc85645d..1137092241d5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -36,8 +36,6 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
-
- struct device zap_dev;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
}
bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1ab2703674a..7414c6bbd582 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->base.fast_rate;
return 0;
case MSM_PARAM_TIMESTAMP:
- if (adreno_gpu->funcs->get_timestamp)
- return adreno_gpu->funcs->get_timestamp(gpu, value);
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
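
The timestamp read is now wrapped in a runtime-PM bracket so the counter is only touched while the GPU is powered. The general pattern (the read helper is hypothetical):

    /* Sketch: get_sync powers the device (blocking until resumed);
     * put_autosuspend drops the reference and arms the autosuspend
     * timer rather than suspending immediately. */
    pm_runtime_get_sync(&gpu->pdev->dev);
    ret = read_hw_counter(gpu, &value);      /* hypothetical read */
    pm_runtime_put_autosuspend(&gpu->pdev->dev);
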
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9e9c5696bc03..c7b612c3d771 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ return;
+ }
clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- int ret;
if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return -ENOMEM;
}
- ret = dsi_calc_clk_rate(msm_host);
- if (ret) {
- pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index cb5415d6c04b..735a87a699fa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
- enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
- enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+ mdp5_enable(mdp5_kms);
goto set_cursor;
}
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
get_roi(crtc, &roi_w, &roi_h);
+ mdp5_enable(mdp5_kms);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
crtc_flush(crtc, flush_mask);
end:
+ mdp5_disable(mdp5_kms);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
get_roi(crtc, &roi_w, &roi_h);
+ mdp5_enable(mdp5_kms);
+
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
crtc_flush(crtc, flush_mask);
+ mdp5_disable(mdp5_kms);
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 97f3294fbfc6..70bef51245af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
- mdp5_cmd_encoder_disable(encoder);
+ mdp5_cmd_encoder_enable(encoder);
else
mdp5_vid_encoder_enable(encoder);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5d13fa5381ee..1c603aef3c59 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name, bool mandatory)
{
struct device *dev = &pdev->dev;
- struct clk *clk = devm_clk_get(dev, name);
+ struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
}
/* mandatory clocks: */
- ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
goto fail;
/* optional clocks: */
- get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index fe3a4de1a433..61f39c86dd09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
- struct phase_step step = { 0 };
- struct pixel_ext pe = { 0 };
+ struct phase_step step = { { 0 } };
+ struct pixel_ext pe = { { 0 } };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f35544c1ec..a0c60e738db8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
struct page **pages;
vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unlock;
+ }
pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
fail:
del_vma(vma);
-
+unlock:
mutex_unlock(&msm_obj->lock);
return ret;
}
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&msm_obj->lock);
vma = add_vma(obj, NULL);
+ mutex_unlock(&msm_obj->lock);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6bfca7470141..8a75c0bd8a78 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
goto out;
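
The casts in the sizing hunk above matter because both multiplications were previously done in 32 bits, so a crafted nr_bos could wrap the product before the SIZE_MAX check ever ran. Widened arithmetic closes that hole:

    /* Sketch (names from the hunk above): promote each operand to
     * u64 before multiplying, then reject anything a size_t
     * allocation could not represent. */
    uint64_t sz = sizeof(*submit)
                + (uint64_t)nr_bos  * sizeof(submit->bos[0])
                + (uint64_t)nr_cmds * sizeof(submit->cmd[0]);

    if (sz > SIZE_MAX)
            return NULL;
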
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c36321bc8714..d34e331554f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -42,7 +42,7 @@ void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
{
- if (!vma->iova)
+ if (!aspace || !vma->iova)
return;
if (aspace->mmu) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index c7c84d34d97e..88582af8bd89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
/* Create output path objects for each VBIOS display path. */
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ if (ver < 0x40) /* No support for chipsets prior to NV50. */
+ break;
if (dcbE.type == DCB_OUTPUT_UNUSED)
continue;
if (dcbE.type == DCB_OUTPUT_EOL)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d450332c2fd..2900f1410d95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
- int ret;
+ int ret, i;
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
}
memcpy(vop->regs, vop->regsbak, vop->len);
+ /*
+ * We need to make sure that all windows are disabled before we
+ * enable the crtc. Otherwise we might try to scan from a destroyed
+ * buffer later.
+ */
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ spin_unlock(&vop->reg_lock);
+ }
+
vop_cfg_done(vop);
/*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
static void vop_crtc_disable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
- int i;
WARN_ON(vop->event);
rockchip_drm_psr_deactivate(&vop->crtc);
- /*
- * We need to make sure that all windows are disabled before we
- * disable that crtc. Otherwise we might try to scan from a destroyed
- * buffer later.
- */
- for (i = 0; i < vop->data->win_size; i++) {
- struct vop_win *vop_win = &vop->win[i];
- const struct vop_win_data *win = vop_win->data;
-
- spin_lock(&vop->reg_lock);
- VOP_WIN_SET(vop, win, enable, 0);
- spin_unlock(&vop->reg_lock);
- }
-
- vop_cfg_done(vop);
-
drm_crtc_vblank_off(crtc);
/*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd when do clip, but yuv plane start point
* need align with 2 pixel.
*/
- if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+ if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+ DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
+ }
return 0;
}
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, format, format);
- VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+ VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
if (is_yuv_support(fb->format->format)) {
int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
- VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+ VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
VOP_WIN_SET(vop, win, uv_mst, dma_addr);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 9979fd0c2282..27eefbfcf3d0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
act_height = (src_h + vskiplines - 1) / vskiplines;
+ if (act_height == dst_h)
+ return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
return GET_SCL_FT_BILI_DN(act_height, dst_h);
}
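
The early return covers the case where vertical skipping alone already reaches the target height; a worked example:

    /* src_h = 2160, dst_h = 1080, vskiplines = 2:
     * act_height = (2160 + 2 - 1) / 2 = 1080 == dst_h, so
     * GET_SCL_FT_BILI_DN(1080, 1080) would yield a 1:1 factor even
     * though the hardware still sees 2160 source lines. Computing
     * the factor from src_h and dividing by vskiplines fixes that. */
    u16 ft = GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
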
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 2c4817fb0890..8fe5b184b4e8 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -7,7 +7,6 @@ config DRM_STM
select DRM_PANEL
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA
- default y
help
Enable support for the on-chip display controller on
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 6b5d3be283c4..807299dd45eb 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
struct regmap *regmap;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
- atomic_t active_intr;
struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
struct mutex mutex;
u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
goto out_fix_power_state;
}
- if (state)
- atomic_inc(&data->active_intr);
- else
- atomic_dec(&data->active_intr);
-
return 0;
out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
mutex_lock(&data->mutex);
- if (atomic_read(&data->active_intr))
- bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
bmc150_accel_fifo_set_mode(data);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 07d1489cd457..e44f62bf9caa 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.en_mask = 0x08,
},
},
+ .sim = {
+ .addr = 0x24,
+ .value = BIT(0),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = true,
.bootime = 2, /* guess */
},
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(7),
+ },
.multi_read_bit = false,
.bootime = 2, /* guess */
},
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr_ihl = 0x22,
.mask_ihl = 0x80,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index e0ea411a0b2d..c02b23d675cb 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -22,6 +22,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
+#include <linux/iopoll.h>
#define ASPEED_RESOLUTION_BITS 10
#define ASPEED_CLOCKS_PER_SAMPLE 12
@@ -38,11 +39,17 @@
#define ASPEED_ENGINE_ENABLE BIT(0)
+#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME 500
+#define ASPEED_ADC_INIT_TIMEOUT 500000
+
struct aspeed_adc_model_data {
const char *model_name;
unsigned int min_sampling_rate; // Hz
unsigned int max_sampling_rate; // Hz
unsigned int vref_voltage; // mV
+ bool wait_init_sequence;
};
struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
goto scaler_error;
}
+ model_data = of_device_get_match_data(&pdev->dev);
+
+ if (model_data->wait_init_sequence) {
+ /* Enable engine in normal mode. */
+ writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+
+ /* Wait for initial sequence complete. */
+ ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+ adc_engine_control_reg_val,
+ adc_engine_control_reg_val &
+ ASPEED_ADC_CTRL_INIT_RDY,
+ ASPEED_ADC_INIT_POLLING_TIME,
+ ASPEED_ADC_INIT_TIMEOUT);
+ if (ret)
+ goto scaler_error;
+ }
+
/* Start all channels in normal mode. */
ret = clk_prepare_enable(data->clk_scaler->clk);
if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
.vref_voltage = 1800, // mV
.min_sampling_rate = 1,
.max_sampling_rate = 1000000,
+ .wait_init_sequence = true,
};
static const struct of_device_id aspeed_adc_matches[] = {
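
readl_poll_timeout() above encapsulates the poll loop: it re-reads the register into the value argument, evaluates the condition, sleeps between attempts, and fails with -ETIMEDOUT once the budget is spent. Usage in isolation, with the constants added in this patch:

    /* Poll ENGINE_CONTROL until INIT_RDY latches, checking every
     * 500 us and giving up after 500 ms. */
    u32 val;
    int ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
                                 val, val & ASPEED_ADC_CTRL_INIT_RDY,
                                 ASPEED_ADC_INIT_POLLING_TIME,
                                 ASPEED_ADC_INIT_TIMEOUT);
    if (ret)
            return ret;   /* -ETIMEDOUT */
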
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 64799ad7ebad..462a99c13e7a 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,6 +28,8 @@
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
+#define AXP288_ADC_TS_PIN_GPADC 0xF2
+#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+ unsigned long address)
+{
+ int ret;
+
+ /* channels other than GPADC do not need to switch TS pin */
+ if (address != AXP288_GP_ADC_H)
+ return 0;
+
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+ if (ret)
+ return ret;
+
+ /* When switching to the GPADC pin give things some time to settle */
+ if (mode == AXP288_ADC_TS_PIN_GPADC)
+ usleep_range(6000, 10000);
+
+ return 0;
+}
+
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+ chan->address)) {
+ dev_err(&indio_dev->dev, "GPADC mode\n");
+ ret = -EINVAL;
+ break;
+ }
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+ chan->address))
+ dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+ /* ADC should be always enabled for internal FG to function */
+ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+ return -EIO;
+
+ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all times, including system suspend;
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+ ret = axp288_adc_set_state(axp20x->regmap);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 81d4c39e414a..137f577d9432 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
err:
pm_runtime_put_autosuspend(indio_dev->dev.parent);
+ disable_irq(irq);
mutex_unlock(&info->mutex);
return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->temp_data_irq);
return IRQ_HANDLED;
}
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->fifo_data_irq);
return IRQ_HANDLED;
}
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 01fc76f7d660..c168e0db329a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
#define VF610_ADC_ADSTS_MASK 0x300
#define VF610_ADC_ADLPC_EN 0x80
#define VF610_ADC_ADHSC_EN 0x400
-#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VALT 0x800
#define VF610_ADC_REFSEL_VBG 0x1000
#define VF610_ADC_ADTRG_HARD 0x2000
#define VF610_ADC_AVGS_8 0x4000
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 79c8c7cd70d5..6e6a1ecc99dd 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -550,6 +550,31 @@ out:
}
EXPORT_SYMBOL(st_sensors_read_info_raw);
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+ const struct st_sensor_settings *sensor_settings)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct device_node *np = sdata->dev->of_node;
+ struct st_sensors_platform_data *pdata;
+
+ pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
+ if (((np && of_property_read_bool(np, "spi-3wire")) ||
+ (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+ int err;
+
+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+ sensor_settings->sim.addr,
+ sensor_settings->sim.value);
+ if (err < 0) {
+ dev_err(&indio_dev->dev,
+ "failed to init interface mode\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int st_sensors_check_device_support(struct iio_dev *indio_dev,
int num_sensors_list,
const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
return -ENODEV;
}
+ err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+ if (err < 0)
+ return err;
+
if (sensor_settings[i].wai_addr) {
err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
sensor_settings[i].wai_addr, &wai);
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index e7d4ea75e007..7599693f7fe9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
struct tsl2563_chip *chip = iio_priv(dev_info);
iio_push_event(dev_info,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index aa61ec15c139..f1bce05ffa13 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = true,
+ .multi_read_bit = false,
.bootime = 2,
},
};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b97188acc4f1..2d80fa8a0634 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev)
if (using_legacy_binding) {
ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+ /*
+ * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
+ * will allocate/initialise a new one. Thus we need to update fwspec for
+ * later use.
+ */
+ fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
index 78fc5d5e9051..92e6570b1143 100644
--- a/drivers/isdn/mISDN/fsm.c
+++ b/drivers/isdn/mISDN/fsm.c
@@ -26,7 +26,7 @@
#define FSM_TIMER_DEBUG 0
-void
+int
mISDN_FsmNew(struct Fsm *fsm,
struct FsmNode *fnlist, int fncount)
{
@@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm,
fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
fsm->event_count, GFP_KERNEL);
+ if (fsm->jumpmatrix == NULL)
+ return -ENOMEM;
for (i = 0; i < fncount; i++)
if ((fnlist[i].state >= fsm->state_count) ||
@@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm,
} else
fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+ return 0;
}
EXPORT_SYMBOL(mISDN_FsmNew);
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h
index 928f5be192c1..e1def8490221 100644
--- a/drivers/isdn/mISDN/fsm.h
+++ b/drivers/isdn/mISDN/fsm.h
@@ -55,7 +55,7 @@ struct FsmTimer {
void *arg;
};
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
extern void mISDN_FsmFree(struct Fsm *);
extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
extern void mISDN_FsmChangeState(struct FsmInst *, int);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index bebc57b72138..3192b0eb3944 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -414,8 +414,7 @@ l1_init(u_int *deb)
l1fsm_s.event_count = L1_EVENT_COUNT;
l1fsm_s.strEvent = strL1Event;
l1fsm_s.strState = strL1SState;
- mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
- return 0;
+ return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
}
void
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 7243a6746f8b..9ff0903a0e89 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = {
int
Isdnl2_Init(u_int *deb)
{
+ int res;
debug = deb;
mISDN_register_Bprotocol(&X75SLP);
l2fsm.state_count = L2_STATE_COUNT;
l2fsm.event_count = L2_EVENT_COUNT;
l2fsm.strEvent = strL2Event;
l2fsm.strState = strL2State;
- mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
- TEIInit(deb);
+ res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+ if (res)
+ goto error;
+ res = TEIInit(deb);
+ if (res)
+ goto error_fsm;
return 0;
+
+error_fsm:
+ mISDN_FsmFree(&l2fsm);
+error:
+ mISDN_unregister_Bprotocol(&X75SLP);
+ return res;
}
void
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 908127efccf8..12d9e5f4beb1 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev)
int TEIInit(u_int *deb)
{
+ int res;
debug = deb;
teifsmu.state_count = TEI_STATE_COUNT;
teifsmu.event_count = TEI_EVENT_COUNT;
teifsmu.strEvent = strTeiEvent;
teifsmu.strState = strTeiState;
- mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ if (res)
+ goto error;
teifsmn.state_count = TEI_STATE_COUNT;
teifsmn.event_count = TEI_EVENT_COUNT;
teifsmn.strEvent = strTeiEvent;
teifsmn.strState = strTeiState;
- mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ if (res)
+ goto error_smn;
deactfsm.state_count = DEACT_STATE_COUNT;
deactfsm.event_count = DEACT_EVENT_COUNT;
deactfsm.strEvent = strDeactEvent;
deactfsm.strState = strDeactState;
- mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ if (res)
+ goto error_deact;
return 0;
+
+error_deact:
+ mISDN_FsmFree(&teifsmn);
+error_smn:
+ mISDN_FsmFree(&teifsmu);
+error:
+ return res;
}
void TEIFree(void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c99634612fc4..b01e458d31e9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
if (mddev->safemode == 1)
mddev->safemode = 0;
/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
- if (mddev->in_sync || !mddev->sync_checkers) {
+ if (mddev->in_sync || mddev->sync_checkers) {
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
@@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+
if (mddev->ro) {
struct md_rdev *rdev;
if (!mddev->external && mddev->in_sync)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index bfa1e907c472..2dcbafa8e66c 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -236,9 +236,10 @@ struct r5l_io_unit {
bool need_split_bio;
struct bio *split_bio;
- unsigned int has_flush:1; /* include flush request */
- unsigned int has_fua:1; /* include fua request */
- unsigned int has_null_flush:1; /* include empty flush request */
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include null flush request */
+ unsigned int has_flush_payload:1; /* include flush payload */
/*
* io isn't sent yet, flush/fua request can only be submitted till it's
* the first IO in running_ios list
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
+ bool has_null_flush;
+ bool has_flush_payload;
if (bio->bi_status)
md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)
spin_lock_irqsave(&log->io_list_lock, flags);
__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+ /*
+ * It is not safe to access the io after releasing io_list_lock,
+ * because it may be freed once its last pending stripe completes.
+ * Therefore, snapshot the null_flush and flush-payload flags
+ * while the lock is held.
+ */
+ has_null_flush = io->has_null_flush;
+ has_flush_payload = io->has_flush_payload;
+
if (log->need_cache_flush && !list_empty(&io->stripe_list))
r5l_move_to_end_ios(log);
else
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
- if (io->has_null_flush) {
+ /* finish flush-only io_units and PAYLOAD_FLUSH-only io_units */
+ if (has_null_flush) {
struct bio *bi;
WARN_ON(bio_list_empty(&io->flush_barriers));
while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
bio_endio(bi);
- atomic_dec(&io->pending_stripe);
+ if (atomic_dec_and_test(&io->pending_stripe)) {
+ __r5l_stripe_write_finished(io);
+ return;
+ }
}
}
-
- /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
- if (atomic_read(&io->pending_stripe) == 0)
- __r5l_stripe_write_finished(io);
+ /* decrease pending_stripe for flush payload */
+ if (has_flush_payload)
+ if (atomic_dec_and_test(&io->pending_stripe))
+ __r5l_stripe_write_finished(io);
}
static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
payload->size = cpu_to_le32(sizeof(__le64));
payload->flush_stripes[0] = cpu_to_le64(sect);
io->meta_offset += meta_size;
+ /* multiple flush payloads count as one pending_stripe */
+ if (!io->has_flush_payload) {
+ io->has_flush_payload = 1;
+ atomic_inc(&io->pending_stripe);
+ }
mutex_unlock(&log->io_mutex);
}
@@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
*/
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
- struct r5conf *conf = mddev->private;
- struct r5l_log *log = conf->log;
-
- if (!log)
- return -ENODEV;
+ struct r5conf *conf;
+ int err;
if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
mode > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ conf = mddev->private;
+ if (!conf || !conf->log) {
+ mddev_unlock(mddev);
+ return -ENODEV;
+ }
+
if (raid5_calc_degraded(conf) > 0 &&
- mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ mddev_unlock(mddev);
return -EINVAL;
+ }
mddev_suspend(mddev);
conf->log->r5c_journal_mode = mode;
mddev_resume(mddev);
+ mddev_unlock(mddev);
pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
mdname(mddev), mode, r5c_journal_mode_str[mode]);
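
The r5l_log_endio change above is an instance of the snapshot-under-lock pattern: once io_list_lock is dropped, the final pending_stripe reference may be put and the io_unit freed, so the bitfields are copied into locals while the lock is still held. A minimal, self-contained sketch of the pattern (names are hypothetical):

	#include <linux/spinlock.h>
	#include <linux/types.h>
	#include <linux/printk.h>

	struct unit {
		unsigned int has_flag:1;	/* owner may free the unit */
	};

	static DEFINE_SPINLOCK(unit_lock);

	static void endio_like(struct unit *u)
	{
		unsigned long flags;
		bool has_flag;

		spin_lock_irqsave(&unit_lock, flags);
		has_flag = u->has_flag;	/* snapshot: u may be freed after unlock */
		spin_unlock_irqrestore(&unit_lock, flags);

		if (has_flag)		/* safe: uses the local copy, not u */
			pr_debug("flag was set\n");
	}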
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8621a198a2ce..bac33311f55a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/*
+ * MEI needs to resume from runtime suspend mode
+ * in order to perform the link reset flow upon system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+ /*
* For not wake-able HW runtime pm framework
* can't be used on pci device level.
* Use domain runtime pm callbacks instead.
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index f811cd524468..e38a5f144373 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/*
+ * MEI needs to resume from runtime suspend mode
+ * in order to perform the link reset flow upon system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+ /*
* For not wake-able HW runtime pm framework
* can't be used on pci device level.
* Use domain runtime pm callbacks instead.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index e5938c791330..f1bbfd389367 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2170,7 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
+ spin_lock_irq(md->queue.queue->queue_lock);
queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+ spin_unlock_irq(md->queue.queue->queue_lock);
blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4ffea14b7eb6..2bae69e39544 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1289,7 +1289,7 @@ out_err:
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- int err = 0;
+ int err = -EINVAL;
u8 val;
if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 04ff3c97a535..2ab4788d021f 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+ MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps |= mmc_pdata(host)->caps;
if (mmc->caps & MMC_CAP_8_BIT_DATA)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f336a9b85576..9ec8f033ac5f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
return BLK_STS_IOERR;
+ return BLK_STS_OK;
default:
return BLK_STS_IOERR;
}
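
The one-line mtd fix above matters because, without an explicit return, the REQ_OP_WRITE case fell through into the default: arm and reported BLK_STS_IOERR even when every sector was written. Condensed shape of the corrected switch (abridged to the two arms shown in the hunk):

	switch (req_op(req)) {
	case REQ_OP_WRITE:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return BLK_STS_IOERR;
		return BLK_STS_OK;	/* previously fell through to the line below */
	default:
		return BLK_STS_IOERR;
	}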
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9bee6c1c70cc..fc63992ab0e0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- if (bond_update_speed_duplex(new_slave))
+ if (bond_update_speed_duplex(new_slave) &&
+ bond_needs_speed_duplex(bond))
new_slave->link = BOND_LINK_DOWN;
new_slave->last_rx = jiffies -
@@ -2140,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- if (bond_update_speed_duplex(slave)) {
+ if (bond_update_speed_duplex(slave) &&
+ bond_needs_speed_duplex(bond)) {
slave->link = BOND_LINK_DOWN;
- netdev_warn(bond->dev,
- "failed to get link speed/duplex for %s\n",
- slave->dev->name);
+ if (net_ratelimit())
+ netdev_warn(bond->dev,
+ "failed to get link speed/duplex for %s\n",
+ slave->dev->name);
continue;
}
bond_set_slave_link_state(slave, BOND_LINK_UP,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index daa37750d152..b9bff1d9801f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -531,6 +531,7 @@ enum { /* adapter flags */
USING_SOFT_PARAMS = (1 << 6),
MASTER_PF = (1 << 7),
FW_OFLD_CONN = (1 << 9),
+ ROOT_NO_RELAXED_ORDERING = (1 << 10),
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index afa6fd688fac..77538cd8184a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4610,11 +4610,6 @@ static void print_port_info(const struct net_device *dev)
dev->name, adap->params.vpd.id, adap->name, buf);
}
-static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
-{
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
-}
-
/*
* Free the following resources:
* - memory used for tables
@@ -4864,7 +4859,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_enable_pcie_error_reporting(pdev);
- enable_pcie_relaxed_ordering(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
@@ -4903,6 +4897,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+ /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+ * Ingress Packet Data to Free List Buffers in order to allow for
+ * chipset performance optimizations between the Root Complex and
+ * Memory Controllers. (Messages to the associated Ingress Queue
+ * notifying new Packet Placement in the Free Lists Buffers will be
+ * sent without the Relaxed Ordering Attribute, thus guaranteeing that
+ * all preceding PCIe Transaction Layer Packets will be processed
+ * first.) But some Root Complexes have various issues with Upstream
+ * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+ * PCIe devices below such Root Complexes will have the Relaxed
+ * Ordering bit cleared in their configuration space, so we check our
+ * PCIe configuration space to see if it's flagged with advice against
+ * using Relaxed Ordering.
+ */
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ede12209f20b..4ef68f69b58c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
+ int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
/* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16);
@@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
- FW_IQ_CMD_FL0FETCHRO_F |
- FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_F);
if (cong >= 0)
c.iqns_to_fl0congen |=
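
The FETCHRO/DATARO change works because of the cxgb4 firmware-API macro convention, in which FIELD_V(x) shifts a value into the field and FIELD_F is shorthand for FIELD_V(1). A sketch of the convention (the bit position is an assumption for illustration; the real definitions live in t4fw_api.h):

	#define FW_IQ_CMD_FL0FETCHRO_S		6	/* assumed shift */
	#define FW_IQ_CMD_FL0FETCHRO_V(x)	((x) << FW_IQ_CMD_FL0FETCHRO_S)
	#define FW_IQ_CMD_FL0FETCHRO_F		FW_IQ_CMD_FL0FETCHRO_V(1U)

With relaxed computed as 0 or 1 from the adapter flag, the same expression either sets or clears the field, instead of unconditionally asserting it as _F did.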
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 109bc630408b..08c6ddb84a04 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -408,6 +408,7 @@ enum { /* adapter flags */
USING_MSI = (1UL << 1),
USING_MSIX = (1UL << 2),
QUEUES_BOUND = (1UL << 3),
+ ROOT_NO_RELAXED_ORDERING = (1UL << 4),
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ac7a150c54e9..2b85b874fd0d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2888,6 +2888,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
*/
adapter->name = pci_name(pdev);
adapter->msg_enable = DFLT_MSG_ENABLE;
+
+ /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+ * Ingress Packet Data to Free List Buffers in order to allow for
+ * chipset performance optimizations between the Root Complex and
+ * Memory Controllers. (Messages to the associated Ingress Queue
+ * notifying new Packet Placement in the Free Lists Buffers will be
+ * sent without the Relaxed Ordering Attribute, thus guaranteeing that
+ * all preceding PCIe Transaction Layer Packets will be processed
+ * first.) But some Root Complexes have various issues with Upstream
+ * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+ * PCIe devices below such Root Complexes will have the Relaxed
+ * Ordering bit cleared in their configuration space, so we check our
+ * PCIe configuration space to see if it's flagged with advice against
+ * using Relaxed Ordering.
+ */
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
err = adap_init0(adapter);
if (err)
goto err_unmap_bar;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index e37dde2ba97f..05498e7f2840 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2205,6 +2205,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
+ int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
/*
* If we're using MSI interrupts and we're not initializing the
@@ -2300,6 +2301,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cpu_to_be32(
FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_F);
/* In T6, for egress queue type FL there is internal overhead
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index a69d68ba3d0c..aa46b23cdfb1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -148,14 +148,10 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
return;
}
- if (link) {
+ if (link)
netif_carrier_on(netdev);
- rtnl_lock();
- dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
- rtnl_unlock();
- } else {
+ else
netif_carrier_off(netdev);
- }
rcu_read_unlock();
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index d3f96a8f743b..c7407d129c7d 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1076,7 +1076,6 @@ enum efx_stats_action {
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -1098,7 +1097,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+ }
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index db157a47000c..72ec711fcba2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_device *ndev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
+ struct device *dev = ndev->dev.parent;
int addr, found;
if (!mdio_bus_data)
@@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_device *ndev)
else
err = mdiobus_register(new_bus);
if (err != 0) {
- netdev_err(ndev, "Cannot register the MDIO bus\n");
+ dev_err(dev, "Cannot register the MDIO bus\n");
goto bus_register_fail;
}
@@ -285,14 +286,12 @@ int stmmac_mdio_register(struct net_device *ndev)
irq_str = irq_num;
break;
}
- netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
- phydev->phy_id, addr, irq_str, phydev_name(phydev),
- act ? " active" : "");
+ phy_attached_info(phydev);
found = 1;
}
if (!found && !mdio_node) {
- netdev_warn(ndev, "No PHY found\n");
+ dev_warn(dev, "No PHY found\n");
mdiobus_unregister(new_bus);
mdiobus_free(new_bus);
return -ENODEV;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index d21258d277ce..f1b60740e020 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -159,8 +159,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
- brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg",
- &gscan_cfg, sizeof(gscan_cfg));
+ if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
+ brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
+ "pfn_gscan_cfg",
+ &gscan_cfg, sizeof(gscan_cfg));
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index b4ecd1fe1374..97208ce19f92 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -154,7 +154,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -165,7 +165,7 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -176,7 +176,7 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_next_step = IWL9260B_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -186,8 +186,8 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
@@ -198,8 +198,8 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 0fa8c473f1e2..c73a6438ce8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -328,6 +328,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
* command size (command version 4) that supports toggling ACK TX
* power reduction.
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -373,6 +374,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
+ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c52623cb7c2a..d19c74827fbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -276,10 +276,10 @@ struct iwl_pwr_tx_backoff {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
* (if supported)
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
- * step. Supported only in integrated solutions.
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+ * next step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
@@ -330,7 +330,7 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
- const char *fw_name_pre_next_step;
+ const char *fw_name_pre_b_or_c_step;
const char *fw_name_pre_rf_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 6fdb5921e17f..4e0f86fe0a6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -216,8 +216,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
const char *fw_pre_name;
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
- fw_pre_name = cfg->fw_name_pre_next_step;
+ (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+ CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+ fw_pre_name = cfg->fw_name_pre_b_or_c_step;
else if (drv->trans->cfg->integrated &&
CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
cfg->fw_name_pre_rf_next_step)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 5c08f4d40f6a..3ee6767392b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -785,7 +785,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc)
{
int ch_idx;
- u16 ch_flags, prev_ch_flags = 0;
+ u16 ch_flags;
+ u32 reg_rule_flags, prev_reg_rule_flags = 0;
const u8 *nvm_chan = cfg->ext_nvm ?
iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd;
@@ -834,8 +835,11 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
continue;
}
+ reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+ ch_flags, cfg);
+
/* we can't continue the same rule */
- if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
center_freq - prev_center_freq > 20) {
valid_rules++;
new_rule = true;
@@ -854,18 +858,17 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
rule->power_rule.max_eirp =
DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
- rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cfg);
+ rule->flags = reg_rule_flags;
/* rely on auto-calculation to merge BW of contiguous chans */
rule->flags |= NL80211_RRF_AUTO_BW;
rule->freq_range.max_bandwidth_khz = 0;
- prev_ch_flags = ch_flags;
prev_center_freq = center_freq;
+ prev_reg_rule_flags = reg_rule_flags;
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+ "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
center_freq,
band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT_I(VALID),
@@ -877,10 +880,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
CHECK_AND_PRINT_I(160MHZ),
CHECK_AND_PRINT_I(INDOOR_ONLY),
CHECK_AND_PRINT_I(GO_CONCURRENT),
- ch_flags,
+ ch_flags, reg_rule_flags,
((ch_flags & NVM_CHANNEL_ACTIVE) &&
!(ch_flags & NVM_CHANNEL_RADAR))
- ? "" : "not ");
+ ? "Ad-Hoc" : "");
}
regd->n_reg_rules = valid_rules;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 875cf3a60adb..4b2a454cf7b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -841,8 +841,10 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
entry = &wifi_pkg->package.elements[idx++];
if ((entry->type != ACPI_TYPE_INTEGER) ||
- (entry->integer.value > U8_MAX))
- return -EINVAL;
+ (entry->integer.value > U8_MAX)) {
+ ret = -EINVAL;
+ goto out_free;
+ }
mvm->geo_profiles[i].values[j] = entry->integer.value;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 2d1404c9fbf4..66f534aab240 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2554,8 +2554,18 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
spin_lock_bh(&mvm_sta->lock);
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
tid_data = &mvm_sta->tid_data[i];
- while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+
+ while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC
+ * queues, so we should never get a second deferred
+ * frame for the RA/TID.
+ */
+ iwl_mvm_start_mac_queues(mvm, info->hw_queue);
ieee80211_free_txskb(mvm->hw, skb);
+ }
}
spin_unlock_bh(&mvm_sta->lock);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 65beca3a457a..8999a1199d60 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1291,7 +1291,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len,
reduced_txp);
@@ -1312,7 +1312,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1;
- rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+ rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
info->status.ampdu_len,
info->status.ampdu_ack_len);
@@ -1348,11 +1348,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
continue;
rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
- lq_rate.index, 1,
+ tx_resp_rate.index, 1,
i < retries ? 0 : legacy_success,
reduced_txp);
rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
- lq_rate.index, 1,
+ tx_resp_rate.index, 1,
i < retries ? 0 : legacy_success);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 6b8e57b7234a..67ffd9774712 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -635,9 +635,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid_data = rcu_dereference(mvm->baid_map[baid]);
if (!baid_data) {
- WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
- "Received baid %d, but no data exists for this BAID\n",
- baid);
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder);
return false;
}
@@ -758,7 +758,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
data = rcu_dereference(mvm->baid_map[baid]);
if (!data) {
- WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder_data);
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 922cd5379841..b14830be0c22 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -121,7 +121,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
.add_modify = update ? 1 : 0,
.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
- STA_FLG_MIMO_EN_MSK),
+ STA_FLG_MIMO_EN_MSK |
+ STA_FLG_RTS_MIMO_PROT),
.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
};
int ret;
@@ -290,8 +291,8 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
goto unlock;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
- sta->addr, ba_data->tid);
+ ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+ sta->addr, ba_data->tid);
unlock:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6d7d1a66af81..321e47874ceb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -184,8 +184,14 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
else
udp_hdr(skb)->check = 0;
- /* mac header len should include IV, size is in words */
- if (info->control.hw_key)
+ /*
+ * mac header len should include IV (size is in words), unless
+ * the IV is added by the firmware, as in WEP.
+ * In the new Tx API, the IV is always added by the firmware.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
mh_len += info->control.hw_key->iv_len;
mh_len /= 2;
offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
@@ -1730,6 +1736,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_tid_data *tid_data;
struct iwl_mvm_sta *mvmsta;
+ ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 87712aeac31f..2126b9adbb08 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -510,9 +510,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 9000 Series */
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
@@ -527,10 +535,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c49f1f8b2e57..37046ac2c441 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = cpu_to_le32(sizeof(*s));
+ c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS;
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_write_cache(q, vwc, vwc);
}
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
/*
* APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
* then don't do anything.
*/
if (!ctrl->apsta)
- return;
+ return 0;
if (ctrl->npss > 31) {
dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
- return;
+ return 0;
}
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
- return;
+ return 0;
if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
/* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
kfree(table);
+ return ret;
}
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
* In fabrics we need to verify the cntlid matches the
* admin connect
*/
- if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
ret = -EINVAL;
+ goto out_free;
+ }
if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
+ goto out_free;
}
} else {
ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device);
- nvme_configure_apst(ctrl);
- nvme_configure_directives(ctrl);
+ ret = nvme_configure_apst(ctrl);
+ if (ret < 0)
+ return ret;
+
+ ret = nvme_configure_directives(ctrl);
+ if (ret < 0)
+ return ret;
ctrl->identified = true;
+ return 0;
+
+out_free:
+ kfree(id);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
return sprintf(buf, "eui.%8phN\n", ns->eui);
- while (ctrl->serial[serial_len - 1] == ' ')
+ while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+ ctrl->serial[serial_len - 1] == '\0'))
serial_len--;
- while (ctrl->model[model_len - 1] == ' ')
+ while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+ ctrl->model[model_len - 1] == '\0'))
model_len--;
return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cd888a47d0fc..74a124a06264 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
if (dev->cmb) {
iounmap(dev->cmb);
dev->cmb = NULL;
- if (dev->cmbsz) {
- sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL);
- dev->cmbsz = 0;
- }
+ sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL);
+ dev->cmbsz = 0;
}
}
@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
/*
* CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Note that we add the
- * CMB attribute to the nvme_ctrl kobj which removes the need to remove
- * it on exit. Since nvme_dev_attrs_group has no name we can pass
- * NULL as final argument to sysfs_add_file_to_group.
+ * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+ * has no name we can pass NULL as final argument to
+ * sysfs_add_file_to_group.
*/
if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev);
-
- if (dev->cmbsz) {
+ if (dev->cmb) {
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL))
dev_warn(dev->ctrl.device,
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 31ca55dfcb1d..1b7f2520a20d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
struct kref ref;
};
+struct nvmet_fc_defer_fcp_req {
+ struct list_head req_list;
+ struct nvmefc_tgt_fcp_req *fcp_req;
+};
+
struct nvmet_fc_tgt_queue {
bool ninetypercent;
u16 qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
struct list_head fod_list;
+ struct list_head pending_cmd_list;
+ struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
} __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
static struct nvmet_fc_fcp_iod *fod;
- unsigned long flags;
- spin_lock_irqsave(&queue->qlock, flags);
+ lockdep_assert_held(&queue->qlock);
+
fod = list_first_entry_or_null(&queue->fod_list,
struct nvmet_fc_fcp_iod, fcp_list);
if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
* will "inherit" that reference.
*/
}
- spin_unlock_irqrestore(&queue->qlock, flags);
return fod;
}
static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_tgt_queue *queue,
+ struct nvmefc_tgt_fcp_req *fcpreq)
+{
+ struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+ /*
+ * put all admin cmds on hw queue id 0. All io commands go to
+ * their respective hw queues on a modulo basis
+ */
+ fcpreq->hwqid = queue->qid ?
+ ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+ if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+ queue_work_on(queue->cpu, queue->work_q, &fod->work);
+ else
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
struct nvmet_fc_fcp_iod *fod)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
unsigned long flags;
fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
fcpreq->nvmet_fc_private = NULL;
- spin_lock_irqsave(&queue->qlock, flags);
- list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
fod->active = false;
fod->abort = false;
fod->aborted = false;
fod->writedataactive = false;
fod->fcpreq = NULL;
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp) {
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ /* Release reference taken at queue lookup and fod allocation */
+ nvmet_fc_tgt_q_put(queue);
+ return;
+ }
+
+ /* Re-use the fod for the next pending cmd that was deferred */
+ list_del(&deferfcp->req_list);
+
+ fcpreq = deferfcp->fcp_req;
+
+ /* deferfcp can be reused for another IO at a later date */
+ list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
spin_unlock_irqrestore(&queue->qlock, flags);
+ /* Save NVME CMD IU in fod */
+ memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+ /* Setup new fcpreq to be processed */
+ fcpreq->rspaddr = NULL;
+ fcpreq->rsplen = 0;
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+ fod->active = true;
+
+ /* inform LLDD IO is now being processed */
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+ /* Submit deferred IO for processing */
+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
/*
- * release the reference taken at queue lookup and fod allocation
+ * Keep the queue lookup reference that was taken when the
+ * fod was originally allocated.
*/
- nvmet_fc_tgt_q_put(queue);
-
- tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
}
static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
queue->port = assoc->tgtport->port;
queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
INIT_LIST_HEAD(&queue->fod_list);
+ INIT_LIST_HEAD(&queue->avail_defer_list);
+ INIT_LIST_HEAD(&queue->pending_cmd_list);
atomic_set(&queue->connected, 0);
atomic_set(&queue->sqtail, 0);
atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
struct nvmet_fc_fcp_iod *fod = queue->fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp, *tmp;
unsigned long flags;
int i, writedataactive;
bool disconnect;
@@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
}
}
}
+
+ /* Clean up deferred IOs in queue */
+ list_for_each_entry_safe(deferfcp, tmp, &queue->avail_defer_list, req_list) {
+ list_del(&deferfcp->req_list);
+ kfree(deferfcp);
+ }
+
+ for (;;) {
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp)
+ break;
+
+ list_del(&deferfcp->req_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
+ kfree(deferfcp);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
spin_unlock_irqrestore(&queue->qlock, flags);
flush_workqueue(queue->work_q);
@@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
* Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
* layer for processing.
*
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing. As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
+ *
+ * However, in some circumstances, due to the packetized nature of FC
+ * and the API of the FC LLDD (which may issue a hw command to send the
+ * response but not see the hw completion for that command before a new
+ * command is asynchronously received), it's possible for a command to
+ * arrive before the LLDD and nvmet_fc have recycled the job structure.
+ * This gives the appearance of more commands received than fit in the
+ * sq. To alleviate this scenario, a temporary queue is maintained in
+ * the transport for pending LLDD requests waiting for a queue job
+ * structure. In these "overrun" cases, a temporary queue element is
+ * allocated, the LLDD request and CMD IU buffer information are
+ * remembered, and the routine returns a -EOVERFLOW status.
+ * Subsequently, when a queue job structure is freed, it is immediately
+ * reallocated for anything on the pending request list. The LLDD's
+ * defer_rcv() callback is called, informing the LLDD that it may reuse
+ * the CMD IU buffer, and the io is then started normally with the
+ * transport.
*
- * If this routine returns error, the lldd should abort the exchange.
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
*
* @target_port: pointer to the (registered) target port the FCP CMD IU
* was received on.
@@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
struct nvmet_fc_tgt_queue *queue;
struct nvmet_fc_fcp_iod *fod;
+ struct nvmet_fc_defer_fcp_req *deferfcp;
+ unsigned long flags;
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
* when the fod is freed.
*/
+ spin_lock_irqsave(&queue->qlock, flags);
+
fod = nvmet_fc_alloc_fcp_iod(queue);
- if (!fod) {
+ if (fod) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ fcpreq->nvmet_fc_private = fod;
+ fod->fcpreq = fcpreq;
+
+ memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+ nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+ return 0;
+ }
+
+ if (!tgtport->ops->defer_rcv) {
+ spin_unlock_irqrestore(&queue->qlock, flags);
/* release the queue lookup reference */
nvmet_fc_tgt_q_put(queue);
return -ENOENT;
}
- fcpreq->nvmet_fc_private = fod;
- fod->fcpreq = fcpreq;
- /*
- * put all admin cmds on hw queue id 0. All io commands go to
- * the respective hw queue based on a modulo basis
- */
- fcpreq->hwqid = queue->qid ?
- ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
- memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+ deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (deferfcp) {
+ /* Just re-use one that was previously allocated */
+ list_del(&deferfcp->req_list);
+ } else {
+ spin_unlock_irqrestore(&queue->qlock, flags);
- if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
- queue_work_on(queue->cpu, queue->work_q, &fod->work);
- else
- nvmet_fc_handle_fcp_rqst(tgtport, fod);
+ /* Now we need to dynamically allocate one */
+ deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+ if (!deferfcp) {
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(&queue->qlock, flags);
+ }
- return 0;
+ /* For now, use rspaddr / rsplen to save payload information */
+ fcpreq->rspaddr = cmdiubuf;
+ fcpreq->rsplen = cmdiubuf_len;
+ deferfcp->fcp_req = fcpreq;
+
+ /* defer processing till a fod becomes available */
+ list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+ /* NOTE: the queue lookup reference is still valid */
+
+ spin_unlock_irqrestore(&queue->qlock, flags);
+
+ return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
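
To make the new contract concrete, a minimal sketch of an LLDD receive path honoring the three documented outcomes follows; the repost/abort helpers are hypothetical, and only nvmet_fc_rcv_fcp_req() is the real transport entry point:

	#include <linux/nvme-fc-driver.h>

	void lldd_repost_cmd_buffer(void *cmdiu);			/* hypothetical */
	void lldd_abort_exchange(struct nvmefc_tgt_fcp_req *fcpreq);	/* hypothetical */

	static void lldd_handle_fcp_cmd(struct nvmet_fc_target_port *targetport,
					struct nvmefc_tgt_fcp_req *fcpreq,
					void *cmdiu, u32 cmdiu_len)
	{
		int rc = nvmet_fc_rcv_fcp_req(targetport, fcpreq, cmdiu, cmdiu_len);

		if (rc == 0) {
			/* CMD IU was copied into a fod; buffer is reusable now */
			lldd_repost_cmd_buffer(cmdiu);
		} else if (rc == -EOVERFLOW) {
			/*
			 * Accepted but deferred: hold the CMD IU buffer until
			 * the transport invokes our .defer_rcv() for this
			 * fcpreq, then repost it there (see the lpfc hunks
			 * below).
			 */
		} else {
			/* not accepted; terminate the exchange */
			lldd_abort_exchange(fcpreq);
		}
	}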
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index af0cc3456dc1..da5570cf5c6a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -522,10 +522,11 @@ struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
bridge = pci_upstream_bridge(bridge);
}
- if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
- return NULL;
+ if (highest_pcie_bridge &&
+ pci_pcie_type(highest_pcie_bridge) == PCI_EXP_TYPE_ROOT_PORT)
+ return highest_pcie_bridge;
- return highest_pcie_bridge;
+ return NULL;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);
@@ -4260,6 +4261,41 @@ int pci_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_reset_function);
/**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device. This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset. It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_probe_reset_function(dev);
+ if (rc)
+ return rc;
+
+ pci_dev_save_and_disable(dev);
+
+ rc = __pci_reset_function_locked(dev);
+
+ pci_dev_restore(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
+/**
* pci_try_reset_function - quiesce and reset a PCI device function
* @dev: PCI device to reset
*
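
A usage sketch for the new export, assuming a caller context in which the device lock must be held across the reset (the surrounding function is hypothetical):

	#include <linux/device.h>
	#include <linux/pci.h>

	static int example_reset(struct pci_dev *pdev)
	{
		int rc;

		device_lock(&pdev->dev);	/* real callers typically hold this already */
		rc = pci_reset_function_locked(pdev);
		device_unlock(&pdev->dev);

		if (rc)
			dev_warn(&pdev->dev, "function reset failed: %d\n", rc);
		return rc;
	}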
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c31310db0404..e6a917b4acd3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1762,6 +1762,48 @@ static void pci_configure_extended_tags(struct pci_dev *dev)
PCI_EXP_DEVCTL_EXT_TAG);
}
+/**
+ * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
+ * @dev: PCI device to query
+ *
+ * Returns true if the device has the Relaxed Ordering Attribute enabled.
+ */
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
+{
+ u16 v;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
+
+ return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
+}
+EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
+
+static void pci_configure_relaxed_ordering(struct pci_dev *dev)
+{
+ struct pci_dev *root;
+
+ /* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */
+ if (dev->is_virtfn)
+ return;
+
+ if (!pcie_relaxed_ordering_enabled(dev))
+ return;
+
+ /*
+ * For now, we only deal with Relaxed Ordering issues with Root
+ * Ports. Peer-to-Peer DMA is another can of worms.
+ */
+ root = pci_find_pcie_root_port(dev);
+ if (!root)
+ return;
+
+ if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
+ pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN);
+ dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
+ }
+}
+
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
@@ -1769,6 +1811,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_mps(dev);
pci_configure_extended_tags(dev);
+ pci_configure_relaxed_ordering(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
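
Driver-side, the helper is meant to be consulted once at probe time, mirroring the cxgb4/cxgb4vf hunks earlier in this series; a minimal sketch with a hypothetical adapter structure and flag:

	#include <linux/bitops.h>
	#include <linux/pci.h>

	#define MY_NO_RELAXED_ORDERING	BIT(0)		/* hypothetical flag */

	struct my_adapter {
		unsigned long flags;
	};

	static void my_check_ordering(struct pci_dev *pdev, struct my_adapter *adap)
	{
		/*
		 * The PCI core clears the device's RO enable bit below quirked
		 * Root Ports, so a clear bit means we must not generate
		 * relaxed-ordered TLPs in the descriptors we build.
		 */
		if (!pcie_relaxed_ordering_enabled(pdev))
			adap->flags |= MY_NO_RELAXED_ORDERING;
	}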
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6967c6b4cf6b..140760403f36 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4016,6 +4016,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
quirk_tw686x_class);
/*
+ * Some devices have problems with Transaction Layer Packets with the Relaxed
+ * Ordering Attribute set. Such devices should mark themselves, and
+ * device drivers should check the flag before sending TLPs with RO set.
+ */
+static void quirk_relaxedordering_disable(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
+ dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
+}
+
+/*
+ * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
+ * Complex has a Flow Control Credit issue which can cause performance
+ * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+
+/*
+ * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * where Upstream Transaction Layer Packets with the Relaxed Ordering
+ * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
+ * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
+ * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
+ * November 10, 2010). As a result, on this platform we can't use Relaxed
+ * Ordering for Upstream TLPs.
+ */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_relaxedordering_disable);
+
+/*
* Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
* values for the Attribute as were supplied in the header of the
* corresponding Request, except as explicitly allowed when IDO is used."
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ed48ed38e79..7ee1a94c0b33 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
- "FCP: Rcv %08x Release %08x Drop %08x\n",
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5cc8b0f7d885..744f3f395b64 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
- "FCP: Rcv %08x Drop %08x\n",
+ "FCP: Rcv %08x Defer %08x Release %08x "
+ "Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_defer),
+ atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fbeec344c6cc..bbbd0f84160d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *rsp)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+ struct lpfc_hba *phba = ctxp->phba;
+
+ lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+ ctxp->oxid, ctxp->size, smp_processor_id());
+
+ tgtp = phba->targetport->private;
+ atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
static struct nvmet_fc_target_template lpfc_tgttemplate = {
.targetport_delete = lpfc_nvmet_targetport_delete,
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
.fcp_op = lpfc_nvmet_xmt_fcp_op,
.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+ .defer_rcv = lpfc_nvmet_defer_rcv,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+ /* defer reposting rcv buffer till .defer_rcv callback */
+ ctxp->rqb_buffer = nvmebuf;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
+
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index e675ef17be08..48a76788b003 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
atomic_t rcv_fcp_cmd_in;
atomic_t rcv_fcp_cmd_out;
atomic_t rcv_fcp_cmd_drop;
+ atomic_t rcv_fcp_cmd_defer;
atomic_t xmt_fcp_release;
/* Stats counters - lpfc_nvmet_xmt_fcp_op */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index b20da0d27ad7..3f82ea1b72dc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
cmd->cmd_in_wq = 0;
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- cmd->data_work = 1;
- if (cmd->aborted) {
- cmd->data_work_free = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
- tcm_qla2xxx_free_cmd(cmd);
- return;
- }
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
qlt_xmit_tm_rsp(mcmd);
}
-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- unsigned long flags;
if (qlt_abort_cmd(cmd))
return;
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
- if ((cmd->state == QLA_TGT_STATE_NEW)||
- ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
- DATA_WORK_NOT_FREE(cmd))) {
- cmd->data_work_free = 1;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /*
- * cmd has not reached fw, Use this trigger to free it.
- */
- tcm_qla2xxx_free_cmd(cmd);
- return;
- }
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- return;
-
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index ca11be21f64b..34ca7823255d 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
continue;
}
+ set_current_state(TASK_RUNNING);
wp = async->buf_write_ptr;
n1 = min(n, async->prealloc_bufsz - wp);
n2 = n - n1;
@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
}
continue;
}
+
+ set_current_state(TASK_RUNNING);
rp = async->buf_read_ptr;
n1 = min(n, async->prealloc_bufsz - rp);
n2 = n - n1;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index a6a8393d6664..3e00df74b18c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad2s1210_state *st = iio_priv(indio_dev);
- bool negative;
+ u16 negative;
int ret = 0;
u16 pos;
s16 vel;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index e583dd8a418b..d4fa41be80f9 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
- return;
+ goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
}
static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
if (!cnp) {
pr_info("%s stid %d lookup failure\n", __func__, stid);
- return;
+ goto rel_skb;
}
cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
}
static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
struct tid_info *t = lldi->tids;
csk = lookup_tid(t, tid);
- if (unlikely(!csk))
+ if (unlikely(!csk)) {
pr_err("can't find connection for tid %u.\n", tid);
- else
+ goto rel_skb;
+ } else {
cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+ }
cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
}
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
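All three hunks above share one shape: every exit path, including the lookup-failure path, must consume the skb exactly once. Reduced to its essentials (a sketch, not driver code):

	if (!obj) {
		pr_info("lookup failure\n");
		goto rel_skb;		/* was: plain return, leaking the skb */
	}
	/* ... normal handling ... */
rel_skb:
	__kfree_skb(skb);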
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index dda13f1af38e..514986b57c2d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
static void
cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
- unsigned int nents)
+ unsigned int nents, u32 skip)
{
struct skb_seq_state st;
const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
}
consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
- buf_len, consumed);
+ buf_len, skip + consumed);
}
}
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
- cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+ cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
}
cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
cmd->se_cmd.data_length);
if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+ u32 skip = data_offset % PAGE_SIZE;
+
sg_off = data_offset / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+ sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
- cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+ cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
}
check_payload:
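A worked example (sketch) of the new skip handling: for a Data-Out PDU at data_offset = 5120 with PAGE_SIZE = 4096, the payload starts 1024 bytes into the second sg page, so both the page count and the copy offset must account for it:

	u32 skip = data_offset % PAGE_SIZE;		/* 5120 % 4096 = 1024 */
	sg_off   = data_offset / PAGE_SIZE;		/* 1 */
	sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
	/* inside cxgbit_skb_copy_to_sg(), each chunk then lands at skip + consumed */
	sg_pcopy_from_buffer(sg_start, sg_nents, (void *)buf, buf_len, skip + consumed);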
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e4975dd1b1..5001261f5d69 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_RESET;
+ atomic_inc(&np->np_reset_count);
if (np->np_thread) {
spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
+ kfree(cmd->text_in_ptr);
cmd->text_in_ptr = NULL;
return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
return text_length;
if (completed) {
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
} else {
- hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+ hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
cmd->read_data_done += text_length;
if (cmd->targ_xfer_tag == 0xFFFFFFFF)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e9bdc8b86e7d..dc13afbd4c88 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
flush_signals(current);
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
+ return 1;
} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
spin_unlock_bh(&np->np_thread_lock);
goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
iscsit_put_transport(conn->conn_transport);
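The switch from checking np_thread_state to a counter closes a window where a reset requested while the login thread was busy could be lost. The handshake, reduced to its essentials (illustrative names):

static atomic_t reset_count = ATOMIC_INIT(0);

static void request_reset(void)
{
	atomic_inc(&reset_count);		/* never lost, even if racing */
}

static bool consume_one_reset(void)
{
	/* decrements only while the counter is positive */
	return atomic_dec_if_positive(&reset_count) >= 0;
}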
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 36913734c6bc..02e8a5d86658 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
mutex_lock(&tpg->acl_node_mutex);
if (acl->dynamic_node_acl)
acl->dynamic_node_acl = 0;
- list_del(&acl->acl_list);
+ list_del_init(&acl->acl_list);
mutex_unlock(&tpg->acl_node_mutex);
target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
* in transport_deregister_session().
*/
list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 97fed9a298bd..836d552b0385 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
}
mutex_lock(&se_tpg->acl_node_mutex);
- list_del(&nacl->acl_list);
+ list_del_init(&nacl->acl_list);
mutex_unlock(&se_tpg->acl_node_mutex);
core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
if (se_nacl->dynamic_stop)
- list_del(&se_nacl->acl_list);
+ list_del_init(&se_nacl->acl_list);
}
mutex_unlock(&se_tpg->acl_node_mutex);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 80ee130f8253..942d094269fb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
block_remaining);
to_offset = get_block_offset_user(udev, dbi,
block_remaining);
- offset = DATA_BLOCK_SIZE - block_remaining;
- to += offset;
if (*iov_cnt != 0 &&
to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
(*iov)->iov_len = copy_bytes;
}
if (copy_data) {
- memcpy(to, from + sg->length - sg_remaining,
- copy_bytes);
+ offset = DATA_BLOCK_SIZE - block_remaining;
+ memcpy(to + offset,
+ from + sg->length - sg_remaining,
+ copy_bytes);
tcmu_flush_dcache_range(to, copy_bytes);
}
sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
offset = DATA_BLOCK_SIZE - block_remaining;
- from += offset;
tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to + sg->length - sg_remaining, from,
+ memcpy(to + sg->length - sg_remaining, from + offset,
copy_bytes);
sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
if (udev->dev_config[0])
snprintf(str + used, size - used, "/%s", udev->dev_config);
+ /* If the old string exists, free it */
+ kfree(info->name);
info->name = str;
return 0;
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 308b6e17c88a..fe2f00ceafc5 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
int res;
enum tb_port_type type;
+ /*
+ * Some DROMs list more ports than the controller actually has,
+ * so we skip those but allow the parser to continue.
+ */
+ if (header->index > sw->config.max_port_number) {
+ dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+ return 0;
+ }
+
port = &sw->ports[header->index];
port->disabled = header->port_disabled;
if (port->disabled)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b5def356af63..1aab3010fbfa 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->dl_write)
uart->dl_write = up->dl_write;
- if (serial8250_isa_config != NULL)
- serial8250_isa_config(0, &uart->port,
- &uart->capabilities);
+ if (uart->port.type != PORT_8250_CIR) {
+ if (serial8250_isa_config != NULL)
+ serial8250_isa_config(0, &uart->port,
+ &uart->capabilities);
+
+ ret = uart_add_one_port(&serial8250_reg,
+ &uart->port);
+ if (ret == 0)
+ ret = uart->port.line;
+ } else {
+ dev_info(uart->port.dev,
+ "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
+ uart->port.iobase,
+ (unsigned long long)uart->port.mapbase,
+ uart->port.irq);
- ret = uart_add_one_port(&serial8250_reg, &uart->port);
- if (ret == 0)
- ret = uart->port.line;
+ ret = 0;
+ }
}
mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8a857bb34fbb..1888d168a41c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
.fixed_options = true,
};
-/*
- * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
- * occasionally getting stuck as 1. To avoid the potential for a hang, check
- * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
- * implementations, so only do so if an affected platform is detected in
- * parse_spcr().
- */
-static bool qdf2400_e44_present = false;
-
+#ifdef CONFIG_ACPI_SPCR_TABLE
static struct vendor_data vendor_qdt_qdf2400_e44 = {
.reg_offset = pl011_std_offsets,
.fr_busy = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
.always_enabled = true,
.fixed_options = true,
};
+#endif
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
[REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
resource_size_t addr;
int i;
- if (strcmp(name, "qdf2400_e44") == 0) {
- pr_info_once("UART: Working around QDF2400 SoC erratum 44");
- qdf2400_e44_present = true;
- } else if (strcmp(name, "pl011") != 0) {
+ /*
+ * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
+ * have a distinct console name, so make sure we check for that.
+ * The workaround itself is applied in the probe function.
+ */
+ if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
return -ENODEV;
- }
if (uart_parse_earlycon(options, &iotype, &addr, &options))
return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
}
uap->port.irq = ret;
- uap->reg_offset = vendor_sbsa.reg_offset;
- uap->vendor = qdf2400_e44_present ?
- &vendor_qdt_qdf2400_e44 : &vendor_sbsa;
+#ifdef CONFIG_ACPI_SPCR_TABLE
+ if (qdf2400_e44_present) {
+ dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
+ uap->vendor = &vendor_qdt_qdf2400_e44;
+ } else
+#endif
+ uap->vendor = &vendor_sbsa;
+
+ uap->reg_offset = uap->vendor->reg_offset;
uap->fifosize = 32;
- uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
+ uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
uap->port.ops = &sbsa_uart_pops;
uap->fixed_baud = baudrate;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ab1bb3b538ac..7f277b092b5b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
/* No more submits can occur */
spin_lock_irq(&hcd_urb_list_lock);
rescan:
- list_for_each_entry (urb, &ep->urb_list, urb_list) {
+ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
int is_in;
if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
}
if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
hcd = hcd->shared_hcd;
+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+ set_bit(HCD_FLAG_DEAD, &hcd->flags);
if (hcd->rh_registered) {
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6e6797d145dd..822f8c50e423 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
- int status, i;
+ int status = -ENODEV;
+ int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
done:
hub_port_disable(hub, port1, 1);
- if (hcd->driver->relinquish_port && !hub->hdev->parent)
- hcd->driver->relinquish_port(hcd, port1);
-
+ if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+ if (status != -ENOTCONN && status != -ENODEV)
+ hcd->driver->relinquish_port(hcd, port1);
+ }
}
/* Handle physical or logical connection change events.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3116edfcdc18..574da2b4529c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
/* Avision AV600U */
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Optical Mouse M90/M100 */
{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6b299c7b7656..f064f1549333 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (!node) {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ /*
+ * USB Specification 2.0 Section 5.9.2 states that: "If
+ * there is only a single transaction in the microframe,
+ * only a DATA0 data packet PID is used. If there are
+ * two transactions per microframe, DATA1 is used for
+ * the first transaction data packet and DATA0 is used
+ * for the second transaction data packet. If there are
+ * three transactions per microframe, DATA2 is used for
+ * the first transaction data packet, DATA1 is used for
+ * the second, and DATA0 is used for the third."
+ *
+ * IOW, we should satisfy the following cases:
+ *
+ * 1) length <= maxpacket
+ * - DATA0
+ *
+ * 2) maxpacket < length <= (2 * maxpacket)
+ * - DATA1, DATA0
+ *
+ * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+ * - DATA2, DATA1, DATA0
+ */
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
- trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+ unsigned int mult = ep->mult - 1;
+ unsigned int maxp = usb_endpoint_maxp(ep->desc);
+
+ if (length <= (2 * maxp))
+ mult--;
+
+ if (length <= maxp)
+ mult--;
+
+ trb->size |= DWC3_TRB_SIZE_PCM1(mult);
}
} else {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
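A worked reading of the new PCM1 computation (sketch), assuming maxp = 1024 and ep->mult = 3, so mult starts at 2:

	/* length = 3000: neither test fires -> PCM1 = 2 (DATA2, DATA1, DATA0) */
	/* length = 1500: length <= 2 * maxp -> PCM1 = 1 (DATA1, DATA0) */
	/* length =  512: both tests fire    -> PCM1 = 0 (DATA0 only) */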
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 62dc9c7798e7..e1de8fe599a3 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
return usb3_req;
}
-static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
- struct renesas_usb3_request *usb3_req, int status)
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req,
+ int status)
{
struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
- unsigned long flags;
dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
status);
usb3_req->req.status = status;
- spin_lock_irqsave(&usb3->lock, flags);
usb3_ep->started = false;
list_del_init(&usb3_req->queue);
- spin_unlock_irqrestore(&usb3->lock, flags);
+ spin_unlock(&usb3->lock);
usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+ spin_lock(&usb3->lock);
+}
+
+static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+ struct renesas_usb3_request *usb3_req, int status)
+{
+ struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ __usb3_request_done(usb3_ep, usb3_req, status);
+ spin_unlock_irqrestore(&usb3->lock, flags);
}
static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
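The split into __usb3_request_done() exists so the request can be given back without the controller lock held, avoiding a self-deadlock if the gadget driver re-queues from its complete() callback. The general shape of the pattern (a sketch; dev is an illustrative name):

	spin_lock_irqsave(&dev->lock, flags);
	/* ... detach the request from internal lists ... */
	spin_unlock(&dev->lock);		/* drop around the callback */
	usb_gadget_giveback_request(ep, req);	/* may re-enter the driver */
	spin_lock(&dev->lock);			/* reacquire before returning */
	spin_unlock_irqrestore(&dev->lock, flags);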
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index c8989c62a262..c8f38649f749 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -98,6 +98,7 @@ enum amd_chipset_gen {
AMD_CHIPSET_HUDSON2,
AMD_CHIPSET_BOLTON,
AMD_CHIPSET_YANGTZE,
+ AMD_CHIPSET_TAISHAN,
AMD_CHIPSET_UNKNOWN,
};
@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+ }
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
/* Make sure amd chipset type has already been initialized */
usb_amd_find_chipset_info();
- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
- return 0;
-
- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
- return 1;
+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
+
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
+{
+ /*
+ * Our dear uPD72020{1,2} friend only partially resets when
+ * asked to via the XHCI interface, and may end up doing DMA
+ * at the wrong addresses, as it keeps the top 32bit of some
+ * addresses from its previous programming under obscure
+ * circumstances.
+ * Give it a good whack at probe time. Unfortunately, this
+ * needs to happen before we've had a chance to discover any
+ * quirk, or the system will be in a rather bad state.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ (pdev->device == 0x0014 || pdev->device == 0x0015))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 655994480198..5582cbafecd4 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
+bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
#else
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5b0fa553c8bc..8071c8fdd15e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
driver = (struct hc_driver *)id->driver_data;
+ /* For some HW implementations, an XHCI reset is just not enough... */
+ if (usb_xhci_needs_pci_reset(dev)) {
+ dev_info(&dev->dev, "Resetting\n");
+ if (pci_reset_function_locked(dev))
+ dev_warn(&dev->dev, "Reset failed");
+ }
+
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 76decb8011eb..3344ffd5bb13 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
"Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr))
return;
+ mdelay(1);
}
}
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8fb86a5f458e..3d0dd2f97415 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -197,6 +197,7 @@ struct msm_otg {
struct regulator *v3p3;
struct regulator *v1p8;
struct regulator *vddcx;
+ struct regulator_bulk_data supplies[3];
struct reset_control *phy_rst;
struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
static int msm_otg_probe(struct platform_device *pdev)
{
- struct regulator_bulk_data regs[3];
int ret = 0;
struct device_node *np = pdev->dev.of_node;
struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
return motg->irq;
}
- regs[0].supply = "vddcx";
- regs[1].supply = "v3p3";
- regs[2].supply = "v1p8";
+ motg->supplies[0].supply = "vddcx";
+ motg->supplies[1].supply = "v3p3";
+ motg->supplies[2].supply = "v1p8";
- ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
+ ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
+ motg->supplies);
if (ret)
return ret;
- motg->vddcx = regs[0].consumer;
- motg->v3p3 = regs[1].consumer;
- motg->v1p8 = regs[2].consumer;
+ motg->vddcx = motg->supplies[0].consumer;
+ motg->v3p3 = motg->supplies[1].consumer;
+ motg->v1p8 = motg->supplies[2].consumer;
clk_set_rate(motg->clk, 60000000);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 93fba9033b00..2c8161bcf5b5 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe;
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&uep->lock, flags);
pipe = usbhsg_uep_to_pipe(uep);
- if (!pipe) {
- ret = -EINVAL;
+ if (!pipe)
goto out;
- }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d544b331c9f2..02b67abfc2a1 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -20,9 +20,13 @@
/* Low Power Status register (LPSTS) */
#define LPSTS_SUSPM 0x4000
-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
#define UGCTRL2_USB0SEL_OTG 0x00000030
+#define UGCTRL2_VBUSSEL 0x00000400
static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+ UGCTRL2_VBUSSEL);
if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f64e914a8985..2d945c9f975c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ebe51f11105d..fe123153b1a5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c9ebefd8f35f..a585b477415d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
.driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be88209e..3b5a15d1dc0d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
+#define ATEN_PRODUCT_UC485 0x2021
#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cbea9f329e71..cde115359793 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
- "",
+ "INIC-3069",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
+ US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 06615934fed1..0dceb9fa3a06 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
struct Scsi_Host *host = us_to_host(us);
+ struct scsi_cmnd *srb;
for (;;) {
usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
scsi_lock(host);
/* When we are called with no command pending, we're done */
+ srb = us->srb;
if (us->srb == NULL) {
scsi_unlock(host);
mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
/* lock access to the state */
scsi_lock(host);
- /* indicate that the command is done */
- if (us->srb->result != DID_ABORT << 16) {
- usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
- us->srb->result);
- us->srb->scsi_done(us->srb);
- } else {
+ /* was the command aborted? */
+ if (us->srb->result == DID_ABORT << 16) {
SkipForAbort:
usb_stor_dbg(us, "scsi command aborted\n");
+ srb = NULL; /* Don't call srb->scsi_done() */
}
/*
@@ -429,6 +428,13 @@ SkipForAbort:
/* unlock the device pointers */
mutex_unlock(&us->dev_mutex);
+
+ /* now that the locks are released, notify the SCSI core */
+ if (srb) {
+ usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+ srb->result);
+ srb->scsi_done(srb);
+ }
} /* for (;;) */
/* Wait until we are told to stop */
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ff01bed7112f..1e784adb89b1 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -17,6 +17,7 @@
#include <asm/efi.h>
static bool request_mem_succeeded = false;
+static bool nowc = false;
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "width:", 6))
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ else if (!strcmp(this_opt, "nowc"))
+ nowc = true;
}
}
@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
info->apertures->ranges[0].base = efifb_fix.smem_start;
info->apertures->ranges[0].size = size_remap;
- info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+ if (nowc)
+ info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+ else
+ info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
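The new flag is selected from the kernel command line; judging by the Documentation/fb/efifb.txt change listed in this series, usage is presumably along the lines of:

	video=efifb:nowc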
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index c166e0725be5..ba82f97fb42b 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
imxfb_disable_controller(fbi);
unregister_framebuffer(info);
-
+ fb_dealloc_cmap(&info->cmap);
pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
-
- fb_dealloc_cmap(&info->cmap);
- kfree(info->pseudo_palette);
- framebuffer_release(info);
-
dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
fbi->map_dma);
-
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
+ kfree(info->pseudo_palette);
+ framebuffer_release(info);
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index eecf695c16f4..09e5bb013d28 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {
static int __init omap_dss_probe(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int r;
core.pdev = pdev;
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index bae1f5d36c26..2d43118077e4 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -574,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)
static void enable_pirq(struct irq_data *data)
{
- startup_pirq(data);
+ enable_dynirq(data);
}
static void disable_pirq(struct irq_data *data)
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e46080214955..3e59590c7254 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
struct list_head *ent;
struct xs_watch_event *event;
+ xenwatch_pid = current->pid;
+
for (;;) {
wait_event_interruptible(watch_events_waitq,
!list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
task = kthread_run(xenwatch_thread, NULL, "xenwatch");
if (IS_ERR(task))
return PTR_ERR(task);
- xenwatch_pid = task->pid;
/* shutdown watches for kexec boot */
xs_reset_watches();
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 3ee4fdc3da9e..ab60051be6e5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
struct fuse_file *ff;
- ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
if (unlikely(!ff))
return NULL;
@@ -609,7 +609,7 @@ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_io_priv *io = req->io;
ssize_t pos = -1;
- fuse_release_user_pages(req, !io->write);
+ fuse_release_user_pages(req, io->should_dirty);
if (io->write) {
if (req->misc.write.in.size != req->misc.write.out.size)
@@ -1316,7 +1316,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
loff_t *ppos, int flags)
{
int write = flags & FUSE_DIO_WRITE;
- bool should_dirty = !write && iter_is_iovec(iter);
int cuse = flags & FUSE_DIO_CUSE;
struct file *file = io->file;
struct inode *inode = file->f_mapping->host;
@@ -1346,6 +1345,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
inode_unlock(inode);
}
+ io->should_dirty = !write && iter_is_iovec(iter);
while (count) {
size_t nres;
fl_owner_t owner = current->files;
@@ -1360,7 +1360,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
nres = fuse_send_read(req, io, pos, nbytes, owner);
if (!io->async)
- fuse_release_user_pages(req, should_dirty);
+ fuse_release_user_pages(req, io->should_dirty);
if (req->out.h.error) {
err = req->out.h.error;
break;
@@ -1669,6 +1669,7 @@ err_nofile:
err_free:
fuse_request_free(req);
err:
+ mapping_set_error(page->mapping, error);
end_page_writeback(page);
return error;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1bd7ffdad593..bd4d2a3e1ec1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -249,6 +249,7 @@ struct fuse_io_priv {
size_t size;
__u64 offset;
bool write;
+ bool should_dirty;
int err;
struct kiocb *iocb;
struct file *file;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 69d02cf8cf37..5f93cfacb3d1 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT
config PNFS_BLOCK
tristate
depends on NFS_V4_1 && BLK_DEV_DM
+ depends on 64BIT || LBDAF
default NFS_V4
config PNFS_FLEXFILE_LAYOUT
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 6df7a0cf5660..f32c58bbe556 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -32,6 +32,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
nfs4_pnfs_ds_put(mirror_ds->ds);
+ kfree(mirror_ds->ds_versions);
kfree_rcu(mirror_ds, id_node.rcu);
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ffd2e712595d..d90132642340 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2553,9 +2553,8 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
clear_bit(NFS_O_RDWR_STATE, &state->flags);
clear_bit(NFS_OPEN_STATE, &state->flags);
stateid->type = NFS4_INVALID_STATEID_TYPE;
- }
- if (status != NFS_OK)
return status;
+ }
if (nfs_open_stateid_recover_openmode(state))
return -NFS4ERR_OPENMODE;
return NFS_OK;
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8a428498d6b2..509a61668d90 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
global_node_page_state(NR_FILE_MAPPED));
show_val_kb(m, "Shmem: ", i.sharedram);
show_val_kb(m, "Slab: ",
- global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE));
+ global_node_page_state(NR_SLAB_RECLAIMABLE) +
+ global_node_page_state(NR_SLAB_UNRECLAIMABLE));
show_val_kb(m, "SReclaimable: ",
- global_page_state(NR_SLAB_RECLAIMABLE));
+ global_node_page_state(NR_SLAB_RECLAIMABLE));
show_val_kb(m, "SUnreclaim: ",
- global_page_state(NR_SLAB_UNRECLAIMABLE));
+ global_node_page_state(NR_SLAB_UNRECLAIMABLE));
seq_printf(m, "KernelStack: %8lu kB\n",
global_page_state(NR_KERNEL_STACK_KB));
show_val_kb(m, "PageTables: ",
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index b836fd61ed87..fe8f3265e877 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -16,9 +16,10 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
+#include <linux/uaccess.h>
#include <asm/elf.h>
-#include <linux/uaccess.h>
+#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -1008,6 +1009,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
struct mm_struct *mm;
struct vm_area_struct *vma;
enum clear_refs_types type;
+ struct mmu_gather tlb;
int itype;
int rv;
@@ -1054,6 +1056,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}
down_read(&mm->mmap_sem);
+ tlb_gather_mmu(&tlb, mm, 0, -1);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1075,7 +1078,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(mm, 0, -1);
- flush_tlb_mm(mm);
+ tlb_finish_mmu(&tlb, 0, -1);
up_read(&mm->mmap_sem);
out_mm:
mmput(mm);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 06ea26b8c996..b0d5897bc4e6 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1600,7 +1600,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
uffdio_copy.len);
mmput(ctx->mm);
} else {
- return -ENOSPC;
+ return -ESRCH;
}
if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
return -EFAULT;
@@ -1647,7 +1647,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
uffdio_zeropage.range.len);
mmput(ctx->mm);
} else {
- return -ENOSPC;
+ return -ESRCH;
}
if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
return -EFAULT;
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 8afa4335e5b2..faddde44de8c 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,10 +112,11 @@ struct mmu_gather {
#define HAVE_GENERIC_MMU_GATHER
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+ struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
- unsigned long end);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c749eef1daa1..27b4b6615263 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1209,6 +1209,7 @@ static inline bool acpi_has_watchdog(void) { return false; }
#endif
#ifdef CONFIG_ACPI_SPCR_TABLE
+extern bool qdf2400_e44_present;
int parse_spcr(bool earlycon);
#else
static inline int parse_spcr(bool earlycon) { return 0; }
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 497f2b3a5a62..97f1b465d04f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
};
+struct st_sensor_sim {
+ u8 addr;
+ u8 value;
+};
+
/**
* struct st_sensor_bdu - ST sensor device block data update
* @addr: address of the register.
@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
* @bdu: Block data update register.
* @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
+ * @sim: SPI serial interface mode register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
@@ -213,6 +219,7 @@ struct st_sensor_settings {
struct st_sensor_bdu bdu;
struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
+ struct st_sensor_sim sim;
bool multi_read_bit;
unsigned int bootime;
};
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f384bb62d8e..3cadee0a3508 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -487,14 +487,12 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
* An operation with batched TLB flushing is going on. Anything that
* can move process memory needs to flush the TLB when moving a
* PROT_NONE or PROT_NUMA mapped page.
*/
- bool tlb_flush_pending;
-#endif
+ atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
bool tlb_flush_batched;
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end);
+
/*
* Memory barriers to keep this state in sync are graciously provided by
* the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers ensure that updates to tlb_flush_pending, which happen
+ * outside the lock, are ordered against the PTE updates, which happen
+ * while the lock is held.
*/
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
- barrier();
- return mm->tlb_flush_pending;
+ return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if at least two TLB batching threads are operating on
+ * this mm in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
- mm->tlb_flush_pending = true;
+ atomic_inc(&mm->tlb_flush_pending);
/*
- * Guarantee that the tlb_flush_pending store does not leak into the
+ * Guarantee that the tlb_flush_pending increase does not leak into the
* critical section updating the page tables
*/
smp_mb__before_spinlock();
}
+
/* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
- barrier();
- mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
- return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
+ /*
+ * Guarantee that the tlb_flush_pending does not leak into the
+ * critical section, since we must order the PTE change and changes to
+ * the pending TLB flush indication. We could have relied on TLB flush
+ * as a memory barrier, but this behavior is not clearly documented.
+ */
+ smp_mb__before_atomic();
+ atomic_dec(&mm->tlb_flush_pending);
}
-#endif
struct vm_fault;
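A sketch of the intended call pattern after this change, assuming (as the rest of this series arranges) that tlb_gather_mmu()/tlb_finish_mmu() bump the counter internally:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* inc_tlb_flush_pending() inside */
	/* ... clear PTEs under the page table lock ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush, then dec_tlb_flush_pending() */

	/* a racing PTE updater stays conservative while any batch is live */
	if (mm_tlb_flush_pending(mm))
		flush_tlb_range(vma, start, end);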
diff --git a/include/linux/net.h b/include/linux/net.h
index b5c15b31709b..d97d80d7fdf8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -37,7 +37,7 @@ struct net;
/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
- * Eventually all flags will be in sk->sk_wq_flags.
+ * Eventually all flags will be in sk->sk_wq->flags.
*/
#define SOCKWQ_ASYNC_NOSPACE 0
#define SOCKWQ_ASYNC_WAITDATA 1
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6c8c5d8041b7..2591878c1d48 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
+ * @defer_rcv: Called by the transport to signal the LLDD that it has
+ * begun processing of a previously received NVME CMD IU. The LLDD
+ * is now free to re-use the rcv buffer associated with the
+ * nvmefc_tgt_fcp_req.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues;
u16 max_sgl_segments;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4869e66dd659..f958d0732af6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -188,6 +188,8 @@ enum pci_dev_flags {
* the direct_complete optimization.
*/
PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
+ /* Don't use Relaxed Ordering for TLPs directed at this device */
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
@@ -1067,6 +1069,7 @@ void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_reset_slot(struct pci_slot *slot);
@@ -1125,6 +1128,7 @@ bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 79b0e4cdb814..f8274b0c6888 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -17,10 +17,12 @@
* Available only for accelerometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
* @open_drain: set the interrupt line to be open drain if possible.
+ * @spi_3wire: enable spi-3wire mode.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
+ bool spi_3wire;
};
#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 5726107963b2..0ad87c434ae6 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -43,12 +43,13 @@ struct sync_file {
#endif
wait_queue_head_t wq;
+ unsigned long flags;
struct dma_fence *fence;
struct dma_fence_cb cb;
};
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
struct sync_file *sync_file_create(struct dma_fence *fence);
struct dma_fence *sync_file_get_fence(int fd);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 6df79e96a780..f44ff2476758 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -336,6 +336,16 @@ static inline void in6_dev_put(struct inet6_dev *idev)
in6_dev_finish_destroy(idev);
}
+static inline void in6_dev_put_clear(struct inet6_dev **pidev)
+{
+ struct inet6_dev *idev = *pidev;
+
+ if (idev) {
+ in6_dev_put(idev);
+ *pidev = NULL;
+ }
+}
+
static inline void __in6_dev_put(struct inet6_dev *idev)
{
refcount_dec(&idev->refcnt);
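A sketch of the intended use of the new helper: drop a cached reference and NULL the pointer in one step, so a later cleanup path cannot put it twice (the field name is illustrative):

	in6_dev_put_clear(&cached->idev);	/* puts the ref and sets idev = NULL */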
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b00508d22e0a..b2e68657a216 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
BOND_MODE(bond) == BOND_MODE_ALB;
}
+static inline bool bond_needs_speed_duplex(const struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
+}
+
static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
{
return (BOND_MODE(bond) == BOND_MODE_TLB) &&
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 8ffd434676b7..71c72a939bf8 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -29,18 +29,18 @@
#include <linux/sched/signal.h>
#include <net/ip.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-struct napi_struct;
-extern unsigned int sysctl_net_busy_read __read_mostly;
-extern unsigned int sysctl_net_busy_poll __read_mostly;
-
/* 0 - Reserved to indicate value not set
* 1..NR_CPUS - Reserved for sender_cpu
* NR_CPUS+1..~0 - Region available for NAPI IDs
*/
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
static inline bool net_busy_loop_on(void)
{
return sysctl_net_busy_poll;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b2b5419467cc..f8149ca192b4 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5499,6 +5499,21 @@ static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS);
}
+/**
+ * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout
+ *
+ * Some device drivers do not offload AddBa/DelBa negotiation, but handle rx
+ * buffer reording internally, and therefore also handle the session timer.
+ *
+ * Trigger the timeout flow, which sends a DelBa.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+ const u8 *addr, unsigned int tid);
+
/* Rate control API */
/**
diff --git a/include/net/udp.h b/include/net/udp.h
index 20dcdca4e85c..4e5f23fec35e 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -366,12 +366,13 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb)
static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
struct iov_iter *to)
{
- int n, copy = len - off;
+ int n;
- n = copy_to_iter(skb->data + off, copy, to);
- if (n == copy)
+ n = copy_to_iter(skb->data + off, len, to);
+ if (n == len)
return 0;
+ iov_iter_revert(to, n);
return -EFAULT;
}
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 0ca1fb08805b..fb87d32f5e51 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -786,6 +786,7 @@ struct iscsi_np {
int np_sock_type;
enum np_thread_state_table np_thread_state;
bool enabled;
+ atomic_t np_reset_count;
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 26c54f6d595d..ad4eb2863e70 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
- __u64 __user relocs; /* in, ptr to array of submit_reloc's */
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
- __u64 __user bos; /* in, ptr to array of submit_bo's */
- __u64 __user cmds; /* in, ptr to array of submit_cmd's */
+ __u64 bos; /* in, ptr to array of submit_bo's */
+ __u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
};
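
Dropping __user from these fields is a UAPI cleanup rather than a behavior
change: the fields are plain __u64 integers that happen to carry a userspace
address, and the kernel converts them back to a pointer explicitly (typically
via u64_to_user_ptr()) before dereferencing. A sketch of how userspace fills
such a struct; the fake_* types below are trimmed-down illustrations, not the
real UAPI definitions:

#include <stdint.h>
#include <string.h>

struct fake_submit_bo { uint32_t flags, handle; uint64_t presumed; };
struct fake_submit {
	uint32_t nr_bos;
	uint64_t bos;	/* user pointer carried as a plain u64 */
};

int main(void)
{
	struct fake_submit_bo bos[2];
	struct fake_submit req;

	memset(bos, 0, sizeof(bos));
	memset(&req, 0, sizeof(req));
	req.nr_bos = 2;
	/* Cast through uintptr_t so the pointer width doesn't matter. */
	req.bos = (uint64_t)(uintptr_t)bos;
	/* ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req) would follow here. */
	return 0;
}
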
diff --git a/kernel/fork.c b/kernel/fork.c
index 17921b0390b4..e075b7780421 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -807,7 +807,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_init_aio(mm);
mm_init_owner(mm, p);
mmu_notifier_mm_init(mm);
- clear_tlb_flush_pending(mm);
+ init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
mm->pmd_huge_pte = NULL;
#endif
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 222317721c5a..0972a8e09d08 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
{
unsigned long size;
- size = global_page_state(NR_SLAB_RECLAIMABLE)
+ size = global_node_page_state(NR_SLAB_RECLAIMABLE)
+ global_node_page_state(NR_ACTIVE_ANON)
+ global_node_page_state(NR_INACTIVE_ANON)
+ global_node_page_state(NR_ACTIVE_FILE)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 37385193a608..dc498b605d5d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -204,10 +204,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
fmt_cnt++;
}
- return __trace_printk(1/* fake ip will not be printed */, fmt,
- mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
- mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
- mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
+/* Horrid workaround for getting va_list handling working with different
+ * argument type combinations generically for 32 and 64 bit archs.
+ */
+#define __BPF_TP_EMIT() __BPF_ARG3_TP()
+#define __BPF_TP(...) \
+ __trace_printk(1 /* Fake ip will not be printed. */, \
+ fmt, ##__VA_ARGS__)
+
+#define __BPF_ARG1_TP(...) \
+ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
+ ? __BPF_TP(arg1, ##__VA_ARGS__) \
+ : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
+ ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
+ : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
+
+#define __BPF_ARG2_TP(...) \
+ ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
+ ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
+ : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
+ ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
+ : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
+
+#define __BPF_ARG3_TP(...) \
+ ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
+ ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
+ : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
+ ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
+ : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
+
+ return __BPF_TP_EMIT();
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
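
The nested ternaries work because each branch of a conditional expression is a
complete __trace_printk() call with every argument already cast to the width
the format string expects; exactly one branch is evaluated at run time. A
compilable userspace miniature of the trick for a single argument (the
__BITS_PER_LONG handling is omitted for brevity; the ", ##__VA_ARGS__" form is
the same GNU extension the patch relies on):

#include <stdio.h>

static const char *fmt;
static unsigned long long a1;
static int mod;	/* mirrors mod[0] in the patch: 0 = u32, 1 = long, 2 = u64 */

#define EMIT(...) printf(fmt, ##__VA_ARGS__)
#define ARG1()					\
	((mod == 2) ? EMIT(a1)			\
	 : (mod == 1) ? EMIT((long)a1)		\
	 : EMIT((unsigned int)a1))

int main(void)
{
	a1 = 0x1234567890abcdefULL;

	fmt = "%u\n";   mod = 0; ARG1();	/* prints the low 32 bits */
	fmt = "%llu\n"; mod = 2; ARG1();	/* prints the full 64-bit value */
	return 0;
}
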
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 7d315fdb9f13..cf7b129b0b2b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
if (in_task()) {
unsigned int fail_nth = READ_ONCE(current->fail_nth);
- if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
- goto fail;
+ if (fail_nth) {
+ if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+ goto fail;
- return false;
+ return false;
+ }
}
/* No need to check any other properties if the probability is 0 */
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 6c1d678bcf8b..ff9148969b92 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
config->test_driver);
else
len += snprintf(buf+len, PAGE_SIZE - len,
- "driver:\tEMTPY\n");
+ "driver:\tEMPTY\n");
if (config->test_fs)
len += snprintf(buf+len, PAGE_SIZE - len,
@@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
config->test_fs);
else
len += snprintf(buf+len, PAGE_SIZE - len,
- "fs:\tEMTPY\n");
+ "fs:\tEMPTY\n");
mutex_unlock(&test_dev->config_mutex);
@@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
strlen(test_str));
break;
case TEST_KMOD_FS_TYPE:
- break;
kfree_const(config->test_fs);
config->test_driver = NULL;
copied = config_copy_test_fs(config, test_str,
strlen(test_str));
+ break;
default:
mutex_unlock(&test_dev->config_mutex);
return -EINVAL;
@@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
int (*test_sync)(struct kmod_test_device *test_dev))
{
int ret;
- long new;
+ unsigned long new;
unsigned int old_val;
- ret = kstrtol(buf, 10, &new);
+ ret = kstrtoul(buf, 10, &new);
if (ret)
return ret;
@@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
unsigned int max)
{
int ret;
- long new;
+ unsigned long new;
- ret = kstrtol(buf, 10, &new);
+ ret = kstrtoul(buf, 10, &new);
if (ret)
return ret;
@@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
struct kmod_test_device *test_dev = NULL;
int ret;
- mutex_unlock(&reg_dev_mutex);
+ mutex_lock(&reg_dev_mutex);
/* int should suffice for number of devices, test for wrap */
if (unlikely(num_test_devs + 1) < 0) {
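
The kstrtol() to kstrtoul() switch matters because the parsed value is stored
in an unsigned int: a signed parse accepts "-1" and the assignment silently
wraps it to UINT_MAX. A userspace illustration of the wrap (note that the
kernel's kstrtoul(), unlike C's strtoul(), rejects a leading '-'):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long snew = strtol("-1", NULL, 10);
	unsigned int val = (unsigned int)snew;	/* wraps */

	printf("stored %u (UINT_MAX is %u)\n", val, UINT_MAX);
	return 0;
}
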
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 9075aa54e955..b06d9fe23a28 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -24,7 +24,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
unsigned long flags;
struct page *page = alloc_page(balloon_mapping_gfp_mask() |
- __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO);
+ __GFP_NOMEMALLOC | __GFP_NORETRY);
if (!page)
return NULL;
diff --git a/mm/debug.c b/mm/debug.c
index db1cd26d8752..5715448ab0b5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -124,9 +124,7 @@ void dump_mm(const struct mm_struct *mm)
#ifdef CONFIG_NUMA_BALANCING
"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
"tlb_flush_pending %d\n"
-#endif
"def_flags: %#lx(%pGv)\n",
mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
@@ -158,9 +156,7 @@ void dump_mm(const struct mm_struct *mm)
#ifdef CONFIG_NUMA_BALANCING
mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
- mm->tlb_flush_pending,
-#endif
+ atomic_read(&mm->tlb_flush_pending),
mm->def_flags, &mm->def_flags
);
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86975dec0ba1..216114f6ef0b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1496,6 +1496,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
}
/*
+ * The page_table_lock above provides a memory barrier
+ * with change_protection_range.
+ */
+ if (mm_tlb_flush_pending(vma->vm_mm))
+ flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
+ /*
* Migrate the THP to the requested node, returns with page unlocked
* and access rights restored.
*/
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1a0ac0ad6f6..31e207cb399b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4062,9 +4062,9 @@ out:
return ret;
out_release_unlock:
spin_unlock(ptl);
-out_release_nounlock:
if (vm_shared)
unlock_page(page);
+out_release_nounlock:
put_page(page);
goto out;
}
diff --git a/mm/ksm.c b/mm/ksm.c
index 4dc92f138786..db20f8436bc3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1038,7 +1038,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
goto out_unlock;
if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
- (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
+ (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
+ mm_tlb_flush_pending(mm)) {
pte_t entry;
swapped = PageSwapCache(page);
diff --git a/mm/memory.c b/mm/memory.c
index f65beaad319b..e158f7ac6730 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
return true;
}
-/* tlb_gather_mmu
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @fullmm argument is used when @mm is without
- * users and we're going to destroy the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
@@ -275,10 +271,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
struct mmu_gather_batch *batch, *next;
+ if (force)
+ __tlb_adjust_range(tlb, start, end - start);
+
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
@@ -398,6 +398,34 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+/* tlb_gather_mmu
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm. The @fullmm argument is used when @mm is without
+ * users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ arch_tlb_gather_mmu(tlb, mm, start, end);
+ inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end)
+{
+ /*
+ * If there are parallel threads doing PTE changes on the same range
+ * under a non-exclusive lock (e.g., mmap_sem read-side) but deferring
+ * the TLB flush by batching, a thread with a stale TLB entry can fail
+ * to flush it after observing pte_none|!pte_dirty, for example, so
+ * flush the TLB forcefully if we detect parallel PTE batching threads.
+ */
+ bool force = mm_tlb_flush_nested(tlb->mm);
+
+ arch_tlb_finish_mmu(tlb, start, end, force);
+ dec_tlb_flush_pending(tlb->mm);
+}
+
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
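
The split into arch_tlb_gather_mmu()/arch_tlb_finish_mmu() lets the generic
wrappers bracket the operation with a pending-flush counter so concurrent
unmappers of the same mm can detect each other. The counter itself lives on
the mm_types.h side of this series; the sketch below shows its assumed shape,
using C11 atomics in place of the kernel's atomic_t (names follow the patch,
the bodies are an approximation, not the kernel source):

#include <stdatomic.h>
#include <stdbool.h>

struct mm_like {
	atomic_int tlb_flush_pending;
};

static inline void init_tlb_flush_pending(struct mm_like *mm)
{
	atomic_init(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_like *mm)
{
	atomic_fetch_add(&mm->tlb_flush_pending, 1);
}

static inline void dec_tlb_flush_pending(struct mm_like *mm)
{
	atomic_fetch_sub(&mm->tlb_flush_pending, 1);
}

/* A count of one is just ourselves; more than one means another thread
 * is batching PTE changes on this mm in parallel, which is the "force"
 * condition tested in tlb_finish_mmu() above. */
static inline bool mm_tlb_flush_nested(struct mm_like *mm)
{
	return atomic_load(&mm->tlb_flush_pending) > 1;
}
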
diff --git a/mm/migrate.c b/mm/migrate.c
index 627671551873..d68a41da6abb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1937,12 +1937,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
put_page(new_page);
goto out_fail;
}
- /*
- * We are not sure a pending tlb flush here is for a huge page
- * mapping or not. Hence use the tlb range variant
- */
- if (mm_tlb_flush_pending(mm))
- flush_tlb_range(vma, mmun_start, mmun_end);
/* Prepare a page as a migration target */
__SetPageLocked(new_page);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4180ad8cc9c5..bd0f409922cb 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -244,7 +244,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
- set_tlb_flush_pending(mm);
+ inc_tlb_flush_pending(mm);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
@@ -256,7 +256,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
/* Only flush the TLB if we actually modified any entries: */
if (pages)
flush_tlb_range(vma, start, end);
- clear_tlb_flush_pending(mm);
+ dec_tlb_flush_pending(mm);
return pages;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc32aa81f359..6d00f746c2fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
* Part of the reclaimable slab consists of items that are in use,
* and cannot be freed. Cap this estimate at the low watermark.
*/
- available += global_page_state(NR_SLAB_RECLAIMABLE) -
- min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+ available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+ min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+ wmark_low);
if (available < 0)
available = 0;
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
global_node_page_state(NR_FILE_DIRTY),
global_node_page_state(NR_WRITEBACK),
global_node_page_state(NR_UNSTABLE_NFS),
- global_page_state(NR_SLAB_RECLAIMABLE),
- global_page_state(NR_SLAB_UNRECLAIMABLE),
+ global_node_page_state(NR_SLAB_RECLAIMABLE),
+ global_node_page_state(NR_SLAB_UNRECLAIMABLE),
global_node_page_state(NR_FILE_MAPPED),
global_node_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
@@ -7668,7 +7669,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) {
- pr_info("%s: [%lx, %lx) PFNs busy\n",
+ pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
goto done;
diff --git a/mm/rmap.c b/mm/rmap.c
index c8993c63eb25..c1286d47aa1f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
.flags = PVMW_SYNC,
};
int *cleaned = arg;
+ bool invalidation_needed = false;
while (page_vma_mapped_walk(&pvmw)) {
int ret = 0;
- address = pvmw.address;
if (pvmw.pte) {
pte_t entry;
pte_t *pte = pvmw.pte;
@@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
if (!pte_dirty(*pte) && !pte_write(*pte))
continue;
- flush_cache_page(vma, address, pte_pfn(*pte));
- entry = ptep_clear_flush(vma, address, pte);
+ flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
+ entry = ptep_clear_flush(vma, pvmw.address, pte);
entry = pte_wrprotect(entry);
entry = pte_mkclean(entry);
- set_pte_at(vma->vm_mm, address, pte, entry);
+ set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
ret = 1;
} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
continue;
- flush_cache_page(vma, address, page_to_pfn(page));
- entry = pmdp_huge_clear_flush(vma, address, pmd);
+ flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+ entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
entry = pmd_wrprotect(entry);
entry = pmd_mkclean(entry);
- set_pmd_at(vma->vm_mm, address, pmd, entry);
+ set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
ret = 1;
#else
/* unexpected pmd-mapped page? */
@@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
}
if (ret) {
- mmu_notifier_invalidate_page(vma->vm_mm, address);
(*cleaned)++;
+ invalidation_needed = true;
}
}
+ if (invalidation_needed) {
+ mmu_notifier_invalidate_range(vma->vm_mm, address,
+ address + (1UL << compound_order(page)));
+ }
+
return true;
}
@@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
};
pte_t pteval;
struct page *subpage;
- bool ret = true;
+ bool ret = true, invalidation_needed = false;
enum ttu_flags flags = (enum ttu_flags)arg;
/* munlock has nothing to gain from examining un-locked vmas */
@@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
VM_BUG_ON_PAGE(!pvmw.pte, page);
subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
- address = pvmw.address;
-
if (!(flags & TTU_IGNORE_ACCESS)) {
- if (ptep_clear_flush_young_notify(vma, address,
+ if (ptep_clear_flush_young_notify(vma, pvmw.address,
pvmw.pte)) {
ret = false;
page_vma_mapped_walk_done(&pvmw);
@@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
/* Nuke the page table entry. */
- flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+ flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
if (should_defer_flush(mm, flags)) {
/*
* We clear the PTE but do not flush so potentially
@@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* transition on a cached TLB entry is written through
* and traps if the PTE is unmapped.
*/
- pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+ pteval = ptep_get_and_clear(mm, pvmw.address,
+ pvmw.pte);
set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
} else {
- pteval = ptep_clear_flush(vma, address, pvmw.pte);
+ pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
}
/* Move the dirty bit to the page. Now the pte is gone. */
@@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (PageHuge(page)) {
int nr = 1 << compound_order(page);
hugetlb_count_sub(nr, mm);
- set_huge_swap_pte_at(mm, address,
+ set_huge_swap_pte_at(mm, pvmw.address,
pvmw.pte, pteval,
vma_mmu_pagesize(vma));
} else {
dec_mm_counter(mm, mm_counter(page));
- set_pte_at(mm, address, pvmw.pte, pteval);
+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
}
} else if (pte_unused(pteval)) {
@@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pvmw.pte, swp_pte);
+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(subpage) };
pte_t swp_pte;
@@ -1460,7 +1464,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* If the page was redirtied, it cannot be
* discarded. Remap the page to page table.
*/
- set_pte_at(mm, address, pvmw.pte, pteval);
+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
SetPageSwapBacked(page);
ret = false;
page_vma_mapped_walk_done(&pvmw);
@@ -1468,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
if (swap_duplicate(entry) < 0) {
- set_pte_at(mm, address, pvmw.pte, pteval);
+ set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
@@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(mm, address, pvmw.pte, swp_pte);
+ set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
} else
dec_mm_counter(mm, mm_counter_file(page));
discard:
page_remove_rmap(subpage, PageHuge(page));
put_page(page);
- mmu_notifier_invalidate_page(mm, address);
+ invalidation_needed = true;
}
+
+ if (invalidation_needed)
+ mmu_notifier_invalidate_range(mm, address,
+ address + (1UL << compound_order(page)));
return ret;
}
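
Both rmap walkers now follow the same shape: set a flag whenever a mapping was
actually modified during the walk, then issue a single ranged
mmu_notifier_invalidate_range() afterwards instead of one per-page notifier
call inside the loop. A generic userspace sketch of that batching shape:

#include <stdbool.h>
#include <stdio.h>

static void notify_range(unsigned long start, unsigned long end)
{
	printf("invalidate [%#lx, %#lx)\n", start, end);
}

static void walk(unsigned long start, unsigned long end)
{
	bool invalidation_needed = false;
	unsigned long addr;

	for (addr = start; addr < end; addr += 0x1000) {
		/* ... modify the mapping at addr ... */
		invalidation_needed = true;
	}
	if (invalidation_needed)
		notify_range(start, end);	/* one call, not one per page */
}

int main(void)
{
	walk(0x1000, 0x5000);
	return 0;
}
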
diff --git a/mm/shmem.c b/mm/shmem.c
index b0aa6075d164..6540e5982444 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1022,7 +1022,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
*/
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
spin_lock(&sbinfo->shrinklist_lock);
- if (list_empty(&info->shrinklist)) {
+ /*
+ * use list_empty_careful() to defend against unlocked access to
+ * ->shrinklist in shmem_unused_huge_shrink()
+ */
+ if (list_empty_careful(&info->shrinklist)) {
list_add_tail(&info->shrinklist,
&sbinfo->shrinklist);
sbinfo->shrinklist_len++;
@@ -1817,7 +1821,11 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
* to shrink under memory pressure.
*/
spin_lock(&sbinfo->shrinklist_lock);
- if (list_empty(&info->shrinklist)) {
+ /*
+ * use list_empty_careful() to defend against unlocked access to
+ * ->shrinklist in shmem_unused_huge_shrink()
+ */
+ if (list_empty_careful(&info->shrinklist)) {
list_add_tail(&info->shrinklist,
&sbinfo->shrinklist);
sbinfo->shrinklist_len++;
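
list_empty_careful() is the right check here because it tolerates racing
against a single concurrent list_del_init(): it reports empty only when both
link pointers agree, so a deletion in flight in shmem_unused_huge_shrink() is
never mistaken for a stable empty list. Its check, reproduced as a standalone
sketch (essentially what include/linux/list.h does):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int list_empty_careful(const struct list_head *head)
{
	const struct list_head *next = head->next;

	return (next == head) && (next == head->prev);
}

int main(void)
{
	struct list_head h = { &h, &h };

	printf("empty: %d\n", list_empty_careful(&h));
	return 0;
}
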
diff --git a/mm/util.c b/mm/util.c
index 7b07ec852e01..9ecddf568fe3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
* which are reclaimable, under pressure. The dentry
* cache and most inode caches should fall into this
*/
- free += global_page_state(NR_SLAB_RECLAIMABLE);
+ free += global_node_page_state(NR_SLAB_RECLAIMABLE);
/*
* Leave reserved pages. The pages are not for anonymous pages.
diff --git a/net/core/filter.c b/net/core/filter.c
index 5afe3ac191ec..e0688a855c47 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3690,6 +3690,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct sk_buff, tc_index, 2,
target_size));
#else
+ *target_size = 2;
if (type == BPF_WRITE)
*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
else
@@ -3705,6 +3706,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#else
+ *target_size = 4;
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
break;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9fe25bf63296..86bc40ba6ba5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -201,10 +201,7 @@ void dccp_destroy_sock(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
- /*
- * DCCP doesn't use sk_write_queue, just sk_send_head
- * for retransmissions
- */
+ __skb_queue_purge(&sk->sk_write_queue);
if (sk->sk_send_head != NULL) {
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index fab41de8e983..de66ca8e6201 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -42,6 +42,9 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
+ if (skb_put_padto(skb, skb->len + padlen))
+ return NULL;
+
nskb = skb;
} else {
nskb = alloc_skb(NET_IP_ALIGN + skb->len +
@@ -56,13 +59,15 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
skb_set_transport_header(nskb,
skb_transport_header(skb) - skb->head);
skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
+
+ if (skb_put_padto(nskb, nskb->len + padlen)) {
+ kfree_skb(nskb);
+ return NULL;
+ }
+
kfree_skb(skb);
}
- /* skb is freed when it fails */
- if (skb_put_padto(nskb, nskb->len + padlen))
- return NULL;
-
tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
tag[0] = 0;
tag[1] = 1 << p->dp->index; /* destination port */
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 632b454ce77c..d521caf57385 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1084,15 +1084,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (!fi)
goto failure;
- fib_info_cnt++;
if (cfg->fc_mx) {
fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
- if (!fi->fib_metrics)
- goto failure;
+ if (unlikely(!fi->fib_metrics)) {
+ kfree(fi);
+ return ERR_PTR(err);
+ }
atomic_set(&fi->fib_metrics->refcnt, 1);
- } else
+ } else {
fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
-
+ }
+ fib_info_cnt++;
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
fi->fib_scope = cfg->fc_scope;
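
The reworked error path frees fi directly when the metrics allocation fails,
and fib_info_cnt++ moves below the last failure exit so no error path has to
remember to undo it. The general rule, as a userspace sketch: bump bookkeeping
counters only once the object can no longer fail to be created (names below
are illustrative):

#include <stdlib.h>

static unsigned int obj_cnt;

struct obj { int *metrics; };

static struct obj *obj_create(int want_metrics)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	if (want_metrics) {
		o->metrics = calloc(1, sizeof(*o->metrics));
		if (!o->metrics) {
			free(o);	/* counter untouched: nothing to undo */
			return NULL;
		}
	}
	obj_cnt++;	/* only after the last failure exit */
	return o;
}

int main(void)
{
	struct obj *o = obj_create(1);

	if (o) {
		free(o->metrics);
		free(o);
	}
	return 0;
}
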
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 618bbe1405fc..d400c0543106 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2756,12 +2756,13 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
err = 0;
if (IS_ERR(rt))
err = PTR_ERR(rt);
+ else
+ skb_dst_set(skb, &rt->dst);
}
if (err)
goto errout_free;
- skb_dst_set(skb, &rt->dst);
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c8784ab37852..5af8b809dfbc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1667,6 +1667,8 @@ process:
*/
sock_hold(sk);
refcounted = true;
+ if (tcp_filter(sk, skb))
+ goto discard_and_relse;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
@@ -1674,8 +1676,6 @@ process:
}
if (nsk == sk) {
reqsk_put(req);
- } else if (tcp_filter(sk, skb)) {
- goto discard_and_relse;
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v4_send_reset(nsk, skb);
goto discard_and_relse;
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 2417f55374c5..6bb9e14c710a 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -122,14 +122,14 @@ int tcp_set_ulp(struct sock *sk, const char *name)
ulp_ops = __tcp_ulp_find_autoload(name);
if (!ulp_ops)
- err = -ENOENT;
- else
- err = ulp_ops->init(sk);
+ return -ENOENT;
- if (err)
- goto out;
+ err = ulp_ops->init(sk);
+ if (err) {
+ module_put(ulp_ops->owner);
+ return err;
+ }
icsk->icsk_ulp_ops = ulp_ops;
- out:
- return err;
+ return 0;
}
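
The early returns also fix a reference leak: __tcp_ulp_find_autoload() takes a
module reference on success, and the old code fell through to a common exit
without releasing it when init() failed. The acquire/init/put-on-failure
shape, as a generic sketch (the ops and hook names are illustrative):

#include <stddef.h>

struct ops {
	int  (*init)(void);
	void (*owner_put)(void);	/* stands in for module_put() */
};

static int attach(struct ops *ops)
{
	int err;

	if (!ops)
		return -2;	/* -ENOENT in the kernel version */

	err = ops->init();
	if (err) {
		ops->owner_put();	/* drop the ref taken at lookup */
		return err;
	}
	return 0;
}

static int noop_init(void) { return 0; }
static void noop_put(void) { }

int main(void)
{
	struct ops ops = { noop_init, noop_put };

	return attach(&ops);
}
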
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 11ff19ba7efd..dc021ed6dd37 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -417,14 +417,11 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
struct net_device *loopback_dev =
dev_net(dev)->loopback_dev;
- if (dev != loopback_dev) {
- if (idev && idev->dev == dev) {
- struct inet6_dev *loopback_idev =
- in6_dev_get(loopback_dev);
- if (loopback_idev) {
- rt->rt6i_idev = loopback_idev;
- in6_dev_put(idev);
- }
+ if (idev && idev->dev != loopback_dev) {
+ struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
+ if (loopback_idev) {
+ rt->rt6i_idev = loopback_idev;
+ in6_dev_put(idev);
}
}
}
@@ -3732,10 +3729,10 @@ static int ip6_route_dev_notify(struct notifier_block *this,
/* NETDEV_UNREGISTER could be fired for multiple times by
* netdev_wait_allrefs(). Make sure we only call this once.
*/
- in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
- in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f776ec4ecf6d..d79a1af3252e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1458,6 +1458,8 @@ process:
}
sock_hold(sk);
refcounted = true;
+ if (tcp_filter(sk, skb))
+ goto discard_and_relse;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
@@ -1466,8 +1468,6 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_filter(sk, skb)) {
- goto discard_and_relse;
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v6_send_reset(nsk, skb);
goto discard_and_relse;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 10d7133e4fe9..a00d607e7224 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb,
+static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
int broadcast_flags, struct sock *one_sk,
struct net *net)
{
@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
rcu_read_unlock();
if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
+ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
kfree_skb(skb2);
kfree_skb(skb);
@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}
@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t));
- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@@ -1389,7 +1389,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
xfrm_state_put(x);
- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
+ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
return 0;
}
@@ -1476,7 +1476,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
return 0;
}
@@ -1589,7 +1589,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@@ -1694,8 +1694,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
return -ENOBUFS;
}
- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
-
+ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
+ sock_net(sk));
return 0;
}
@@ -1712,7 +1712,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
+ sock_net(sk));
}
static int key_notify_sa_flush(const struct km_event *c)
@@ -1733,7 +1734,7 @@ static int key_notify_sa_flush(const struct km_event *c)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
@@ -1790,7 +1791,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@@ -1878,7 +1879,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
new_hdr->sadb_msg_errno = 0;
}
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
+ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}
@@ -2206,7 +2207,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
return 0;
}
@@ -2424,7 +2425,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
err = 0;
out:
@@ -2678,7 +2679,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@@ -2735,7 +2736,7 @@ static int key_notify_policy_flush(const struct km_event *c)
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
@@ -2797,7 +2798,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
+ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -3018,7 +3019,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;
- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
return 0;
}
@@ -3206,7 +3208,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
xfrm_ctx->ctx_len);
}
- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
}
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3402,7 +3405,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;
- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
}
#ifdef CONFIG_NET_KEY_MIGRATE
@@ -3593,7 +3597,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
}
/* broadcast migrate message to sockets */
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
return 0;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 8708cbe8af5b..2b36eff5d97e 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -7,7 +7,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -466,3 +466,23 @@ void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif,
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_manage_rx_ba_offl);
+
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+ const u8 *addr, unsigned int tid)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = sta_info_get_bss(sdata, addr);
+ if (!sta)
+ goto unlock;
+
+ set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ unlock:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_rx_ba_timer_expired);
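
For a driver that runs the BA session timer itself, the expected call site is
its timer callback. A hypothetical sketch in kernel style (mydrv_* names are
illustrative, includes omitted, and the pre-4.15 unsigned-long timer callback
signature of this era is assumed):

struct mydrv_ba_session {
	struct ieee80211_vif *vif;
	u8 sta_addr[ETH_ALEN];
	u16 tid;
	struct timer_list timer;
};

static void mydrv_rx_ba_timeout(unsigned long data)
{
	struct mydrv_ba_session *ba = (struct mydrv_ba_session *)data;

	/* Hand teardown to mac80211, which runs the DelBa flow above. */
	ieee80211_rx_ba_timer_expired(ba->vif, ba->sta_addr, ba->tid);
}
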
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 816c8092e601..361377fbd780 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
- struct Qdisc *root = qdisc_dev(q)->qdisc;
-
- WARN_ON_ONCE(root == &noop_qdisc);
ASSERT_RTNL();
hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
if (invisible)
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 0af4b1c6f674..2732950766a9 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -572,8 +572,10 @@ static void atm_tc_destroy(struct Qdisc *sch)
struct atm_flow_data *flow, *tmp;
pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
- list_for_each_entry(flow, &p->flows, list)
+ list_for_each_entry(flow, &p->flows, list) {
tcf_block_put(flow->block);
+ flow->block = NULL;
+ }
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 481036f6b54e..780db43300b1 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1431,8 +1431,10 @@ static void cbq_destroy(struct Qdisc *sch)
* be bound to classes which have been destroyed already. --TGR '04
*/
for (h = 0; h < q->clhash.hashsize; h++) {
- hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
tcf_block_put(cl->block);
+ cl->block = NULL;
+ }
}
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b52f74610dc7..fd15200f8627 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1428,6 +1428,10 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
return err;
q->eligible = RB_ROOT;
+ err = tcf_block_get(&q->root.block, &q->root.filter_list);
+ if (err)
+ goto err_tcf;
+
q->root.cl_common.classid = sch->handle;
q->root.refcnt = 1;
q->root.sched = q;
@@ -1447,6 +1451,10 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
return 0;
+
+err_tcf:
+ qdisc_class_hash_destroy(&q->clhash);
+ return err;
}
static int
@@ -1522,8 +1530,10 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
unsigned int i;
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
tcf_block_put(cl->block);
+ cl->block = NULL;
+ }
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 203286ab4427..5d65ec5207e9 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1258,8 +1258,10 @@ static void htb_destroy(struct Qdisc *sch)
tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
+ hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
tcf_block_put(cl->block);
+ cl->block = NULL;
+ }
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f80ea2cc5f1f..82469ef9655e 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -437,6 +437,7 @@ congestion_drop:
qdisc_drop(head, sch, to_free);
slot_queue_add(slot, skb);
+ qdisc_tree_reduce_backlog(sch, 0, delta);
return NET_XMIT_CN;
}
@@ -468,8 +469,10 @@ enqueue:
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
- if (qlen != slot->qlen)
+ if (qlen != slot->qlen) {
+ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
return NET_XMIT_CN;
+ }
/* As we dropped a packet, better let upper stack know this */
qdisc_tree_reduce_backlog(sch, 1, dropped);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index d174ee3254ee..767e0537dde5 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -596,7 +596,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
rcu_read_lock();
b = rcu_dereference_rtnl(dev->tipc_ptr);
if (likely(b && test_bit(0, &b->up) &&
- (skb->pkt_type <= PACKET_BROADCAST))) {
+ (skb->pkt_type <= PACKET_MULTICAST))) {
skb->next = NULL;
tipc_rcv(dev_net(dev), skb, b);
rcu_read_unlock();
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ab3087687a32..dcd90e6fa7c3 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -513,6 +513,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
/* Now reverse the concerned fields */
msg_set_errcode(hdr, err);
+ msg_set_non_seq(hdr, 0);
msg_set_origport(hdr, msg_destport(&ohdr));
msg_set_destport(hdr, msg_origport(&ohdr));
msg_set_destnode(hdr, msg_prevnode(&ohdr));
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
index e2fbb890aef9..7c647f619d63 100644
--- a/tools/testing/selftests/futex/Makefile
+++ b/tools/testing/selftests/futex/Makefile
@@ -14,7 +14,7 @@ all:
done
override define RUN_TESTS
- @if [ `dirname $(OUTPUT)` = $(PWD) ]; then ./run.sh; fi
+ $(OUTPUT)/run.sh
endef
override define INSTALL_RULE
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
index 8cecae9a8bca..8cecae9a8bca 100644..100755
--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index ec232c3cfcaa..ec232c3cfcaa 100644..100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
diff --git a/tools/testing/selftests/timers/freq-step.c b/tools/testing/selftests/timers/freq-step.c
index e8c61830825a..22312eb4c941 100644
--- a/tools/testing/selftests/timers/freq-step.c
+++ b/tools/testing/selftests/timers/freq-step.c
@@ -229,10 +229,9 @@ static void init_test(void)
printf("CLOCK_MONOTONIC_RAW+CLOCK_MONOTONIC precision: %.0f ns\t\t",
1e9 * precision);
- if (precision > MAX_PRECISION) {
- printf("[SKIP]\n");
- ksft_exit_skip();
- }
+ if (precision > MAX_PRECISION)
+ ksft_exit_skip("precision: %.0f ns > MAX_PRECISION: %.0f ns\n",
+ 1e9 * precision, 1e9 * MAX_PRECISION);
printf("[OK]\n");
srand(ts.tv_sec ^ ts.tv_nsec);