author     Al Viro <viro@zeniv.linux.org.uk>  2016-05-17 02:17:59 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>  2016-05-17 02:17:59 -0400
commit     0e0162bb8c008fa7742f69d4d4982c8a37b88f95 (patch)
tree       4b230ab63b5698a44d2948e70a6cc22405c351e9 /drivers
parent     ae05327a00fd47c34dfe25294b359a3f3fef96e8 (diff)
parent     38b78a5f18584db6fa7441e0f4531b283b0e6725 (diff)
Merge branch 'ovl-fixes' into for-linus
Backmerge to resolve a conflict in ovl_lookup_real(); "ovl_lookup_real(): use lookup_one_len_unlocked()" instead, but it was too late in the cycle to rebase.
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/dsmethod.c  3
-rw-r--r--  drivers/acpi/nfit.c  5
-rw-r--r--  drivers/ata/Kconfig  8
-rw-r--r--  drivers/ata/Makefile  1
-rw-r--r--  drivers/ata/ahci_platform.c  3
-rw-r--r--  drivers/ata/ahci_seattle.c  210
-rw-r--r--  drivers/ata/libahci.c  1
-rw-r--r--  drivers/base/power/opp/core.c  3
-rw-r--r--  drivers/base/property.c  2
-rw-r--r--  drivers/bcma/main.c  17
-rw-r--r--  drivers/block/loop.c  6
-rw-r--r--  drivers/block/rbd.c  52
-rw-r--r--  drivers/bus/mvebu-mbus.c  2
-rw-r--r--  drivers/bus/uniphier-system-bus.c  2
-rw-r--r--  drivers/char/hw_random/bcm63xx-rng.c  1
-rw-r--r--  drivers/clk/imx/clk-imx6q.c  2
-rw-r--r--  drivers/clocksource/tango_xtal.c  2
-rw-r--r--  drivers/cpufreq/cpufreq.c  29
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c  8
-rw-r--r--  drivers/cpufreq/intel_pstate.c  35
-rw-r--r--  drivers/cpufreq/sti-cpufreq.c  4
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c  2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c  3
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c  3
-rw-r--r--  drivers/crypto/talitos.c  87
-rw-r--r--  drivers/dma/dw/core.c  34
-rw-r--r--  drivers/dma/edma.c  63
-rw-r--r--  drivers/dma/hsu/hsu.c  13
-rw-r--r--  drivers/dma/hsu/hsu.h  3
-rw-r--r--  drivers/dma/omap-dma.c  26
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c  2
-rw-r--r--  drivers/edac/i7core_edac.c  2
-rw-r--r--  drivers/edac/sb_edac.c  32
-rw-r--r--  drivers/extcon/extcon-palmas.c  3
-rw-r--r--  drivers/firmware/efi/arm-init.c  18
-rw-r--r--  drivers/firmware/efi/vars.c  37
-rw-r--r--  drivers/firmware/psci.c  2
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c  2
-rw-r--r--  drivers/gpio/gpio-rcar.c  65
-rw-r--r--  drivers/gpio/gpiolib-acpi.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c  2
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c  29
-rw-r--r--  drivers/gpu/drm/drm_edid.c  10
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c  31
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig  2
-rw-r--r--  drivers/gpu/drm/exynos/Makefile  6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c  11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c  11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.h  23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c  3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c  12
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  36
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  29
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  9
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  22
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c  6
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  12
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  16
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  42
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  18
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c  13
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h  2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c  4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  154
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h  46
-rw-r--r--  drivers/gpu/drm/radeon/ni_reg.h  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c  31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  2
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  17
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c  12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c  6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c  7
-rw-r--r--  drivers/hid/hid-core.c  3
-rw-r--r--  drivers/hid/hid-ids.h  4
-rw-r--r--  drivers/hid/hid-lenovo.c  16
-rw-r--r--  drivers/hid/hid-microsoft.c  6
-rw-r--r--  drivers/hid/hid-multitouch.c  1
-rw-r--r--  drivers/hid/hid-wiimote-modules.c  14
-rw-r--r--  drivers/hid/usbhid/hid-core.c  73
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c  1
-rw-r--r--  drivers/hid/wacom_sys.c  102
-rw-r--r--  drivers/hid/wacom_wac.c  17
-rw-r--r--  drivers/hid/wacom_wac.h  8
-rw-r--r--  drivers/hv/ring_buffer.c  26
-rw-r--r--  drivers/i2c/busses/Kconfig  4
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c  4
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c  24
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c  2
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c  1
-rw-r--r--  drivers/iio/adc/at91-sama5d2_adc.c  2
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c  30
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c  3
-rw-r--r--  drivers/iio/magnetometer/ak8975.c  6
-rw-r--r--  drivers/infiniband/core/cache.c  3
-rw-r--r--  drivers/infiniband/core/ucm.c  4
-rw-r--r--  drivers/infiniband/core/ucma.c  3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c  5
-rw-r--r--  drivers/infiniband/core/verbs.c  3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  24
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  6
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c  3
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c  5
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c  4
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c  14
-rw-r--r--  drivers/input/joystick/xpad.c  2
-rw-r--r--  drivers/input/misc/arizona-haptics.c  1
-rw-r--r--  drivers/input/misc/pmic8xxx-pwrkey.c  7
-rw-r--r--  drivers/input/misc/twl4030-vibra.c  1
-rw-r--r--  drivers/input/misc/twl6040-vibra.c  24
-rw-r--r--  drivers/input/tablet/gtco.c  10
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c  28
-rw-r--r--  drivers/input/touchscreen/zforce_ts.c  4
-rw-r--r--  drivers/iommu/amd_iommu.c  87
-rw-r--r--  drivers/iommu/arm-smmu.c  22
-rw-r--r--  drivers/irqchip/irq-mips-gic.c  4
-rw-r--r--  drivers/isdn/mISDN/socket.c  3
-rw-r--r--  drivers/lguest/interrupts_and_traps.c  6
-rw-r--r--  drivers/lguest/lg.h  1
-rw-r--r--  drivers/lguest/x86/core.c  6
-rw-r--r--  drivers/mailbox/mailbox-test.c  16
-rw-r--r--  drivers/mailbox/mailbox-xgene-slimpro.c  4
-rw-r--r--  drivers/mailbox/mailbox.c  4
-rw-r--r--  drivers/md/dm-cache-metadata.c  64
-rw-r--r--  drivers/md/dm.c  4
-rw-r--r--  drivers/md/md.c  2
-rw-r--r--  drivers/md/raid0.c  2
-rw-r--r--  drivers/md/raid5.c  2
-rw-r--r--  drivers/media/media-device.c  8
-rw-r--r--  drivers/media/platform/exynos4-is/media-dev.c  13
-rw-r--r--  drivers/media/platform/s3c-camif/camif-core.c  12
-rw-r--r--  drivers/media/usb/usbvision/usbvision-video.c  7
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c  20
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c  2
-rw-r--r--  drivers/media/v4l2-core/videobuf2-v4l2.c  20
-rw-r--r--  drivers/misc/cxl/context.c  7
-rw-r--r--  drivers/misc/cxl/cxl.h  2
-rw-r--r--  drivers/misc/cxl/irq.c  1
-rw-r--r--  drivers/misc/cxl/native.c  31
-rw-r--r--  drivers/misc/lkdtm.c  11
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c  5
-rw-r--r--  drivers/mmc/card/block.c  18
-rw-r--r--  drivers/mmc/host/Kconfig  1
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c  81
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c  10
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c  5
-rw-r--r--  drivers/mtd/nand/nand_base.c  10
-rw-r--r--  drivers/net/Kconfig  6
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c  36
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c  5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h  6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  53
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  13
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  6
-rw-r--r--  drivers/net/ethernet/cadence/macb.c  34
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c  5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c  3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  43
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c  30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c  49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h  10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c  49
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h  10
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  6
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c  76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  106
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c  40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.c  50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.h  11
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c  4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c  157
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h  4
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  6
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c  66
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  71
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h  1
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c  8
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_wireless.c  2
-rw-r--r--  drivers/net/macsec.c  65
-rw-r--r--  drivers/net/phy/at803x.c  40
-rw-r--r--  drivers/net/phy/spi_ks8995.c  2
-rw-r--r--  drivers/net/usb/cdc_mbim.c  9
-rw-r--r--  drivers/net/usb/lan78xx.c  44
-rw-r--r--  drivers/net/usb/pegasus.c  10
-rw-r--r--  drivers/net/usb/smsc75xx.c  12
-rw-r--r--  drivers/net/usb/smsc95xx.c  12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h  4
-rw-r--r--  drivers/net/vrf.c  177
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c  8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c  5
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c  6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c  26
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c  6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c  2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c  10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c  4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c  6
-rw-r--r--  drivers/nvdimm/pmem.c  27
-rw-r--r--  drivers/nvme/host/pci.c  31
-rw-r--r--  drivers/nvmem/mxs-ocotp.c  4
-rw-r--r--  drivers/pci/access.c  42
-rw-r--r--  drivers/pci/host/pci-imx6.c  20
-rw-r--r--  drivers/pci/pci-sysfs.c  2
-rw-r--r--  drivers/pci/pci.h  1
-rw-r--r--  drivers/perf/arm_pmu.c  15
-rw-r--r--  drivers/phy/phy-rockchip-dp.c  7
-rw-r--r--  drivers/phy/phy-rockchip-emmc.c  5
-rw-r--r--  drivers/pinctrl/freescale/Kconfig  1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c  9
-rw-r--r--  drivers/pinctrl/pinctrl-single.c  6
-rw-r--r--  drivers/platform/x86/hp_accel.c  6
-rw-r--r--  drivers/platform/x86/intel-hid.c  2
-rw-r--r--  drivers/platform/x86/intel_pmc_ipc.c  48
-rw-r--r--  drivers/platform/x86/intel_punit_ipc.c  48
-rw-r--r--  drivers/platform/x86/intel_telemetry_pltdrv.c  2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c  4
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c  2
-rw-r--r--  drivers/pwm/pwm-fsl-ftm.c  2
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c  119
-rw-r--r--  drivers/rtc/rtc-ds1307.c  6
-rw-r--r--  drivers/s390/block/dcssblk.c  5
-rw-r--r--  drivers/s390/block/scm_blk.c  2
-rw-r--r--  drivers/s390/char/sclp_ctl.c  12
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c  1
-rw-r--r--  drivers/soc/mediatek/mtk-scpsys.c  11
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c  54
-rw-r--r--  drivers/staging/rdma/hfi1/TODO  2
-rw-r--r--  drivers/staging/rdma/hfi1/file_ops.c  91
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.c  40
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.h  3
-rw-r--r--  drivers/staging/rdma/hfi1/qp.c  2
-rw-r--r--  drivers/staging/rdma/hfi1/user_exp_rcv.c  11
-rw-r--r--  drivers/staging/rdma/hfi1/user_sdma.c  33
-rw-r--r--  drivers/thermal/Kconfig  2
-rw-r--r--  drivers/thermal/hisi_thermal.c  4
-rw-r--r--  drivers/thermal/mtk_thermal.c  3
-rw-r--r--  drivers/thermal/of-thermal.c  4
-rw-r--r--  drivers/thermal/power_allocator.c  2
-rw-r--r--  drivers/thermal/thermal_core.c  10
-rw-r--r--  drivers/tty/pty.c  79
-rw-r--r--  drivers/tty/serial/8250/8250_port.c  11
-rw-r--r--  drivers/tty/serial/8250/Kconfig  1
-rw-r--r--  drivers/tty/serial/uartlite.c  8
-rw-r--r--  drivers/tty/tty_io.c  6
-rw-r--r--  drivers/usb/class/cdc-acm.c  4
-rw-r--r--  drivers/usb/core/hcd-pci.c  9
-rw-r--r--  drivers/usb/core/port.c  6
-rw-r--r--  drivers/usb/core/usb.c  8
-rw-r--r--  drivers/usb/dwc3/core.c  23
-rw-r--r--  drivers/usb/dwc3/debugfs.c  13
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c  12
-rw-r--r--  drivers/usb/dwc3/gadget.c  6
-rw-r--r--  drivers/usb/gadget/composite.c  2
-rw-r--r--  drivers/usb/gadget/function/f_fs.c  5
-rw-r--r--  drivers/usb/host/xhci-mem.c  6
-rw-r--r--  drivers/usb/host/xhci-pci.c  5
-rw-r--r--  drivers/usb/host/xhci-plat.c  13
-rw-r--r--  drivers/usb/host/xhci-plat.h  2
-rw-r--r--  drivers/usb/host/xhci-ring.c  3
-rw-r--r--  drivers/usb/host/xhci.c  24
-rw-r--r--  drivers/usb/host/xhci.h  2
-rw-r--r--  drivers/usb/musb/jz4740.c  4
-rw-r--r--  drivers/usb/musb/musb_gadget.c  6
-rw-r--r--  drivers/usb/musb/musb_host.c  2
-rw-r--r--  drivers/usb/serial/cp210x.c  4
-rw-r--r--  drivers/usb/storage/uas.c  21
-rw-r--r--  drivers/usb/storage/unusual_uas.h  7
-rw-r--r--  drivers/usb/storage/usb.c  5
-rw-r--r--  drivers/video/fbdev/amba-clcd.c  15
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c  12
-rw-r--r--  drivers/virtio/virtio_ring.c  2
-rw-r--r--  drivers/xen/balloon.c  16
-rw-r--r--  drivers/xen/evtchn.c  20
328 files changed, 3451 insertions, 1802 deletions
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 1982310e6d83..da198b864107 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
obj_desc->method.mutex->mutex.
original_sync_level =
obj_desc->method.mutex->mutex.sync_level;
+
+ obj_desc->method.mutex->mutex.thread_id =
+ acpi_os_get_thread_id();
}
}
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index d0f35e63640b..63cc9dbe4f3b 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
offset);
rc = -ENXIO;
}
- } else
+ } else {
rc = 0;
+ if (cmd_rc)
+ *cmd_rc = xlat_status(buf, cmd);
+ }
out:
ACPI_FREE(out_obj);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5083f85efea7..cfa936a32513 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -202,6 +202,14 @@ config SATA_FSL
If unsure, say N.
+config SATA_AHCI_SEATTLE
+ tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+ depends on ARCH_SEATTLE
+ help
+ This option enables support for AMD Seattle SATA host controller.
+
+ If unsure, say N
+
config SATA_INIC162X
tristate "Initio 162x SATA support (Very Experimental)"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 18579521464e..0b2afb7e5f35 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ATA) += libata.o
# non-SFF interface
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE) += ahci_seattle.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 40442332bfa7..62a04c8fb5c9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
if (rc)
return rc;
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->force_port_map);
+
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644
index 000000000000..6e702ab57220
--- /dev/null
+++ b/drivers/ata/ahci_seattle.c
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit Type Description
+ * 31 RW OD7.2 (activity)
+ * 30 RW OD7.1 (locate)
+ * 29 RW OD7.0 (fault)
+ * 28...8 RW OD6.2...OD0.0 (3bits per port, 1 bit per LED)
+ * 7 RO SGPIO feature flag
+ * 6:4 RO Reserved
+ * 3:0 RO Number of ports (0 means no port supported)
+ */
+#define ACTIVITY_BIT_POS(x) (8 + (3 * x))
+#define LOCATE_BIT_POS(x) (ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x) (LOCATE_BIT_POS(x) + 1)
+
+#define ACTIVITY_MASK 0x00010000
+#define LOCATE_MASK 0x00080000
+#define FAULT_MASK 0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+struct seattle_plat_data {
+ void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+ .inherits = &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+ .inherits = &ahci_ops,
+ .transmit_led_message = seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+ .link_flags = ATA_LFLAG_SW_ACTIVITY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct seattle_plat_data *plat_data = hpriv->plat_data;
+ unsigned long flags;
+ int pmp;
+ struct ahci_em_priv *emp;
+ u32 val;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp >= EM_MAX_SLOTS)
+ return -EINVAL;
+ emp = &pp->em_priv[pmp];
+
+ val = ioread32(plat_data->sgpio_ctrl);
+ if (state & ACTIVITY_MASK)
+ val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+ else
+ val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+ if (state & LOCATE_MASK)
+ val |= 1 << LOCATE_BIT_POS((ap->port_no));
+ else
+ val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+ if (state & FAULT_MASK)
+ val |= 1 << FAULT_BIT_POS((ap->port_no));
+ else
+ val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+ iowrite32(val, plat_data->sgpio_ctrl);
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* save off new led state for port/slot */
+ emp->led_state = state;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+ struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+ struct device *dev = &pdev->dev;
+ struct seattle_plat_data *plat_data;
+ u32 val;
+
+ plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+ if (IS_ERR(plat_data))
+ return &ahci_port_info;
+
+ plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 1));
+ if (IS_ERR(plat_data->sgpio_ctrl))
+ return &ahci_port_info;
+
+ val = ioread32(plat_data->sgpio_ctrl);
+
+ if (!(val & 0xf))
+ return &ahci_port_info;
+
+ hpriv->em_loc = 0;
+ hpriv->em_buf_sz = 4;
+ hpriv->em_msg_type = EM_MSG_TYPE_LED;
+ hpriv->plat_data = plat_data;
+
+ dev_info(dev, "SGPIO LED control is enabled.\n");
+ return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct ahci_host_priv *hpriv;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_init_host(pdev, hpriv,
+ ahci_seattle_get_port_info(pdev, hpriv),
+ &ahci_platform_sht);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+ { "AMDI0600", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+ .probe = ahci_seattle_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = ahci_acpi_match,
+ .pm = &ahci_pm_ops,
+ },
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
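
[Editor's note: the SGPIO control register comment in the new driver above packs three LED bits per port starting at bit 8. A standalone user-space C sketch, not part of the patch, showing where a given port's bits land (parentheses around x added in the macro copies):]

#include <stdio.h>

#define ACTIVITY_BIT_POS(x)	(8 + (3 * (x)))
#define LOCATE_BIT_POS(x)	(ACTIVITY_BIT_POS(x) + 1)
#define FAULT_BIT_POS(x)	(LOCATE_BIT_POS(x) + 1)

int main(void)
{
	/* For port 2 the three LEDs occupy bits 14, 15 and 16, matching
	 * "3 bits per port, 1 bit per LED" in the register comment. */
	printf("port 2: activity=%d locate=%d fault=%d\n",
	       ACTIVITY_BIT_POS(2), LOCATE_BIT_POS(2), FAULT_BIT_POS(2));
	return 0;
}
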
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3982054060b8..a5d7c1c2a05e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -507,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, hpriv->force_port_map);
port_map = hpriv->force_port_map;
+ hpriv->saved_port_map = port_map;
}
if (hpriv->mask_port_map) {
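
[Editor's note: read together, the ahci_platform.c and libahci.c hunks let a devicetree "ports-implemented" property stand in for a PI register that reads back as zero. A rough sketch of the combined flow, simplified from the real ahci_probe()/ahci_save_initial_config() paths; apply_ports_implemented() is a hypothetical helper, not kernel code:]

static void apply_ports_implemented(struct device *dev,
				    struct ahci_host_priv *hpriv,
				    u32 *port_map)
{
	/* ahci_probe(): the DT value lands in hpriv->force_port_map */
	of_property_read_u32(dev->of_node, "ports-implemented",
			     &hpriv->force_port_map);

	/* ahci_save_initial_config(): apply the override and remember it,
	 * so a later controller reset restores the forced value (this is
	 * what the added saved_port_map assignment above provides). */
	if (hpriv->force_port_map) {
		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
			 *port_map, hpriv->force_port_map);
		*port_map = hpriv->force_port_map;
		hpriv->saved_port_map = *port_map;
	}
}
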
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 433b60092972..d8f4cc22856c 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
reg = opp_table->regulator;
if (IS_ERR(reg)) {
/* Regulator may not be required for device */
- if (reg)
- dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
- PTR_ERR(reg));
rcu_read_unlock();
return 0;
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 9b1a65debd49..7f692accdc90 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,7 +21,7 @@
static inline bool is_pset_node(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_PDATA;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
}
static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8fed39e..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
return false;
}
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
struct bcma_device *core)
{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
struct of_phandle_args out_irq;
int ret;
- if (!parent || !parent->dev.of_node)
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
return 0;
ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
{
struct device_node *node;
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return;
+
node = bcma_of_find_child_device(parent, core);
if (node)
core->dev.of_node = node;
core->irq = bcma_of_get_irq(parent, core, 0);
}
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
- struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
- struct bcma_device *core, int num)
-{
- return 0;
-}
-#endif /* CONFIG_OF */
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may be started from the middle of the 'bvec'
+ * because of bio splitting, so offset from the bvec must
+ * be passed to iov iterator
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 94a1843b0426..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
struct rbd_device *rbd_dev = (struct rbd_device *)data;
int ret;
- if (!rbd_dev)
- return;
-
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
+
+ dout("%s flushing notifies\n", __func__);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
- bool removing;
/*
- * Don't hold the lock while doing disk operations,
- * or lock ordering will conflict with the bdev mutex via:
- * rbd_add() -> blkdev_get() -> rbd_open()
+ * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+ * try to update its size. If REMOVING is set, updating size
+ * is just useless work since the device can't be opened.
*/
- spin_lock_irq(&rbd_dev->lock);
- removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
- spin_unlock_irq(&rbd_dev->lock);
- /*
- * If the device is being removed, rbd_dev->disk has
- * been destroyed, so don't try to update its size
- */
- if (!removing) {
+ if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+ !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
- u64 incompat;
+ u64 unsup;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
if (ret < sizeof (features_buf))
return -ERANGE;
- incompat = le64_to_cpu(features_buf.incompat);
- if (incompat & ~RBD_FEATURES_SUPPORTED)
+ unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+ if (unsup) {
+ rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+ unsup);
return -ENXIO;
+ }
*snap_features = le64_to_cpu(features_buf.features);
@@ -5187,6 +5182,10 @@ out_err:
return ret;
}
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
ret = rbd_dev_id_get(rbd_dev);
if (ret)
- return ret;
+ goto err_out_unlock;
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
/* Everything's ready. Announce the disk to the world. */
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- add_disk(rbd_dev->disk);
+ up_write(&rbd_dev->header_rwsem);
+ add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
@@ -5252,6 +5252,8 @@ err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+ up_write(&rbd_dev->header_rwsem);
return ret;
}
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
+ down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0)
goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
return rc;
err_out_rbd_dev:
+ up_write(&rbd_dev->header_rwsem);
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
return ret;
rbd_dev_header_unwatch_sync(rbd_dev);
- /*
- * flush remaining watch callbacks - these must be complete
- * before the osd_client is shutdown
- */
- dout("%s: flushing notifies", __func__);
- ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
* Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c2e52864bb03..ce54a0160faa 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -972,7 +972,7 @@ int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
}
}
- pr_err("invalid dram address 0x%x\n", phyaddr);
+ pr_err("invalid dram address %pa\n", &phyaddr);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
- if (priv->bank[i].end > priv->bank[j].base ||
+ if (priv->bank[i].end > priv->bank[j].base &&
priv->bank[i].base < priv->bank[j].end) {
dev_err(priv->dev,
"region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ca9c40309757..5132c9cde50d 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
+#include <linux/of.h>
#define RNG_CTRL 0x00
#define RNG_EN (1 << 0)
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 02e18182fcb5..2beb396fe652 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
} else {
clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
- clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6);
+ clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 2bcecafdeaea..c407c47a3232 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
32, clocksource_mmio_readl_up);
- if (!ret) {
+ if (ret) {
pr_err("%s: registration failed\n", np->full_name);
return;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b87596b591b3..c4acfc5273b3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
unsigned int new_freq;
+ if (cpufreq_suspended)
+ return 0;
+
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
@@ -1554,21 +1557,25 @@ void cpufreq_suspend(void)
if (!cpufreq_driver)
return;
- if (!has_target())
+ if (!has_target() && !cpufreq_driver->suspend)
goto suspend;
pr_debug("%s: Suspending Governors\n", __func__);
for_each_active_policy(policy) {
- down_write(&policy->rwsem);
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- up_write(&policy->rwsem);
+ if (has_target()) {
+ down_write(&policy->rwsem);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ up_write(&policy->rwsem);
- if (ret)
- pr_err("%s: Failed to stop governor for policy: %p\n",
- __func__, policy);
- else if (cpufreq_driver->suspend
- && cpufreq_driver->suspend(policy))
+ if (ret) {
+ pr_err("%s: Failed to stop governor for policy: %p\n",
+ __func__, policy);
+ continue;
+ }
+ }
+
+ if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
pr_err("%s: Failed to suspend driver: %p\n", __func__,
policy);
}
@@ -1593,7 +1600,7 @@ void cpufreq_resume(void)
cpufreq_suspended = false;
- if (!has_target())
+ if (!has_target() && !cpufreq_driver->resume)
return;
pr_debug("%s: Resuming Governors\n", __func__);
@@ -1602,7 +1609,7 @@ void cpufreq_resume(void)
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
pr_err("%s: Failed to resume driver: %p\n", __func__,
policy);
- } else {
+ } else if (has_target()) {
down_write(&policy->rwsem);
ret = cpufreq_start_governor(policy);
up_write(&policy->rwsem);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfeae8c5..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
j_cdbs->prev_cpu_wall = cur_wall_time;
- if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
- idle_time = 0;
- } else {
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
- j_cdbs->prev_cpu_idle = cur_idle_time;
- }
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8b5a415ee14a..b230ebaae66c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -453,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
}
}
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+ if (hwp_active)
+ intel_pstate_hwp_set(policy->cpus);
+
+ return 0;
+}
+
static void intel_pstate_hwp_set_online_cpus(void)
{
get_online_cpus();
@@ -813,6 +821,11 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -1057,8 +1070,9 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
- cpu->pstate.scaling, cpu->sample.mperf);
+ return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+ int_tofp(cpu->pstate.max_pstate_physical *
+ cpu->pstate.scaling / 100)));
}
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -1101,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
int32_t core_busy, max_pstate, current_pstate, sample_ratio;
u64 duration_ns;
- intel_pstate_calc_busy(cpu);
-
/*
* core_busy is the ratio of actual performance to max
* max_pstate is the max non turbo pstate available
@@ -1130,6 +1142,10 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
int_tofp(duration_ns));
core_busy = mul_fp(core_busy, sample_ratio);
+ } else {
+ sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+ if (sample_ratio < int_tofp(1))
+ core_busy = 0;
}
cpu->sample.busy_scaled = core_busy;
@@ -1182,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
if ((s64)delta_ns >= pid_params.sample_rate_ns) {
bool sample_taken = intel_pstate_sample(cpu, time);
- if (sample_taken && !hwp_active)
- intel_pstate_adjust_busy_pstate(cpu);
+ if (sample_taken) {
+ intel_pstate_calc_busy(cpu);
+ if (!hwp_active)
+ intel_pstate_adjust_busy_pstate(cpu);
+ }
}
}
@@ -1337,8 +1356,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
out:
intel_pstate_set_update_util_hook(policy->cpu);
- if (hwp_active)
- intel_pstate_hwp_set(policy->cpus);
+ intel_pstate_hwp_set_policy(policy);
return 0;
}
@@ -1402,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
+ .resume = intel_pstate_hwp_set_policy,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.stop_cpu = intel_pstate_stop_cpu,
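
[Editor's note: the reworked get_avg_frequency() above scales the sampled busy percentage by the physical maximum P-state, i.e. roughly core_pct_busy% * max_pstate_physical * scaling / 100. A hypothetical worked example in user-space C with made-up values, not the kernel's fixed-point code:]

#include <stdio.h>

int main(void)
{
	unsigned int max_pstate_physical = 24;	/* hypothetical max ratio */
	unsigned int scaling = 100000;		/* kHz per ratio step */
	double core_pct_busy = 50.0;		/* sampled busy percentage */

	/* 50% of a 24 * 100000 kHz part -> 1200000 kHz (1.2 GHz) */
	double avg_khz = core_pct_busy / 100.0 * max_pstate_physical * scaling;
	printf("average frequency: %.0f kHz\n", avg_khz);
	return 0;
}
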
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a9c659f58974..04042038ec4b 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
{
int ret;
+ if ((!of_machine_is_compatible("st,stih407")) &&
+ (!of_machine_is_compatible("st,stih410")))
+ return -ENODEV;
+
ddata.cpu = get_cpu_device(0);
if (!ddata.cpu) {
dev_err(ddata.cpu, "Failed to get device for CPU0\n");
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 545069d5fdfb..e342565e8715 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
* call the CPU ops suspend protocol with idle index as a
* parameter.
*/
- arm_cpuidle_suspend(idx);
+ ret = arm_cpuidle_suspend(idx);
cpu_pm_exit();
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad72897dc2..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
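
[Editor's note: both CCP hunks zero a stack-allocated export structure before filling it, so compiler padding and unwritten tail bytes cannot reach user space through 'out'. A standalone illustration of the pattern, not kernel code and with a made-up struct layout:]

#include <stdint.h>
#include <string.h>

struct exp_ctx {
	uint8_t  first;		/* typically followed by padding bytes */
	uint32_t msg_bits;
};

static void export_ctx(void *out, uint8_t first, uint32_t msg_bits)
{
	struct exp_ctx state;

	/* Without this memset the padding keeps whatever was on the stack
	 * and is copied out verbatim below. */
	memset(&state, 0, sizeof(state));
	state.first = first;
	state.msg_bits = msg_bits;
	memcpy(out, &state, sizeof(state));
}

int main(void)
{
	unsigned char buf[sizeof(struct exp_ctx)];

	export_ctx(buf, 1, 512);
	return 0;
}
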
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
ptr->eptr = upper_32_bits(dma_addr);
}
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+ struct talitos_ptr *src_ptr, bool is_sec1)
+{
+ dst_ptr->ptr = src_ptr->ptr;
+ if (!is_sec1)
+ dst_ptr->eptr = src_ptr->eptr;
+}
+
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
bool is_sec1)
{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
-
/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
if (sg_count > 1 &&
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
areq->assoclen,
&edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
-
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ tbl_off += ret;
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
- sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+ areq->assoclen, 0);
+ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+ areq->assoclen, sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
- } else
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+ tbl_off += ret;
+ } else {
+ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ }
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->icv_ool = false;
- if (sg_count > 1 &&
- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+ areq->assoclen, 0);
+ } else if ((sg_count =
+ sg_to_link_tbl_offset(areq->dst, sg_count,
areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) >
- 1) {
+ &edesc->link_tbl[tbl_off])) > 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dma_len, DMA_BIDIRECTIONAL);
edesc->icv_ool = true;
- } else
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+ } else {
+ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ }
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
struct talitos_alg_template algt;
};
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+ struct talitos_crypto_alg *talitos_alg)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
struct talitos_private *priv;
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
-
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
+
+ return talitos_init_common(ctx, talitos_alg);
+}
+
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- talitos_cra_init(crypto_aead_tfm(tfm));
- return 0;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
+
+ return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
if (dwc->initialized == true)
return;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;
- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;
/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;
/* Disable interrupts */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e774f8..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct edma_desc *edesc;
dma_addr_t src_addr, dst_addr;
enum dma_slave_buswidth dev_width;
+ bool use_intermediate = false;
u32 burst;
int i, ret, nslots;
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* but the synchronization is difficult to achieve with Cyclic and
* cannot be guaranteed, so we error out early.
*/
- if (nslots > MAX_NR_SG)
- return NULL;
+ if (nslots > MAX_NR_SG) {
+ /*
+ * If the burst and period sizes are the same, we can put
+ * the full buffer into a single period and activate
+ * intermediate interrupts. This will produce interrupts
+ * after each burst, which is also after each desired period.
+ */
+ if (burst == period_len) {
+ period_len = buf_len;
+ nslots = 2;
+ use_intermediate = true;
+ } else {
+ return NULL;
+ }
+ }
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
/*
* Enable period interrupt only if it is requested
*/
- if (tx_flags & DMA_PREP_INTERRUPT)
+ if (tx_flags & DMA_PREP_INTERRUPT) {
edesc->pset[i].param.opt |= TCINTEN;
+
+ /* Also enable intermediate interrupts if necessary */
+ if (use_intermediate)
+ edesc->pset[i].param.opt |= ITCINTEN;
+ }
}
/* Place the cyclic channel to highest priority queue */
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
- struct platform_device *tc_pdev;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !tc)
- return;
-
- tc_pdev = of_find_device_by_node(tc->node);
- if (!tc_pdev) {
- pr_err("%s: TPTC device is not found\n", __func__);
- return;
- }
- if (!pm_runtime_enabled(&tc_pdev->dev))
- pm_runtime_enable(&tc_pdev->dev);
-
- if (enable)
- ret = pm_runtime_get_sync(&tc_pdev->dev);
- else
- ret = pm_runtime_put_sync(&tc_pdev->dev);
-
- if (ret < 0)
- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
- enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
echan->hw_triggered ? "HW" : "SW");
- edma_tc_set_pm_state(echan->tc, true);
-
return 0;
err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}
- edma_tc_set_pm_state(echan->tc, false);
echan->tc = NULL;
echan->hw_triggered = false;
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
int i;
for (i = 0; i < ecc->num_channels; i++) {
- if (echan[i].alloced) {
+ if (echan[i].alloced)
edma_setup_interrupt(&echan[i], false);
- edma_tc_set_pm_state(echan[i].tc, false);
- }
}
return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
/* Set up channel -> slot mapping for the entry slot */
edma_set_chmap(&echan[i], echan[i].slot[0]);
-
- edma_tc_set_pm_state(echan[i].tc, true);
}
}
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
static int edma_tptc_probe(struct platform_device *pdev)
{
- return 0;
+ pm_runtime_enable(&pdev->dev);
+ return pm_runtime_get_sync(&pdev->dev);
}
static struct platform_driver edma_tptc_driver = {
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
if (hsuc->direction == DMA_MEM_TO_DEV) {
bsr = config->dst_maxburst;
- mtsr = config->dst_addr_width;
+ mtsr = config->src_addr_width;
} else if (hsuc->direction == DMA_DEV_TO_MEM) {
bsr = config->src_maxburst;
- mtsr = config->src_addr_width;
+ mtsr = config->dst_addr_width;
}
hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = desc->length;
+ size_t bytes = 0;
int i;
- i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ i = HSU_DMA_CHAN_NR_DESC - 1;
do {
bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
} while (--i >= 0);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 43bd5aee7ffe..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
unsigned dma_sig;
bool cyclic;
bool paused;
+ bool running;
int dma_ch;
struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
/* Enable channel */
omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+ c->running = true;
}
static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
+
+ c->running = false;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
- uint32_t ccr;
unsigned long flags;
- ccr = omap_dma_chan_read(c, CCR);
- /* The channel is no longer active, handle the completion right away */
- if (!(ccr & CCR_ENABLE))
- omap_dma_callback(c->dma_ch, 0, c);
-
ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (!c->paused && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * The channel is no longer active, set the return value
+ * accordingly
+ */
+ if (!(ccr & CCR_ENABLE))
+ ret = DMA_COMPLETE;
+ }
+
if (ret == DMA_COMPLETE || !txstate)
return ret;
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE;
- if (tx_flags & DMA_PREP_INTERRUPT)
- d->cicr |= CICR_FRAME_IE;
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
d->csdp = data_type;
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0ee0321868d3..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+ if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
mci = i7_dev->mci;
pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 93f0d4120289..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -362,6 +362,7 @@ struct sbridge_pvt {
/* Memory type detection */
bool is_mirrored, is_lockstep, is_close_pg;
+ bool is_chan_hash;
/* Fifo double buffers */
struct mce mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
return (pkg >> 2) & 0x1;
}
+static int haswell_chan_hash(int idx, u64 addr)
+{
+ int i;
+
+ /*
+ * XOR even bits from 12:26 to bit0 of idx,
+ * odd bits from 13:27 to bit1
+ */
+ for (i = 12; i < 28; i += 2)
+ idx ^= (addr >> i) & 3;
+
+ return idx;
+}
+
/****************************************************************************
Memory check routines
****************************************************************************/
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
KNL_MAX_CHANNELS : NUM_CHANNELS;
u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+ pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+ }
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
pvt->info.type == KNIGHTS_LANDING)
pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = 1 << TAD_SOCK(reg);
+ sck_way = TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
- else
+ else {
idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+ if (pvt->is_chan_hash)
+ idx = haswell_chan_hash(idx, addr);
+ }
idx = idx % ch_way;
/*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
switch(ch_way) {
case 2:
case 4:
- sck_xch = 1 << sck_way * (ch_way >> 1);
+ sck_xch = (1 << sck_way) * (ch_way >> 1);
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
ch_addr = addr - offset;
ch_addr >>= (6 + shiftup);
- ch_addr /= ch_way * sck_way;
+ ch_addr /= sck_xch;
ch_addr <<= (6 + shiftup);
ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
@@ -3146,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
pvt = mci->pvt_info;
/*
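
The new haswell_chan_hash() above folds the even address bits 12..26 into bit 0 of the channel index and the odd bits 13..27 into bit 1, two bits per step. Here is a standalone copy of that fold with a tiny demonstration; the example addresses are arbitrary, and in the driver the result is still taken modulo ch_way afterwards.

#include <stdint.h>
#include <stdio.h>

/* XOR even addr bits 12..26 into bit 0 of idx, odd bits 13..27 into bit 1. */
static int chan_hash(int idx, uint64_t addr)
{
	int i;

	for (i = 12; i < 28; i += 2)
		idx ^= (addr >> i) & 3;

	return idx;
}

int main(void)
{
	uint64_t addrs[] = { 0x0000000, 0x0001000, 0x0802000, 0x7fff000 };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("addr 0x%09llx -> idx %d\n",
		       (unsigned long long)addrs[i], chan_hash(0, addrs[i]));
	return 0;
}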
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 841a4b586395..8b3226dca1d9 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -348,8 +348,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_vbus_irq_handler,
IRQF_TRIGGER_FALLING |
IRQF_TRIGGER_RISING |
- IRQF_ONESHOT |
- IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas_usb_vbus",
palmas_usb);
if (status < 0) {
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index aa1f743152a2..8714f8c271ba 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -203,7 +203,19 @@ void __init efi_init(void)
reserve_regions();
early_memunmap(memmap.map, params.mmap_size);
- memblock_mark_nomap(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
+
+ if (IS_ENABLED(CONFIG_ARM)) {
+ /*
+ * ARM currently does not allow ioremap_cache() to be called on
+ * memory regions that are covered by struct page. So remove the
+ * UEFI memory map from the linear mapping.
+ */
+ memblock_mark_nomap(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ } else {
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ }
}
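
Both branches above carve out the same region: the UEFI memory map rounded out to whole pages around an unaligned base. A small userspace sketch of just that base/size arithmetic, with PAGE_SIZE fixed at 4 KiB and made-up map values purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t mmap      = 0x81234567;   /* arbitrary, unaligned map address */
	uint64_t mmap_size = 0x1a30;       /* arbitrary map size in bytes */

	uint64_t base = mmap & PAGE_MASK;
	uint64_t size = PAGE_ALIGN(mmap_size + (mmap & ~PAGE_MASK));

	printf("reserve/nomap [0x%llx, 0x%llx)\n",
	       (unsigned long long)base,
	       (unsigned long long)(base + size));
	return 0;
}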
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c0a234..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
{ NULL_GUID, "", NULL },
};
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ * final "*" character matches any trailing characters of @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
- char u = var_name[*match];
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
return true;
- /* Case sensitive match */
- if (!c && *match == len)
- return true;
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
- if (c != u)
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
return false;
-
- if (!c)
- return true;
+ }
}
- return true;
}
bool
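
The rewritten variable_matches() above treats @match_name as a NUL-terminated pattern where a trailing "*" matches whatever remains of @var_name and every other character must match position for position. A standalone sketch with the same semantics plus a couple of probes follows; the pattern and variable names in main() are made up, not taken from the variable_validate table.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool matches(const char *var_name, size_t len,
		    const char *match_name, int *match)
{
	for (*match = 0; ; (*match)++) {
		char c = match_name[*match];

		switch (c) {
		case '*':
			return true;                 /* wildcard: rest matches */
		case '\0':
			return *match == (int)len;   /* both ended together? */
		default:
			if ((size_t)*match < len && c == var_name[*match])
				continue;            /* same char, keep walking */
			return false;
		}
	}
}

int main(void)
{
	const char *names[] = { "Boot0001", "BootOrder", "Lang" };
	const char *pattern = "Boot*";
	unsigned int i;
	int m;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-10s vs %-6s -> %d (matched %d chars)\n",
		       names[i], pattern,
		       matches(names[i], strlen(names[i]), pattern, &m), m);
	return 0;
}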
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 11bfee8b79a9..b5d05807e6ec 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
.init = psci_dt_cpu_init_idle,
};
-CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
#endif
#endif
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 815c4a5cae54..1b95475b6aef 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -77,7 +77,7 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
static inline void fw_cfg_read_blob(u16 key,
void *buf, loff_t pos, size_t count)
{
- u32 glk;
+ u32 glk = -1U;
acpi_status status;
/* If we have ACPI, ensure mutual exclusion against any potential
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d9ab0cd1d205..4d9a315cfd43 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- error = pinctrl_request_gpio(chip->base + offset);
- if (error)
- pm_runtime_put(&p->pdev->dev);
-
- return error;
+ return pinctrl_request_gpio(chip->base + offset);
}
static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
pinctrl_free_gpio(chip->base + offset);
- /* Set the GPIO as an input to ensure that the next GPIO request won't
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
*/
gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
- pm_runtime_put(&p->pdev->dev);
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
- irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
- irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
- irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
- irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
err1:
gpiochip_remove(gpio_chip);
err0:
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
gpiochip_remove(&p->gpio_chip);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
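
The gpio-rcar change above drops the per-IRQ and per-request pm_runtime get/put pairs in favour of a single get in probe, balanced by a put on the probe error path and another in remove. The balance is the important part; here is a tiny userspace model with a stub reference counter — the get_sync()/put() names only mimic the runtime-PM calls, this is not the kernel API.

#include <stdio.h>

static int usage;                     /* stands in for the runtime-PM count */

static void get_sync(void) { usage++; }
static void put(void)      { usage--; }

static int probe(int fail)
{
	get_sync();                   /* keep the block powered while bound */

	if (fail) {                   /* any later probe error... */
		put();                /* ...must drop the reference again */
		return -1;
	}
	return 0;
}

static void remove(void)
{
	put();                        /* mirror of the probe-time get */
}

int main(void)
{
	probe(1);                             /* failing probe */
	printf("after failed probe: %d\n", usage);
	if (probe(0) == 0)                    /* successful bind/unbind */
		remove();
	printf("after bind/unbind:  %d\n", usage);
	return 0;
}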
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 682070d20f00..2dc52585e3f2 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
if (lookup) {
lookup->adev = adev;
- lookup->con_id = con_id;
+ lookup->con_id = kstrdup(con_id, GFP_KERNEL);
list_add_tail(&lookup->node, &acpi_crs_lookup_list);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 62a778012fe0..1bcbade479dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ unsigned fw_version;
void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
@@ -2034,6 +2035,7 @@ struct amdgpu_device {
/* tracking pinned memory */
u64 vram_pin_size;
+ u64 invisible_pin_size;
u64 gart_pin_size;
/* amdkfd interface */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index d6b0bff510aa..b7b583c42ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
struct acp_pm_domain *apd;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* return early if no ACP */
+ if (!adev->acp.acp_genpd)
+ return 0;
+
/* SMU block will power on ACP irrespective of ACP runtime status.
* Power off explicitly based on genpd ACP runtime status so that ACP
* hw and ACP-genpd status are in sync.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0ea43ff..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
- return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 612117478b57..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+ if (amdgpu_device_is_px(ddev))
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 598eb0cd5aab..b04337de65d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
fw_info.feature = adev->vce.fb_version;
break;
case AMDGPU_INFO_FW_UVD:
- fw_info.ver = 0;
+ fw_info.ver = adev->uvd.fw_version;
fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_GMC:
@@ -384,7 +384,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
vram_gtt.vram_size = adev->mc.real_vram_size;
vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+ vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mc.gtt_size;
vram_gtt.gtt_size -= adev->gart_pin_size;
return copy_to_user(out, &vram_gtt,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8d432e6901af..81bd964d3dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
bool mode_config_initialized;
- struct amdgpu_crtc *crtcs[6];
- struct amdgpu_afmt *afmt[7];
+ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5b6639faa731..7ecea83ce453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size += amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -537,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (!metadata_size) {
if (bo->metadata_size) {
kfree(bo->metadata);
+ bo->metadata = NULL;
bo->metadata_size = 0;
}
return 0;
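
The amdgpu_object.c hunks track, next to the total pinned VRAM, how much of it is pinned with CPU access disallowed, and the amdgpu_kms.c hunk earlier in this diff then subtracts only the visible share of the pins from the CPU-accessible size. A userspace model of that bookkeeping with made-up sizes, not the driver structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MiB(x) ((uint64_t)(x) << 20)

static uint64_t vram_pin_size;      /* all pinned VRAM */
static uint64_t invisible_pin_size; /* pinned VRAM the CPU cannot reach */

static void pin(uint64_t size, bool cpu_invisible)
{
	vram_pin_size += size;
	if (cpu_invisible)
		invisible_pin_size += size;
}

static void unpin(uint64_t size, bool cpu_invisible)
{
	vram_pin_size -= size;
	if (cpu_invisible)
		invisible_pin_size -= size;
}

int main(void)
{
	uint64_t visible_vram = MiB(256);  /* CPU-visible aperture */

	pin(MiB(64), true);                /* hypothetical CPU-invisible pin */
	pin(MiB(16), false);               /* hypothetical visible pin */

	/* Only the visible share of the pins eats into the aperture. */
	printf("cpu accessible: %llu MiB\n",
	       (unsigned long long)((visible_vram -
		       (vram_pin_size - invisible_pin_size)) >> 20));

	unpin(MiB(16), false);
	return 0;
}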
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6f3369de232f..11af4492b4be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+ if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 338da80006b6..871018c634e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_UVD_HANDLES)
return 0;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
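
The UVD hunk above packs major, minor and family ID into the single fw_version word that AMDGPU_INFO_FW_UVD later reports to user space. A minimal encode/decode sketch of that layout, with arbitrary version numbers:

#include <stdint.h>
#include <stdio.h>

/* major in bits 31..24, minor in 23..16, family id in 15..8 */
static uint32_t pack_uvd_fw_version(uint8_t major, uint8_t minor,
				    uint8_t family_id)
{
	return ((uint32_t)major << 24) | ((uint32_t)minor << 16) |
	       ((uint32_t)family_id << 8);
}

int main(void)
{
	uint32_t ver = pack_uvd_fw_version(1, 66, 9);   /* arbitrary numbers */

	printf("fw_version 0x%08x -> %u.%u, family %u\n", ver,
	       ver >> 24, (ver >> 16) & 0xff, (ver >> 8) & 0xff);
	return 0;
}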
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 4bec0c108cea..481a64fa9b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e0bba29e167..1cd6de575305 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 05b0353d3880..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -910,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v7_0_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 02deb3229405..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -870,7 +870,10 @@ static int gmc_v8_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
#define mmMC_SEQ_MISC0_FIJI 0xA71
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7bff929..0f14199cf716 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
- amdgpu_irq_add_domain(adev);
+ amdgpu_irq_remove_domain(adev);
return 0;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79d0daf..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
- if (!mstb)
+ if (!mstb) {
+ drm_dp_put_port(port);
return -EINVAL;
+ }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
+ drm_dp_put_port(port);
return ret;
}
@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 414d7f61aa05..558ef9fc39e6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
+ 704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
- u8 *est = ((u8 *)timing) + 5;
+ u8 *est = ((u8 *)timing) + 6;
for (i = 0; i < 6; i++) {
for (j = 7; j >= 0; j--) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0b5814..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
+ /*
+ * Set the GPU linear window to be at the end of the DMA window, where
+ * the CMA area is likely to reside. This ensures that we are able to
+ * map the command buffers while having the linear window overlap as
+ * much RAM as possible, so we can optimize mappings for other buffers.
+ *
+ * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+ * to different views of the memory on the individual engines.
+ */
+ if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+ (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+ u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+ if (dma_mask < PHYS_OFFSET + SZ_2G)
+ gpu->memory_base = PHYS_OFFSET;
+ else
+ gpu->memory_base = dma_mask - SZ_2G + 1;
+ }
+
ret = etnaviv_hw_reset(gpu);
if (ret)
goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
- u32 dma_mask;
int err = 0;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- /*
- * Set the GPU linear window to be at the end of the DMA window, where
- * the CMA area is likely to reside. This ensures that we are able to
- * map the command buffers while having the linear window overlap as
- * much RAM as possible, so we can optimize mappings for other buffers.
- */
- dma_mask = (u32)dma_get_required_mask(dev);
- if (dma_mask < PHYS_OFFSET + SZ_2G)
- gpu->memory_base = PHYS_OFFSET;
- else
- gpu->memory_base = dma_mask - SZ_2G + 1;
-
/* Map registers: */
gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
if (IS_ERR(gpu->mmio))
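
The relocated etnaviv block places the GPU's 2 GiB linear window at the top of the DMA range so it still covers the CMA area near the end of RAM. The base computation alone, as a userspace sketch — the PHYS_OFFSET and DMA-mask values in main() are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define SZ_2G 0x80000000ULL

/* Pick the window base so [base, base + 2G) reaches the end of the DMA range. */
static uint32_t linear_window_base(uint32_t phys_offset, uint32_t dma_mask)
{
	if ((uint64_t)dma_mask < (uint64_t)phys_offset + SZ_2G)
		return phys_offset;         /* a 2G window already covers it all */

	return dma_mask - (uint32_t)SZ_2G + 1;
}

int main(void)
{
	/* hypothetical: RAM starts at 0x40000000, 32-bit DMA mask */
	printf("base = 0x%08x\n", linear_window_base(0x40000000, 0xffffffff));
	/* hypothetical: small system, everything below 1G */
	printf("base = 0x%08x\n", linear_window_base(0x00000000, 0x3fffffff));
	return 0;
}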
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d39279596..baddf33fb475 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
- depends on !VIDEO_SAMSUNG_S5P_G2D
+ depends on VIDEO_SAMSUNG_S5P_G2D=n
select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 968b31c522b2..23d2f958739b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,10 +2,10 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
- exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+ exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6771c6..011211e4167d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
return 0;
err:
- list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+ list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->close)
subdrv->close(dev, subdrv->dev, file);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194644c8..81cc5537cf25 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = private->fb_helper;
-
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
- else
- exynos_drm_fbdev_init(dev);
-}
-
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4ae860c44f1d..72d7c0b7c216 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
+ else
+ exynos_drm_fbdev_init(dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0ae192..330eef87f718 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
#ifndef _EXYNOS_DRM_FBDEV_H_
#define _EXYNOS_DRM_FBDEV_H_
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 51d484ae9f49..018449f8d557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
* clock. On these SoCs the bootloader may enable it but any
* power domain off/on will reset it to disable state.
*/
- if (ctx->driver_data != &exynos5_fimd_driver_data ||
+ if (ctx->driver_data != &exynos5_fimd_driver_data &&
ctx->driver_data != &exynos5420_fimd_driver_data)
return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70e9e54..a0def0be6d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
} else
val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
- regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+ ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
DRM_ERROR("mic: Failed to read system register\n");
}
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
DRM_ERROR("mic: Failed to get system register.\n");
+ ret = PTR_ERR(mic->sysreg);
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d86227236f55..50185ac347b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
}
static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
{
struct drm_plane_state *state = &exynos_state->base;
- struct drm_crtc *crtc = exynos_state->base.crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
unsigned int src_x, src_y;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 20e82008b8b6..6d2fb3f4ac62 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
-
intel_dp_mst_resume(dev);
+ intel_display_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
+ int ret;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
+
+ /*
+ * Note that we need to set the power state explicitly, since we
+ * powered off the device during freeze and the PCI core won't power
+ * it back up for us during thaw. Powering off the device during
+ * freeze is not a hard requirement though, and during the
+ * suspend/resume phases the PCI core makes sure we get here with the
+ * device powered on. So in case we change our freeze logic and keep
+ * the device powered we can also remove the following set power state
+ * call.
+ */
+ ret = pci_set_power_state(dev->pdev, PCI_D0);
+ if (ret) {
+ DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+ goto out;
+ }
+
+ /*
+ * Note that pci_enable_device() first enables any parent bridge
+ * device and only then sets the power state for this device. The
+ * bridge enabling is a nop though, since bridge devices are resumed
+ * first. The order of enabling power and enabling the device is
+ * imposed by the PCI core as described above, so here we preserve the
+ * same order for the freeze/thaw phases.
+ *
+ * TODO: eventually we should remove pci_disable_device() /
+ * pci_enable_device() from suspend/resume. Due to how they
+ * depend on the device enable refcount we can't anyway depend on them
+ * disabling/enabling the device.
+ */
if (pci_enable_device(dev->pdev)) {
ret = -EIO;
goto out;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 10480939159c..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
- IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+ IS_SKL_GT3(dev) || \
+ IS_SKL_GT4(dev))
+
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 18ba8139e922..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
- down_read(&mm->mmap_sem);
- while (pinned < npages) {
- ret = get_user_pages_remote(work->task, mm,
- obj->userptr.ptr + pinned * PAGE_SIZE,
- npages - pinned,
- !obj->userptr.read_only, 0,
- pvec + pinned, NULL);
- if (ret < 0)
- break;
-
- pinned += ret;
+ ret = -EFAULT;
+ if (atomic_inc_not_zero(&mm->mm_users)) {
+ down_read(&mm->mmap_sem);
+ while (pinned < npages) {
+ ret = get_user_pages_remote
+ (work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ npages - pinned,
+ !obj->userptr.read_only, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
}
- up_read(&mm->mmap_sem);
}
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5ab3f..1c212205d0e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- for (;;) {
+ do {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- }
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f76cbf3e5d1e..fffdac801d3b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2907,7 +2907,14 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
-#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
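
The INTERVAL_1_28_US change converts microseconds to 1.28 us hardware units as before but then rounds the result up to a multiple of 25, per the comment about SNB hangs. A quick sketch with the roundup written out explicitly, plus one worked value:

#include <stdio.h>

/* round x up to the next multiple of y (y > 0) */
#define ROUNDUP(x, y)        ((((x) + (y) - 1) / (y)) * (y))

/* us -> 1.28us units: multiply by 100, divide by 128, then snap up to 25s */
#define INTERVAL_1_28_US(us) ROUNDUP(((us) * 100) >> 7, 25)

int main(void)
{
	unsigned int us = 1000;

	/* 1000us * 100 = 100000; >> 7 gives 781; rounded up to 800 units */
	printf("%u us -> %u units of 1.28us\n", us, INTERVAL_1_28_US(us));
	return 0;
}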
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 62de9f4bce09..3b57bf06abe8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
- ddi_translations_edp = bdw_ddi_translations_edp;
+
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations_edp = bdw_ddi_translations_edp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ } else {
+ ddi_translations_edp = bdw_ddi_translations_dp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ }
+
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
@@ -3201,12 +3209,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
- /* HDMI has nothing special to destroy, so we can go with this. */
- intel_dp_encoder_destroy(encoder);
-}
-
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3225,7 +3227,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .destroy = intel_ddi_destroy,
+ .reset = intel_dp_encoder_reset,
+ .destroy = intel_dp_encoder_destroy,
};
static struct intel_connector *
@@ -3324,6 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6e0d8283daa6..182f84937345 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13351,6 +13351,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (state->legacy_cursor_update)
+ continue;
+
ret = intel_crtc_wait_for_pending_flips(crtc);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f069a82deb57..412a34c39522 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a2bd698fe2f7..937e77228466 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ intel_connector->unregister(intel_connector);
+
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
- drm_modeset_unlock_all(dev);
- intel_connector->unregister(intel_connector);
-
- drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4c027d69fac9..7d3af3a72abe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1238,6 +1238,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a0d8daed2470..1ab6f687f640 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}
- if (!live_status)
- DRM_DEBUG_KMS("Live status not up!");
+ if (!live_status) {
+ DRM_DEBUG_KMS("HDMI live status down\n");
+ /*
+ * Live status register is not reliable on all intel platforms.
+ * So consider live_status only for certain platforms, for
+ * others, read EDID to determine presence of sink.
+ */
+ if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+ live_status = true;
+ }
intel_hdmi_unset_edid(connector);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce80244..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
+ /* We're using qword write, seqno should be aligned to 8 bytes. */
+ BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
/* w/a for post sync ops following a GPGPU operation we
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ /* We're thrashing one dword of HWS. */
+ intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30a8403a8f4f..cd9fe609aefb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev)) {
- drm_modeset_lock_all(dev);
+ if (!HAS_PCH_SPLIT(dev))
intel_display_resume(dev);
- drm_modeset_unlock_all(dev);
- }
dev_priv->modeset_restore = MODESET_DONE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df49a9b..8ed3cf34f82d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
+ uint32_t width = 0, height = 0;
+
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(pstate->rotation))
+ swap(width, height);
/* for planar format */
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
+ return width * height *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
- return (intel_crtc->config->pipe_src_w/2) *
- (intel_crtc->config->pipe_src_h/2) *
+ return (width / 2) * (height / 2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}
/* for packed formats */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
- drm_format_plane_cpp(fb->pixel_format, 0);
+ return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
}
/*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);
- if (fb == NULL)
+ if (!to_intel_plane_state(plane->state)->visible)
continue;
+
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
- if (pstate->fb == NULL)
+ if (!to_intel_plane_state(pstate)->visible)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
+ struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(plane->state);
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
uint8_t cpp;
+ uint32_t width = 0, height = 0;
- if (latency == 0 || !cstate->base.active || !fb)
+ if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
return false;
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(plane->state->rotation))
+ swap(width, height);
+
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
cpp, latency);
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- cstate->pipe_src_w,
- cpp, fb->modifier[0],
+ width,
+ cpp,
+ fb->modifier[0],
latency);
- plane_bytes_per_line = cstate->pipe_src_w * cpp;
+ plane_bytes_per_line = width * cpp;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
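
The intel_pm.c hunks switch the watermark maths from the pipe source size to the plane's own source rectangle (16.16 fixed point), swapping width and height for 90/270 rotation, and keep the NV12 split where the UV plane runs at quarter resolution. A standalone sketch of that data-rate computation with explicit parameters instead of the driver state structures; the sizes in main() are examples only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* bytes per pixel per plane; NV12: 1 byte Y, 2 bytes interleaved UV */
static uint64_t plane_data_rate(uint32_t src_w_fp16, uint32_t src_h_fp16,
				bool rotated_90_or_270, bool is_nv12,
				bool uv_plane, unsigned int cpp)
{
	uint32_t width  = src_w_fp16 >> 16;   /* drop the 16.16 fraction */
	uint32_t height = src_h_fp16 >> 16;

	if (rotated_90_or_270) {              /* scanout sees it swapped */
		uint32_t tmp = width;
		width = height;
		height = tmp;
	}

	if (is_nv12 && uv_plane)              /* UV plane is subsampled 2x2 */
		return (uint64_t)(width / 2) * (height / 2) * cpp;

	return (uint64_t)width * height * cpp;
}

int main(void)
{
	uint32_t w = 1920u << 16, h = 1080u << 16;

	printf("packed XRGB: %llu bytes/frame\n",
	       (unsigned long long)plane_data_rate(w, h, false, false, false, 4));
	printf("NV12 Y:      %llu\n",
	       (unsigned long long)plane_data_rate(w, h, false, true, false, 1));
	printf("NV12 UV:     %llu\n",
	       (unsigned long long)plane_data_rate(w, h, false, true, true, 2));
	return 0;
}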
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+ if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+ /* This is tied to WaForceContextSaveRestoreNonCoherent */
+ if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
/*
*Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
if (ret)
return ret;
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return -ENOMEM;
}
} else {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+ flags | PIN_MAPPABLE);
if (ret)
return ret;
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2b8682..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ if (IS_HASWELL(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ae96ebc490fb..e81aefe5ffa7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
default:
if (disp->dithering_mode) {
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->
dithering_mode);
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
}
if (disp->dithering_depth) {
+ nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->
dithering_depth);
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
}
break;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c56a886229f1..b2de290da16f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
gf100_gr_mmio(gr, gr->func->mmio);
+ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 43e5f503d1c5..030409a3ee4e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
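The two qxl hunks above keep cur_x/cur_y as the cursor position with the hotspot subtracted out and add hot_spot_x/hot_spot_y back in whenever a command is sent, adjusting the cached position when the hotspot changes so the on-screen cursor does not jump. A minimal stand-alone sketch of that bookkeeping, with an invented struct and made-up coordinates rather than the driver's own types:

    #include <stdio.h>

    struct cursor_state {
        int cur_x, cur_y;             /* last position, minus the hotspot */
        int hot_spot_x, hot_spot_y;
    };

    static void set_cursor(struct cursor_state *c, int hot_x, int hot_y)
    {
        /* keep the on-screen position stable when the hotspot changes */
        c->cur_x += c->hot_spot_x - hot_x;
        c->cur_y += c->hot_spot_y - hot_y;
        c->hot_spot_x = hot_x;
        c->hot_spot_y = hot_y;
        /* what the device is told: position including the hotspot */
        printf("device position: %d,%d\n",
               c->cur_x + c->hot_spot_x, c->cur_y + c->hot_spot_y);
    }

    int main(void)
    {
        struct cursor_state c = { .cur_x = 100, .cur_y = 80 };

        set_cursor(&c, 4, 4);   /* new cursor image with a (4,4) hotspot */
        set_cursor(&c, 0, 0);   /* hotspot back at the corner */
        return 0;
    }

Both calls print the same device position (100,80), which is the point of the adjustment.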
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1519b8..3f3897eb458c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index edd05cdb0cd8..587cae4e73c9 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf21b20..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
+static const unsigned ni_dig_offsets[] =
+{
+ NI_DIG0_REGISTER_OFFSET,
+ NI_DIG1_REGISTER_OFFSET,
+ NI_DIG2_REGISTER_OFFSET,
+ NI_DIG3_REGISTER_OFFSET,
+ NI_DIG4_REGISTER_OFFSET,
+ NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+ EVERGREEN_DP0_REGISTER_OFFSET,
+ EVERGREEN_DP1_REGISTER_OFFSET,
+ EVERGREEN_DP2_REGISTER_OFFSET,
+ EVERGREEN_DP3_REGISTER_OFFSET,
+ EVERGREEN_DP4_REGISTER_OFFSET,
+ EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumes EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * We go from crtc to connector, which is not reliable since the mapping
+ * really goes in the opposite direction. If the crtc is enabled, find
+ * the dig_fe that selects this crtc and make sure it is enabled. If
+ * such a dig_fe is found, find the dig_be that selects that dig_fe and
+ * make sure it is enabled and in DP_SST mode. If UNIPHY_PLL_CONTROL1
+ * is enabled, the timing should then be disconnected from the dp
+ * symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+ unsigned crtc_id, unsigned *ret_dig_fe)
+{
+ unsigned i;
+ unsigned dig_fe;
+ unsigned dig_be;
+ unsigned dig_en_be;
+ unsigned uniphy_pll;
+ unsigned digs_fe_selected;
+ unsigned dig_be_mode;
+ unsigned dig_fe_mask;
+ bool is_enabled = false;
+ bool found_crtc = false;
+
+ /* loop through all running dig_fe to find selected crtc */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+ /* found running pipe */
+ found_crtc = true;
+ dig_fe_mask = 1 << i;
+ dig_fe = i;
+ break;
+ }
+ }
+
+ if (found_crtc) {
+ /* loop through all running dig_be to find selected dig_fe */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+ /* is this dig_fe selected by the dig_be? */
+ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+ if (dig_fe_mask & digs_fe_selected &&
+ /* is the dig_be in sst mode? */
+ dig_be_mode == NI_DIG_BE_DPSST) {
+ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+ ni_dig_offsets[i]);
+ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+ ni_tx_offsets[i]);
+ /* dig_be enable and tx is running */
+ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+ is_enabled = true;
+ *ret_dig_fe = dig_fe;
+ break;
+ }
+ }
+ }
+ }
+
+ return is_enabled;
+}
+
+/*
+ * Blank the dig when in dp sst mode;
+ * the dig ignores the crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+ unsigned dig_fe)
+{
+ unsigned stream_ctrl;
+ unsigned fifo_ctrl;
+ unsigned counter = 0;
+
+ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+ DRM_ERROR("dig %d , should be enable\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+ msleep(1);
+ counter++;
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ }
+ if (counter >= 32)
+ DRM_ERROR("counter exceeds %d\n", counter);
+
+ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
+ unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
-
+ /*
+ * We should disable the dig if it drives dp sst, but we are in
+ * radeon_device_init and the display topology is not known yet;
+ * it only becomes available after radeon_modeset_init.
+ * radeon_atom_encoder_dpms_dig would do the job once everything
+ * is initialized properly; for now do it manually here.
+ */
+ if (ASIC_IS_DCE5(rdev) &&
+ evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+ evergreen_blank_dp_output(rdev, dig_fe);
+ /* the 6 lines below could probably be removed */
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
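evergreen_blank_dp_output() above disables the DP video stream and then polls EVERGREEN_DP_VID_STREAM_STATUS, sleeping about 1 ms per iteration for at most 32 iterations, before resetting the steer FIFO. A rough user-space sketch of that bounded-poll pattern, with the register access faked (read_status() and its timing are invented):

    #include <stdio.h>
    #include <unistd.h>

    #define STREAM_STATUS_BIT (1u << 16)

    /* pretend the hardware drops the status bit after a few polls */
    static unsigned read_status(void)
    {
        static int polls;
        return ++polls >= 3 ? 0 : STREAM_STATUS_BIT;
    }

    int main(void)
    {
        unsigned counter = 0;
        unsigned status = read_status();

        while (counter < 32 && (status & STREAM_STATUS_BIT)) {
            usleep(1000);             /* stands in for msleep(1) */
            counter++;
            status = read_status();
        }
        if (counter >= 32)
            printf("timed out waiting for the stream to stop\n");
        else
            printf("stream stopped after %u polls\n", counter);
        return 0;
    }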
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
+
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL 0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
+# define NI_DIG_BE_DPSST 0
/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
+#define EVERGREEN_DP_STEER_FIFO 0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
+
#endif
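The new macros follow a base-plus-instance-offset scheme: NI_DIG_FE_CNTL and friends are block-0 register addresses, and each NI_DIGn_REGISTER_OFFSET / EVERGREEN_DPn_REGISTER_OFFSET is the distance of instance n from block 0, so base plus offset addresses the same register in another instance. A tiny sketch of that arithmetic, reusing two of the values above (the actual register access is omitted):

    #include <stdio.h>

    #define NI_DIG_FE_CNTL          0x7000             /* register in DIG0 */
    #define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)  /* DIG1 block offset */

    int main(void)
    {
        /* DIG1's FE control register: block-0 address plus the offset */
        unsigned addr = NI_DIG_FE_CNTL + NI_DIG1_REGISTER_OFFSET;

        printf("NI_DIG_FE_CNTL for DIG1 is at 0x%04X\n", addr);  /* 0x7C00 */
        return 0;
    }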
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70c0f0..827ccc87cbc3 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
#define NI_DP_MSE_SAT2 0x7398
#define NI_DP_MSE_SAT_UPDATE 0x739c
+# define NI_DP_MSE_SAT_UPDATE_MASK 0x3
+# define NI_DP_MSE_16_MTP_KEEPOUT 0x100
#define NI_DIG_BE_CNTL 0x7140
# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index fd8c4d317e60..95f4fea89302 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
-bool radeon_has_atpx_dgpu_power_cntl(void) {
- return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* radeon_atpx_call - call an ATPX method
*
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cfcc099c537d..81a63d7f5cd9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4fd1a961012d..d0826fb0434c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
}
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
- radeon_family_name[rdev->family], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device);
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+ if (rdev->flags & RADEON_IS_PX)
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 43cffb526b0c..de504ea29c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
do {
+ unsigned value1, value2;
+ udelay(10);
temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
- } while ((temp & 0x1) && retries++ < 10000);
+
+ value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+ value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+ if (!value1 && !value2)
+ break;
+ } while (retries++ < 50);
if (retries == 10000)
DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
return 0;
}
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
{
struct drm_device *dev = mst->base.dev;
struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
uint32_t val, temp;
uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
int retries = 0;
+ uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+ uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
do {
temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+ udelay(10);
} while ((temp & 0x1) && (retries++ < 10000));
if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(radeon_connector);
}
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
- DRM_DEBUG_KMS("\n");
- return 0;
-}
-
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
- .dpms = radeon_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
int ret, slots;
-
+ s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
if (!ASIC_IS_DCE5(rdev)) {
DRM_ERROR("got mst dpms on non-DCE5\n");
return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
mst_enc->enc_active = true;
radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
- radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+ fixed_pbn = drm_int2fixp(mst_enc->pbn);
+ fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+ avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+ radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
mst_enc->fe);
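radeon_dp_mst_set_vcp_size() now takes the average time slots per MTP as a 32.32 fixed-point value (the stream's PBN divided by the manager's pbn_div) and splits it into an integer X and a fractional Y rounded up to 26 bits, instead of the old slots/0 pair. A rough stand-alone model of that split, assuming the same 32.32 layout the drm_fixp_* helpers use and with made-up PBN numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int pbn = 2924, pbn_per_slot = 40;   /* illustrative values only */

        /* 32.32 fixed-point quotient, like drm_fixp_div(drm_int2fixp(..)) */
        uint64_t avg = ((uint64_t)pbn << 32) / (uint32_t)pbn_per_slot;

        uint32_t x = (uint32_t)(avg >> 32);             /* whole time slots per MTP */
        uint32_t frac = (uint32_t)(avg & 0xffffffffu);  /* 32-bit fractional part */
        uint32_t y = (frac + 63) >> 6;                  /* fraction rounded up to 26 bits */

        printf("X = %u, Y = 0x%07x\n", x, y);           /* 2924/40 = 73.1 -> X = 73 */
        return 0;
    }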
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 7dddfdce85e6..90f739478a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ if (radeon_ttm_tt_has_userptr(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index af4df81c4e0c..e6abc09b67e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
+ int put_count = 0;
lockdep_assert_held(&bo->resv->lock.base);
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- list_del_init(&bo->swap);
- list_del_init(&bo->lru);
-
- } else {
- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
- man = &bdev->man[bo->mem.mem_type];
- list_move_tail(&bo->lru, &man->lru);
- }
+ put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac87e24..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.enable = virtio_gpu_crtc_enable,
.disable = virtio_gpu_crtc_disable,
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_check = virtio_gpu_crtc_atomic_check,
+ .atomic_flush = virtio_gpu_crtc_atomic_flush,
};
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16c6084..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_cid_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
- &vmw_cmd_ok, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
- mode->hdisplay *
- (var->bits_per_pixel + 7) / 8,
- mode->vdisplay)) {
+ mode->hdisplay *
+ DIV_ROUND_UP(var->bits_per_pixel, 8),
+ mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index e00db3f510dd..abb98c77bad2 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
goto err_register;
}
- pdev->dev.of_node = of_node;
pdev->dev.parent = dev;
ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
platform_device_put(pdev);
goto err_register;
}
+
+ /*
+ * Set of_node only after calling platform_device_add. Otherwise
+ * the platform:imx-ipuv3-crtc modalias won't be used.
+ */
+ pdev->dev.of_node = of_node;
}
return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bdb8cc89cacc..4f9c5c6deaed 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1979,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5c0e43ed5c53..0238f0169e48 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -259,6 +259,7 @@
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
#define USB_VENDOR_ID_CREATIVELABS 0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
#define USB_VENDOR_ID_CVTOUCH 0x1ff7
@@ -676,6 +677,7 @@
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
#define USB_DEVICE_ID_MS_NE4K 0x00db
#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
#define USB_DEVICE_ID_MS_LK6K 0x00f9
@@ -683,6 +685,8 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_NE7K 0x071d
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600 0x0750
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e356bd8d..1ac4ff4d57a6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
unsigned char byte2, unsigned char byte3)
{
int ret;
- unsigned char buf[] = {0x18, byte2, byte3};
+ unsigned char *buf;
+
+ buf = kzalloc(3, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = 0x18;
+ buf[1] = byte2;
+ buf[2] = byte3;
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
- ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
break;
case USB_DEVICE_ID_LENOVO_CBTKBD:
- ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+ ret = hid_hw_output_report(hdev, buf, 3);
break;
default:
ret = -EINVAL;
break;
}
+ kfree(buf);
+
return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
}
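The hunk above moves the three command bytes from the stack into a kzalloc'ed buffer before handing them to hid_hw_raw_request()/hid_hw_output_report(), presumably because those buffers can end up being used for DMA and stack memory is not DMA-safe. A user-space sketch of the allocate/fill/send/free shape of the change, where send_report() is a stand-in rather than a real HID call:

    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for the transport call; just reports what it was given */
    static int send_report(const unsigned char *buf, size_t len)
    {
        printf("sending %zu bytes, cmd 0x%02x\n", len, buf[0]);
        return (int)len;
    }

    static int send_cmd(unsigned char byte2, unsigned char byte3)
    {
        unsigned char *buf = calloc(3, 1);   /* heap buffer, as kzalloc(3, ...) */
        int ret;

        if (!buf)
            return -1;
        buf[0] = 0x18;
        buf[1] = byte2;
        buf[2] = byte3;
        ret = send_report(buf, 3);
        free(buf);
        return ret < 0 ? ret : 0;
    }

    int main(void)
    {
        return send_cmd(0x05, 0x01);
    }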
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 75cd3bc59c54..e924d555536c 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
.driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 25d3c4330bf6..c741f5e50a66 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,7 @@ static void mt_release_contacts(struct hid_device *hid)
MT_TOOL_FINGER,
false);
}
+ input_mt_sync_frame(input_dev);
input_sync(input_dev);
}
}
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee2ce84..c830ed39348f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
* -----+------------------------------+-----+-----+
* The single bits Yaw, Roll, Pitch in the lower right corner specify
* whether the wiimote is rotating fast (0) or slow (1). Speed for slow
- * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
- * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
- * and 9 for slow.
+ * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
+ * units / deg/s. To get a linear scale for fast rotation we multiply
+ * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+ * previous scale reported by this driver.
+ * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
* If the wiimote is not rotating the sensor reports 2^13 = 8192.
* Ext specifies whether an extension is connected to the motionp.
* which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
z -= 8192;
if (!(ext[3] & 0x02))
- x *= 18;
+ x = (x * 2000 * 9) / 440;
else
x *= 9;
if (!(ext[4] & 0x02))
- y *= 18;
+ y = (y * 2000 * 9) / 440;
else
y *= 9;
if (!(ext[3] & 0x01))
- z *= 18;
+ z = (z * 2000 * 9) / 440;
else
z *= 9;
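A quick worked example of the scaling described in the comment above, using invented raw readings that are already relative to the 8192 rest value; the constants are the ones from the comment:

    #include <stdio.h>

    int main(void)
    {
        int slow = 4400;    /* raw units in the slow (440 deg/s) range  */
        int fast = 4400;    /* raw units in the fast (2000 deg/s) range */

        /* both end up on the common scale of 8192*9/440 (~167.56) units per deg/s */
        int slow_scaled = slow * 9;                 /* 39600  -> ~236 deg/s  */
        int fast_scaled = (fast * 2000 * 9) / 440;  /* 180000 -> ~1074 deg/s */

        printf("slow: %d, fast: %d\n", slow_scaled, fast_scaled);
        return 0;
    }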
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160b9ea4..ae83af649a60 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- usbhid_restart_out_queue(usbhid);
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- usbhid_restart_ctrl_queue(usbhid);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
+static void hid_restart_io(struct hid_device *hid)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (clear_halt || reset_pending)
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+ spin_unlock_irq(&usbhid->lock);
+
+ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+ return;
+
+ if (!clear_halt) {
+ if (hid_start_in(hid) < 0)
+ hid_io_error(hid);
+ }
+
+ spin_lock_irq(&usbhid->lock);
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+}
+
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
+ /* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
+
+ hid_restart_io(hid);
return 0;
}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
- struct usbhid_device *usbhid = hid->driver_data;
- int status;
-
- spin_lock_irq(&usbhid->lock);
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
-
- usbhid_restart_queues(usbhid);
- spin_unlock_irq(&usbhid->lock);
-
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
+ int status = 0;
+ hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (!test_bit(HID_STARTED, &usbhid->iofl))
- return 0;
-
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ed2f68edc8f1..53fc856d6867 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 68a560957871..ccf1883318c3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
hid_data->inputmode = field->report->id;
hid_data->inputmode_index = usage->usage_index;
break;
+
+ case HID_UP_DIGITIZER:
+ if (field->report->id == 0x0B &&
+ (field->application == WACOM_G9_DIGITIZER ||
+ field->application == WACOM_G11_DIGITIZER)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
+
+ case WACOM_G9_PAGE:
+ case WACOM_G11_PAGE:
+ if (field->report->id == 0x03 &&
+ (field->application == WACOM_G9_TOUCHSCREEN ||
+ field->application == WACOM_G11_TOUCHSCREEN)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
}
}
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
return 0;
}
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
- int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+ struct wacom_wac *wacom_wac)
{
- unsigned char *rep_data;
+ u8 *rep_data;
+ struct hid_report *r;
+ struct hid_report_enum *re;
+ int length;
int error = -ENOMEM, limit = 0;
- rep_data = kzalloc(length, GFP_KERNEL);
+ if (wacom_wac->mode_report < 0)
+ return 0;
+
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[wacom_wac->mode_report];
+ if (!r)
+ return -EINVAL;
+
+ rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
if (!rep_data)
- return error;
+ return -ENOMEM;
+
+ length = hid_report_len(r);
do {
- rep_data[0] = report_id;
- rep_data[1] = mode;
+ rep_data[0] = wacom_wac->mode_report;
+ rep_data[1] = wacom_wac->mode_value;
error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
length, 1);
if (error >= 0)
error = wacom_get_report(hdev, HID_FEATURE_REPORT,
rep_data, length, 1);
- } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+ } while (error >= 0 &&
+ rep_data[1] != wacom_wac->mode_report &&
+ limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
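The reworked helper above looks up the mode feature report, allocates a matching buffer, and then repeatedly writes the requested mode and reads it back until the device reports it or WAC_MSG_RETRIES is exhausted. A minimal sketch of that set-and-verify loop, with the device I/O faked and arbitrary constants:

    #include <stdio.h>

    #define MSG_RETRIES 5

    static unsigned char device_mode;    /* pretend device state */

    static int set_feature(unsigned char report, unsigned char value)
    {
        static int dropped = 2;

        (void)report;
        if (dropped-- > 0)
            return 0;            /* request "lost": mode left unchanged */
        device_mode = value;
        return 0;
    }

    static int get_feature(unsigned char report, unsigned char *value)
    {
        (void)report;
        *value = device_mode;
        return 0;
    }

    int main(void)
    {
        unsigned char mode_report = 3, mode_value = 4, readback = 0;
        int limit = 0, error;

        do {
            error = set_feature(mode_report, mode_value);
            if (error >= 0)
                error = get_feature(mode_report, &readback);
        } while (error >= 0 && readback != mode_value && limit++ < MSG_RETRIES);

        printf("device reports mode %u after %d retries\n", readback, limit);
        return 0;
    }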
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
static int wacom_query_tablet_data(struct hid_device *hdev,
struct wacom_features *features)
{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
- if (features->type == HID_GENERIC)
- return wacom_hid_set_device_mode(hdev);
-
- if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
- if (features->type > TABLETPC) {
- /* MT Tablet PC touch */
- return wacom_set_device_mode(hdev, 3, 4, 4);
- }
- else if (features->type == WACOM_24HDT) {
- return wacom_set_device_mode(hdev, 18, 3, 2);
- }
- else if (features->type == WACOM_27QHDT) {
- return wacom_set_device_mode(hdev, 131, 3, 2);
- }
- else if (features->type == BAMBOO_PAD) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
- }
- } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
- if (features->type <= BAMBOO_PT) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
+ if (features->type != HID_GENERIC) {
+ if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+ if (features->type > TABLETPC) {
+ /* MT Tablet PC touch */
+ wacom_wac->mode_report = 3;
+ wacom_wac->mode_value = 4;
+ } else if (features->type == WACOM_24HDT) {
+ wacom_wac->mode_report = 18;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == WACOM_27QHDT) {
+ wacom_wac->mode_report = 131;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == BAMBOO_PAD) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
+ } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+ if (features->type <= BAMBOO_PT) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
}
}
+ wacom_set_device_mode(hdev, wacom_wac);
+
+ if (features->type == HID_GENERIC)
+ return wacom_hid_set_device_mode(hdev);
+
return 0;
}
@@ -1817,6 +1860,9 @@ static int wacom_probe(struct hid_device *hdev,
goto fail_type;
}
+ wacom_wac->hid_data.inputmode = -1;
+ wacom_wac->mode_report = -1;
+
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bd198bbd4df0..cf2ba43453fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
+ wacom->shared->stylus_in_proximity = true;
return 1;
}
@@ -2426,6 +2427,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
/*
+ * Hack for the Bamboo One:
+ * the device presents a PAD/Touch interface as most Bamboos do and even
+ * sends ghost PAD data on it. We must disable this ghost interface
+ * later, and we cannot detect it unless we set it here to
+ * WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PEN &&
+ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ /*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
@@ -3384,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+ { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC };
@@ -3549,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x33C) },
{ USB_DEVICE_WACOM(0x33D) },
{ USB_DEVICE_WACOM(0x33E) },
+ { USB_DEVICE_WACOM(0x343) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f29599..e2084d914c14 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_VENDORDEFINED_PEN 0xff0d0001
+#define WACOM_G9_PAGE 0xff090000
+#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE 0xff110000
+#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
int ps_connected;
u8 bt_features;
u8 bt_high_speed;
+ int mode_report;
+ int mode_value;
struct hid_data hid_data;
};
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 5613e2b5cff7..a40a73a7b71d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
* there is room for the producer to send the pending packet.
*/
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
- struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
u32 cur_write_sz;
u32 r_size;
- u32 write_loc = rbi->ring_buffer->write_index;
+ u32 write_loc;
u32 read_loc = rbi->ring_buffer->read_index;
- u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+ u32 pending_sz;
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * If the read of pending_send_sz (below) were reordered ahead of the
+ * commit of the new read index (in the caller), we could miss a
+ * needed interrupt: the host could set pending_send_sz after we
+ * sampled it and go to sleep before the new read index is visible,
+ * and we would never signal it. The full barrier prevents that
+ * reordering.
+ */
+ mb();
+
+ pending_sz = rbi->ring_buffer->pending_send_sz;
+ write_loc = rbi->ring_buffer->write_index;
/* If the other end is not blocked on write don't bother. */
if (pending_sz == 0)
return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
read_loc - write_loc;
- if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+ if (cur_write_sz >= pending_sz)
return true;
return false;
@@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
- *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+ *signal = hv_need_to_signal_on_read(inring_info);
return ret;
}
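hv_need_to_signal_on_read() above only signals the host when the space freed by this read reaches pending_send_sz; the writable space in the ring is size - (write - read) when the write index is at or ahead of the read index, and read - write otherwise. A small stand-alone illustration of that arithmetic with invented indices:

    #include <stdio.h>

    static unsigned free_space(unsigned size, unsigned write, unsigned read)
    {
        return write >= read ? size - (write - read) : read - write;
    }

    int main(void)
    {
        unsigned size = 4096, pending_sz = 1024;
        unsigned write = 2400;

        /* before the read index moves, the producer is still blocked */
        printf("read=2500: free %u, signal %s\n", free_space(size, write, 2500),
               free_space(size, write, 2500) >= pending_sz ? "yes" : "no");

        /* after the reader advances, enough room opens up to signal */
        printf("read=3500: free %u, signal %s\n", free_space(size, write, 3500),
               free_space(size, write, 3500) >= pending_sz ? "yes" : "no");
        return 0;
    }

The mb() added above makes sure pending_send_sz is sampled only after the new read index has been committed, so the decision is made against up-to-date values.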
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e6821fea..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -975,10 +975,10 @@ config I2C_XLR
config I2C_XLP9XX
tristate "XLP9XX I2C support"
- depends on CPU_XLP || COMPILE_TEST
+ depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
- the Broadcom XLP9xx/XLP5xx MIPS processors.
+ the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
This driver can also be built as a module. If so, the module will
be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
cbd_t __iomem *rbase;
u_char *txbuf[CPM_MAXBD];
u_char *rxbuf[CPM_MAXBD];
- u32 txdma[CPM_MAXBD];
- u32 rxdma[CPM_MAXBD];
+ dma_addr_t txdma[CPM_MAXBD];
+ dma_addr_t rxdma[CPM_MAXBD];
};
static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
return -EIO;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (i = 0; i < num; i++, msgs++) {
stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
out:
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
return ret;
}
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
+ clk_disable(i2c->clk);
+
+ return 0;
+
err_clk:
clk_disable_unprepare(i2c->clk);
return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c->suspended = 1;
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
int ret = 0;
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
ret = exynos5_hsi2c_clock_setup(i2c);
if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
}
exynos5_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
static const struct of_device_id rk3x_i2c_match[] = {
{ .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
{ .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+ { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
{ .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
{},
};
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index dbee13ad33a3..2e154cb51685 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -451,6 +451,8 @@ static int at91_adc_probe(struct platform_device *pdev)
if (ret)
goto vref_disable;
+ platform_set_drvdata(pdev, indio_dev);
+
ret = iio_device_register(indio_dev);
if (ret < 0)
goto per_clk_disable_unprepare;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index f581256d9d4c..5ee4e0dc093e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -104,6 +104,19 @@ static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
return 0;
}
+static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ *chip_id = (int)id->driver_data;
+
+ return dev_name(dev);
+}
+
/**
* inv_mpu_probe() - probe function.
* @client: i2c client.
@@ -115,14 +128,25 @@ static int inv_mpu_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct inv_mpu6050_state *st;
- int result;
- const char *name = id ? id->name : NULL;
+ int result, chip_type;
struct regmap *regmap;
+ const char *name;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
return -EOPNOTSUPP;
+ if (id) {
+ chip_type = (int)id->driver_data;
+ name = id->name;
+ } else if (ACPI_HANDLE(&client->dev)) {
+ name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
+ if (!name)
+ return -ENODEV;
+ } else {
+ return -ENOSYS;
+ }
+
regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to register i2c regmap %d\n",
@@ -131,7 +155,7 @@ static int inv_mpu_probe(struct i2c_client *client,
}
result = inv_mpu_core_probe(regmap, client->irq, name,
- NULL, id->driver_data);
+ NULL, chip_type);
if (result < 0)
return result;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index dea6c4361de0..7bcb8d839f05 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -46,6 +46,7 @@ static int inv_mpu_probe(struct spi_device *spi)
struct regmap *regmap;
const struct spi_device_id *id = spi_get_device_id(spi);
const char *name = id ? id->name : NULL;
+ const int chip_type = id ? id->driver_data : 0;
regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
@@ -55,7 +56,7 @@ static int inv_mpu_probe(struct spi_device *spi)
}
return inv_mpu_core_probe(regmap, spi->irq, name,
- inv_mpu_i2c_disable, id->driver_data);
+ inv_mpu_i2c_disable, chip_type);
}
static int inv_mpu_remove(struct spi_device *spi)
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 9c5c9ef3f1da..0e931a9a1669 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
int rc;
int irq;
+ init_waitqueue_head(&data->data_ready_queue);
+ clear_bit(0, &data->flags);
if (client->irq)
irq = client->irq;
else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
return rc;
}
- init_waitqueue_head(&data->data_ready_queue);
- clear_bit(0, &data->flags);
data->eoc_irq = irq;
return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
int eoc_gpio;
int err;
const char *name = NULL;
- enum asahi_compass_chipset chipset;
+ enum asahi_compass_chipset chipset = AK_MAX_TYPE;
/* Grab and set up the supplied GPIO. */
if (client->dev.platform_data)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59da456..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
NULL);
/* Couldn't find the default GID location */
- WARN_ON(ix < 0);
+ if (WARN_ON(ix < 0))
+ goto release;
zattr_type.gid_type = gid_type;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa0433b07..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcceadfde..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc81535..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
+
#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (count < sizeof hdr)
return -EINVAL;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adbf39c0..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
void ib_drain_qp(struct ib_qp *qp)
{
ib_drain_sq(qp);
- ib_drain_rq(qp);
+ if (!qp->srq)
+ ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b8952d13..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb783573c..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_va) {
+ if (user && !cq->bar2_pa) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682dc5709..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5e033..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+ if (is_t4(rdev->lldi.adapter_type))
+ return NULL;
+
return rdev->bar2_kva + bar2_qoffset;
}
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void c4iw_drain_sq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
- wait_for_completion(&qp->sq_drained);
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_sq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
+
+ if (need_to_wait)
+ wait_for_completion(&qp->sq_drained);
}
void c4iw_drain_rq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
+
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_rq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
- wait_for_completion(&qp->rq_drained);
+ if (need_to_wait)
+ wait_for_completion(&qp->rq_drained);
}
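The c4iw_drain_sq()/c4iw_drain_rq() change above samples the queue state under the lock and only waits for the "drained" completion when work is actually outstanding, so an idle queue no longer blocks forever. A rough user-space sketch of that pattern, with invented names and a pthread condition variable standing in for the kernel completion:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int outstanding;                 /* pending work items on the queue */

static void *worker(void *arg)
{
    (void)arg;
    usleep(10000);                      /* pretend to process the queue */
    pthread_mutex_lock(&lock);
    outstanding = 0;
    pthread_cond_signal(&drained);
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void drain_queue(void)
{
    pthread_mutex_lock(&lock);
    bool need_to_wait = outstanding != 0;   /* snapshot taken under the lock */
    while (need_to_wait && outstanding != 0)
        pthread_cond_wait(&drained, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t tid;

    drain_queue();                      /* empty queue: returns immediately */

    outstanding = 1;
    pthread_create(&tid, NULL, worker, NULL);
    drain_queue();                      /* waits until the worker signals */
    pthread_join(tid, NULL);

    printf("queues drained\n");
    return 0;
}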
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5acf346e048e..6ad0489cb3c5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
- int max_mtu;
- int oper_mtu;
+ u16 max_mtu;
+ u16 oper_mtu;
int err;
u8 ib_link_width_oper;
u8 vl_hw_cap;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3ea9e055fdd3..92914539edc7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
- if (!netif_carrier_ok(netdev))
- return NETDEV_TX_OK;
-
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/export.h>
#include <linux/uio.h>
+#include <rdma/ib.h>
+
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a6948dc8..a9e3bcc522c4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@ bail:
spin_unlock_irqrestore(&qp->s_hlock, flags);
if (nreq) {
if (call_send)
- rdi->driver_f.schedule_send_no_lock(qp);
- else
rdi->driver_f.do_send(qp);
+ else
+ rdi->driver_f.schedule_send_no_lock(qp);
}
return err;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 80b6bedc172f..64b3d11dcf1e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
+ u32 max_fr_sectors;
u16 max_cmds;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
iser_conn = ep->dd_data;
max_cmds = iser_conn->max_cmds;
shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
- shost->max_sectors = iser_conn->scsi_max_sectors;
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
- shost->max_sectors = min_t(unsigned int,
- 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
if (iscsi_host_add(shost,
ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
goto free_host;
}
+ /*
+ * FRs or FMRs can only map up to a (device) page per entry, but if the
+ * first entry is misaligned, we'll end up using two entries
+ * (head and tail) for a single page worth of data, so we have to drop
+ * one segment from the calculation.
+ */
+ max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+ shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
cmds_max, max_cmds);
@@ -989,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = {
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
.sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
- .max_sectors = ISER_DEF_MAX_SECTORS,
.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
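For a rough feel of the max_fr_sectors arithmetic added above, here is a small stand-alone C program; the page size, sg_tablesize and iser_max_sectors values are assumptions picked for illustration, not values read out of the driver:

#include <stdio.h>

int main(void)
{
    unsigned int page_size = 4096;        /* PAGE_SIZE assumed 4 KiB */
    unsigned int sg_tablesize = 256;      /* assumed device limit */
    unsigned int iser_max_sectors = 1024; /* assumed driver default */

    /* One entry is reserved for a misaligned head/tail split. */
    unsigned int max_fr_sectors = ((sg_tablesize - 1) * page_size) >> 9;
    unsigned int max_sectors =
        max_fr_sectors < iser_max_sectors ? max_fr_sectors : iser_max_sectors;

    printf("max_fr_sectors = %u, shost->max_sectors = %u\n",
           max_fr_sectors, max_sectors);
    return 0;
}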
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8a84d12b7ff..1142a93dd90b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -153,6 +153,7 @@ static const struct xpad_device {
{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index d5994a745ffa..982936334537 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
input_set_drvdata(haptics->input_dev, haptics);
haptics->input_dev->name = "arizona:haptics";
- haptics->input_dev->dev.parent = pdev->dev.parent;
haptics->input_dev->close = arizona_haptics_close;
__set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e03d12..67aab86048ad 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
kpd_delay = 15625;
- if (kpd_delay > 62500 || kpd_delay == 0) {
+ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- delay = (kpd_delay << 10) / USEC_PER_SEC;
- delay = 1 + ilog2(delay);
+ delay = (kpd_delay << 6) / USEC_PER_SEC;
+ delay = ilog2(delay);
err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {
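The new debounce encoding above maps the delay in microseconds to a small register value via (kpd_delay << 6) / USEC_PER_SEC followed by ilog2(). A short illustrative program, assuming USEC_PER_SEC is 1000000 and a truncating ilog2() like the kernel helper:

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    const unsigned int usec_per_sec = 1000000;
    unsigned int samples[] = { 15625, 125000, 2000000 };   /* 1/64 s .. 2 s */

    for (unsigned int i = 0; i < 3; i++) {
        unsigned int kpd_delay = samples[i];
        unsigned int delay = (kpd_delay << 6) / usec_per_sec;

        printf("kpd_delay=%7u us -> register value %u\n",
               kpd_delay, ilog2_u32(delay));
    }
    return 0;
}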
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 10c4e3d462f1..caa5a62c42fb 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl4030:vibrator";
info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->close = twl4030_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ea63fad48de6..df3581f60628 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -45,7 +45,6 @@
struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
- struct workqueue_struct *workqueue;
struct work_struct play_work;
struct mutex mutex;
int irq;
@@ -182,6 +181,14 @@ static void vibra_play_work(struct work_struct *work)
{
struct vibra_info *info = container_of(work,
struct vibra_info, play_work);
+ int ret;
+
+ /* Do not allow effect, while the routing is set to use audio */
+ ret = twl6040_get_vibralr_status(info->twl6040);
+ if (ret & TWL6040_VIBSEL) {
+ dev_info(info->dev, "Vibra is configured for audio\n");
+ return;
+ }
mutex_lock(&info->mutex);
@@ -200,24 +207,12 @@ static int vibra_play(struct input_dev *input, void *data,
struct ff_effect *effect)
{
struct vibra_info *info = input_get_drvdata(input);
- int ret;
-
- /* Do not allow effect, while the routing is set to use audio */
- ret = twl6040_get_vibralr_status(info->twl6040);
- if (ret & TWL6040_VIBSEL) {
- dev_info(&input->dev, "Vibra is configured for audio\n");
- return -EBUSY;
- }
info->weak_speed = effect->u.rumble.weak_magnitude;
info->strong_speed = effect->u.rumble.strong_magnitude;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
- ret = queue_work(info->workqueue, &info->play_work);
- if (!ret) {
- dev_info(&input->dev, "work is already on queue\n");
- return ret;
- }
+ schedule_work(&info->play_work);
return 0;
}
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl6040:vibrator";
info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->close = twl6040_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4a4396..7c18249d6c8e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
goto err_free_buf;
}
+ /* Sanity check that a device has an endpoint */
+ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ dev_err(&usbinterface->dev,
+ "Invalid number of endpoints\n");
+ error = -EINVAL;
+ goto err_free_urb;
+ }
+
/*
* The endpoint is always altsetting 0, we know this since we know
* this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
* HID report descriptor
*/
if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
- HID_DEVICE_TYPE, &hid_desc) != 0){
+ HID_DEVICE_TYPE, &hid_desc) != 0) {
dev_err(&usbinterface->dev,
"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
error = -EIO;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2160512e861a..5af7907d0af4 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
return 0;
}
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+ int error;
+
+ enable_irq(data->irq);
+
+ error = mxt_process_messages_until_invalid(data);
+ if (error)
+ return error;
+
+ return 0;
+}
+
static int mxt_soft_reset(struct mxt_data *data)
{
struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data)
/* Ignore CHG line for 100ms after reset */
msleep(100);
- enable_irq(data->irq);
+ mxt_acquire_irq(data);
ret = mxt_wait_for_completion(data, &data->reset_completion,
MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@ release_mem:
return ret;
}
-static int mxt_acquire_irq(struct mxt_data *data)
-{
- int error;
-
- enable_irq(data->irq);
-
- error = mxt_process_messages_until_invalid(data);
- if (error)
- return error;
-
- return 0;
-}
-
static int mxt_get_info(struct mxt_data *data)
{
struct i2c_client *client = data->client;
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 9bbadaaf6bc3..7b3845aa5983 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.coord_x = point.coord_y = 0;
}
- point.state = payload[9 * i + 5] & 0x03;
- point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+ point.state = payload[9 * i + 5] & 0x0f;
+ point.id = (payload[9 * i + 5] & 0xf0) >> 4;
/* determine touch major, minor and orientation */
point.area_major = max(payload[9 * i + 6],
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 374c129219ef..5efadad4615b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -92,6 +92,7 @@ struct iommu_dev_data {
struct list_head dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
+ u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
+static inline u16 get_device_id(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
return dev_data;
}
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ *(u16 *)data = alias;
+ return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 devid, ivrs_alias, pci_alias;
+
+ devid = get_device_id(dev);
+ ivrs_alias = amd_iommu_alias_table[devid];
+ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+ if (ivrs_alias == pci_alias)
+ return ivrs_alias;
+
+ /*
+ * DMA alias showdown
+ *
+ * The IVRS is fairly reliable in telling us about aliases, but it
+ * can't know about every screwy device. If we don't have an IVRS
+ * reported alias, use the PCI reported alias. In that case we may
+ * still need to initialize the rlookup and dev_table entries if the
+ * alias is to a non-existent device.
+ */
+ if (ivrs_alias == devid) {
+ if (!amd_iommu_rlookup_table[pci_alias]) {
+ amd_iommu_rlookup_table[pci_alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[pci_alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[pci_alias].data));
+ }
+
+ return pci_alias;
+ }
+
+ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+ "for device %s[%04x:%04x], kernel reported alias "
+ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+ PCI_FUNC(pci_alias));
+
+ /*
+ * If we don't have a PCI DMA alias and the IVRS alias is on the same
+ * bus, then the IVRS table may know about a quirk that we don't.
+ */
+ if (pci_alias == devid &&
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ pdev->dma_alias_devfn = ivrs_alias & 0xff;
+ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+ dev_name(dev));
+ }
+
+ return ivrs_alias;
+}
+
static struct iommu_dev_data *find_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
return dev_data;
}
-static inline u16 get_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
static struct iommu_dev_data *get_dev_data(struct device *dev)
{
return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
+ dev_data->alias = get_alias(dev);
+
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
u16 devid, alias;
devid = get_device_id(dev);
- alias = amd_iommu_alias_table[devid];
+ alias = get_alias(dev);
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ret = iommu_flush_dte(iommu, dev_data->devid);
if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
return;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
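The amd_iommu change above resolves the DMA alias once in iommu_init_device() and caches it in dev_data->alias, so the flush and attach paths no longer consult the alias table on every call. A toy sketch of that compute-once-and-cache idea; all names and the example alias are made up:

#include <stdio.h>

struct dev_data {
    unsigned short devid;
    unsigned short alias;
};

static unsigned short lookup_alias(unsigned short devid)
{
    /* pretend the firmware table says device 0x10 is aliased to 0x08 */
    return devid == 0x10 ? 0x08 : devid;
}

static void init_device(struct dev_data *d, unsigned short devid)
{
    d->devid = devid;
    d->alias = lookup_alias(devid);   /* computed once, cached for later */
}

static void flush_dte(const struct dev_data *d)
{
    printf("flush devid %#x (alias %#x)\n", d->devid, d->alias);
}

int main(void)
{
    struct dev_data d;

    init_device(&d, 0x10);
    flush_dte(&d);
    return 0;
}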
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2409e3bd3df2..7c39ac4b9c53 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
+ /* We're bypassing these SIDs, so don't allocate an actual context */
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;
- if (!smmu)
+ if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
return;
/*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
- /* Devices in an IOMMU group may already be configured */
- ret = arm_smmu_master_configure_smrs(smmu, cfg);
- if (ret)
- return ret == -EEXIST ? 0 : ret;
-
/*
* FIXME: This won't be needed once we have IOMMU-backed DMA ops
- * for all devices behind the SMMU.
+ * for all devices behind the SMMU. Note that we need to take
+ * care configuring SMRs for devices that are both a platform_device
+ * and a PCI device (i.e. a PCI host controller)
*/
if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
return 0;
+ /* Devices in an IOMMU group may already be configured */
+ ret = arm_smmu_master_configure_smrs(smmu, cfg);
+ if (ret)
+ return ret == -EEXIST ? 0 : ret;
+
for (i = 0; i < cfg->num_streamids; ++i) {
u32 idx, s2cr;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 94a30da0cfac..4dffccf532a2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
/* Update the pcpu_masks */
- for (i = 0; i < gic_vpes; i++)
+ for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
gic_map_to_vpe(intr, vpe);
- for (i = 0; i < gic_vpes; i++)
+ for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_mISDN))
+ return -EINVAL;
+
lock_sock(sk);
if (_pms(sk)->dev) {
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0242e0..67392b6ab845 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
* Actually now I think of it, it's possible that Ron *is* half the Plan 9
* userbase. Oh well.
*/
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
{
/* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*
* This routine indicates if a particular trap number could be delivered
* directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective. See Mastery for
+ * how we could do this anyway...
*/
static bool direct_trap(unsigned int num)
{
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad0461e80..69b3814afd2f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
bool send_notify_to_eventfd(struct lg_cpu *cpu);
void init_clockdev(struct lg_cpu *cpu);
bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
int init_interrupts(void);
void free_interrupts(void);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd771a2be..adc162c7040d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
return;
break;
case 32 ... 255:
+ /* This might be a syscall. */
+ if (could_be_syscall(cpu->regs->trapnum))
+ break;
+
/*
- * These values mean a real interrupt occurred, in which case
+ * Other values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
* return to run the Guest again.
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index dc11bbf27274..58d04726cdd7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -46,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
- int ret;
if (!tdev->tx_channel) {
dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
return -EINVAL;
}
- tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
- if (!tdev->signal)
- return -ENOMEM;
+ /* Only allocate memory if we need to */
+ if (!tdev->signal) {
+ tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+ if (!tdev->signal)
+ return -ENOMEM;
+ }
- ret = copy_from_user(tdev->signal, userbuf, count);
- if (ret) {
+ if (copy_from_user(tdev->signal, userbuf, count)) {
kfree(tdev->signal);
+ tdev->signal = NULL;
return -EFAULT;
}
- return ret < 0 ? ret : count;
+ return count;
}
static const struct file_operations mbox_test_signal_ops = {
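The mailbox-test fix above only allocates the signal buffer when one is not already cached and resets the pointer to NULL whenever it is freed on an error path, so the next write starts from a clean state. A minimal user-space sketch of that pattern, with an invented store_signal() helper and a length check standing in for copy_from_user() failing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *signal_buf;

static int store_signal(const char *src, size_t len)
{
    if (!signal_buf) {
        signal_buf = malloc(64);
        if (!signal_buf)
            return -1;
    }

    if (len > 64) {                 /* stand-in for copy_from_user() failing */
        free(signal_buf);
        signal_buf = NULL;          /* avoid leaving a dangling pointer */
        return -1;
    }

    memcpy(signal_buf, src, len);
    return (int)len;
}

int main(void)
{
    printf("%d\n", store_signal("ping", 4));
    printf("%d\n", store_signal("oversized", 100));
    printf("%d\n", store_signal("pong", 4));
    free(signal_buf);
    return 0;
}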
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index bd07f39f0692..dd2afbca51c9 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -189,8 +189,8 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
int i;
ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ if (!ctx)
+ return -ENOMEM;
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..4a36632c236f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
if (!of_get_property(np, "mbox-names", NULL)) {
dev_err(cl->dev,
"%s() requires an \"mbox-names\" property\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 27f2ef300f8b..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+ down_write(&cmd->root_lock);
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+ up_write(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define WRITE_LOCK_VOID(cmd) \
- down_write(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_write(&cmd->root_lock); \
- return; \
- }
+#define WRITE_LOCK(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return -EINVAL; \
+ } while(0)
+
+#define WRITE_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return; \
+ } while(0)
#define WRITE_UNLOCK(cmd) \
- up_write(&cmd->root_lock)
+ up_write(&(cmd)->root_lock)
-#define READ_LOCK(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ if (cmd->fail_io) {
+ up_read(&cmd->root_lock);
+ return false;
}
+ return true;
+}
-#define READ_LOCK_VOID(cmd) \
- down_read(&cmd->root_lock); \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
- up_read(&cmd->root_lock); \
- return; \
- }
+#define READ_LOCK(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return -EINVAL; \
+ } while(0)
+
+#define READ_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return; \
+ } while(0)
#define READ_UNLOCK(cmd) \
- up_read(&cmd->root_lock)
+ up_read(&(cmd)->root_lock)
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
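The dm-cache-metadata change above moves the lock-and-check logic out of the old multi-statement macros into cmd_write_lock()/cmd_read_lock(), leaving only the early return inside a do { } while(0) macro. A compact user-space sketch of that refactoring pattern, using a stand-in mutex and failure flag rather than the dm-cache types:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool fail_io;

static bool try_write_lock(void)
{
    pthread_mutex_lock(&lock);
    if (fail_io) {
        pthread_mutex_unlock(&lock);
        return false;
    }
    return true;
}

#define WRITE_LOCK_OR_FAIL()        \
    do {                            \
        if (!try_write_lock())      \
            return -1;              \
    } while (0)

static int do_update(void)
{
    WRITE_LOCK_OR_FAIL();
    /* ... modify metadata under the lock ... */
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    printf("update: %d\n", do_update());
    fail_io = true;
    printf("update after failure: %d\n", do_update());
    return 0;
}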
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be4905769a45..3d3ac13287a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1662,8 +1662,10 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
tio = alloc_tio(ci, ti, target_bio_nr);
tio->len_ptr = len;
r = clone_bio(tio, bio, sector, *len);
- if (r < 0)
+ if (r < 0) {
+ free_tio(ci->md, tio);
break;
+ }
__map_bio(tio);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 194580fba7fd..14d3b37944df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
* go away inside make_request
*/
sectors = bio_sectors(bio);
+ /* bio could be mergeable after passing to the underlying layer */
+ bio->bi_rw &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2ea12c6bf659..34783a3c8b3c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
(unsigned long long)zone_size>>1);
zone_start = conf->strip_zone[j].zone_end;
}
- printk(KERN_INFO "\n");
}
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
unsigned short blksize = 512;
+ *private_conf = ERR_PTR(-ENOMEM);
if (!conf)
return -ENOMEM;
rdev_for_each(rdev1, mddev) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8ab8b65e1741..e48c262ce032 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3502,8 +3502,6 @@ returnbi:
dev = &sh->dev[i];
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
- WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
- WARN_ON(dev->page != dev->orig_page);
}
r5l_stripe_write_finished(sh);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 6e43c95629ea..3cfd7af8c5ca 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -846,11 +846,11 @@ struct media_device *media_device_find_devres(struct device *dev)
}
EXPORT_SYMBOL_GPL(media_device_find_devres);
+#if IS_ENABLED(CONFIG_PCI)
void media_device_pci_init(struct media_device *mdev,
struct pci_dev *pci_dev,
const char *name)
{
-#ifdef CONFIG_PCI
mdev->dev = &pci_dev->dev;
if (name)
@@ -866,16 +866,16 @@ void media_device_pci_init(struct media_device *mdev,
mdev->driver_version = LINUX_VERSION_CODE;
media_device_init(mdev);
-#endif
}
EXPORT_SYMBOL_GPL(media_device_pci_init);
+#endif
+#if IS_ENABLED(CONFIG_USB)
void __media_device_usb_init(struct media_device *mdev,
struct usb_device *udev,
const char *board_name,
const char *driver_name)
{
-#ifdef CONFIG_USB
mdev->dev = &udev->dev;
if (driver_name)
@@ -895,9 +895,9 @@ void __media_device_usb_init(struct media_device *mdev,
mdev->driver_version = LINUX_VERSION_CODE;
media_device_init(mdev);
-#endif
}
EXPORT_SYMBOL_GPL(__media_device_usb_init);
+#endif
#endif /* CONFIG_MEDIA_CONTROLLER */
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index feb521f28e14..4f494acd8150 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1446,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fmd);
- /* Protect the media graph while we're registering entities */
- mutex_lock(&fmd->media_dev.graph_mutex);
-
ret = fimc_md_register_platform_entities(fmd, dev->of_node);
- if (ret) {
- mutex_unlock(&fmd->media_dev.graph_mutex);
+ if (ret)
goto err_clk;
- }
ret = fimc_md_register_sensor_entities(fmd);
- if (ret) {
- mutex_unlock(&fmd->media_dev.graph_mutex);
+ if (ret)
goto err_m_ent;
- }
-
- mutex_unlock(&fmd->media_dev.graph_mutex);
ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
if (ret)
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 0b44b9accf50..af237af204e2 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev)
if (ret < 0)
goto err_sens;
- mutex_lock(&camif->media_dev.graph_mutex);
-
ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
if (ret < 0)
- goto err_unlock;
+ goto err_sens;
ret = camif_register_video_nodes(camif);
if (ret < 0)
- goto err_unlock;
+ goto err_sens;
ret = camif_create_media_links(camif);
if (ret < 0)
- goto err_unlock;
-
- mutex_unlock(&camif->media_dev.graph_mutex);
+ goto err_sens;
ret = media_device_register(&camif->media_dev);
if (ret < 0)
@@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
pm_runtime_put(dev);
return 0;
-err_unlock:
- mutex_unlock(&camif->media_dev.graph_mutex);
err_sens:
v4l2_device_unregister(&camif->v4l2_dev);
media_device_unregister(&camif->media_dev);
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebbd0436..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
printk(KERN_INFO "%s: %s found\n", __func__,
usbvision_device_data[model].model_string);
- /*
- * this is a security check.
- * an exploit using an incorrect bInterfaceNumber is known
- */
- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
- return -ENODEV;
-
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f496e0e..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- int nonblocking)
+ void *pb, int nonblocking)
{
unsigned long flags;
int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
- * Verifying planes is NOT necessary since it already has been checked
- * before the buffer is queued/prepared. So it can never fail.
*/
- list_del(&(*vb)->done_entry);
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
struct vb2_buffer *vb = NULL;
int ret;
- ret = __vb2_get_done_vb(q, &vb, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
if (ret < 0)
return ret;
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
return POLLERR;
/*
+ * If this quirk is set and QBUF hasn't been called yet then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ * This quirk is set by V4L2 for backwards compatibility reasons.
+ */
+ if (q->quirk_poll_must_check_waiting_for_buffers &&
+ q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+ return POLLERR;
+
+ /*
* For output streams you can call write() as long as there are fewer
* buffers queued than there are buffers available.
*/
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start, nr, write, 1, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..7f366f1b0377 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
return 0;
}
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+ return __verify_planes_array(vb, pb);
+}
+
/**
* __verify_length() - Verify that the bytesused value for each plane fits in
* the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
}
static const struct vb2_buf_ops v4l2_buf_ops = {
+ .verify_planes_array = __verify_planes_array_core,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.copy_timestamp = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
== V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ q->quirk_poll_must_check_waiting_for_buffers = true;
return vb2_core_queue_init(q);
}
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
poll_wait(file, &fh->wait, wait);
}
- /*
- * For compatibility with vb1: if QBUF hasn't been called yet, then
- * return POLLERR as well. This only affects capture queues, output
- * queues will always initialize waiting_for_buffers to false.
- */
- if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
- return POLLERR;
-
return res | vb2_core_poll(q, file, wait);
}
EXPORT_SYMBOL_GPL(vb2_poll);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f280500..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ if (cxl_ops->irq_wait)
+ cxl_ops->irq_wait(ctx);
+
/* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf7806e..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
+#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
u64 dsisr, u64 errstat);
irqreturn_t (*psl_interrupt)(int irq, void *data);
int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+ void (*irq_wait)(struct cxl_context *ctx);
int (*attach_process)(struct cxl_context *ctx, bool kernel,
u64 wed, u64 amr);
int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc41a2c..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
- irq_dispose_mapping(virq);
}
int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
return fail_psl_irq(afu, &irq_info);
}
+void native_irq_wait(struct cxl_context *ctx)
+{
+ u64 dsisr;
+ int timeout = 1000;
+ int ph;
+
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ while (timeout--) {
+ ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+ if (ph != ctx->pe)
+ return;
+ dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+ if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+ return;
+ /*
+ * We are waiting for the workqueue to process our
+ * irq, so need to let that run here.
+ */
+ msleep(1);
+ }
+
+ dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
+ " DSISR %016llx!\n", ph, dsisr);
+ return;
+}
+
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
.handle_psl_slice_error = native_handle_psl_slice_error,
.psl_interrupt = NULL,
.ack_irq = native_ack_irq,
+ .irq_wait = native_irq_wait,
.attach_process = native_attach_process,
.detach_process = native_detach_process,
.support_attributes = native_support_attributes,
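native_irq_wait() above polls the PSL state with a bounded retry count, sleeping 1 ms per iteration so the interrupt workqueue can make progress, and warns if the timeout expires. A rough user-space analogue of that poll-with-timeout loop; pending_irq() is an invented stand-in for the DSISR read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int pending;

static bool pending_irq(void)
{
    return pending-- > 0;   /* pretend the interrupt clears after a while */
}

static void irq_wait(void)
{
    int timeout = 1000;

    while (timeout--) {
        if (!pending_irq())
            return;
        usleep(1000);       /* let the handler's workqueue equivalent run */
    }
    fprintf(stderr, "WARNING: interrupt still pending after timeout\n");
}

int main(void)
{
    pending = 3;
    irq_wait();
    printf("no interrupts pending\n");
    return 0;
}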
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5f1a36b8fbb0..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
break;
val = kmalloc(len, GFP_KERNEL);
- if (!val)
+ if (!val) {
+ kfree(base);
break;
+ }
*val = 0x12345678;
base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
}
case CT_READ_BUDDY_AFTER_FREE: {
unsigned long p = __get_free_page(GFP_KERNEL);
- int saw, *val = kmalloc(1024, GFP_KERNEL);
+ int saw, *val;
int *base;
if (!p)
break;
- if (!val)
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ free_page(p);
break;
+ }
base = (int *)p;
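The lkdtm fix above releases the first allocation when the second one fails, instead of leaking it on the error path. A minimal sketch of that cleanup pattern with invented buffer sizes:

#include <stdio.h>
#include <stdlib.h>

static int run_case(void)
{
    int *base = malloc(1024);
    if (!base)
        return -1;

    int *val = malloc(sizeof(*val));
    if (!val) {
        free(base);     /* don't leak the first buffer on this error path */
        return -1;
    }

    *val = 0x12345678;
    base[0] = *val;

    free(val);
    free(base);
    return 0;
}

int main(void)
{
    return run_case();
}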
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index e94c7fb6712a..88e45234d527 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -945,6 +945,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
ret = -EFAULT;
goto free_ret;
}
+ /* Ensure desc has not changed between the two reads */
+ if (memcmp(&dd, dd_config, sizeof(dd))) {
+ ret = -EINVAL;
+ goto free_ret;
+ }
mutex_lock(&vdev->vdev_mutex);
mutex_lock(&vi->vop_mutex);
ret = vop_virtio_add_device(vdev, dd_config);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3bdbe50a363f..8a0147dfed27 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -86,7 +86,6 @@ static int max_devices;
/* TODO: Replace these with struct ida */
static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
/*
* There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
unsigned int usage;
unsigned int read_only;
unsigned int part_type;
- unsigned int name_idx;
unsigned int reset_done;
#define MMC_BLK_READ BIT(0)
#define MMC_BLK_WRITE BIT(1)
@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
goto out;
}
- /*
- * !subname implies we are creating main mmc_blk_data that will be
- * associated with mmc_card with dev_set_drvdata. Due to device
- * partitions, devidx will not coincide with a per-physical card
- * index anymore so we keep track of a name index.
- */
- if (!subname) {
- md->name_idx = find_first_zero_bit(name_use, max_devices);
- __set_bit(md->name_idx, name_use);
- } else
- md->name_idx = ((struct mmc_blk_data *)
- dev_to_disk(parent)->private_data)->name_idx;
-
md->area_type = area_type;
/*
@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%u%s", md->name_idx, subname ? subname : "");
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
if (mmc_card_mmc(card))
blk_queue_logical_block_size(md->queue.queue,
@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
- __clear_bit(md->name_idx, name_use);
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8354cb..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
depends on MMC_SDHCI && ACPI
+ select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41c6d58..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
#include "sdhci.h"
enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
+ static const struct x86_cpu_id byt[] = {
+ { X86_VENDOR_INTEL, 6, 0x37 },
+ {}
+ };
+
+ return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+ u32 val = 0;
+
+ if (!sdhci_acpi_byt())
+ return;
+
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ if (!sdhci_acpi_byt())
+ return false;
+
+ if (!iosf_mbi_available())
+ return true;
+
+ sdhci_acpi_byt_setting(dev);
+
+ return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
+ if (sdhci_acpi_byt_defer(dev))
+ return -EPROBE_DEFER;
+
hid = acpi_device_hid(device);
uid = device->pnp.unique_id;
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_resume_host(c->host);
}
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_runtime_resume_host(c->host);
}
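The sdhci-acpi change above defers the probe with -EPROBE_DEFER when the Baytrail quirk applies but the IOSF MBI side-band interface is not available yet. A toy sketch of that defer-until-dependency-ready idea; EPROBE_DEFER_STANDIN and both helpers are invented for the illustration:

#include <stdbool.h>
#include <stdio.h>

#define EPROBE_DEFER_STANDIN 517

static bool is_baytrail;       /* assume detected from the CPU model */
static bool iosf_mbi_ready;    /* assume set once the MBI driver binds */

static int apply_byt_quirk_or_defer(void)
{
    if (!is_baytrail)
        return 0;                         /* quirk not needed here */
    if (!iosf_mbi_ready)
        return -EPROBE_DEFER_STANDIN;     /* retry once MBI shows up */
    /* ... clear the OCP timeout field via the side-band interface ... */
    return 0;
}

int main(void)
{
    is_baytrail = true;
    printf("first probe: %d\n", apply_byt_quirk_or_defer());
    iosf_mbi_ready = true;
    printf("second probe: %d\n", apply_byt_quirk_or_defer());
    return 0;
}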
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f8c4762bb48d..bcc0de47fe7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -382,14 +382,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
};
-static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
- .pdata = &sdhci_tegra114_pdata,
- .nvquirks = NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_DDR50 |
- NVQUIRK_ENABLE_SDR104 |
- NVQUIRK_HAS_PADCALIB,
-};
-
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
static const struct of_device_id sdhci_tegra_dt_match[] = {
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
- { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a413848c..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+ /* TODO MMC DDR is not working on A80 */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun9i-a80-mmc"))
+ mmc->caps &= ~MMC_CAP_1_8V_DDR;
+
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b6facac54fc0..557b8462f55e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4009,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
- * The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
@@ -4429,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
- /* Many callers got this wrong, so check for it for a while... */
- if (!mtd->owner && caller_is_module()) {
- pr_crit("%s called with NULL mtd->owner!\n", __func__);
- BUG();
- }
-
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b7da2..befd67df08e1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -62,9 +62,8 @@ config DUMMY
this device is consigned into oblivion) with a configurable IP
address. It is most commonly used in order to make your currently
inactive SLIP address seem like a real address for local programs.
- If you use SLIP or PPP, you might want to say Y here. Since this
- thing often comes in handy, the default is Y. It won't enlarge your
- kernel either. What a deal. Read about it in the Network
+ If you use SLIP or PPP, you might want to say Y here. It won't
+ enlarge your kernel. What a deal. Read about it in the Network
Administrator's Guide, available from
<http://www.tldp.org/docs.html#guide>.
@@ -195,6 +194,7 @@ config GENEVE
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+ select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 50454be86570..5e572b3510b9 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u16 fid;
- int i, err;
+ int i, err = 0;
mutex_lock(&ps->smi_mutex);
- /* Get or create the bridge FID and assign it to the port */
- for (i = 0; i < ps->num_ports; ++i)
- if (ps->ports[i].bridge_dev == bridge)
- break;
-
- if (i < ps->num_ports)
- err = _mv88e6xxx_port_fid_get(ds, i, &fid);
- else
- err = _mv88e6xxx_fid_new(ds, &fid);
- if (err)
- goto unlock;
-
- err = _mv88e6xxx_port_fid_set(ds, port, fid);
- if (err)
- goto unlock;
-
/* Assign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = bridge;
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
}
}
-unlock:
mutex_unlock(&ps->smi_mutex);
return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct net_device *bridge = ps->ports[port].bridge_dev;
- u16 fid;
int i;
mutex_lock(&ps->smi_mutex);
- /* Give the port a fresh Filtering Information Database */
- if (_mv88e6xxx_fid_new(ds, &fid) ||
- _mv88e6xxx_port_fid_set(ds, port, fid))
- netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
/* Unassign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = NULL;
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* the other bits clear.
*/
reg = 1 << port;
- /* Disable learning for DSA and CPU ports */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
- reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+ /* Disable learning for CPU port */
+ if (dsa_is_cpu_port(ds, port))
+ reg = 0;
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (ret)
goto abort;
- /* Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+ ret = _mv88e6xxx_port_fid_set(ds, port, 0);
if (ret)
goto abort;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
+ /* This (reset &) enable is not present in specs or reference driver but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bcma_core_enable(core, 0);
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 12a009d720cd..72eb29ed0359 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
struct page *page;
dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ }
- mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+ mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page);
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
rx_agg_buf->page = page;
+ rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
rxbd->rx_bd_opaque = sw_prod;
@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
prod_rx_buf->page = page;
+ prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+ skb_fill_page_desc(skb, i, cons_rx_buf->page,
+ cons_rx_buf->offset, frag_len);
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
return NULL;
}
- dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+ dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
PCI_DMA_FROMDEVICE);
skb->data_len += frag_len;
@@ -1584,13 +1605,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
dma_unmap_page(&pdev->dev,
dma_unmap_addr(rx_agg_buf, mapping),
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap);
__free_page(page);
}
+ if (rxr->rx_page) {
+ __free_page(rxr->rx_page);
+ rxr->rx_page = NULL;
+ }
}
}
@@ -1973,7 +1998,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
@@ -2164,7 +2189,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->rx_agg_nr_pages = 0;
if (bp->flags & BNXT_FLAG_TPA)
- agg_factor = 4;
+ agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
if (rx_space > PAGE_SIZE) {
@@ -3020,12 +3045,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
/* Number of segs are log2 units, and first packet is not
* included as part of this units.
*/
- if (mss <= PAGE_SIZE) {
- n = PAGE_SIZE / mss;
+ if (mss <= BNXT_RX_PAGE_SIZE) {
+ n = BNXT_RX_PAGE_SIZE / mss;
nsegs = (MAX_SKB_FRAGS - 1) * n;
} else {
- n = mss / PAGE_SIZE;
- if (mss & (PAGE_SIZE - 1))
+ n = mss / BNXT_RX_PAGE_SIZE;
+ if (mss & (BNXT_RX_PAGE_SIZE - 1))
n++;
nsegs = (MAX_SKB_FRAGS - n) / n;
}
@@ -4309,7 +4334,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_MSIX_CAP)
rc = bnxt_setup_msix(bp);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
/* fallback to INTA */
rc = bnxt_setup_inta(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 709b95b8fcba..8b823ff558ff 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
#define BNXT_MIN_PKT_SIZE 45
#define BNXT_NUM_TESTS(bp) 0
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {
struct bnxt_sw_rx_agg_bd {
struct page *page;
+ unsigned int offset;
dma_addr_t mapping;
};
@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
+
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
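The bnxt changes above exist because the RX BD length field is 16 bits wide, so an aggregation buffer can be at most 32 KB even when the kernel page size is 64 KB. On such kernels bnxt_alloc_rx_page() now carves one page into BNXT_RX_PAGE_SIZE chunks, handing out an extra page reference for every chunk except the one that finishes the page. A user-space sketch of just that offset/refcount walk, assuming a hypothetical 64 KB PAGE_SIZE ("refs" stands in for the refcount the driver manipulates with get_page()):

/* Page-splitting arithmetic from bnxt_alloc_rx_page(), reduced to a toy. */
#include <stdio.h>

#define PAGE_SIZE          65536u  /* assumed 64 KB kernel page */
#define BNXT_RX_PAGE_SIZE  32768u  /* 1 << 15, RXBD length is 16-bit */

int main(void)
{
    unsigned int page_offset = 0;   /* rxr->rx_page_offset */
    int have_page = 0, refs = 0;

    for (int bd = 0; bd < 4; bd++) {
        if (!have_page) {           /* alloc_page(): refcount == 1 */
            have_page = 1;
            refs = 1;
            page_offset = 0;
        }
        unsigned int offset = page_offset;

        page_offset += BNXT_RX_PAGE_SIZE;
        if (page_offset == PAGE_SIZE)
            have_page = 0;          /* ring's ref passes to this buffer */
        else
            refs++;                 /* get_page(): buffer gets its own ref */
        printf("bd %d: offset %u, page refs %d\n", bd, offset, refs);
    }
    return 0;
}

Every 32 KB buffer ends up owning exactly one reference, so the existing per-buffer __free_page() in bnxt_free_rx_skbs() keeps working, and the new rxr->rx_page free releases a half-used page on teardown.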
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index cf6445d148ca..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
}
}
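The bcmgenet ethtool fix above matters on 64-bit kernels, where several of the aggregated counters are unsigned long (8 bytes): dereferencing them through a u32 pointer returns only half of the value, and which half depends on endianness. A small user-space illustration with a made-up counter value (the 0x100000001 constant is purely for demonstration and assumes a 64-bit host):

/* Why *(u32 *)p is wrong when the stat is really an unsigned long. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned long stat = 0x100000001UL;   /* 8 bytes on a 64-bit host */
    char *p = (char *)&stat;
    uint64_t old_read = *(uint32_t *)p;   /* what the old code did */
    uint64_t new_read;

    if (sizeof(unsigned long) != sizeof(uint32_t))  /* the new size check */
        new_read = *(unsigned long *)p;
    else
        new_read = *(uint32_t *)p;

    printf("stored %#lx, u32 read %#llx, size-aware read %#llx\n",
           stat, (unsigned long long)old_read, (unsigned long long)new_read);
    return 0;
}

On a little-endian machine the old read silently truncates to 0x1; on big-endian it would report the high word instead, which is even more misleading for small counters.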
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 48a7d7dee846..a63551d0a18a 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -441,7 +441,7 @@ static int macb_mii_init(struct macb *bp)
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
- bp->mii_bus->parent = &bp->dev->dev;
+ bp->mii_bus->parent = &bp->pdev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
struct phy_device *phydev;
phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev)) {
+ if (IS_ERR(phydev) &&
+ PTR_ERR(phydev) != -ENODEV) {
err = PTR_ERR(phydev);
break;
}
@@ -3019,29 +3020,36 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
+ err = macb_mii_init(bp);
+ if (err)
+ goto err_out_free_netdev;
+
+ phydev = bp->phy_dev;
+
+ netif_carrier_off(dev);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unregister_netdev;
+ goto err_out_unregister_mdio;
}
- err = macb_mii_init(bp);
- if (err)
- goto err_out_unregister_netdev;
-
- netif_carrier_off(dev);
+ phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
- phydev = bp->phy_dev;
- phy_attached_info(phydev);
-
return 0;
-err_out_unregister_netdev:
- unregister_netdev(dev);
+err_out_unregister_mdio:
+ phy_disconnect(bp->phy_dev);
+ mdiobus_unregister(bp->mii_bus);
+ mdiobus_free(bp->mii_bus);
+
+ /* Shutdown the PHY if there is a GPIO reset */
+ if (bp->reset_gpio)
+ gpiod_set_value(bp->reset_gpio, 0);
err_out_free_netdev:
free_netdev(dev);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 967951582e03..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
}
lmac++;
- if (lmac == MAX_LMAC_PER_BGX)
+ if (lmac == MAX_LMAC_PER_BGX) {
+ of_node_put(node);
break;
+ }
}
- of_node_put(node);
return 0;
defer:
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 60908eab3b3a..43da891fab97 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
u8 cpus[SGE_QSETS + 1];
- u16 rspq_map[RSS_TABLE_SIZE];
+ u16 rspq_map[RSS_TABLE_SIZE + 1];
for (i = 0; i < SGE_QSETS; ++i)
cpus[i] = i;
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
rspq_map[i] = i % nq0;
rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
}
+ rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc26f86..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int iqtype, unsigned int iqid,
unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
void t4_free_sge_resources(struct adapter *adap)
{
int i;
- struct sge_eth_rxq *eq = adap->sge.ethrxq;
- struct sge_eth_txq *etq = adap->sge.ethtxq;
+ struct sge_eth_rxq *eq;
+ struct sge_eth_txq *etq;
+
+ /* stop all Rx queues in order to start them draining */
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
+ if (eq->rspq.desc)
+ t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+ FW_IQ_TYPE_FL_INT_CAP,
+ eq->rspq.cntxt_id,
+ eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+ 0xffff);
+ }
/* clean up Ethernet Tx/Rx queues */
- for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+
+ etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cc1736bece0f..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
}
#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_SIZE 0x800
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* We have two VPD data structures stored in the adapter VPD area.
+ * By default, Linux calculates the size of the VPD area by traversing
+ * the first VPD area at offset 0x0, so we need to tell the OS what
+ * our real VPD size is.
+ */
+ ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+ if (ret < 0)
+ goto out;
+
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
@@ -6940,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_iq_stop - stop an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Stops an ingress queue and its associated FLs, if any. This causes
+ * any current or future data/messages destined for these queues to be
+ * tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
* t4_iq_free - free an ingress queue and its FLs
* @adap: the adapter
* @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* verify upper 16 bits are zero */
- if (vid >> 16)
- return FM10K_ERR_PARAM;
-
set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR;
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
- vid = err;
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
/* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
- int gso_size, nr_frags, sum;
-
- /* check to see if TSO is enabled, if so we may get a repreive */
- gso_size = skb_shinfo(skb)->gso_size;
- if (unlikely(!gso_size))
- return true;
+ int nr_frags, sum;
- /* no need to check if number of frags is less than 8 */
+ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags;
- if (nr_frags < I40E_MAX_BUFFER_TXD)
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
return false;
/* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the first or last 6 since the first
- * 6 cannot inherit any data from a descriptor before them, and the
- * last 6 cannot inherit any data from a descriptor after them.
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
*/
- nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
*/
- sum = 1 - gso_size;
+ sum = 1 - skb_shinfo(skb)->gso_size;
- /* Add size of frags 1 through 5 to create our initial sum */
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
- sum += skb_frag_size(++frag);
+ sum += skb_frag_size(frag++);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!--nr_frags)
break;
- sum -= skb_frag_size(++stale);
+ sum -= skb_frag_size(stale++);
}
return false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
- /* we can only support up to 8 data buffers for a single send */
- if (likely(count <= I40E_MAX_BUFFER_TXD))
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
return false;
- return __i40e_chk_linearize(skb);
+ if (skb_is_gso(skb))
+ return __i40e_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
}
#endif /* _I40E_TXRX_H_ */
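All four i40e/i40evf hunks in this area implement the same piece of arithmetic: the hardware can chain at most 8 data descriptors per packet on the wire, and for TSO the header plus the first chunk of segment payload already occupy 2 of them, which is why the check now triggers at 7 or more fragments. What remains is a sliding-window test: each window of 6 consecutive fragments (except the trailing one) must cover a full gso_size worth of data, otherwise a single segment could spill into a 9th descriptor. Below is a brute-force user-space sketch of that idea; the fragment sizes and gso_size are invented, and the driver uses a cheaper rolling sum with a one-byte slack term instead of recomputing each window:

/* Sliding-window check behind __i40e_chk_linearize(), brute force. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD  8   /* HW limit: descriptors per wire packet */

static bool needs_linearize(const int *frag, int nr_frags, int gso_size)
{
    int window = MAX_BUFFER_TXD - 2;    /* header + first chunk take 2 */

    if (nr_frags < MAX_BUFFER_TXD - 1)  /* fewer than 7 frags: safe */
        return false;

    /* The last window needs no check: nothing follows it to pull in. */
    for (int start = 0; start + window < nr_frags; start++) {
        int sum = 0;

        for (int i = start; i < start + window; i++)
            sum += frag[i];
        if (sum < gso_size)
            return true;    /* segment would need a 9th descriptor */
    }
    return false;
}

int main(void)
{
    int tiny[8]  = { 64, 64, 64, 64, 64, 64, 64, 64 };
    int roomy[8] = { 4096, 4096, 4096, 4096, 4096, 4096, 4096, 4096 };

    printf("tiny frags : linearize=%d\n", needs_linearize(tiny, 8, 1448));
    printf("roomy frags: linearize=%d\n", needs_linearize(roomy, 8, 1448));
    return 0;
}

The non-TSO case no longer reaches this walk at all: the reworked i40e_chk_linearize() answers it directly from the descriptor count.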
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
}
/**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
bool __i40evf_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
- int gso_size, nr_frags, sum;
-
- /* check to see if TSO is enabled, if so we may get a repreive */
- gso_size = skb_shinfo(skb)->gso_size;
- if (unlikely(!gso_size))
- return true;
+ int nr_frags, sum;
- /* no need to check if number of frags is less than 8 */
+ /* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags;
- if (nr_frags < I40E_MAX_BUFFER_TXD)
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
return false;
/* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the first or last 6 since the first
- * 6 cannot inherit any data from a descriptor before them, and the
- * last 6 cannot inherit any data from a descriptor after them.
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
*/
- nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
*/
- sum = 1 - gso_size;
+ sum = 1 - skb_shinfo(skb)->gso_size;
- /* Add size of frags 1 through 5 to create our initial sum */
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
- sum += skb_frag_size(++frag);
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
- sum += skb_frag_size(++frag);
+ sum += skb_frag_size(frag++);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!--nr_frags)
break;
- sum -= skb_frag_size(++stale);
+ sum -= skb_frag_size(stale++);
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
- /* we can only support up to 8 data buffers for a single send */
- if (likely(count <= I40E_MAX_BUFFER_TXD))
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
return false;
- return __i40evf_chk_linearize(skb);
+ if (skb_is_gso(skb))
+ return __i40evf_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7fc490225da5..a6d26d351dfc 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
/* Enable per-CPU interrupts on the CPU that is
* brought up.
*/
- smp_call_function_single(cpu, mvneta_percpu_enable,
- pp, true);
+ mvneta_percpu_enable(pp);
/* Enable per-CPU interrupt on the one CPU we care
* about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
/* Disable per-CPU interrupts on the CPU that is
* brought down.
*/
- smp_call_function_single(cpu, mvneta_percpu_disable,
- pp, true);
+ mvneta_percpu_disable(pp);
break;
case CPU_DEAD:
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7ace07dad6a3..c442f6ad15ff 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
return 0;
pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ if (IS_ERR(pep->phy))
+ return PTR_ERR(pep->phy);
if (!pep->phy)
return -ENODEV;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a9b47f..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
(priv->tx_ring_num * 2) +
- (priv->rx_ring_num * 2);
+ (priv->rx_ring_num * 3);
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
data[index++] = priv->rx_ring[i]->bytes;
+ data[index++] = priv->rx_ring[i]->dropped;
}
spin_unlock_bh(&priv->stats_lock);
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_dropped", i);
}
break;
case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
+ unsigned long sw_rx_dropped = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i]->packets;
stats->rx_bytes += priv->rx_ring[i]->bytes;
+ sw_rx_dropped += priv->rx_ring[i]->dropped;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
stats->collisions = 0;
- stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+ stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+ sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe510e4e..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
gfp_t gfp = _gfp;
if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
page = alloc_pages(gfp, order);
if (likely(page))
break;
@@ -126,7 +126,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc[i].dma,
page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
- set_page_count(page, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc[i].page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
}
}
@@ -176,7 +178,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc->dma,
page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
- set_page_count(page, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc->page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
page_alloc->page = NULL;
}
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
- priv->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
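The page_ref_sub() hunks above undo what mlx4_alloc_pages() sets up at allocation time: the allocator takes roughly one page reference per RX fragment the page can back (page_size / frag_stride of them), so individual fragments can later be released cheaply. On these cleanup paths only this ring entry still holds the page, so the driver drops the page_size / frag_stride - 1 surplus references in one go and lets the final put_page() free the page. The bookkeeping, with assumed example sizes:

/* Refcount bookkeeping mirrored from the mlx4 RX cleanup hunks above.
 * page_size and frag_stride are assumed example values; the kernel helpers
 * are modelled with a plain counter.
 */
#include <stdio.h>

int main(void)
{
    unsigned int page_size   = 16384;  /* order-2 page on a 4 KB-page host */
    unsigned int frag_stride = 2048;   /* bytes reserved per RX fragment  */
    unsigned int refs;

    /* mlx4_alloc_pages(): one ref per fragment the page can back */
    refs = page_size / frag_stride;            /* 8 */
    printf("after alloc: %u refs\n", refs);

    /* cleanup path: give back the references this path owns ... */
    refs -= page_size / frag_stride - 1;       /* page_ref_sub() */
    printf("after page_ref_sub: %u ref(s)\n", refs);

    /* ... then release the one still held by this ring entry */
    refs -= 1;                                 /* put_page() */
    printf("after put_page: %u refs -> page is freed\n", refs);
    return 0;
}

Compared with the old set_page_count(page, 1), the subtraction only releases references this path actually owns, so any reference held elsewhere stays intact.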
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c0d7b7296236..a386f047c1af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 packets = 0;
u32 bytes = 0;
int factor = priv->cqe_factor;
- u64 timestamp = 0;
int done = 0;
int budget = priv->tx_work_limit;
u32 last_nr_txbb;
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
do {
+ u64 timestamp = 0;
+
txbbs_skipped += last_nr_txbb;
ring_index = (ring_index + last_nr_txbb) & size_mask;
- if (ring->tx_info[ring_index].ts_requested)
+
+ if (unlikely(ring->tx_info[ring_index].ts_requested))
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f7230da58..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
return 0;
}
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+ int err = 0;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+ err = pci_enable_device(pdev);
+ if (!err)
+ dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+
+ return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+ pci_disable_device(pdev);
+ dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
- err = pci_enable_device(pdev);
+ err = mlx4_pci_enable_device(&priv->dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
pci_release_regions(pdev);
err_disable_pdev:
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(&priv->dev);
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
priv->pci_dev_data = id->driver_data;
mutex_init(&dev->persist->device_state_mutex);
mutex_init(&dev->persist->interface_state_mutex);
+ mutex_init(&dev->persist->pci_status_mutex);
ret = devlink_register(devlink, &pdev->dev);
if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
}
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(dev);
devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(persist->dev);
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int ret;
- int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
- int total_vfs;
+ int err;
mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
- ret = pci_enable_device(pdev);
- if (ret) {
- mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+ err = mlx4_pci_enable_device(dev);
+ if (err) {
+ mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int total_vfs;
+ int err;
+
+ mlx4_err(dev, "%s was called\n", __func__);
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
- ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+ err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
priv, 1);
- if (ret) {
- mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
- __func__, ret);
+ if (err) {
+ mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+ __func__, err);
goto end;
}
- ret = restore_current_port_types(dev, dev->persist->
+ err = restore_current_port_types(dev, dev->persist->
curr_port_type, dev->persist->
curr_port_poss_type);
- if (ret)
- mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+ if (err)
+ mlx4_err(dev, "could not restore original port types (%d)\n", err);
}
end:
mutex_unlock(&persist->interface_state_mutex);
- return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
static const struct pci_error_handlers mlx4_err_handler = {
.error_detected = mlx4_pci_err_detected,
.slot_reset = mlx4_pci_slot_reset,
+ .resume = mlx4_pci_resume,
};
static struct pci_driver mlx4_driver = {
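mlx4_pci_enable_device() and mlx4_pci_disable_device() above exist because the AER error path, the new resume handler, and ordinary remove can all try to disable the same PCI function; a mutex-protected status flag makes both operations idempotent so pci_disable_device() is never called twice. The same guard, reduced to a user-space sketch (the pci_* calls are stand-in stubs, and the mutex is a pthread mutex rather than the kernel's):

/* Idempotent enable/disable guarded by a mutex, mirroring the
 * mlx4_pci_{enable,disable}_device() helpers above.
 */
#include <stdio.h>
#include <pthread.h>

enum pci_status { PCI_STATUS_DISABLED, PCI_STATUS_ENABLED };

struct dev_persist {
    pthread_mutex_t pci_status_mutex;
    enum pci_status pci_status;
};

static int pci_enable_device(void)   { puts("pci_enable_device()");  return 0; }
static void pci_disable_device(void) { puts("pci_disable_device()"); }

static int dev_pci_enable(struct dev_persist *p)
{
    int err = 0;

    pthread_mutex_lock(&p->pci_status_mutex);
    if (p->pci_status == PCI_STATUS_DISABLED) {
        err = pci_enable_device();
        if (!err)
            p->pci_status = PCI_STATUS_ENABLED;
    }
    pthread_mutex_unlock(&p->pci_status_mutex);
    return err;
}

static void dev_pci_disable(struct dev_persist *p)
{
    pthread_mutex_lock(&p->pci_status_mutex);
    if (p->pci_status == PCI_STATUS_ENABLED) {
        pci_disable_device();
        p->pci_status = PCI_STATUS_DISABLED;
    }
    pthread_mutex_unlock(&p->pci_status_mutex);
}

int main(void)
{
    struct dev_persist p = {
        .pci_status_mutex = PTHREAD_MUTEX_INITIALIZER,
        .pci_status = PCI_STATUS_DISABLED,
    };

    dev_pci_enable(&p);
    dev_pci_disable(&p);  /* e.g. AER error_detected */
    dev_pci_disable(&p);  /* e.g. driver remove: second call is a no-op */
    return 0;
}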
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef9683101ead..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
int init_port_ref[MLX4_MAX_PORTS + 1];
u16 max_mtu[MLX4_MAX_PORTS + 1];
+ u8 pptx;
+ u8 pprx;
int disable_mcast_ref[MLX4_MAX_PORTS + 1];
struct mlx4_resource_tracker res_tracker;
struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a73344..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
}
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ /* Slave cannot change Global Pause configuration */
+ if (slave != mlx4_master_func_num(dev) &&
+ ((gen_context->pptx != master->pptx) ||
+ (gen_context->pprx != master->pprx))) {
+ gen_context->pptx = master->pptx;
+ gen_context->pprx = master->pprx;
+ mlx4_warn(dev,
+ "denying Global Pause change for slave:%d\n",
+ slave);
+ } else {
+ master->pptx = gen_context->pptx;
+ master->pprx = gen_context->pprx;
+ }
break;
case MLX4_SET_PORT_GID_TABLE:
/* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba607..559d11a443bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
select PTP_1588_CLOCK
+ select VXLAN if MLX5_CORE=y
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 879e6276c473..3881dce0cc30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -567,6 +567,7 @@ struct mlx5e_priv {
struct mlx5e_vxlan_db vxlan;
struct mlx5e_params params;
+ struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct delayed_work update_stats_work;
@@ -609,7 +610,7 @@ enum mlx5e_link_mode {
MLX5E_100GBASE_KR4 = 22,
MLX5E_100GBASE_LR4 = 23,
MLX5E_100BASE_TX = 24,
- MLX5E_100BASE_T = 25,
+ MLX5E_1000BASE_T = 25,
MLX5E_10GBASE_T = 26,
MLX5E_25GBASE_CR = 27,
MLX5E_25GBASE_KR = 28,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 68834b715f6c..3476ab844634 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -138,10 +138,10 @@ static const struct {
[MLX5E_100BASE_TX] = {
.speed = 100,
},
- [MLX5E_100BASE_T] = {
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
+ [MLX5E_1000BASE_T] = {
+ .supported = SUPPORTED_1000baseT_Full,
+ .advertised = ADVERTISED_1000baseT_Full,
+ .speed = 1000,
},
[MLX5E_10GBASE_T] = {
.supported = SUPPORTED_10000baseT_Full,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e0adb604f461..d4dfc5ce516a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
mlx5e_update_stats(priv);
- schedule_delayed_work(dwork,
- msecs_to_jiffies(
- MLX5E_UPDATE_STATS_INTERVAL));
+ queue_delayed_work(priv->wq, dwork,
+ msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
}
mutex_unlock(&priv->state_lock);
}
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
switch (event) {
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
- schedule_work(&priv->update_carrier_work);
+ queue_work(priv->wq, &priv->update_carrier_work);
break;
default:
@@ -1404,24 +1403,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
return 0;
}
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- int hw_mtu;
+ u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
int err;
- err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+ err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
if (err)
return err;
- mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+ /* Update vport context MTU */
+ mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
+ return 0;
+}
+
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 hw_mtu = 0;
+ int err;
- if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
- netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
- __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+ err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+ if (err || !hw_mtu) /* fallback to port oper mtu */
+ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+ *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u16 mtu;
+ int err;
- netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ err = mlx5e_set_mtu(priv, netdev->mtu);
+ if (err)
+ return err;
+
+ mlx5e_query_mtu(priv, &mtu);
+ if (mtu != netdev->mtu)
+ netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+ __func__, mtu, netdev->mtu);
+
+ netdev->mtu = mtu;
return 0;
}
@@ -1479,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
- schedule_delayed_work(&priv->update_stats_work, 0);
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
return 0;
@@ -1935,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1950,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
ether_addr_copy(netdev->dev_addr, saddr->sa_data);
netif_addr_unlock_bh(netdev);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
return 0;
}
@@ -1999,22 +2024,27 @@ static int mlx5e_set_features(struct net_device *netdev,
return err;
}
+#define MLX5_HW_MIN_MTU 64
+#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)
+
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
- int max_mtu;
+ u16 max_mtu;
+ u16 min_mtu;
int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);
- if (new_mtu > max_mtu) {
+ if (new_mtu > max_mtu || new_mtu < min_mtu) {
netdev_err(netdev,
- "%s: Bad MTU (%d) > (%d) Max\n",
- __func__, new_mtu, max_mtu);
+ "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+ __func__, new_mtu, min_mtu, max_mtu);
return -EINVAL;
}
@@ -2127,7 +2157,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+ mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2138,7 +2168,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+ mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2467,10 +2497,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
priv = netdev_priv(netdev);
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ goto err_free_netdev;
+
err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
if (err) {
mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
- goto err_free_netdev;
+ goto err_destroy_wq;
}
err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2549,7 +2583,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
vxlan_get_rx_port(netdev);
mlx5e_enable_async_events(priv);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
return priv;
@@ -2586,6 +2620,9 @@ err_dealloc_pd:
err_unmap_free_uar:
mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+err_destroy_wq:
+ destroy_workqueue(priv->wq);
+
err_free_netdev:
free_netdev(netdev);
@@ -2599,10 +2636,19 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- schedule_work(&priv->set_rx_mode_work);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
mlx5e_disable_async_events(priv);
- flush_scheduled_work();
- unregister_netdev(netdev);
+ flush_workqueue(priv->wq);
+ if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+ netif_device_detach(netdev);
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+ } else {
+ unregister_netdev(netdev);
+ }
+
mlx5e_tc_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2661,11 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
- free_netdev(netdev);
+ cancel_delayed_work_sync(&priv->update_stats_work);
+ destroy_workqueue(priv->wq);
+
+ if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+ free_netdev(netdev);
}
static void *mlx5e_get_netdev(void *vpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5121be4675d1..89cce97d46c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1065,33 +1065,6 @@ unlock_fg:
return rule;
}
-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u8 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_rule *rule;
- struct mlx5_flow_group *g;
-
- g = create_autogroup(ft, match_criteria_enable, match_criteria);
- if (IS_ERR(g))
- return (void *)g;
-
- rule = add_rule_fg(g, match_value,
- action, flow_tag, dest);
- if (IS_ERR(rule)) {
- /* Remove assumes refcount > 0 and autogroup creates a group
- * with a refcount = 0.
- */
- tree_get_node(&g->node);
- tree_remove_node(&g->node);
- }
- return rule;
-}
-
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
goto unlock;
}
- rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
- match_value, action, flow_tag, dest);
+ g = create_autogroup(ft, match_criteria_enable, match_criteria);
+ if (IS_ERR(g)) {
+ rule = (void *)g;
+ goto unlock;
+ }
+
+ rule = add_rule_fg(g, match_value,
+ action, flow_tag, dest);
+ if (IS_ERR(rule)) {
+ /* Remove assumes refcount > 0 and autogroup creates a group
+ * with a refcount = 0.
+ */
+ unlock_ref_node(&ft->node);
+ tree_get_node(&g->node);
+ tree_remove_node(&g->node);
+ return rule;
+ }
unlock:
unlock_ref_node(&ft->node);
return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
int prio;
- static struct fs_prio *fs_prio;
+ struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns;
if (!root_ns)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f3b2fae4991..6892746fd10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
int err;
mutex_lock(&dev->intf_state_mutex);
- if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
__func__);
goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
if (err)
pr_info("failed request module on %s\n", MLX5_IB_MOD);
- dev->interface_state = MLX5_INTERFACE_STATE_UP;
+ clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+ set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
mutex_unlock(&dev->intf_state_mutex);
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
}
mutex_lock(&dev->intf_state_mutex);
- if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+ if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_cmd_cleanup(dev);
out:
- dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
mutex_unlock(&dev->intf_state_mutex);
return err;
}
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
.resume = mlx5_pci_resume
};
+static void shutdown(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+
+ dev_info(&pdev->dev, "Shutdown was called\n");
+ /* Notify mlx5 clients that the kernel is being shut down */
+ set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+ mlx5_unload_one(dev, priv);
+ mlx5_pci_disable_device(dev);
+}
+
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
{ 0, }
};
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
+ .shutdown = shutdown,
.err_handler = &mlx5_err_handler,
.sriov_configure = mlx5_core_sriov_configure,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ae378c575deb..53cc1e2c693b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
- int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+ u16 *max_mtu, u16 *oper_mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
}
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8ba080e441a1..5ff8af472bf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
- iounmap(uar->map);
- iounmap(uar->bf_map);
+ if (uar->map)
+ iounmap(uar->map);
+ else
+ iounmap(uar->bf_map);
mlx5_cmd_free_uar(mdev, uar->index);
}
EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bd518405859e..b69dadcfb897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 *out;
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (!err)
+ *mtu = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.mtu);
+
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u32 vport,
enum mlx5_list_type list_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 9f10df25f3cd..f2fd1ef16da7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
return vxlan;
}
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ u16 port = vxlan_work->port;
struct mlx5e_vxlan *vxlan;
int err;
- err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
- if (err)
- return err;
+ if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+ goto free_work;
vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
- if (!vxlan) {
- err = -ENOMEM;
+ if (!vxlan)
goto err_delete_port;
- }
vxlan->udp_port = port;
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
if (err)
goto err_free;
- return 0;
+ goto free_work;
err_free:
kfree(vxlan);
err_delete_port:
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- return err;
+free_work:
+ kfree(vxlan_work);
}
static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
kfree(vxlan);
}
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
{
- if (!mlx5e_vxlan_lookup_port(priv, port))
- return;
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
__mlx5e_vxlan_core_del_port(priv, port);
+
+ kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+ u16 port, int add)
+{
+ struct mlx5e_vxlan_work *vxlan_work;
+
+ vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+ if (!vxlan_work)
+ return;
+
+ if (add)
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+ else
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+ vxlan_work->priv = priv;
+ vxlan_work->port = port;
+ vxlan_work->sa_family = sa_family;
+ queue_work(priv->wq, &vxlan_work->work);
}
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index a01685056ab1..129f3527aa14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
u16 udp_port;
};
+struct mlx5e_vxlan_work {
+ struct work_struct work;
+ struct mlx5e_priv *priv;
+ sa_family_t sa_family;
+ u16 port;
+};
+
static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
}
void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+ u16 port, int add);
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
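The VXLAN rework above moves the firmware add/del port commands out of the ndo_add_vxlan_port()/ndo_del_vxlan_port() callbacks, which can be invoked in atomic context, and onto the driver's private workqueue: the callback only allocates a small work item with GFP_ATOMIC and queues it, and the blocking command runs later in process context. A user-space analogue of that "package the arguments, defer the slow part" pattern (the queue here is a toy array, not the kernel workqueue API):

/* Minimal analogue of mlx5e_vxlan_queue_work(): the "atomic" caller only
 * allocates and enqueues a small work item; the slow operation runs later
 * when the queue is drained.
 */
#include <stdio.h>
#include <stdlib.h>

struct vxlan_work {
    void (*fn)(struct vxlan_work *w);
    unsigned short port;
};

static struct vxlan_work *queue[16];
static int queued;

static void vxlan_add_port(struct vxlan_work *w)
{
    printf("slow path: add VXLAN UDP port %u\n", w->port); /* fw cmd here */
    free(w);
}

static void vxlan_del_port(struct vxlan_work *w)
{
    printf("slow path: del VXLAN UDP port %u\n", w->port);
    free(w);
}

/* Called from the "atomic" context: no blocking work allowed here. */
static void vxlan_queue_work(unsigned short port, int add)
{
    struct vxlan_work *w = malloc(sizeof(*w));  /* GFP_ATOMIC in-kernel */

    if (!w)
        return;     /* best effort, just like the driver */
    w->fn = add ? vxlan_add_port : vxlan_del_port;
    w->port = port;
    queue[queued++] = w;
}

int main(void)
{
    vxlan_queue_work(4789, 1);
    vxlan_queue_work(4789, 0);
    for (int i = 0; i < queued; i++)    /* the workqueue thread */
        queue[i]->fn(queue[i]);
    return 0;
}

Silently dropping the work item on allocation failure should only cost offload performance for that port, not correctness, which is presumably why the driver tolerates it.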
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 270c9eeb7ab6..6d1a956e3f77 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
- local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
for (i = 0; i < mgp->num_slices; i++) {
napi_disable(&mgp->ss[i].napi);
+ local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
/* Lock the slice to prevent the busy_poll handler from
* accessing it. Later when we bring the NIC up, myri10ge_open
* resets the slice including this lock.
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
pr_info("Slice %d locked\n", i);
mdelay(1);
}
+ local_bh_enable();
}
- local_bh_enable();
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
return false;
}
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
/* This function reuses the buffer(from an offset) from
* consumer index to producer index in the bd ring
*/
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
curr_cons->data = NULL;
}
+/* In case of allocation failures, reuse buffers
+ * from the consumer index to refill the producer side for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
+{
+ struct sw_rx_data *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ qede_reuse_page(edev, rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
curr_cons->page_offset += rxq->rx_buf_seg_size;
if (curr_cons->page_offset == PAGE_SIZE) {
- if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ /* Since we failed to allocate new buffer
+ * current buffer can be used again.
+ */
+ curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
return -ENOMEM;
+ }
dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
len_on_bd);
if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ /* Incr page ref count to reuse on allocation failure
+ * so that it doesn't get freed while freeing SKB.
+ */
+ atomic_inc(&current_bd->data->_count);
goto out;
}
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
return 0;
out:
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
return -ENOMEM;
}
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
tpa_info->skb = netdev_alloc_skb(edev->ndev,
le16_to_cpu(cqe->len_on_first_bd));
if (unlikely(!tpa_info->skb)) {
+ DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
- return;
+ goto cons_buf;
}
skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
/* This is needed in order to enable forwarding support */
qede_set_gro_params(edev, tpa_info->skb, cqe);
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (likely(cqe->ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th;
- skb_set_network_header(skb, 0);
skb_set_transport_header(skb, sizeof(struct iphdr));
th = tcp_hdr(skb);
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th;
- skb_set_network_header(skb, 0);
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
th = tcp_hdr(skb);
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
struct sk_buff *skb,
u16 vlan_tag)
{
+ /* The FW can send a single MTU-sized packet from the GRO flow
+ * due to an aggregation timeout, last segment, etc., which is
+ * not expected to be a GRO packet. If the skb has zero frags,
+ * simply push it up the stack as a non-GSO skb.
+ */
+ if (unlikely(!skb->data_len)) {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ goto send_skb;
+ }
+
#ifdef CONFIG_INET
if (skb_shinfo(skb)->gso_size) {
+ skb_set_network_header(skb, 0);
+
switch (skb->protocol) {
case htons(ETH_P_IP):
qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
}
}
#endif
+
+send_skb:
skb_record_rx_queue(skb, fp->rss_id);
qede_skb_receive(edev, fp, skb, vlan_tag);
}
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
"CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
sw_comp_cons, parse_flag);
rxq->rx_hw_errors++;
- qede_reuse_page(edev, rxq, sw_rx_data);
- goto next_rx;
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+ goto next_cqe;
}
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
"Build_skb failed, dropping incoming packet\n");
- qede_reuse_page(edev, rxq, sw_rx_data);
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
rxq->rx_alloc_errors++;
- goto next_rx;
+ goto next_cqe;
}
/* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
if (unlikely(qede_realloc_rx_buffer(edev, rxq,
sw_rx_data))) {
DP_ERR(edev, "Failed to allocate rx buffer\n");
+ /* Incr page ref count to reuse on allocation
+ * failure so that it doesn't get freed while
+ * freeing SKB.
+ */
+
+ atomic_inc(&sw_rx_data->data->_count);
rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, edev,
+ fp_cqe->bd_num);
+ dev_kfree_skb_any(skb);
goto next_cqe;
}
}
+ qede_rx_bd_ring_consume(rxq);
+
if (fp_cqe->bd_num != 1) {
u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
num_frags--) {
u16 cur_size = pkt_len > rxq->rx_buf_size ?
rxq->rx_buf_size : pkt_len;
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
+ goto next_cqe;
+ }
- WARN_ONCE(!cur_size,
- "Still got %d BDs for mapping jumbo, but length became 0\n",
- num_frags);
-
- if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
goto next_cqe;
+ }
- rxq->sw_rx_cons++;
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- qed_chain_consume(&rxq->rx_bd_ring);
+ qede_rx_bd_ring_consume(rxq);
+
dma_unmap_page(&edev->pdev->dev,
sw_rx_data->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
pkt_len -= cur_size;
}
- if (pkt_len)
+ if (unlikely(pkt_len))
DP_ERR(edev,
"Mapped all BDs of jumbo, but still have %d bytes\n",
pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
skb_record_rx_queue(skb, fp->rss_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
- qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
- rxq->sw_rx_cons++;
next_rx_only:
rx_pkt++;
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
- if (replace_buf) {
+ if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
dma_unmap_addr(replace_buf, mapping),
PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
static int qede_alloc_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
{
- int i, rc, size, num_allocated;
+ int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
if (!rxq->sw_rx_ring) {
DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ rc = -ENOMEM;
goto err;
}
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
/* Allocate buffers for the Rx ring */
for (i = 0; i < rxq->num_rx_buffers; i++) {
rc = qede_alloc_rx_buffer(edev, rxq);
- if (rc)
- break;
- }
- num_allocated = i;
- if (!num_allocated) {
- DP_ERR(edev, "Rx buffers allocation failed\n");
- goto err;
- } else if (num_allocated < rxq->num_rx_buffers) {
- DP_NOTICE(edev,
- "Allocated less buffers than desired (%d allocated)\n",
- num_allocated);
+ if (rc) {
+ DP_ERR(edev,
+ "Rx buffers allocation failed at index %d\n", i);
+ goto err;
+ }
}
- qede_alloc_sge_mem(edev, rxq);
-
- return 0;
-
+ rc = qede_alloc_sge_mem(edev, rxq);
err:
- qede_free_mem_rxq(edev, rxq);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
}
return 0;
-
err:
- qede_free_mem_fp(edev, fp);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
struct qede_fastpath *fp = &edev->fp_array[rss_id];
rc = qede_alloc_mem_fp(edev, fp);
- if (rc)
- break;
- }
-
- if (rss_id != QEDE_RSS_CNT(edev)) {
- /* Failed allocating memory for all the queues */
- if (!rss_id) {
+ if (rc) {
DP_ERR(edev,
- "Failed to allocate memory for the leading queue\n");
- rc = -ENOMEM;
- } else {
- DP_NOTICE(edev,
- "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
- QEDE_RSS_CNT(edev), rss_id);
+ "Failed to allocate memory for fastpath - rss id = %d\n",
+ rss_id);
+ qede_free_mem_load(edev);
+ return rc;
}
- edev->num_rss = rss_id;
}
return 0;
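Several of the qede hunks above replace "drop and lose the ring slot" paths with qede_recycle_rx_bd_ring(), which hands consumed buffers straight back to the producer side when a replacement page cannot be allocated. A minimal userspace sketch of that recycle-on-failure idea follows; the ring layout, names, and sizes are illustrative only, not the qede data structures.

/* Recycle-on-allocation-failure sketch; illustrative only. */
#include <stdio.h>

#define RING_SIZE 8
#define RING_MASK (RING_SIZE - 1)

struct rx_ring {
        void *buf[RING_SIZE];
        unsigned int cons;             /* plays the role of sw_rx_cons */
        unsigned int prod;             /* plays the role of sw_rx_prod */
};

/* Re-post 'count' consumed buffers so the ring never runs dry. */
static void recycle_rx_buffers(struct rx_ring *ring, unsigned int count)
{
        for (; count > 0; count--) {
                void *buf = ring->buf[ring->cons & RING_MASK];

                ring->buf[ring->prod & RING_MASK] = buf;
                ring->cons++;
                ring->prod++;
        }
}

int main(void)
{
        static char pages[RING_SIZE][64];
        struct rx_ring ring = { .cons = 0, .prod = RING_SIZE };
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++)
                ring.buf[i] = pages[i];

        recycle_rx_buffers(&ring, 2);  /* e.g. a 2-BD packet that had to be dropped */
        printf("cons=%u prod=%u\n", ring.cons, ring.prod);
        return 0;
}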
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 55007f1e6bbc..caf6ddb7ea76 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 63
-#define QLCNIC_LINUX_VERSIONID "5.3.63"
+#define _QLCNIC_LINUX_SUBVERSION 64
+#define QLCNIC_LINUX_VERSIONID "5.3.64"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 087e14a3fba7..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
rate = clk_get_rate(clk);
clk_put(clk);
+ if (!rate)
+ return -EINVAL;
+
inc = 1000000000ULL << 20;
do_div(inc, rate);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7560fd..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
__func__);
return ret;
}
- ret = sh_eth_dev_init(ndev, false);
+ ret = sh_eth_dev_init(ndev, true);
if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
__func__);
return ret;
}
- mdp->irq_enabled = true;
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
netif_device_attach(ndev);
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 98d33d462c6c..1681084cc96f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
return 0;
}
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ return -EOPNOTSUPP;
+
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
nic_data->vport_id);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
bool replacing)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 flags = spec->flags;
memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+ /* Remove RSS flag if we don't have an RSS context. */
+ if (flags & EFX_FILTER_FLAG_RX_RSS &&
+ spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+ nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+ flags &= ~EFX_FILTER_FLAG_RX_RSS;
+
if (replacing) {
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
0 : spec->dmaq_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
- (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+ (flags & EFX_FILTER_FLAG_RX_RSS) ?
MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
- if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+ if (flags & EFX_FILTER_FLAG_RX_RSS)
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
spec->rss_context !=
EFX_FILTER_RSS_CONTEXT_DEFAULT ?
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..afb90d129cb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
+#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
#define EMAC_SPLITTER_CTRL_REG 0x0
#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -46,7 +49,6 @@ struct socfpga_dwmac {
u32 reg_shift;
struct device *dev;
struct regmap *sys_mgr_base_addr;
- struct reset_control *stmmac_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
};
@@ -89,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
struct device_node *np_splitter;
struct resource res_splitter;
- dwmac->stmmac_rst = devm_reset_control_get(dev,
- STMMAC_RESOURCE_NAME);
- if (IS_ERR(dwmac->stmmac_rst)) {
- dev_info(dev, "Could not get reset control!\n");
- if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dwmac->stmmac_rst = NULL;
- }
-
dwmac->interface = of_get_phy_mode(np);
sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -148,7 +141,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
int phymode = dwmac->interface;
u32 reg_offset = dwmac->reg_offset;
u32 reg_shift = dwmac->reg_shift;
- u32 ctrl, val;
+ u32 ctrl, val, module;
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
@@ -175,39 +168,39 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
- if (dwmac->f2h_ptp_ref_clk)
+ if (dwmac->f2h_ptp_ref_clk) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
- else
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ &module);
+ module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ module);
+ } else {
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+ }
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
- return 0;
-}
-
-static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
-{
- struct socfpga_dwmac *dwmac = priv;
- /* On socfpga platform exit, assert and hold reset to the
- * enet controller - the default state after a hard reset.
- */
- if (dwmac->stmmac_rst)
- reset_control_assert(dwmac->stmmac_rst);
+ return 0;
}
static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
{
- struct socfpga_dwmac *dwmac = priv;
+ struct socfpga_dwmac *dwmac = priv;
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *stpriv = NULL;
int ret = 0;
- if (ndev)
- stpriv = netdev_priv(ndev);
+ if (!ndev)
+ return -EINVAL;
+
+ stpriv = netdev_priv(ndev);
+ if (!stpriv)
+ return -EINVAL;
/* Assert reset to the enet controller before changing the phy mode */
- if (dwmac->stmmac_rst)
- reset_control_assert(dwmac->stmmac_rst);
+ if (stpriv->stmmac_rst)
+ reset_control_assert(stpriv->stmmac_rst);
/* Setup the phy mode in the system manager registers according to
* devicetree configuration
@@ -217,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
/* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode
*/
- if (dwmac->stmmac_rst)
- reset_control_deassert(dwmac->stmmac_rst);
+ if (stpriv->stmmac_rst)
+ reset_control_deassert(stpriv->stmmac_rst);
/* Before the enet controller is suspended, the phy is suspended.
* This causes the phy clock to be gated. The enet controller is
@@ -235,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
* control register 0, and can be modified by the phy driver
* framework.
*/
- if (stpriv && stpriv->phydev)
+ if (stpriv->phydev)
phy_resume(stpriv->phydev);
return ret;
@@ -275,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = dwmac;
plat_dat->init = socfpga_dwmac_init;
- plat_dat->exit = socfpga_dwmac_exit;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
- ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- return ret;
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (!ret)
+ ret = socfpga_dwmac_init(pdev, dwmac);
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return ret;
}
static const struct of_device_id socfpga_dwmac_match[] = {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..e2fcdf1eec44 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,7 +367,6 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct device_node *phy_node;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
struct device *dev;
@@ -1148,25 +1147,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
- if (priv->phy_node)
- slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+ if (slave->data->phy_node) {
+ slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
&cpsw_adjust_link, 0, slave->data->phy_if);
- else
+ if (!slave->phy) {
+ dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+ slave->data->phy_node->full_name,
+ slave->slave_num);
+ return;
+ }
+ } else {
slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
- if (IS_ERR(slave->phy)) {
- dev_err(priv->dev, "phy %s not found on slave %d\n",
- slave->data->phy_id, slave->slave_num);
- slave->phy = NULL;
- } else {
- phy_attached_info(slave->phy);
+ if (IS_ERR(slave->phy)) {
+ dev_err(priv->dev,
+ "phy \"%s\" not found on slave %d, err %ld\n",
+ slave->data->phy_id, slave->slave_num,
+ PTR_ERR(slave->phy));
+ slave->phy = NULL;
+ return;
+ }
+ }
- phy_start(slave->phy);
+ phy_attached_info(slave->phy);
- /* Configure GMII_SEL register */
- cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
- slave->slave_num);
- }
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
}
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1251,12 +1259,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
+ pm_runtime_get_sync(&priv->pdev->dev);
+
if (!cpsw_common_res_usage_state(priv))
cpsw_intr_disable(priv);
netif_carrier_off(ndev);
- pm_runtime_get_sync(&priv->pdev->dev);
-
reg = priv->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
@@ -1940,12 +1948,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->port_vlan = data->dual_emac_res_vlan;
}
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
- struct cpsw_platform_data *data = &priv->data;
int i = 0, ret;
u32 prop;
@@ -2033,25 +2040,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
if (strcmp(slave_node->name, "slave"))
continue;
- priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+ slave_data->phy_node = of_parse_phandle(slave_node,
+ "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
- if (of_phy_is_fixed_link(slave_node)) {
- struct device_node *phy_node;
- struct phy_device *phy_dev;
-
+ if (slave_data->phy_node) {
+ dev_dbg(&pdev->dev,
+ "slave[%d] using phy-handle=\"%s\"\n",
+ i, slave_data->phy_node->full_name);
+ } else if (of_phy_is_fixed_link(slave_node)) {
/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
*/
ret = of_phy_register_fixed_link(slave_node);
if (ret)
return ret;
- phy_node = of_node_get(slave_node);
- phy_dev = of_phy_find_device(phy_node);
- if (!phy_dev)
- return -ENODEV;
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, phy_dev->mdio.bus->id,
- phy_dev->mdio.addr);
+ slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
u32 phyid;
struct device_node *mdio_node;
@@ -2072,7 +2075,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
} else {
- dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
+ dev_err(&pdev->dev,
+ "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+ i);
goto no_phy_slave;
}
slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2275,7 +2280,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(priv, pdev)) {
+ if (cpsw_probe_dt(&priv->data, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 442a7038e660..e50afd1b2eda 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -18,6 +18,7 @@
#include <linux/phy.h>
struct cpsw_slave_data {
+ struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
int phy_if;
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..f56d66e6ec15 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
/* TODO: Add phy read and write and private statistics get feature */
- return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+ if (priv->phydev)
+ return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+ else
+ return -EOPNOTSUPP;
}
static int match_first_device(struct device *dev, void *data)
@@ -1878,8 +1881,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
pdata->hw_ram_addr = auxdata->hw_ram_addr;
}
- pdev->dev.platform_data = pdata;
-
return pdata;
}
@@ -2101,6 +2102,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 13214a6492ac..743b18266a7c 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
continue;
/* copy hw scan info */
- memcpy(target->hwinfo, scan_info, scan_info->size);
+ memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
target->essid_len = strnlen(scan_info->essid,
sizeof(scan_info->essid));
target->rate_len = 0;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 84d3e5ca8817..c6385617bfb2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->valid = false;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
if (!req) {
kfree_skb(skb);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
aead_request_free(req);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
} else {
/* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
dev_hold(dev);
ret = crypto_aead_decrypt(req);
if (ret == -EINPROGRESS) {
- return NULL;
+ return ERR_PTR(ret);
} else if (ret != 0) {
/* decryption/authentication failed
* 10.6 if validateFrames is disabled, deliver anyway
*/
if (ret != -EBADMSG) {
kfree_skb(skb);
- skb = NULL;
+ skb = ERR_PTR(ret);
}
} else {
macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
secy->validate_frames != MACSEC_VALIDATE_DISABLED)
skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
- if (!skb) {
- macsec_rxsa_put(rx_sa);
+ if (IS_ERR(skb)) {
+ /* the decrypt callback needs the reference */
+ if (PTR_ERR(skb) != -EINPROGRESS)
+ macsec_rxsa_put(rx_sa);
rcu_read_unlock();
*pskb = NULL;
return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@ deliver:
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
macsec_reset_skb(skb, secy->netdev);
- macsec_rxsa_put(rx_sa);
+ if (rx_sa)
+ macsec_rxsa_put(rx_sa);
count_rx(dev, skb->len);
rcu_read_unlock();
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
}
rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
- if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len,
- secy->icv_len)) {
+ if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ secy->key_len, secy->icv_len)) {
+ kfree(rx_sa);
rtnl_unlock();
return -ENOMEM;
}
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
secy->key_len, secy->icv_len)) {
+ kfree(tx_sa);
rtnl_unlock();
return -ENOMEM;
}
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
return 1;
if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
- nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+ nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!hdr)
return -EMSGSIZE;
- rtnl_lock();
+ genl_dump_check_consistent(cb, hdr, &macsec_fam);
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_end(skb, rxsc_list);
- rtnl_unlock();
-
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
- rtnl_unlock();
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
+static int macsec_generation = 1; /* protected by RTNL */
+
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
dev_idx = cb->args[0];
d = 0;
+ rtnl_lock();
+
+ cb->seq = macsec_generation;
+
for_each_netdev(net, dev) {
struct macsec_secy *secy;
@@ -2467,6 +2476,7 @@ next:
}
done:
+ rtnl_unlock();
cb->args[0] = d;
return skb->len;
}
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ macsec_generation++;
+
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
- if (list_empty(&rxd->secys))
+ if (list_empty(&rxd->secys)) {
netdev_rx_handler_unregister(real_dev);
+ kfree(rxd);
+ }
macsec_del_dev(macsec);
}
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev,
err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
rxd);
- if (err < 0)
+ if (err < 0) {
+ kfree(rxd);
return err;
+ }
}
list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
goto del_dev;
+ macsec_generation++;
+
dev_hold(real_dev);
return 0;
@@ -3079,7 +3097,7 @@ unregister:
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
- u64 csid = DEFAULT_CIPHER_ID;
+ u64 csid = MACSEC_DEFAULT_CIPHER_ID;
u8 icv_len = DEFAULT_ICV_LEN;
int flag;
bool es, scb, sci;
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
switch (csid) {
- case DEFAULT_CIPHER_ID:
- case DEFAULT_CIPHER_ALT:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ case MACSEC_DEFAULT_CIPHER_ALT:
if (icv_len < MACSEC_MIN_ICV_LEN ||
icv_len > MACSEC_MAX_ICV_LEN)
return -EINVAL;
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
return -EINVAL;
- if ((data[IFLA_MACSEC_PROTECT] &&
- nla_get_u8(data[IFLA_MACSEC_PROTECT])) &&
+ if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
+ nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
!data[IFLA_MACSEC_WINDOW])
return -EINVAL;
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb,
if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
- nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+ nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID) ||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
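The macsec_decrypt() change above switches from returning NULL on every failure to returning ERR_PTR() codes, so macsec_handle_frame() can tell an asynchronous decrypt still in flight (-EINPROGRESS, keep the rx_sa reference) apart from a genuine drop. The self-contained sketch below shows the pointer-encoded error convention in plain C; decrypt_frame() is a made-up stand-in, not the macsec code.

/* ERR_PTR/IS_ERR/PTR_ERR convention, userspace sketch; illustrative only. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)        { return (void *)error; }
static inline long PTR_ERR(const void *ptr)    { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for macsec_decrypt(): may fail synchronously or stay pending. */
static char *decrypt_frame(const char *in, int pending)
{
        char *out;

        if (pending)
                return ERR_PTR(-EINPROGRESS);  /* async path keeps ownership */
        out = strdup(in);
        if (!out)
                return ERR_PTR(-ENOMEM);
        return out;
}

int main(void)
{
        const int pending[] = { 1, 0 };
        int i;

        for (i = 0; i < 2; i++) {
                char *skb = decrypt_frame("frame", pending[i]);

                if (IS_ERR(skb)) {
                        if (PTR_ERR(skb) == -EINPROGRESS)
                                printf("decrypt still in flight, keep the reference\n");
                        else
                                printf("decrypt failed: %ld\n", PTR_ERR(skb));
                        continue;
                }
                printf("decrypted: %s\n", skb);
                free(skb);
        }
        return 0;
}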
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index b3ffaee30858..f279a897a5c7 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
* in the FIFO. In such cases, the FIFO enters an error mode it
* cannot recover from by software.
*/
- if (phydev->drv->phy_id == ATH8030_PHY_ID) {
- if (phydev->state == PHY_NOLINK) {
- if (priv->gpiod_reset && !priv->phy_reset) {
- struct at803x_context context;
-
- at803x_context_save(phydev, &context);
-
- gpiod_set_value(priv->gpiod_reset, 1);
- msleep(1);
- gpiod_set_value(priv->gpiod_reset, 0);
- msleep(1);
-
- at803x_context_restore(phydev, &context);
-
- phydev_dbg(phydev, "%s(): phy was reset\n",
- __func__);
- priv->phy_reset = true;
- }
- } else {
- priv->phy_reset = false;
+ if (phydev->state == PHY_NOLINK) {
+ if (priv->gpiod_reset && !priv->phy_reset) {
+ struct at803x_context context;
+
+ at803x_context_save(phydev, &context);
+
+ gpiod_set_value(priv->gpiod_reset, 1);
+ msleep(1);
+ gpiod_set_value(priv->gpiod_reset, 0);
+ msleep(1);
+
+ at803x_context_restore(phydev, &context);
+
+ phydev_dbg(phydev, "%s(): phy was reset\n",
+ __func__);
+ priv->phy_reset = true;
}
+ } else {
+ priv->phy_reset = false;
}
}
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
.phy_id_mask = 0xffffffef,
.probe = at803x_probe,
.config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
.phy_id_mask = 0xffffffef,
.probe = at803x_probe,
.config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d458728..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
return -ENOMEM;
mutex_init(&ks->lock);
- ks->spi = spi_dev_get(spi);
+ ks->spi = spi;
ks->chip = &ks8995_chip[variant];
if (ks->spi->dev.of_node) {
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Huawei E3372 fails unless NDP comes after the IP packets */
- { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+ /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+ * (12d1:157d), are known to fail unless the NDP is placed
+ * after the IP packets. Applying the quirk to all Huawei
+ * devices is broader than necessary, but harmless.
+ */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
},
/* default entry */
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20890ee03f3..f64778ad9753 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -269,6 +269,7 @@ struct skb_data { /* skb->cb is one of these */
struct lan78xx_net *dev;
enum skb_state state;
size_t length;
+ int num_of_packet;
};
struct usb_context {
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
- /* nothing to do */
+ struct phy_device *phydev = net->phydev;
+ int ret, temp;
+
+ /* In forced 100 Mbps full/half-duplex mode, the chip may fail to
+ * set the mode correctly when the cable is switched between a
+ * long (~50+ m) run and a short one. As a workaround, set the
+ * speed to 10 before setting it to 100 while in forced mode.
+ */
+ if (!phydev->autoneg && (phydev->speed == 100)) {
+ /* disable phy interrupt */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+ ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+ temp = phy_read(phydev, MII_BMCR);
+ temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+ phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+ temp |= BMCR_SPEED100;
+ phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+ /* clear any interrupt left pending while the workaround ran */
+ temp = phy_read(phydev, LAN88XX_INT_STS);
+
+ /* enable phy interrupt back */
+ temp = phy_read(phydev, LAN88XX_INT_MASK);
+ temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+ ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ }
}
static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
struct lan78xx_net *dev = entry->dev;
if (urb->status == 0) {
- dev->net->stats.tx_packets++;
+ dev->net->stats.tx_packets += entry->num_of_packet;
dev->net->stats.tx_bytes += entry->length;
} else {
dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
return;
}
- skb->protocol = eth_type_trans(skb, dev->net);
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev->net);
+
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof(struct ethhdr), skb->protocol);
memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
skb_totallen = 0;
pkt_cnt = 0;
+ count = 0;
+ length = 0;
for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
if (skb_is_gso(skb)) {
if (pkt_cnt) {
/* handle previous packets first */
break;
}
- length = skb->len;
+ count = 1;
+ length = skb->len - TX_OVERHEAD;
skb2 = skb_dequeue(tqp);
goto gso_skb;
}
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
for (count = pos = 0; count < pkt_cnt; count++) {
skb2 = skb_dequeue(tqp);
if (skb2) {
+ length += (skb2->len - TX_OVERHEAD);
memcpy(skb->data + pos, skb2->data, skb2->len);
pos += roundup(skb2->len, sizeof(u32));
dev_kfree_skb(skb2);
}
}
- length = skb_totallen;
-
gso_skb:
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
@@ -2980,6 +3011,7 @@ gso_skb:
entry->urb = urb;
entry->dev = dev;
entry->length = length;
+ entry->num_of_packet = count;
spin_lock_irqsave(&dev->txq.lock, flags);
ret = usb_autopm_get_interface_async(dev->intf);
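The lan78xx hunks above record how many packets, and how many payload bytes net of the per-packet TX command overhead, were packed into each bulk URB, so tx_complete() can credit tx_packets by the real count instead of by one per URB. A small accounting sketch follows; the TX_OVERHEAD value and the batch struct are assumptions for illustration, not the driver's definitions.

/* Per-URB packet/byte accounting sketch; illustrative only. */
#include <stdio.h>

#define TX_OVERHEAD 8           /* assumed per-packet command-word overhead */

struct tx_batch {
        unsigned int num_of_packet;
        unsigned long length;   /* payload bytes only */
};

static void add_packet(struct tx_batch *b, unsigned long skb_len)
{
        b->num_of_packet++;
        b->length += skb_len - TX_OVERHEAD;
}

static void tx_complete(const struct tx_batch *b,
                        unsigned long *tx_packets, unsigned long *tx_bytes)
{
        *tx_packets += b->num_of_packet;   /* not just ++ once per URB */
        *tx_bytes += b->length;
}

int main(void)
{
        struct tx_batch batch = { 0, 0 };
        unsigned long pkts = 0, bytes = 0;

        add_packet(&batch, 1514 + TX_OVERHEAD);
        add_packet(&batch, 60 + TX_OVERHEAD);
        tx_complete(&batch, &pkts, &bytes);
        printf("packets=%lu bytes=%lu\n", pkts, bytes);
        return 0;
}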
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f84080215915..82129eef7774 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
int ret;
read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
- data[0] = 0xc9;
+ data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
data[1] = 0;
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
data[1] |= 0x20; /* set full duplex */
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
pkt_len = buf[count - 3] << 8;
pkt_len += buf[count - 4];
pkt_len &= 0xfff;
- pkt_len -= 8;
+ pkt_len -= 4;
}
/*
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
goon:
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
- pegasus->rx_skb->data, PEGASUS_MTU + 8,
+ pegasus->rx_skb->data, PEGASUS_MTU,
read_bulk_callback, pegasus);
rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
}
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
- pegasus->rx_skb->data, PEGASUS_MTU + 8,
+ pegasus->rx_skb->data, PEGASUS_MTU,
read_bulk_callback, pegasus);
try_again:
status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
- pegasus->rx_skb->data, PEGASUS_MTU + 8,
+ pegasus->rx_skb->data, PEGASUS_MTU,
read_bulk_callback, pegasus);
if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
if (res == -ENODEV)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 30033dbe6662..c369db99c005 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -29,6 +29,7 @@
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/of_net.h>
#include "smsc75xx.h"
#define SMSC_CHIPNAME "smsc75xx"
@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
+ const u8 *mac_addr;
+
+ /* maybe the boot loader passed the MAC address in devicetree */
+ mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ return;
+ }
+
/* try reading mac address from EEPROM */
if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
dev->net->dev_addr) == 0) {
@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
}
}
- /* no eeprom, or eeprom values are invalid. generate random MAC */
+ /* no useful static MAC address found. generate a random one */
eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 66b3ab9f614e..2edc2bc6d1b9 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -29,6 +29,7 @@
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/of_net.h>
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
+ const u8 *mac_addr;
+
+ /* maybe the boot loader passed the MAC address in devicetree */
+ mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ return;
+ }
+
/* try reading mac address from EEPROM */
if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
dev->net->dev_addr) == 0) {
@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
}
}
- /* no eeprom, or eeprom values are invalid. generate random MAC */
+ /* no useful static MAC address found. generate a random one */
eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
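Both smsc75xx and smsc95xx now try the devicetree-provided MAC address first, then the EEPROM, and only then fall back to a random locally-administered address. The sketch below shows that ordering with placeholder lookup functions; get_dt_mac() and read_eeprom_mac() are not real driver helpers, just stand-ins for the two lookups.

/* MAC address fallback ordering sketch; illustrative only. */
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static int get_dt_mac(unsigned char *mac)      { (void)mac; return -1; } /* no DT node */
static int read_eeprom_mac(unsigned char *mac) { (void)mac; return -1; } /* no EEPROM  */

static void init_mac_address(unsigned char mac[ETH_ALEN])
{
        int i;

        if (get_dt_mac(mac) == 0)
                return;                        /* boot loader provided one */
        if (read_eeprom_mac(mac) == 0)
                return;                        /* stored in EEPROM */
        for (i = 0; i < ETH_ALEN; i++)         /* last resort: random */
                mac[i] = rand() & 0xff;
        mac[0] = (mac[0] & 0xfe) | 0x02;       /* locally administered, unicast */
}

int main(void)
{
        unsigned char mac[ETH_ALEN];

        init_mac_address(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}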
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f67b00a..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
union Vmxnet3_GenericDesc *gdesc)
{
if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
- /* typical case: TCP/UDP over IP and both csums are correct */
- if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
- VMXNET3_RCD_CSUM_OK) {
+ if (gdesc->rcd.v4 &&
+ (le32_to_cpu(gdesc->dword[3]) &
+ VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+ BUG_ON(gdesc->rcd.frg);
+ } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+ (1 << VMXNET3_RCD_TUC_SHIFT))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
- BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
BUG_ON(gdesc->rcd.frg);
} else {
if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb900c1..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
struct u64_stats_sync syncp;
};
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
- /* TO-DO: return max ethernet size? */
- return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
- /* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
- return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
- .family = AF_INET,
- .local_out = vrf_ip_local_out,
- .check = vrf_ip_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
/* neighbor handling is done with actual device; do not want
* to flip skb->dev for those ndisc packets. This really fails
* for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
- .family = AF_INET6,
- .local_out = ip6_local_out,
- .check = vrf_ip6_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
- vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
- sizeof(struct rt6_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops6.kmem_cachep)
- return -ENOMEM;
-
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
- kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
- skb->dev->stats.rx_errors++;
- kfree_skb(skb);
- return 0;
-}
-
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
- dst_destroy(&vrf->rt6->dst);
- free_percpu(vrf->rt6->rt6i_pcpu);
+ dst_release(&vrf->rt6->dst);
vrf->rt6 = NULL;
}
static int vrf_rt6_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct dst_entry *dst;
+ struct net *net = dev_net(dev);
struct rt6_info *rt6;
- int cpu;
int rc = -ENOMEM;
- rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rt6 = ip6_dst_alloc(net, dev,
+ DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
if (!rt6)
goto out;
- dst = &rt6->dst;
-
- rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
- if (!rt6->rt6i_pcpu) {
- dst_destroy(dst);
- goto out;
- }
- for_each_possible_cpu(cpu) {
- struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
- *p = NULL;
- }
-
- memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
- INIT_LIST_HEAD(&rt6->rt6i_siblings);
- INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
- rt6->dst.input = vrf_input6;
rt6->dst.output = vrf_output6;
-
- rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
- atomic_set(&rt6->dst.__refcnt, 2);
-
+ rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+ dst_hold(&rt6->dst);
vrf->rt6 = rt6;
rc = 0;
out:
return rc;
}
#else
-static int init_dst_ops6_kmem_cachep(void)
-{
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
}
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
{
struct dst_entry *dst = (struct dst_entry *)vrf->rth;
- dst_destroy(dst);
+ dst_release(dst);
vrf->rth = NULL;
}
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
struct net_vrf *vrf = netdev_priv(dev);
struct rtable *rth;
- rth = dst_alloc(&vrf_dst_ops, dev, 2,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
if (rth) {
rth->dst.output = vrf_output;
- rth->rt_genid = rt_genid_ipv4(dev_net(dev));
- rth->rt_flags = 0;
- rth->rt_type = RTN_UNICAST;
- rth->rt_is_input = 0;
- rth->rt_iif = 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
rth->rt_table_id = vrf->tb_id;
- INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_uncached_list = NULL;
}
return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
struct net_device *port_dev;
struct list_head *iter;
- vrf_rtable_destroy(vrf);
- vrf_rt6_destroy(vrf);
+ vrf_rtable_release(vrf);
+ vrf_rt6_release(vrf);
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
return 0;
out_rth:
- vrf_rtable_destroy(vrf);
+ vrf_rtable_release(vrf);
out_stats:
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rth = vrf->rth;
- atomic_inc(&rth->dst.__refcnt);
+ dst_hold(&rth->dst);
}
return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rt = vrf->rt6;
- atomic_inc(&rt->dst.__refcnt);
+ dst_hold(&rt->dst);
}
return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
{
int rc;
- vrf_dst_ops.kmem_cachep =
- kmem_cache_create("vrf_ip_dst_cache",
- sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops.kmem_cachep)
- return -ENOMEM;
-
- rc = init_dst_ops6_kmem_cachep();
- if (rc != 0)
- goto error2;
-
register_netdevice_notifier(&vrf_notifier_block);
rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
error:
unregister_netdevice_notifier(&vrf_notifier_block);
- free_dst_ops6_kmem_cachep();
-error2:
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
return rc;
}
-static void __exit vrf_cleanup_module(void)
-{
- rtnl_link_unregister(&vrf_link_ops);
- unregister_netdevice_notifier(&vrf_notifier_block);
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
- free_dst_ops6_kmem_cachep();
-}
-
module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
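The vrf hunks above stop destroying the cached dst directly and instead drop a reference (dst_release()), with users taking their own reference via dst_hold(); the object is freed only when the last reference goes away. Below is a minimal reference-counting sketch of that ownership model in plain C; it illustrates the pattern only and is not the kernel's dst implementation.

/* Hold/release ownership sketch; illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct dst {
        int refcnt;
};

static struct dst *dst_alloc(void)
{
        struct dst *d = calloc(1, sizeof(*d));

        if (d)
                d->refcnt = 1;                 /* creator holds one reference */
        return d;
}

static void dst_hold(struct dst *d)
{
        d->refcnt++;
}

static void dst_release(struct dst *d)
{
        if (--d->refcnt == 0) {                /* last reference frees it */
                printf("dst freed\n");
                free(d);
        } else {
                printf("dst still has %d reference(s)\n", d->refcnt);
        }
}

int main(void)
{
        struct dst *d = dst_alloc();

        dst_hold(d);        /* e.g. vrf_get_rtable() handing it to a caller */
        dst_release(d);     /* vrf_rtable_release(): the VRF drops its ref  */
        dst_release(d);     /* the caller's put is what actually frees it   */
        return 0;
}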
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 8f8793004b9f..1b271b99c49e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int tmp, new;
int i;
- int8_t mask_m[123];
- int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
if (AR_NO_SPUR == cur_bb_spur)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index db6624527d99..53d7445a5d12 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
int i;
struct chan_centers centers;
- int8_t mask_m[123];
- int8_t mask_p[123];
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
-bcma_out:
return err;
bcma_err_wireless_exit:
ieee80211_free_hw(wl->hw);
+bcma_out:
+ kfree(dev);
return err;
}
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
b43_rng_exit(wl);
b43_leds_unregister(wl);
-
ieee80211_free_hw(wl->hw);
+ kfree(wldev->dev);
}
static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
b43_leds_unregister(wl);
b43_wireless_exit(dev, wl);
+ kfree(dev);
}
static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 97be104d1203..b5c57eebf995 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -93,7 +93,7 @@
#define IWL8260_SMEM_OFFSET 0x400000
#define IWL8260_SMEM_LEN 0x68000
-#define IWL8000_FW_PRE "iwlwifi-8000"
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
#define IWL8000_MODULE_FIRMWARE(api) \
IWL8000_FW_PRE "-" __stringify(api) ".ucode"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index f899666acb41..9e45bf9c6071 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -238,19 +238,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
name_pre, tag);
- /*
- * Starting 8000B - FW name format has changed. This overwrites the
- * previous name and uses the new format.
- */
- if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
- char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
-
- if (rev_step != 'A')
- snprintf(drv->firmware_name,
- sizeof(drv->firmware_name), "%s%c-%s.ucode",
- name_pre, rev_step, tag);
- }
-
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
? "EXPERIMENTAL " : "",
@@ -1060,11 +1047,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
- if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
- !gscan_capa,
- "GSCAN is supported but capabilities TLV is unavailable\n"))
+ /*
+ * If ucode advertises that it supports GSCAN but GSCAN
+ * capabilities TLV is not present, or if it has an old format,
+ * warn and continue without GSCAN.
+ */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+ !gscan_capa) {
+ IWL_DEBUG_INFO(drv,
+ "GSCAN is supported but capabilities TLV is unavailable\n");
__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
capa->_capa);
+ }
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 4856eac120f6..6938cd37be57 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -526,7 +526,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
/* Make room for fw's virtual image pages, if it exists */
- if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+ mvm->fw_paging_db[0].fw_paging_block)
file_len += mvm->num_of_paging_blk *
(sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -643,7 +644,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
}
/* Dump fw's virtual image */
- if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+ mvm->fw_paging_db[0].fw_paging_block) {
for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
struct iwl_fw_error_dump_paging *paging;
struct page *pages =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 594cd0dc7df9..09d895fafaf2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -144,9 +144,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
__free_pages(mvm->fw_paging_db[i].fw_paging_block,
get_order(mvm->fw_paging_db[i].fw_paging_size));
+ mvm->fw_paging_db[i].fw_paging_block = NULL;
}
kfree(mvm->trans->paging_download_buf);
mvm->trans->paging_download_buf = NULL;
+ mvm->trans->paging_db = NULL;
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ iwl_free_fw_paging(mvm);
+
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
- iwl_free_fw_paging(mvm);
-
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 05b968506836..79d7cd7d461e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
*/
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
if (val & (BIT(1) | BIT(17))) {
- IWL_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
+ IWL_DEBUG_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8e09c544d892..5101f3ab4f29 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -103,6 +103,20 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
flush_dcache_page(page);
}
} else {
+ /*
+ * Note that we write the data both before and after
+ * clearing poison. The write before clear poison
+ * handles situations where the latest written data is
+ * preserved and the clear poison operation simply marks
+ * the address range as valid without changing the data.
+ * In this case application software can assume that an
+ * interrupted write will either return the new good
+ * data or an error.
+ *
+ * However, if pmem_clear_poison() leaves the data in an
+ * indeterminate state we need to perform the write
+ * after clear poison.
+ */
flush_dcache_page(page);
memcpy_to_pmem(pmem_addr, mem + off, len);
if (unlikely(bad_pmem)) {
@@ -383,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
*/
start += start_pad;
npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
- if (nd_pfn->mode == PFN_MODE_PMEM)
- offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+ if (nd_pfn->mode == PFN_MODE_PMEM) {
+ unsigned long memmap_size;
+
+ /*
+ * vmemmap_populate_hugepages() allocates the memmap array in
+ * HPAGE_SIZE chunks.
+ */
+ memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+ offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
- start;
- else if (nd_pfn->mode == PFN_MODE_RAM)
+ } else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
else
goto err;
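
The arithmetic behind the nd_pfn_init() change, in isolation: the per-pfn struct page space (64 bytes each) is rounded up to whole huge pages because vmemmap_populate_hugepages() hands out the memmap in HPAGE_SIZE chunks, and only then is the data offset aligned. A minimal sketch with made-up sizes, ignoring start_pad/end_trunc for brevity; ALIGN here mirrors the kernel's power-of-two round-up macro.

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))
#define SZ_8K           0x2000ULL
#define SZ_4K           0x1000ULL
#define HPAGE_SIZE      0x200000ULL             /* 2 MiB on x86-64 */

int main(void)
{
        unsigned long long start = 0x100000000ULL;      /* illustrative, 2 MiB aligned */
        unsigned long long size  = 16ULL << 30;         /* 16 GiB namespace */
        unsigned long long align = HPAGE_SIZE;          /* nd_pfn->align */

        unsigned long long npfns = (size - SZ_8K) / SZ_4K;
        /* 64 bytes of struct page per pfn, rounded up to whole huge pages */
        unsigned long long memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
        unsigned long long offset = ALIGN(start + SZ_8K + memmap_size, align) - start;

        printf("npfns=%llu memmap=%llu MiB data offset=%llu MiB\n",
               npfns, memmap_size >> 20, offset >> 20);
        return 0;
}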
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 24ccda303efb..4fd733ff72b1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result > 0) {
dev_err(dev->ctrl.device,
"Could not set queue count (%d)\n", result);
- nr_io_queues = 0;
- result = 0;
+ return 0;
}
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* If we enable msix early due to not intx, disable it again before
* setting up the full range we need.
*/
- if (!pdev->irq)
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ else if (pdev->msix_enabled)
pci_disable_msix(pdev);
for (i = 0; i < nr_io_queues; i++)
@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
if (pci_enable_device_mem(pdev))
return result;
- dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
}
/*
- * Some devices don't advertse INTx interrupts, pre-enable a single
- * MSIX vec for setup. We'll adjust this later.
+ * Some devices and/or platforms don't advertise or work with INTx
+ * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+ * adjust this later.
*/
- if (!pdev->irq) {
- result = pci_enable_msix(pdev, dev->entry, 1);
- if (result < 0)
- goto disable;
+ if (pci_enable_msix(pdev, dev->entry, 1)) {
+ pci_enable_msi(pdev);
+ dev->entry[0].vector = pdev->irq;
+ }
+
+ if (!dev->entry[0].vector) {
+ result = -ENODEV;
+ goto disable;
}
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ goto out;
+
set_bit(NVME_CTRL_RESETTING, &dev->flags);
result = nvme_pci_enable(dev);
@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- del_timer_sync(&dev->watchdog_timer);
-
set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL);
flush_work(&dev->async_work);
+ flush_work(&dev->reset_work);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
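
Condensed, the interrupt-setup change in nvme_pci_enable() amounts to: always try one MSI-X vector, fall back to MSI, and fail only if neither produced a vector. A sketch against the 4.6-era pci_enable_msix()/pci_enable_msi() calls used in the hunk; pre_enable_setup_vector() is a hypothetical helper, and entry[0].entry is assumed to have been initialized by the caller.

#include <linux/pci.h>

/* Sketch: grab a single setup vector via MSI-X, else MSI, else give up. */
static int pre_enable_setup_vector(struct pci_dev *pdev, struct msix_entry *entry)
{
        if (pci_enable_msix(pdev, entry, 1)) {
                /* No MSI-X: fall back to plain MSI and use the legacy irq field. */
                pci_enable_msi(pdev);
                entry[0].vector = pdev->irq;
        }

        return entry[0].vector ? 0 : -ENODEV;
}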
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 8ba19bba3156..2bb3c5799ac4 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
if (ret)
goto close_banks;
- while (val_size) {
+ while (val_size >= reg_size) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
*buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
}
buf++;
- val_size--;
+ val_size -= reg_size;
offset += reg_size;
}
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 01b9d0a00abc..d11cdbb8fba3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
}
EXPORT_SYMBOL(pci_write_vpd);
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev: pci device struct
+ * @len: size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
/**
@@ -498,9 +511,23 @@ out:
return ret ? ret : count;
}
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_vpd *vpd = dev->vpd;
+
+ if (len == 0 || len > PCI_VPD_MAX_SIZE)
+ return -EIO;
+
+ vpd->valid = 1;
+ vpd->len = len;
+
+ return 0;
+}
+
static const struct pci_vpd_ops pci_vpd_ops = {
.read = pci_vpd_read,
.write = pci_vpd_write,
+ .set_size = pci_vpd_set_size,
};
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
return ret;
}
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ int ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_set_vpd_size(tdev, len);
+ pci_dev_put(tdev);
+ return ret;
+}
+
static const struct pci_vpd_ops pci_vpd_f0_ops = {
.read = pci_vpd_f0_read,
.write = pci_vpd_f0_write,
+ .set_size = pci_vpd_f0_set_size,
};
int pci_vpd_init(struct pci_dev *dev)
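
pci_set_vpd_size(), added above, exists for drivers that know their device's true VPD extent better than what the core infers from parsing the VPD header. A hypothetical caller from a driver probe might look like this; the 8 KB figure and example_probe() are illustrative only.

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int ret;

        /* Tell the core this device really exposes 8 KB of VPD. */
        ret = pci_set_vpd_size(pdev, 8 * 1024);
        if (ret)
                dev_warn(&pdev->dev, "could not set VPD size: %d\n", ret);

        return 0;
}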
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a2755a164..2f817fa4c661 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -32,7 +32,7 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
struct imx6_pcie {
- struct gpio_desc *reset_gpio;
+ int reset_gpio;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
- if (imx6_pcie->reset_gpio) {
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
msleep(100);
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
}
return 0;
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
struct device_node *node = pdev->dev.of_node;
int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ }
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
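
The imx6 probe now uses the legacy integer GPIO API for an optional reset line: look up "reset-gpio" in the device tree, treat an invalid number as "no reset line on this board", and request it as a low output when present. The same pattern in a small self-contained sketch (request_optional_reset_gpio() is a hypothetical helper).

#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>

static int request_optional_reset_gpio(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        int gpio = of_get_named_gpio(np, "reset-gpio", 0);

        if (!gpio_is_valid(gpio))
                return 0;       /* board simply has no reset GPIO */

        return devm_gpio_request_one(&pdev->dev, gpio,
                                     GPIOF_OUT_INIT_LOW, "PCIe reset");
}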
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e982010f0ed1..342b6918bbde 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8 *) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+ if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
size = dev->cfg_size;
else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d0fb93481573..a814bbb80fcb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+ int (*set_size)(struct pci_dev *dev, size_t len);
};
struct pci_vpd {
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 32346b5a8a11..f70090897fdf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
- /* Restore and enable the counter */
- armpmu_start(event, PERF_EF_RELOAD);
+ /*
+ * Restore and enable the counter.
+ * armpmu_start() indirectly calls
+ *
+ * perf_event_update_userpage()
+ *
+ * that requires RCU read locking to be functional,
+ * wrap the call within RCU_NONIDLE to make the
+ * RCU subsystem aware this cpu is not idle from
+ * an RCU perspective for the armpmu_start() call
+ * duration.
+ */
+ RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
break;
default:
break;
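
The comment added above is the whole story: armpmu_start() reaches perf_event_update_userpage(), which needs RCU to be watching, while CPU_PM notifiers can run on a CPU that RCU considers idle. RCU_NONIDLE() from <linux/rcupdate.h> wraps exactly one statement and keeps RCU aware of the CPU for its duration. A minimal sketch with a hypothetical callback standing in for the PMU restart:

#include <linux/rcupdate.h>

/* Hypothetical callback standing in for any code that relies on RCU readers. */
extern void restart_counter_needing_rcu(void);

static void cpu_pm_exit_path(void)
{
        /* RCU treats this CPU as non-idle only while the wrapped call runs. */
        RCU_NONIDLE(restart_counter_needing_rcu());
}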
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 77e2d02e6bee..793ecb6d87bc 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
+ if (!dev->parent || !dev->parent->of_node)
+ return -ENODEV;
+
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (IS_ERR(dp))
return -ENOMEM;
@@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
return ret;
}
- dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ dp->grf = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(dp->grf)) {
- dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
+ dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
return PTR_ERR(dp->grf);
}
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 887b4c27195f..6ebcf3e41c46 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
struct regmap *grf;
unsigned int reg_offset;
- grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (!dev->parent || !dev->parent->of_node)
+ return -ENODEV;
+
+ grf = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(grf)) {
dev_err(dev, "Missing rockchip,grf property\n");
return PTR_ERR(grf);
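
Both Rockchip PHY hunks replace the "rockchip,grf" phandle lookup with syscon_node_to_regmap() on the parent's node, i.e. the PHY is now expected to be a child of the GRF syscon in the device tree. The lookup in isolation (grf_from_parent() is a hypothetical helper; the -ENODEV check mirrors the hunks):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static struct regmap *grf_from_parent(struct device *dev)
{
        if (!dev->parent || !dev->parent->of_node)
                return ERR_PTR(-ENODEV);

        return syscon_node_to_regmap(dev->parent->of_node);
}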
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index debe1219d76d..fc8cbf611723 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -2,6 +2,7 @@ config PINCTRL_IMX
bool
select PINMUX
select PINCONF
+ select REGMAP
config PINCTRL_IMX1_CORE
bool
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 2bbe6f7964a7..6ab8c3ccdeea 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
int eint_num, virq, eint_offset;
unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
- static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+ 128000, 256000};
const struct mtk_desc_pin *pin;
struct irq_data *d;
@@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
if (!mtk_eint_can_en_debounce(pctl, eint_num))
return -ENOSYS;
- dbnc = ARRAY_SIZE(dbnc_arr);
- for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
- if (debounce <= dbnc_arr[i]) {
+ dbnc = ARRAY_SIZE(debounce_time);
+ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+ if (debounce <= debounce_time[i]) {
dbnc = i;
break;
}
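
The MediaTek table now stores the supported debounce times themselves, in microseconds, so comparing the caller's requested debounce (also in µs) against the entries picks the smallest setting that covers it; the index doubles as the register code. A standalone sketch of that lookup, with the table values copied from the hunk and everything else illustrative:

#include <stdio.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

/* Supported debounce times in microseconds; the index is the register code. */
static const unsigned int debounce_time[] = {
        500, 1000, 16000, 32000, 64000, 128000, 256000
};

static unsigned int debounce_to_reg(unsigned int debounce_us)
{
        unsigned int i, dbnc = ARRAY_SIZE(debounce_time);       /* out of range */

        for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
                if (debounce_us <= debounce_time[i]) {
                        dbnc = i;       /* smallest setting that covers it */
                        break;
                }
        }
        return dbnc;
}

int main(void)
{
        printf("20000 us -> index %u\n", debounce_to_reg(20000));  /* 3: 32 ms bucket */
        return 0;
}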
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb126d56ad40..cf9bafa10acf 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
/* Parse pins in each row from LSB */
while (mask) {
- bit_pos = ffs(mask);
+ bit_pos = __ffs(mask);
pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
- mask_pos = ((pcs->fmask) << (bit_pos - 1));
+ mask_pos = ((pcs->fmask) << bit_pos);
val_pos = val & mask_pos;
submask = mask & mask_pos;
@@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "pinctrl-single,function-mask",
&pcs->fmask);
if (!ret) {
- pcs->fshift = ffs(pcs->fmask) - 1;
+ pcs->fshift = __ffs(pcs->fmask);
pcs->fmax = pcs->fmask >> pcs->fshift;
} else {
/* If mask property doesn't exist, function mux is invalid. */
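
ffs() numbers bits from 1 (ffs(0x100) == 9) while __ffs() numbers them from 0, so with __ffs() the raw bit position can be fed straight into both the shift and the bit_pos / bits_per_pin division. A tiny userspace illustration of the off-by-one relationship, with __builtin_ctz() standing in for the kernel's __ffs() (both are defined only for non-zero input):

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int fmask = 0x0000ff00;   /* example pinctrl-single,function-mask */

        int one_based  = ffs(fmask);             /* 9: bits counted from 1 */
        int zero_based = __builtin_ctz(fmask);   /* 8: kernel __ffs() semantics */

        /* fshift comes out the same either way... */
        printf("ffs()-1 = %d, __ffs() = %d\n", one_based - 1, zero_based);
        /* ...but only the 0-based value is usable directly as a bit index */
        printf("fmax = 0x%x\n", fmask >> zero_based);
        return 0;
}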
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cba4455..09356684c32f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
arg0.integer.value = reg;
status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
*ret = lret;
- return (status != AE_OK) ? -EINVAL : 0;
+ return 0;
}
/**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
DEFINE_CONV(normal, 1, 2, 3);
DEFINE_CONV(y_inverted, 1, -2, 3);
DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
DEFINE_CONV(z_inverted, 1, 2, -3);
DEFINE_CONV(xy_swap, 2, 1, 3);
DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index f93abc8c1424..a818db6aa08f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
}
static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+ .freeze = intel_hid_pl_suspend_handler,
+ .restore = intel_hid_pl_resume_handler,
.suspend = intel_hid_pl_suspend_handler,
.resume = intel_hid_pl_resume_handler,
};
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 3fb1d85c70a8..6f497e80c9df 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %pR\n", res);
- /* This is index 0 to cover BIOS data register */
punit_res = punit_res_array;
+ /* This is index 0 to cover BIOS data register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_DATA_INDEX);
if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
*punit_res = *res;
dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
+ /* This is index 1 to cover BIOS interface register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_IFACE_INDEX);
if (!res) {
dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
return -ENXIO;
}
- /* This is index 1 to cover BIOS interface register */
*++punit_res = *res;
dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
+ /* This is index 2 to cover ISP data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
}
- /* This is index 2 to cover ISP data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
+ /* This is index 3 to cover ISP interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
}
- /* This is index 3 to cover ISP interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
+ /* This is index 4 to cover GTD data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
}
- /* This is index 4 to cover GTD data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
+ /* This is index 5 to cover GTD interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
}
- /* This is index 5 to cover GTD interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_IPC_INDEX);
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd875409a02d..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
+ /*
+ * The following resources are required
+ * - BIOS_IPC BASE_DATA
+ * - BIOS_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
+ /*
+ * The following resources are optional
+ * - ISPDRIVER_IPC BASE_DATA
+ * - ISPDRIVER_IPC BASE_IFACE
+ * - GTDRIVER_IPC BASE_DATA
+ * - GTDRIVER_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ }
return 0;
}
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 397119f83e82..781bd10ca7ac 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
{
u32 telem_ctrl = 0;
- int ret;
+ int ret = 0;
mutex_lock(&(telm_conf->telem_lock));
if (ioss_period) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e305ab541a22..9255ff3ee81a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
fan_update_desired_level(s);
mutex_unlock(&fan_mutex);
+ if (rc)
+ return rc;
if (status)
*status = s;
- return rc;
+ return 0;
}
static int fan_get_speed(unsigned int *speed)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a76a862..01e12d221a8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
-#define HCI_HOTKEY_ENABLE 0x01
+#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 7225ac6b3df5..fad968eb75f6 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
.max_register = FTM_PWMLOAD,
.volatile_reg = fsl_pwm_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
static int fsl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5d4d91846357..e165b7ce29d7 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -126,7 +126,7 @@ struct rio_mport_mapping {
struct list_head node;
struct mport_dev *md;
enum rio_mport_map_dir dir;
- u32 rioid;
+ u16 rioid;
u64 rio_addr;
dma_addr_t phys_addr; /* for mmap */
void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@ struct rio_mport_mapping {
struct rio_mport_dma_map {
int valid;
- uint64_t length;
+ u64 length;
void *vaddr;
dma_addr_t paddr;
};
@@ -208,7 +208,7 @@ struct mport_cdev_priv {
struct kfifo event_fifo;
wait_queue_head_t event_rx_wait;
spinlock_t fifo_lock;
- unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+ u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_chan *dmach;
struct list_head async_list;
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
return -EFAULT;
if ((maint_io.offset % 4) ||
- (maint_io.length == 0) || (maint_io.length % 4))
+ (maint_io.length == 0) || (maint_io.length % 4) ||
+ (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
return -EINVAL;
buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
offset += 4;
}
- if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
+ if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
+ buffer, maint_io.length)))
ret = -EFAULT;
out:
vfree(buffer);
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
return -EFAULT;
if ((maint_io.offset % 4) ||
- (maint_io.length == 0) || (maint_io.length % 4))
+ (maint_io.length == 0) || (maint_io.length % 4) ||
+ (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
return -EINVAL;
buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
return -ENOMEM;
length = maint_io.length;
- if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
+ if (unlikely(copy_from_user(buffer,
+ (void __user *)(uintptr_t)maint_io.buffer, length))) {
ret = -EFAULT;
goto out;
}
@@ -360,7 +364,7 @@ out:
*/
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
- u32 rioid, u64 raddr, u32 size,
+ u16 rioid, u64 raddr, u32 size,
dma_addr_t *paddr)
{
struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
- map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
@@ -394,7 +398,7 @@ err_map_outb:
static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
- u32 rioid, u64 raddr, u32 size,
+ u16 rioid, u64 raddr, u32 size,
dma_addr_t *paddr)
{
struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
dma_addr_t paddr;
int ret;
- if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+ if (unlikely(copy_from_user(&map, arg, sizeof(map))))
return -EFAULT;
rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
map.handle = paddr;
- if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
+ if (unlikely(copy_to_user(arg, &map, sizeof(map))))
return -EFAULT;
return 0;
}
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
if (!md->mport->ops->unmap_outb)
return -EPROTONOSUPPORT;
- if (copy_from_user(&handle, arg, sizeof(u64)))
+ if (copy_from_user(&handle, arg, sizeof(handle)))
return -EFAULT;
rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
struct mport_dev *md = priv->md;
- uint16_t hdid;
+ u16 hdid;
- if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
+ if (copy_from_user(&hdid, arg, sizeof(hdid)))
return -EFAULT;
md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
struct mport_dev *md = priv->md;
- uint32_t comptag;
+ u32 comptag;
- if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
+ if (copy_from_user(&comptag, arg, sizeof(comptag)))
return -EFAULT;
rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@ err_out:
* @xfer: data transfer descriptor structure
*/
static int
-rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
+rio_dma_transfer(struct file *filp, u32 transfer_mode,
enum rio_transfer_sync sync, enum dma_data_direction dir,
struct rio_transfer_io *xfer)
{
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
unsigned long offset;
long pinned;
- offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
+ offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
page_list = kmalloc_array(nr_pages,
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
return -EFAULT;
- if (transaction.count != 1)
+ if (transaction.count != 1) /* only single transfer for now */
return -EINVAL;
if ((transaction.transfer_mode &
priv->md->properties.transfer_mode) == 0)
return -ENODEV;
- transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
+ transfer = vmalloc(transaction.count * sizeof(*transfer));
if (!transfer)
return -ENOMEM;
- if (unlikely(copy_from_user(transfer, transaction.block,
- transaction.count * sizeof(struct rio_transfer_io)))) {
+ if (unlikely(copy_from_user(transfer,
+ (void __user *)(uintptr_t)transaction.block,
+ transaction.count * sizeof(*transfer)))) {
ret = -EFAULT;
goto out_free;
}
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
ret = rio_dma_transfer(filp, transaction.transfer_mode,
transaction.sync, dir, &transfer[i]);
- if (unlikely(copy_to_user(transaction.block, transfer,
- transaction.count * sizeof(struct rio_transfer_io))))
+ if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
+ transfer,
+ transaction.count * sizeof(*transfer))))
ret = -EFAULT;
out_free:
@@ -1129,11 +1135,11 @@ err_tmo:
}
static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
- uint64_t size, struct rio_mport_mapping **mapping)
+ u64 size, struct rio_mport_mapping **mapping)
{
struct rio_mport_mapping *map;
- map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
struct rio_mport_mapping *mapping = NULL;
int ret;
- if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
+ if (unlikely(copy_from_user(&map, arg, sizeof(map))))
return -EFAULT;
ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
map.dma_handle = mapping->phys_addr;
- if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
+ if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
mutex_lock(&md->buf_mutex);
kref_put(&mapping->ref, mport_release_mapping);
mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
int ret = -EFAULT;
struct rio_mport_mapping *map, *_map;
- if (copy_from_user(&handle, arg, sizeof(u64)))
+ if (copy_from_user(&handle, arg, sizeof(handle)))
return -EFAULT;
rmcd_debug(EXIT, "filp=%p", filp);
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
- u64 raddr, u32 size,
+ u64 raddr, u64 size,
struct rio_mport_mapping **mapping)
{
struct rio_mport *mport = md->mport;
struct rio_mport_mapping *map;
int ret;
- map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+ /* rio_map_inb_region() accepts u32 size */
+ if (size > 0xffffffff)
+ return -EINVAL;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
if (raddr == RIO_MAP_ANY_ADDR)
raddr = map->phys_addr;
- ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
+ ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
if (ret < 0)
goto err_map_inb;
@@ -1288,7 +1298,7 @@ err_dma_alloc:
static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
- u64 raddr, u32 size,
+ u64 raddr, u64 size,
struct rio_mport_mapping **mapping)
{
struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
if (!md->mport->ops->map_inb)
return -EPROTONOSUPPORT;
- if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+ if (unlikely(copy_from_user(&map, arg, sizeof(map))))
return -EFAULT;
rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
map.handle = mapping->phys_addr;
map.rio_addr = mapping->rio_addr;
- if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
+ if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
/* Delete mapping if it was created by this request */
if (ret == 0 && mapping->filp == filp) {
mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
if (!md->mport->ops->unmap_inb)
return -EPROTONOSUPPORT;
- if (copy_from_user(&handle, arg, sizeof(u64)))
+ if (copy_from_user(&handle, arg, sizeof(handle)))
return -EFAULT;
mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
struct mport_dev *md = priv->md;
- uint32_t port_idx = md->mport->index;
+ u32 port_idx = md->mport->index;
rmcd_debug(MPORT, "port_index=%d", port_idx);
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
handled = 0;
spin_lock(&data->db_lock);
list_for_each_entry(db_filter, &data->doorbells, data_node) {
- if (((db_filter->filter.rioid == 0xffffffff ||
+ if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
db_filter->filter.rioid == src)) &&
info >= db_filter->filter.low &&
info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
if (copy_from_user(&filter, arg, sizeof(filter)))
return -EFAULT;
+ if (filter.low > filter.high)
+ return -EINVAL;
+
spin_lock_irqsave(&priv->md->db_lock, flags);
list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
return -EEXIST;
}
- size = sizeof(struct rio_dev);
+ size = sizeof(*rdev);
mport = md->mport;
- destid = (u16)dev_info.destid;
- hopcount = (u8)dev_info.hopcount;
+ destid = dev_info.destid;
+ hopcount = dev_info.hopcount;
if (rio_mport_read_config_32(mport, destid, hopcount,
RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
do {
rdev = rio_get_comptag(dev_info.comptag, rdev);
if (rdev && rdev->dev.parent == &mport->net->dev &&
- rdev->destid == (u16)dev_info.destid &&
- rdev->hopcount == (u8)dev_info.hopcount)
+ rdev->destid == dev_info.destid &&
+ rdev->hopcount == dev_info.hopcount)
break;
} while (rdev);
}
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp,
return maint_port_idx_get(data, (void __user *)arg);
case RIO_MPORT_GET_PROPERTIES:
md->properties.hdid = md->mport->host_deviceid;
- if (copy_to_user((void __user *)arg, &(data->md->properties),
- sizeof(data->md->properties)))
+ if (copy_to_user((void __user *)arg, &(md->properties),
+ sizeof(md->properties)))
return -EFAULT;
return 0;
case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp,
case RIO_DISABLE_PORTWRITE_RANGE:
return rio_mport_remove_pw_filter(data, (void __user *)arg);
case RIO_SET_EVENT_MASK:
- data->event_mask = arg;
+ data->event_mask = (u32)arg;
return 0;
case RIO_GET_EVENT_MASK:
if (copy_to_user((void __user *)arg, &data->event_mask,
- sizeof(data->event_mask)))
+ sizeof(u32)))
return -EFAULT;
return 0;
case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf,
return -EINVAL;
ret = rio_mport_send_doorbell(mport,
- (u16)event.u.doorbell.rioid,
+ event.u.doorbell.rioid,
event.u.doorbell.payload);
if (ret < 0)
return ret;
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
struct mport_dev *md;
struct rio_mport_attr attr;
- md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
if (!md) {
rmcd_error("Unable allocate a device object");
return NULL;
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
/* The transfer_mode property will be returned through mport query
* interface
*/
-#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
+#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
@@ -2669,9 +2682,9 @@ static int __init mport_init(void)
/* Create device class needed by udev */
dev_class = class_create(THIS_MODULE, DRV_NAME);
- if (!dev_class) {
+ if (IS_ERR(dev_class)) {
rmcd_error("Unable to create " DRV_NAME " class");
- return -EINVAL;
+ return PTR_ERR(dev_class);
}
ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
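
Several of the rio_mport_cdev fixes follow one rule: a user-space buffer address carried in a u64 field must be cast back through uintptr_t to a __user pointer before copy_from_user()/copy_to_user(), and copy lengths should be sizeof(variable) rather than a spelled-out type. A minimal sketch of that ioctl shape; struct example_xfer and example_ioctl_copy() are illustrative, not the mport ABI.

#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative ioctl payload: a user buffer address carried as a plain u64. */
struct example_xfer {
        __u64 buffer;   /* user-space pointer, kept 64-bit for 32/64-bit compat */
        __u32 length;
};

static int example_ioctl_copy(void __user *arg)
{
        struct example_xfer xfer;

        if (copy_from_user(&xfer, arg, sizeof(xfer)))
                return -EFAULT;

        /* Cast through uintptr_t to turn the u64 back into a __user pointer. */
        if (copy_to_user((void __user *)(uintptr_t)xfer.buffer,
                         &xfer.length, sizeof(xfer.length)))
                return -EFAULT;

        return 0;
}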
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2156ee5bae1..ecb7dbae9be9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -863,7 +863,7 @@ out:
* A user-initiated temperature conversion is not started by this function,
* so the temperature is updated once every 64 seconds.
*/
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
- s16 temp;
+ s32 temp;
ret = ds3231_hwmon_read_temp(dev, &temp);
if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
return PTR_ERR(ds1307->rtc);
}
- if (ds1307_can_wakeup_device) {
+ if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
/* Disable request for an IRQ */
want_irq = false;
dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1bce9cf51b1e..b83908670a9a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
- device_unregister(&dev_info->dev);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
segment_unload(entry->segment_name);
- put_device(&dev_info->dev);
up_write(&dcssblk_devices_sem);
+ device_unregister(&dev_info->dev);
+ put_device(&dev_info->dev);
+
rc = count;
out_buf:
kfree(local_buf);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896deccb..e6f54d3b8969 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
if (req->cmd_type != REQ_TYPE_FS) {
blk_start_request(req);
blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
- blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, -EIO);
continue;
}
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
+ unsigned long copied;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+ copied = PAGE_SIZE -
+ copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+ if (offsetof(struct sccb_header, length) +
+ sizeof(sccb->length) > copied || sccb->length > copied) {
rc = -EFAULT;
goto out_free;
}
- if (sccb->length > PAGE_SIZE || sccb->length < 8)
- return -EINVAL;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
- rc = -EFAULT;
+ if (sccb->length < 8) {
+ rc = -EINVAL;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f3bb7af4e984..ead83a24bcd1 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
{
struct flowi6 fl;
+ memset(&fl, 0, sizeof(fl));
if (saddr)
memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
if (daddr)
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 57e781c71e67..837effe19907 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
/*
- * With CONFIG_PM disabled turn on all domains to make the
- * hardware usable.
+ * Initially turn on all domains to make the domains usable
+ * with !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off during
+ * late_init time.
*/
- if (!IS_ENABLED(CONFIG_PM))
- genpd->power_on(genpd);
+ genpd->power_on(genpd);
- pm_genpd_init(genpd, NULL, true);
+ pm_genpd_init(genpd, NULL, false);
}
/*
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04028a3..be72a8e5f221 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
static int vpfe_update_pipe_state(struct vpfe_video_device *video)
{
struct vpfe_pipeline *pipe = &video->pipe;
+ int ret;
- if (vpfe_prepare_pipeline(video))
- return vpfe_prepare_pipeline(video);
+ ret = vpfe_prepare_pipeline(video);
+ if (ret)
+ return ret;
/*
* Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
*/
if (pipe->input_num == 0) {
pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
- if (vpfe_update_current_ext_subdev(video)) {
+ ret = vpfe_update_current_ext_subdev(video);
+ if (ret) {
pr_err("Invalid external subdev\n");
- return vpfe_update_current_ext_subdev(video);
+ return ret;
}
} else {
pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
struct v4l2_subdev *subdev;
struct v4l2_format format;
struct media_pad *remote;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
sd_fmt.pad = remote->index;
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
/* get output format of remote subdev */
- if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) {
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev,
"invalid remote subdev for video node\n");
- return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ return ret;
}
/* convert to pix format */
mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
/* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
return -EBUSY;
}
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
video->fmt = *fmt;
return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
/*
* If streaming is started return device busy
* error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
/* Call decoder driver function to set the standard */
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
sdinfo = video->current_ext_subdev;
/* If streaming is started, return device busy error */
if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
if (video->io_usrs != 0) {
v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- if (vb2_queue_init(q)) {
+ ret = vb2_queue_init(q);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
- return vb2_queue_init(q);
+ return ret;
}
fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
vpfe_stop_capture(video);
ret = vb2_streamoff(&video->buffer_queue, buf_type);
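
Every vpfe_video hunk is the same cleanup: call the function once, keep its return value, and return that, instead of invoking the function a second time just to obtain the error code (which redoes the work and, for calls like mutex_lock_interruptible(), may not even fail the same way twice). The shape of the change in miniature, with a hypothetical prepare_pipeline() helper:

/* Hypothetical helper that may fail. */
extern int prepare_pipeline(void);

static int before(void)
{
        if (prepare_pipeline())
                return prepare_pipeline();      /* runs the work twice */
        return 0;
}

static int after(void)
{
        int ret = prepare_pipeline();           /* run once, remember the result */

        if (ret)
                return ret;
        return 0;
}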
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
- Remove unneeded file entries in sysfs
- Remove software processing of IB protocol and place in library for use
by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5fb6c1..c1c5bf82addb 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <rdma/ib.h>
+
#include "hfi.h"
#include "pio.h"
#include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
int uctxt_required = 1;
int must_be_root = 0;
+ /* FIXME: This interface cannot continue out of staging */
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd)) {
ret = -EINVAL;
goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd->rcd[uctxt->ctxt] = NULL;
+
+ hfi1_user_exp_rcv_free(fdata);
+ hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
uctxt->rcvwait_to = 0;
uctxt->piowait_to = 0;
uctxt->rcvnowait = 0;
uctxt->pionowait = 0;
uctxt->event_flags = 0;
- hfi1_user_exp_rcv_free(fdata);
- hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
hfi1_stats.sps_ctxts--;
if (++dd->freectxts == dd->num_user_contexts)
aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
static int user_init(struct file *fp)
{
- int ret;
unsigned int rcvctrl_ops = 0;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
/* make sure that the context has already been setup */
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
- ret = -EFAULT;
- goto done;
- }
-
- /*
- * Subctxts don't need to initialize anything since master
- * has done it.
- */
- if (fd->subctxt) {
- ret = wait_event_interruptible(uctxt->wait, !test_bit(
- HFI1_CTXT_MASTER_UNINIT,
- &uctxt->event_flags));
- goto expected;
- }
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return -EFAULT;
/* initialize poll variables... */
uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
wake_up(&uctxt->wait);
}
-expected:
- /*
- * Expected receive has to be setup for all processes (including
- * shared contexts). However, it has to be done after the master
- * context has been fully configured as it depends on the
- * eager/expected split of the RcvArray entries.
- * Setting it up here ensures that the subcontexts will be waiting
- * (due to the above wait_event_interruptible() until the master
- * is setup.
- */
- ret = hfi1_user_exp_rcv_init(fp);
-done:
- return ret;
+ return 0;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
int ret = 0;
/*
- * Context should be set up only once (including allocation and
+ * Context should be set up only once, including allocation and
* programming of eager buffers. This is done if context sharing
* is not requested or by the master process.
*/
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp)
if (ret)
goto done;
}
+ } else {
+ ret = wait_event_interruptible(uctxt->wait, !test_bit(
+ HFI1_CTXT_MASTER_UNINIT,
+ &uctxt->event_flags));
+ if (ret)
+ goto done;
}
+
ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
if (ret)
goto done;
+ /*
+ * Expected receive has to be setup for all processes (including
+ * shared contexts). However, it has to be done after the master
+ * context has been fully configured as it depends on the
+ * eager/expected split of the RcvArray entries.
+ * Setting it up here ensures that the subcontexts will be waiting
+ * (due to the above wait_event_interruptible()) until the master
+ * is setup.
+ */
+ ret = hfi1_user_exp_rcv_init(fp);
+ if (ret)
+ goto done;
set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
struct hfi1_devdata *dd = filp->private_data;
- switch (whence) {
- case SEEK_SET:
- break;
- case SEEK_CUR:
- offset += filp->f_pos;
- break;
- case SEEK_END:
- offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
- offset;
- break;
- default:
- return -EINVAL;
- }
-
- if (offset < 0)
- return -EINVAL;
-
- if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
- return -EINVAL;
-
- filp->f_pos = offset;
-
- return filp->f_pos;
+ return fixed_size_llseek(filp, offset, whence,
+ (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
}
/* NOTE: assumes unsigned long is 8 bytes */
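
fixed_size_llseek() from <linux/fs.h> implements the SEEK_SET/SEEK_CUR/SEEK_END handling and bounds checking that ui_lseek() used to open-code, clamped to a fixed-size region. Typical usage for a fixed-size char device (example_llseek() and REGION_SIZE are placeholders):

#include <linux/fs.h>

#define REGION_SIZE 4096        /* placeholder for the device's register span */

static loff_t example_llseek(struct file *filp, loff_t offset, int whence)
{
        /* Handles SEEK_SET/SEEK_CUR/SEEK_END and range clamping internally. */
        return fixed_size_llseek(filp, offset, whence, REGION_SIZE);
}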
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad0164ea9a..b3f0682a36c9 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+ struct mm_struct *,
unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
rbnode = rb_entry(node, struct mmu_rb_node, node);
rb_erase(node, root);
if (handler->ops->remove)
- handler->ops->remove(root, rbnode, false);
+ handler->ops->remove(root, rbnode, NULL);
}
}
@@ -176,7 +177,7 @@ unlock:
return ret;
}
-/* Caller must host handler lock */
+/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
unsigned long addr,
unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}
+/* Caller must *not* hold handler lock. */
static void __mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node, bool arg)
+ struct mmu_rb_node *node, struct mm_struct *mm)
{
+ unsigned long flags;
+
/* Validity of handler and node pointers has been checked by caller. */
hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
node->len);
+ spin_lock_irqsave(&handler->lock, flags);
__mmu_int_rb_remove(node, handler->root);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
if (handler->ops->remove)
- handler->ops->remove(handler->root, node, arg);
+ handler->ops->remove(handler->root, node, mm);
}
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
struct mmu_rb_handler *handler = find_mmu_handler(root);
- unsigned long flags;
if (!handler || !node)
return;
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_rb_remove(handler, node, false);
- spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, NULL);
}
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
static inline void mmu_notifier_page(struct mmu_notifier *mn,
struct mm_struct *mm, unsigned long addr)
{
- mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+ mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- mmu_notifier_mem_invalidate(mn, start, end);
+ mmu_notifier_mem_invalidate(mn, mm, start, end);
}
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+ struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
struct rb_root *root = handler->root;
- struct mmu_rb_node *node;
+ struct mmu_rb_node *node, *ptr = NULL;
unsigned long flags;
spin_lock_irqsave(&handler->lock, flags);
- for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
- node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+ for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+ node; node = ptr) {
+ /* Guard against node removal. */
+ ptr = __mmu_int_rb_iter_next(node, start, end - 1);
hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
node->addr, node->len);
- if (handler->ops->invalidate(root, node))
- __mmu_rb_remove(handler, node, true);
+ if (handler->ops->invalidate(root, node)) {
+ spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, mm);
+ spin_lock_irqsave(&handler->lock, flags);
+ }
}
spin_unlock_irqrestore(&handler->lock, flags);
}
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fdb8a18..19a306e83c7d 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@ struct mmu_rb_node {
struct mmu_rb_ops {
bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
int (*insert)(struct rb_root *, struct mmu_rb_node *);
- void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+ void (*remove)(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
};
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad28019b..dc9119e1b458 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
* do the flush work until that QP's
* sdma work has finished.
*/
+ spin_lock(&qp->s_lock);
if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
+ spin_unlock(&qp->s_lock);
}
/**
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e095df8d..8bd56d5c783d 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
struct tid_group *, struct page **, unsigned);
static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_group *grp, *gptr;
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return 0;
/*
* The notifier would have been removed when the process'es mm
* was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+ mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
continue;
if (HFI1_CAP_IS_USET(TID_UNMAP))
mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu, false);
+ &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root,
&node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
}
static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
- bool notifier)
+ struct mm_struct *mm)
{
struct hfi1_filedata *fdata =
container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a42000f..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+ unsigned);
static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
- if (rb_node)
+ if (rb_node && !IS_ERR(rb_node))
node = container_of(rb_node, struct sdma_mmu_node, rb);
+ else
+ rb_node = NULL;
if (!node) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
goto bail;
}
if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, pinned);
+ unpin_vector_pages(current->mm, pages, node->npages,
+ pinned);
ret = -EFAULT;
goto bail;
}
@@ -1147,9 +1152,9 @@ bail:
}
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned npages)
+ unsigned start, unsigned npages)
{
- hfi1_release_user_pages(mm, pages, npages, 0);
+ hfi1_release_user_pages(mm, pages + start, npages, 0);
kfree(pages);
}
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
&req->pq->sdma_rb_root,
(unsigned long)req->iovs[i].iov.iov_base,
req->iovs[i].iov.iov_len);
- if (!mnode)
+ if (!mnode || IS_ERR(mnode))
continue;
node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
}
static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
- bool notifier)
+ struct mm_struct *mm)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
node->pq->n_locked -= node->npages;
spin_unlock(&node->pq->evict_lock);
- unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+ /*
+ * If mm is set, we are being called by the MMU notifier and we
+ * should not pass a mm_struct to unpin_vector_pages(). This is to
+ * prevent a deadlock when hfi1_release_user_pages() attempts to
+ * take the mmap_sem, which the MMU notifier has already taken.
+ */
+ unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
node->npages);
/*
* If called by the MMU notifier, we have to adjust the pinned
* page count ourselves.
*/
- if (notifier)
- current->mm->pinned_vm -= node->npages;
+ if (mm)
+ mm->pinned_vm -= node->npages;
kfree(node);
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c37eedc35a24..3c3dc4a3d52c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -376,6 +376,8 @@ config MTK_THERMAL
tristate "Temperature sensor driver for mediatek SoCs"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
+ depends on NVMEM || NVMEM=n
+ depends on RESET_CONTROLLER
default y
help
Enable this option if you want to have support for thermal management
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..5e820b541506 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
* Every step equals (1 * 200) / 255 celsius, and finally
* need convert to millicelsius.
*/
- return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+ return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
}
static inline long _temp_to_step(long temp)
{
- return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+ return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
}
static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 3d93b1c07cee..507632b9648e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -27,7 +27,6 @@
#include <linux/thermal.h>
#include <linux/reset.h>
#include <linux/types.h>
-#include <linux/nvmem-consumer.h>
/* AUXADC Registers */
#define AUXADC_CON0_V 0x000
@@ -619,7 +618,7 @@ static struct platform_driver mtk_thermal_driver = {
module_platform_driver(mtk_thermal_driver);
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
MODULE_DESCRIPTION("Mediatek thermal driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 49ac23d3e776..d8ec44b194d6 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
* otherwise, it returns a corresponding ERR_PTR(). Caller must
* check the return value with help of IS_ERR() helper.
*/
-static struct __thermal_zone *
-thermal_of_build_thermal_zone(struct device_node *np)
+static struct __thermal_zone
+__init *thermal_of_build_thermal_zone(struct device_node *np)
{
struct device_node *child = NULL, *gchild;
struct __thermal_zone *tz;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..2f1a863a8e15 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
capped_extra_power = 0;
extra_power = 0;
for (i = 0; i < num_actors; i++) {
- u64 req_range = req_power[i] * power_range;
+ u64 req_range = (u64)req_power[i] * power_range;
granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
total_req_power);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d4b54653ecf8..5133cd1e10b7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int trip, ret;
- unsigned long temperature;
+ int temperature;
if (!tz->ops->set_trip_temp)
return -EPERM;
@@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
return -EINVAL;
- if (kstrtoul(buf, 10, &temperature))
+ if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int ret = 0;
- unsigned long temperature;
+ int temperature;
- if (kstrtoul(buf, 10, &temperature))
+ if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
if (!tz->ops->set_emul_temp) {
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
struct thermal_zone_device *tz = to_thermal_zone(dev); \
\
if (tz->tzp) \
- return sprintf(buf, "%u\n", tz->tzp->name); \
+ return sprintf(buf, "%d\n", tz->tzp->name); \
else \
return -EIO; \
} \
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e16a49b507ef..cf0dc51a2690 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
*/
static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
- struct inode *ptm_inode, int idx)
+ struct file *file, int idx)
{
/* Master must be open via /dev/ptmx */
return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
*/
static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
- struct inode *pts_inode, int idx)
+ struct file *file, int idx)
{
struct tty_struct *tty;
mutex_lock(&devpts_mutex);
- tty = devpts_get_priv(pts_inode);
+ tty = devpts_get_priv(file->f_path.dentry);
mutex_unlock(&devpts_mutex);
/* Master must be open before slave */
if (!tty)
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
/* this is called once with whichever end is closed last */
static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- struct inode *ptmx_inode;
+ struct pts_fs_info *fsi;
if (tty->driver->subtype == PTY_TYPE_MASTER)
- ptmx_inode = tty->driver_data;
+ fsi = tty->driver_data;
else
- ptmx_inode = tty->link->driver_data;
- devpts_kill_index(ptmx_inode, tty->index);
- devpts_del_ref(ptmx_inode);
+ fsi = tty->link->driver_data;
+ devpts_kill_index(fsi, tty->index);
+ devpts_put_ref(fsi);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -720,8 +720,9 @@ static const struct tty_operations pty_unix98_ops = {
static int ptmx_open(struct inode *inode, struct file *filp)
{
+ struct pts_fs_info *fsi;
struct tty_struct *tty;
- struct inode *slave_inode;
+ struct dentry *dentry;
int retval;
int index;
@@ -734,54 +735,46 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
+ fsi = devpts_get_ref(inode, filp);
+ retval = -ENODEV;
+ if (!fsi)
+ goto out_free_file;
+
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
- index = devpts_new_index(inode);
- if (index < 0) {
- retval = index;
- mutex_unlock(&devpts_mutex);
- goto err_file;
- }
-
+ index = devpts_new_index(fsi);
mutex_unlock(&devpts_mutex);
- mutex_lock(&tty_mutex);
- tty = tty_init_dev(ptm_driver, index);
+ retval = index;
+ if (index < 0)
+ goto out_put_ref;
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto out;
- }
+ mutex_lock(&tty_mutex);
+ tty = tty_init_dev(ptm_driver, index);
/* The tty returned here is locked so we can safely
drop the mutex */
mutex_unlock(&tty_mutex);
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- tty->driver_data = inode;
+ retval = PTR_ERR(tty);
+ if (IS_ERR(tty))
+ goto out;
/*
- * In the case where all references to ptmx inode are dropped and we
- * still have /dev/tty opened pointing to the master/slave pair (ptmx
- * is closed/released before /dev/tty), we must make sure that the inode
- * is still valid when we call the final pty_unix98_shutdown, thus we
- * hold an additional reference to the ptmx inode. For the same /dev/tty
- * last close case, we also need to make sure the super_block isn't
- * destroyed (devpts instance unmounted), before /dev/tty is closed and
- * on its release devpts_kill_index is called.
+ * From here on out, the tty is "live", and the index and
+ * fsi will be killed/put by the tty_release()
*/
- devpts_add_ref(inode);
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = fsi;
tty_add_file(tty, filp);
- slave_inode = devpts_pty_new(inode,
- MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
- tty->link);
- if (IS_ERR(slave_inode)) {
- retval = PTR_ERR(slave_inode);
+ dentry = devpts_pty_new(fsi, index, tty->link);
+ if (IS_ERR(dentry)) {
+ retval = PTR_ERR(dentry);
goto err_release;
}
- tty->link->driver_data = slave_inode;
+ tty->link->driver_data = dentry;
retval = ptm_driver->ops->open(tty, filp);
if (retval)
@@ -793,12 +786,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return 0;
err_release:
tty_unlock(tty);
+ // This will also put-ref the fsi
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
- devpts_kill_index(inode, index);
-err_file:
+ devpts_kill_index(fsi, index);
+out_put_ref:
+ devpts_put_ref(fsi);
+out_free_file:
tty_free_file(filp);
return retval;
}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e213da01a3d7..00ad2637b08c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
/*
* Empty the RX FIFO, we are not interested in anything
* received during the half-duplex transmission.
+ * Enable previously disabled RX interrupts.
*/
- if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
serial8250_clear_fifos(p);
+
+ serial8250_rpm_get(p);
+
+ p->ier |= UART_IER_RLSI | UART_IER_RDI;
+ serial_port_out(&p->port, UART_IER, p->ier);
+
+ serial8250_rpm_put(p);
+ }
}
static void serial8250_em485_handle_stop_tx(unsigned long arg)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 64742a086ae3..4d7cb9c04fce 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -324,7 +324,6 @@ config SERIAL_8250_EM
config SERIAL_8250_RT288X
bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
depends on SERIAL_8250
- depends on MIPS || COMPILE_TEST
default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
help
Selecting this option will add support for the alternate register
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9fdfc8bf47f..d08baa668d5d 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
iowrite32be(val, addr);
}
-static const struct uartlite_reg_ops uartlite_be = {
+static struct uartlite_reg_ops uartlite_be = {
.in = uartlite_inbe32,
.out = uartlite_outbe32,
};
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
iowrite32(val, addr);
}
-static const struct uartlite_reg_ops uartlite_le = {
+static struct uartlite_reg_ops uartlite_le = {
.in = uartlite_inle32,
.out = uartlite_outle32,
};
static inline u32 uart_in32(u32 offset, struct uart_port *port)
{
- const struct uartlite_reg_ops *reg_ops = port->private_data;
+ struct uartlite_reg_ops *reg_ops = port->private_data;
return reg_ops->in(port->membase + offset);
}
static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
{
- const struct uartlite_reg_ops *reg_ops = port->private_data;
+ struct uartlite_reg_ops *reg_ops = port->private_data;
reg_ops->out(val, port->membase + offset);
}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9b04d72e752e..24d5491ef0da 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
* Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
*/
static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
- struct inode *inode, int idx)
+ struct file *file, int idx)
{
struct tty_struct *tty;
if (driver->ops->lookup)
- tty = driver->ops->lookup(driver, inode, idx);
+ tty = driver->ops->lookup(driver, file, idx);
else
tty = driver->ttys[idx];
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
}
/* check whether we're reopening an existing tty */
- tty = tty_driver_lookup_tty(driver, inode, index);
+ tty = tty_driver_lookup_tty(driver, filp, index);
if (IS_ERR(tty)) {
mutex_unlock(&tty_mutex);
goto out;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 83fd30b0577c..a6c4a1b895bd 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -744,11 +744,15 @@ static void acm_tty_flush_chars(struct tty_struct *tty)
int err;
unsigned long flags;
+ if (!cur) /* nothing to do */
+ return;
+
acm->putbuffer = NULL;
err = usb_autopm_get_interface_async(acm->control);
spin_lock_irqsave(&acm->write_lock, flags);
if (err < 0) {
cur->use = 0;
+ acm->putbuffer = cur;
goto out;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f9d42cf23e55..7859d738df41 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -73,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
+
+ /*
+ * Companion device should be either UHCI, OHCI or EHCI host
+ * controller, otherwise skip.
+ */
+ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+ companion->class != CL_EHCI)
+ continue;
+
companion_hcd = pci_get_drvdata(companion);
if (!companion_hcd || !companion_hcd->self.root_hub)
continue;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 14718a9ffcfb..460c855be0d0 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
return retval;
}
-
-static int usb_port_prepare(struct device *dev)
-{
- return 1;
-}
#endif
static const struct dev_pm_ops usb_port_pm_ops = {
#ifdef CONFIG_PM
.runtime_suspend = usb_port_runtime_suspend,
.runtime_resume = usb_port_runtime_resume,
- .prepare = usb_port_prepare,
#endif
};
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index dcb85e3cd5a7..479187c32571 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -312,13 +312,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
static int usb_dev_prepare(struct device *dev)
{
- struct usb_device *udev = to_usb_device(dev);
-
- /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
- if (udev->do_remote_wakeup != device_may_wakeup(dev))
- return 0;
-
- return 1;
+ return 0; /* Implement eventually? */
}
static void usb_dev_complete(struct device *dev)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fa20f5a99d12..34277ced26bd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1150,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
+ WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -1163,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err_usb2phy_power;
+
usb_phy_init(dwc->usb3_phy);
usb_phy_init(dwc->usb2_phy);
ret = phy_init(dwc->usb2_generic_phy);
if (ret < 0)
- return ret;
+ goto err_usb3phy_power;
ret = phy_init(dwc->usb3_generic_phy);
if (ret < 0)
@@ -1200,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
err_usb2phy_init:
phy_exit(dwc->usb2_generic_phy);
+err_usb3phy_power:
+ phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+ phy_power_off(dwc->usb2_generic_phy);
+
return ret;
}
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9ac37fe1b6a7..cebf9e38b60a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
dwc, &dwc3_mode_fops);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
}
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
dwc, &dwc3_testmode_fops);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
dwc, &dwc3_link_state_fops);
if (!file) {
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
}
return 0;
+err2:
+ kfree(dwc->regset);
+
err1:
debugfs_remove_recursive(root);
@@ -686,5 +689,5 @@ err0:
void dwc3_debugfs_exit(struct dwc3 *dwc)
{
debugfs_remove_recursive(dwc->root);
- dwc->root = NULL;
+ kfree(dwc->regset);
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 22e9606d8e08..55da2c7f727f 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "get_sync failed with err %d\n", ret);
- goto err0;
+ goto err1;
}
dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
ret = dwc3_omap_extcon_register(omap);
if (ret < 0)
- goto err2;
+ goto err1;
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
- goto err3;
+ goto err2;
}
dwc3_omap_enable_irqs(omap);
return 0;
-err3:
+err2:
extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
-err2:
- dwc3_omap_disable_irqs(omap);
err1:
pm_runtime_put_sync(dev);
-
-err0:
pm_runtime_disable(dev);
return ret;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index d54a028cdfeb..8e4a1b195e9b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2936,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
+ if (!dwc->gadget_driver)
+ return 0;
+
if (dwc->pullups_connected) {
dwc3_gadget_disable_irq(dwc);
dwc3_gadget_run_stop(dwc, true, true);
@@ -2954,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
struct dwc3_ep *dep;
int ret;
+ if (!dwc->gadget_driver)
+ return 0;
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index de9ffd60fcfa..524e233d48de 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -651,6 +651,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
+ ssp_cap->bReserved = 0;
+ ssp_cap->wReserved = 0;
/* SSAC = 1 (2 attributes) */
ssp_cap->bmAttributes = cpu_to_le32(1);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e21ca2bd6839..15b648cbc75c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
work);
int ret = io_data->req->status ? io_data->req->status :
io_data->req->actual;
+ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
if (io_data->read && ret > 0) {
use_mm(io_data->mm);
@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
- if (io_data->ffs->ffs_eventfd &&
- !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
+ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
usb_ep_free_request(io_data->ep, io_data->req);
- io_data->kiocb->private = NULL;
if (io_data->read)
kfree(io_data->to_free);
kfree(io_data->buf);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 80c1de239e9a..bad0d1f9a41d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,6 +1861,12 @@ no_bw:
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
+ xhci->usb2_ports = NULL;
+ xhci->usb3_ports = NULL;
+ xhci->port_array = NULL;
+ xhci->rh_bw = NULL;
+ xhci->ext_caps = NULL;
+
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f0640b7a1c42..48672fac7ff3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
static const char hcd_name[] = "xhci_hcd";
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5c15e9bc5f7a..474b5fa14900 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
+
+ /*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs don't support 64-bit address memory
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
+ */
+ if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+ xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
/* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5a2e2e3936c4..529c3c40f901 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -14,7 +14,7 @@
#include "xhci.h" /* for hcd_to_xhci() */
enum xhci_plat_type {
- XHCI_PLAT_TYPE_MARVELL_ARMADA,
+ XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7cf66212ceae..99b4ff42f7a0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4004,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
- if (xhci->xhc_state) {
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d51ee0c3cf9f..9e71c96ad74a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+ /* clear state flags, including dying, halted or removing */
+ xhci->xhc_state = 0;
return ret;
}
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Resume root hubs only when have pending events. */
status = readl(&xhci->op_regs->status);
if (status & STS_EINT) {
- usb_hcd_resume_root_hub(hcd);
usb_hcd_resume_root_hub(xhci->shared_hcd);
+ usb_hcd_resume_root_hub(hcd);
}
}
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return retval;
}
@@ -2773,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_DYING)
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
- if (xhci->xhc_state) /* dying or halted */
+ if (xhci->xhc_state) /* dying, removing or halted */
goto out;
if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return retval;
xhci_dbg(xhci, "Reset complete\n");
+ /*
+ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+ * of HCCPARAMS1 is set to 1. However, the xHCs don't actually
+ * support 64-bit address memory pointers. So, this driver clears
+ * the AC64 bit of xhci->hcc_params to call
+ * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in xhci_gen_setup().
+ */
+ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+ xhci->hcc_params &= ~BIT(0);
+
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
if (HCC_64BIT_ADDR(xhci->hcc_params) &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e293e0974f48..6c629c97f8ad 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1605,6 +1605,7 @@ struct xhci_hcd {
*/
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
+#define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks;
@@ -1641,6 +1642,7 @@ struct xhci_hcd {
#define XHCI_PME_STUCK_QUIRK (1 << 20)
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
+#define XHCI_NO_64BIT_SUPPORT (1 << 23)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5e5a8fa005f8..bc8889956d17 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -83,9 +83,9 @@ static int jz4740_musb_init(struct musb *musb)
{
usb_phy_generic_register();
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
- if (!musb->xceiv) {
+ if (IS_ERR(musb->xceiv)) {
pr_err("HS UDC: no transceiver configured\n");
- return -ENODEV;
+ return PTR_ERR(musb->xceiv);
}
/* Silicon does not implement ConfigData register.
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 87bd578799a8..152865b36522 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1164,12 +1164,12 @@ static int musb_gadget_disable(struct usb_ep *ep)
musb_writew(epio, MUSB_RXMAXP, 0);
}
- musb_ep->desc = NULL;
- musb_ep->end_point.desc = NULL;
-
/* abort all pending DMA and requests */
nuke(musb_ep, -ESHUTDOWN);
+ musb_ep->desc = NULL;
+ musb_ep->end_point.desc = NULL;
+
schedule_work(&musb->irq_work);
spin_unlock_irqrestore(&(musb->lock), flags);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 58487a473521..2f8ad7f1f482 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2735,7 +2735,7 @@ static const struct hc_driver musb_hc_driver = {
.description = "musb-hcd",
.product_desc = "MUSB HDRC host driver",
.hcd_priv_size = sizeof(struct musb *),
- .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+ .flags = HCD_USB2 | HCD_MEMORY,
/* not using irq handler or reset hooks from usbcore, since
* those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index dd47823bb014..7c9f25e9c422 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 13e4cc31bc79..16bc679dc2fc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -781,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
+static int uas_target_alloc(struct scsi_target *starget)
+{
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)
+ dev_to_shost(starget->dev.parent)->hostdata;
+
+ if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+ starget->no_report_luns = 1;
+
+ return 0;
+}
+
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
- scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -832,6 +842,7 @@ static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
+ .target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto set_alt0;
+ /*
+ * 1 tag is reserved for untagged commands +
+ * 1 tag to avoid off by one errors in some bridge firmwares
+ */
+ shost->can_queue = devinfo->qdepth - 2;
+
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ "Seagate",
+ "Expansion Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_LUNS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
- US_FL_MAX_SECTORS_240);
+ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
p = quirks;
while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
+ case 'j':
+ f |= US_FL_NO_REPORT_LUNS;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index fe274b5851c7..93e66a9148b9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
fb->off_ienb = CLCD_PL111_IENB;
fb->off_cntl = CLCD_PL111_CNTL;
} else {
-#ifdef CONFIG_ARCH_VERSATILE
- fb->off_ienb = CLCD_PL111_IENB;
- fb->off_cntl = CLCD_PL111_CNTL;
-#else
- fb->off_ienb = CLCD_PL110_IENB;
- fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+ if (of_machine_is_compatible("arm,versatile-ab") ||
+ of_machine_is_compatible("arm,versatile-pb")) {
+ fb->off_ienb = CLCD_PL111_IENB;
+ fb->off_cntl = CLCD_PL111_CNTL;
+ } else {
+ fb->off_ienb = CLCD_PL110_IENB;
+ fb->off_cntl = CLCD_PL110_CNTL;
+ }
}
fb->clk = clk_get(&fb->dev->dev, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..1954ec913ce5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
char *desc, struct gpio_desc **gpiod)
{
- struct gpio_desc *gd;
int r;
- *gpiod = NULL;
-
r = devm_gpio_request_one(dev, gpio, flags, desc);
- if (r)
+ if (r) {
+ *gpiod = NULL;
return r == -ENOENT ? 0 : r;
+ }
- gd = gpio_to_desc(gpio);
- if (IS_ERR(gd))
- return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+ *gpiod = gpio_to_desc(gpio);
- *gpiod = gd;
return 0;
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c802d47892c..ca6bfddaacad 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue(
const char *name)
{
struct virtqueue *vq;
- void *queue;
+ void *queue = NULL;
dma_addr_t dma_addr;
size_t queue_size_in_bytes;
struct vring vring;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 9781e0dd59d6..d46839f51e73 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
+static void release_memory_resource(struct resource *resource);
+
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
@@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL;
}
+#ifdef CONFIG_SPARSEMEM
+ {
+ unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
+ unsigned long pfn = res->start >> PAGE_SHIFT;
+
+ if (pfn > limit) {
+ pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+ pfn, limit);
+ release_memory_resource(res);
+ return NULL;
+ }
+ }
+#endif
+
return res;
}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 38272ad24551..f4edd6df3df2 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
{
unsigned int new_size;
evtchn_port_t *new_ring, *old_ring;
- unsigned int p, c;
/*
* Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
/*
* Copy the old ring contents to the new ring.
*
- * If the ring contents crosses the end of the current ring,
- * it needs to be copied in two chunks.
+ * To take care of wrapping, a full ring, and the new index
+ * pointing into the second half, simply copy the old contents
+ * twice.
*
* +---------+ +------------------+
- * |34567 12| -> | 1234567 |
- * +-----p-c-+ +------------------+
+ * |34567 12| -> |34567 1234567 12|
+ * +-----p-c-+ +-------c------p---+
*/
- p = evtchn_ring_offset(u, u->ring_prod);
- c = evtchn_ring_offset(u, u->ring_cons);
- if (p < c) {
- memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
- memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
- } else
- memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+ memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
+ memcpy(new_ring + u->ring_size, old_ring,
+ u->ring_size * sizeof(*u->ring));
u->ring = new_ring;
u->ring_size = new_size;