Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/nfit/core.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 6
-rw-r--r--  drivers/acpi/video_detect.c | 49
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/block/ublk_drv.c | 9
-rw-r--r--  drivers/bus/sunxi-rsb.c | 8
-rw-r--r--  drivers/clk/ingenic/jz4760-cgu.c | 18
-rw-r--r--  drivers/clk/microchip/clk-mpfs-ccc.c | 10
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 26
-rw-r--r--  drivers/cxl/acpi.c | 1
-rw-r--r--  drivers/cxl/core/pmem.c | 42
-rw-r--r--  drivers/cxl/core/region.c | 12
-rw-r--r--  drivers/cxl/pci.c | 7
-rw-r--r--  drivers/cxl/pmem.c | 24
-rw-r--r--  drivers/dax/super.c | 2
-rw-r--r--  drivers/dma-buf/dma-fence.c | 2
-rw-r--r--  drivers/edac/edac_device.c | 15
-rw-r--r--  drivers/edac/qcom_edac.c | 5
-rw-r--r--  drivers/firewire/core-cdev.c | 4
-rw-r--r--  drivers/firmware/efi/efi.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm64.c | 9
-rw-r--r--  drivers/firmware/efi/memattr.c | 2
-rw-r--r--  drivers/fpga/intel-m10-bmc-sec-update.c | 17
-rw-r--r--  drivers/fpga/stratix10-soc.c | 4
-rw-r--r--  drivers/gpio/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-ep93xx.c | 38
-rw-r--r--  drivers/gpio/gpio-mxc.c | 3
-rw-r--r--  drivers/gpio/gpio-vf610.c | 41
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 15
-rw-r--r--  drivers/gpio/gpiolib-acpi.h | 1
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 95
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h | 29
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 1
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 4
-rw-r--r--  drivers/gpu/drm/drm_client.c | 33
-rw-r--r--  drivers/gpu/drm/drm_fbdev_generic.c | 15
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 76
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 74
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 33
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/firmware.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 16
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 18
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 21
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4
-rw-r--r-- [-rwxr-xr-x]  drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h | 0
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 10
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_client.c | 13
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_hid.h | 1
-rw-r--r--  drivers/hid/hid-core.c | 3
-rw-r--r--  drivers/hid/hid-elecom.c | 16
-rw-r--r--  drivers/hid/hid-ids.h | 5
-rw-r--r--  drivers/hid/hid-input.c | 4
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 3
-rw-r--r--  drivers/hid/hid-quirks.c | 3
-rw-r--r--  drivers/hv/hv_balloon.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-axxia.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-common.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-designware-pcidrv.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 20
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 44
-rw-r--r--  drivers/iio/accel/hid-sensor-accel-3d.c | 1
-rw-r--r--  drivers/iio/adc/berlin2-adc.c | 4
-rw-r--r--  drivers/iio/adc/imx8qxp-adc.c | 11
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-adc.c | 1
-rw-r--r--  drivers/iio/adc/twl6030-gpadc.c | 32
-rw-r--r--  drivers/iio/adc/xilinx-ams.c | 2
-rw-r--r--  drivers/iio/gyro/hid-sensor-gyro-3d.c | 1
-rw-r--r--  drivers/iio/imu/fxos8700_core.c | 111
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/Kconfig | 1
-rw-r--r--  drivers/iio/light/cm32181.c | 9
-rw-r--r--  drivers/infiniband/core/umem_dmabuf.c | 8
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c | 9
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c | 3
-rw-r--r--  drivers/infiniband/hw/mana/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 8
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c | 3
-rw-r--r--  drivers/input/mouse/synaptics.c | 1
-rw-r--r--  drivers/input/serio/i8042-acpipnpio.h | 7
-rw-r--r--  drivers/md/bcache/bcache_ondisk.h | 3
-rw-r--r--  drivers/md/bcache/journal.c | 3
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c | 5
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls-api.c | 2
-rw-r--r--  drivers/net/bonding/bond_debugfs.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c | 1
-rw-r--r--  drivers/net/dsa/Kconfig | 7
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 2
-rw-r--r--  drivers/net/dsa/mt7530.c | 26
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 8
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 31
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 113
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 15
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 54
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 25
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 35
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 30
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h | 1
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_sgmii.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 90
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 25
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 6
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 46
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 24
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/ipsec.c | 39
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 108
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 194
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.h | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 9
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 68
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 29
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 87
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 7
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 10
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c | 22
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 12
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 11
-rw-r--r--  drivers/net/mdio/mdio-mux-meson-g12a.c | 23
-rw-r--r--  drivers/net/phy/dp83822.c | 6
-rw-r--r--  drivers/net/phy/meson-gxl.c | 4
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/phy/phylink.c | 5
-rw-r--r--  drivers/net/usb/kalmia.c | 8
-rw-r--r--  drivers/net/usb/plusb.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 50
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 11
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 29
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_netdev.c | 16
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.c | 2
-rw-r--r--  drivers/nvdimm/Kconfig | 19
-rw-r--r--  drivers/nvdimm/nd.h | 2
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 42
-rw-r--r--  drivers/nvme/host/auth.c | 14
-rw-r--r--  drivers/nvme/host/core.c | 7
-rw-r--r--  drivers/nvme/host/fc.c | 18
-rw-r--r--  drivers/nvme/host/pci.c | 21
-rw-r--r--  drivers/nvme/target/fc.c | 4
-rw-r--r--  drivers/nvmem/brcm_nvram.c | 3
-rw-r--r--  drivers/nvmem/core.c | 60
-rw-r--r--  drivers/nvmem/qcom-spmi-sdam.c | 1
-rw-r--r--  drivers/nvmem/sunxi_sid.c | 15
-rw-r--r--  drivers/of/address.c | 21
-rw-r--r--  drivers/of/fdt.c | 6
-rw-r--r--  drivers/of/of_reserved_mem.c | 3
-rw-r--r--  drivers/of/platform.c | 12
-rw-r--r--  drivers/parisc/pdc_stable.c | 9
-rw-r--r--  drivers/pci/pci.c | 7
-rw-r--r--  drivers/pci/pci.h | 4
-rw-r--r--  drivers/pci/pcie/aspm.c | 111
-rw-r--r--  drivers/perf/arm-cmn.c | 7
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 13
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 16
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8195.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c | 2
-rw-r--r--  drivers/platform/x86/amd/Kconfig | 1
-rw-r--r--  drivers/platform/x86/amd/pmc.c | 58
-rw-r--r--  drivers/platform/x86/amd/pmf/auto-mode.c | 9
-rw-r--r--  drivers/platform/x86/amd/pmf/cnqf.c | 14
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c | 32
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h | 3
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c | 28
-rw-r--r--  drivers/platform/x86/apple-gmux.c | 93
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 17
-rw-r--r--  drivers/platform/x86/dell/dell-wmi-base.c | 3
-rw-r--r--  drivers/platform/x86/gigabyte-wmi.c | 1
-rw-r--r--  drivers/platform/x86/hp/hp-wmi.c | 6
-rw-r--r--  drivers/platform/x86/intel/vsec.c | 9
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 13
-rw-r--r--  drivers/platform/x86/touchscreen_dmi.c | 9
-rw-r--r--  drivers/rtc/rtc-efi.c | 48
-rw-r--r--  drivers/rtc/rtc-sunplus.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 5
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 20
-rw-r--r--  drivers/scsi/libiscsi.c | 38
-rw-r--r--  drivers/scsi/scsi.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 7
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 2
-rw-r--r--  drivers/spi/spi-dw-core.c | 2
-rw-r--r--  drivers/spi/spi.c | 23
-rw-r--r--  drivers/spi/spidev.c | 22
-rw-r--r--  drivers/target/target_core_tmr.c | 4
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c | 28
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h | 1
-rw-r--r--  drivers/tty/serial/8250/8250_dma.c | 21
-rw-r--r--  drivers/tty/serial/stm32-usart.c | 33
-rw-r--r--  drivers/tty/vt/vc_screen.c | 9
-rw-r--r--  drivers/ufs/core/ufshcd.c | 29
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-qcom.c | 2
-rw-r--r--  drivers/usb/fotg210/fotg210-udc.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 1
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 4
-rw-r--r--  drivers/usb/gadget/udc/bcm63xx_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/fsl_qe_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/fsl_udc_core.c | 1
-rw-r--r--  drivers/usb/gadget/udc/fusb300_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/goku_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/gr_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/m66592-udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/max3420_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/mv_u3d_core.c | 1
-rw-r--r--  drivers/usb/gadget/udc/mv_udc_core.c | 1
-rw-r--r--  drivers/usb/gadget/udc/net2272.c | 1
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 1
-rw-r--r--  drivers/usb/gadget/udc/omap_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/pch_udc.c | 1
-rw-r--r--  drivers/usb/gadget/udc/snps_udc_core.c | 1
-rw-r--r--  drivers/usb/typec/altmodes/displayport.c | 8
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 9
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c | 2
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 31
-rw-r--r--  drivers/vhost/net.c | 3
-rw-r--r--  drivers/vhost/scsi.c | 21
-rw-r--r--  drivers/vhost/vhost.c | 3
-rw-r--r--  drivers/vhost/vhost.h | 1
-rw-r--r--  drivers/video/fbdev/atmel_lcdfb.c | 22
-rw-r--r--  drivers/video/fbdev/aty/aty128fb.c | 6
-rw-r--r--  drivers/video/fbdev/aty/atyfb_base.c | 8
-rw-r--r--  drivers/video/fbdev/aty/radeon_backlight.c | 6
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 10
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 7
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 4
-rw-r--r--  drivers/video/fbdev/core/fbmon.c | 2
-rw-r--r--  drivers/video/fbdev/mx3fb.c | 7
-rw-r--r--  drivers/video/fbdev/nvidia/nv_backlight.c | 8
-rw-r--r--  drivers/video/fbdev/nvidia/nvidia.c | 81
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c | 8
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c | 7
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c | 7
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c | 3
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c | 3
-rw-r--r--  drivers/video/fbdev/riva/fbdev.c | 8
-rw-r--r--  drivers/watchdog/diag288_wdt.c | 15
357 files changed, 3103 insertions, 1808 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f1cc5ec6a3b6..4e48d6db05eb 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
mutex_lock(&acpi_desc->init_mutex);
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
- cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
+ cancel_delayed_work_sync(&acpi_desc->dwork);
/*
* Bounce the nvdimm bus lock to make sure any in-flight
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 0b557c0d405e..4ca667251272 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -60,13 +60,17 @@ static struct notifier_block tts_notifier = {
.priority = 0,
};
+#ifndef acpi_skip_set_wakeup_address
+#define acpi_skip_set_wakeup_address() false
+#endif
+
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
unsigned long acpi_wakeup_address;
/* do we have a wakeup address for S2 and S3? */
- if (acpi_state == ACPI_STATE_S3) {
+ if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
acpi_wakeup_address = acpi_get_wakeup_address();
if (!acpi_wakeup_address)
return -EFAULT;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 65cec7bb6d96..a8c02608dde4 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -110,26 +110,6 @@ static bool nvidia_wmi_ec_supported(void)
}
#endif
-static bool apple_gmux_backlight_present(void)
-{
- struct acpi_device *adev;
- struct device *dev;
-
- adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
- if (!adev)
- return false;
-
- dev = acpi_get_first_physical_node(adev);
- if (!dev)
- return false;
-
- /*
- * drivers/platform/x86/apple-gmux.c only supports old style
- * Apple GMUX with an IO-resource.
- */
- return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
-}
-
/* Force to use vendor driver when the ACPI device is known to be
* buggy */
static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -612,6 +592,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Asus U46E */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Asus UX303UB */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -620,6 +608,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* HP EliteBook 8460p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* HP Pavilion g6-1d80nr / B4U19UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Samsung N150P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
@@ -766,6 +771,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
+ static bool apple_gmux_present;
static bool native_available;
static bool init_done;
static long video_caps;
@@ -779,6 +785,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
+ apple_gmux_present = apple_gmux_detect(NULL, NULL);
init_done = true;
}
if (native)
@@ -800,7 +807,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
- if (apple_gmux_backlight_present())
+ if (apple_gmux_present)
return acpi_backlight_apple_gmux;
/* Use ACPI video if available, except when native should be preferred. */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 884ae73b11ea..2ea572628b1c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3109,7 +3109,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
- else
+ else if (link->sata_spd)
return -EINVAL;
/* were we already at the bottom? */
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 17b677b5d3b2..6368b56eacf1 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -137,7 +137,7 @@ struct ublk_device {
char *__queues;
- unsigned short queue_size;
+ unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@@ -2092,13 +2092,12 @@ static void __exit ublk_exit(void)
struct ublk_device *ub;
int id;
- class_destroy(ublk_chr_class);
-
- misc_deregister(&ublk_misc);
-
idr_for_each_entry(&ublk_index_idr, ub, id)
ublk_remove(ub);
+ class_destroy(ublk_chr_class);
+ misc_deregister(&ublk_misc);
+
idr_destroy(&ublk_index_idr);
unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 3aa91aed3bf7..226e87b85116 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -857,7 +857,13 @@ static int __init sunxi_rsb_init(void)
return ret;
}
- return platform_driver_register(&sunxi_rsb_driver);
+ ret = platform_driver_register(&sunxi_rsb_driver);
+ if (ret) {
+ bus_unregister(&sunxi_rsb_bus);
+ return ret;
+ }
+
+ return 0;
}
module_init(sunxi_rsb_init);
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index ecd395ac8a28..e407f00bd594 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
unsigned long rate, unsigned long parent_rate,
unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
- unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
+ unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
/* The frequency after the N divider must be between 1 and 50 MHz. */
n = parent_rate / (1 * MHZ);
@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
/* The N divider must be >= 2. */
n = clamp_val(n, 2, 1 << pll_info->n_bits);
- for (;; n >>= 1) {
- od = (unsigned int)-1;
+ rate /= MHZ;
+ parent_rate /= MHZ;
- do {
- m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
- } while ((m > m_max || m & 1) && (od < 4));
-
- if (od < 4 && m >= 4 && m <= m_max)
- break;
+ for (m = m_max; m >= m_max && n >= 2; n--) {
+ m = rate * n / parent_rate;
+ od = m & 1;
+ m <<= od;
}
*pm = m;
- *pn = n;
+ *pn = n + 1;
*pod = 1 << od;
}
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
index 32aae880a14f..0ddc73e07be4 100644
--- a/drivers/clk/microchip/clk-mpfs-ccc.c
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
- char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+ char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
if (!name)
return -ENOMEM;
- snprintf(name, 23, "%s_out%u", parent->name, i);
out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
out_hw->reg_offset;
@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
- char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
- if (!name)
+ pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
+ strchrnul(dev->of_node->full_name, '@'), i);
+ if (!pll_hw->name)
return -ENOMEM;
pll_hw->base = data->pll_base[i];
- snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
- pll_hw->name = (const char *)name;
pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
pll_hw->parents,
&mpfs_ccc_pll_ops, 0);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9505a812d6a1..d3f55ca06ed3 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,40 +143,42 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
return lval * xo_rate;
}
-/* Get the current frequency of the CPU (after throttling) */
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
+ unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
+ soc_data = qcom_cpufreq.soc_data;
- return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ index = readl_relaxed(data->base + soc_data->reg_perf_state);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return policy->freq_table[index].frequency;
}
-/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
- const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
- unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
- soc_data = qcom_cpufreq.soc_data;
- index = readl_relaxed(data->base + soc_data->reg_perf_state);
- index = min(index, LUT_MAX_ENTRIES - 1);
+ if (data->throttle_irq >= 0)
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return policy->freq_table[index].frequency;
+ return qcom_cpufreq_get_freq(cpu);
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -704,6 +706,8 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
return -ENOMEM;
qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+ if (!qcom_cpufreq.soc_data)
+ return -ENODEV;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
if (!clk_data)
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index ad0849af42d7..13cde44c6086 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -736,4 +736,3 @@ module_exit(cxl_acpi_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);
-MODULE_SOFTDEP("pre: cxl_pmem");
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index f3d2169b6731..c2e4b1093788 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -227,34 +227,16 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
return cxl_nvd;
}
-static void cxl_nvd_unregister(void *_cxl_nvd)
+static void cxlmd_release_nvdimm(void *_cxlmd)
{
- struct cxl_nvdimm *cxl_nvd = _cxl_nvd;
- struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct cxl_nvdimm *cxl_nvd = cxlmd->cxl_nvd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
- /*
- * Either the bridge is in ->remove() context under the device_lock(),
- * or cxlmd_release_nvdimm() is cancelling the bridge's release action
- * for @cxl_nvd and doing it itself (while manually holding the bridge
- * lock).
- */
- device_lock_assert(&cxl_nvb->dev);
cxl_nvd->cxlmd = NULL;
cxlmd->cxl_nvd = NULL;
+ cxlmd->cxl_nvb = NULL;
device_unregister(&cxl_nvd->dev);
-}
-
-static void cxlmd_release_nvdimm(void *_cxlmd)
-{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
-
- device_lock(&cxl_nvb->dev);
- if (cxlmd->cxl_nvd)
- devm_release_action(&cxl_nvb->dev, cxl_nvd_unregister,
- cxlmd->cxl_nvd);
- device_unlock(&cxl_nvb->dev);
put_device(&cxl_nvb->dev);
}
@@ -293,22 +275,6 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev));
- /*
- * The two actions below arrange for @cxl_nvd to be deleted when either
- * the top-level PMEM bridge goes down, or the endpoint device goes
- * through ->remove().
- */
- device_lock(&cxl_nvb->dev);
- if (cxl_nvb->dev.driver)
- rc = devm_add_action_or_reset(&cxl_nvb->dev, cxl_nvd_unregister,
- cxl_nvd);
- else
- rc = -ENXIO;
- device_unlock(&cxl_nvb->dev);
-
- if (rc)
- goto err_alloc;
-
/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 02f28da519e3..940f805b1534 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter = cxled_to_port(cxled);
struct cxl_ep *ep;
- int rc;
+ int rc = 0;
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
iter = to_cxl_port(iter->dev.parent);
@@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- rc = cxld->reset(cxld);
+ if (cxld->reset)
+ rc = cxld->reset(cxld);
if (rc)
return rc;
}
@@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
- cxld->reset(cxld);
+ if (cxld->reset)
+ cxld->reset(cxld);
}
cxled->cxld.reset(&cxled->cxld);
@@ -991,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
int i, distance;
/*
- * Passthrough ports impose no distance requirements between
+ * Passthrough decoders impose no distance requirements between
* peers
*/
- if (port->nr_dports == 1)
+ if (cxl_rr->nr_targets == 1)
distance = 0;
else
distance = p->nr_targets / cxl_rr->nr_targets;
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 33083a522fd1..258004f34281 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -554,8 +554,11 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
/* If multiple errors, log header points to first error from ctrl reg */
if (hweight32(status) > 1) {
- addr = cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
- fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK, readl(addr)));
+ void __iomem *rcc_addr =
+ cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ readl(rcc_addr)));
} else {
fe = status;
}
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index eedefebc4283..08bbbac9a6d0 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -225,11 +225,35 @@ static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}
+static int detach_nvdimm(struct device *dev, void *data)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ bool release = false;
+
+ if (!is_cxl_nvdimm(dev))
+ return 0;
+
+ device_lock(dev);
+ if (!dev->driver)
+ goto out;
+
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
+ release = true;
+out:
+ device_unlock(dev);
+ if (release)
+ device_release_driver(dev);
+ return 0;
+}
+
static void unregister_nvdimm_bus(void *_cxl_nvb)
{
struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
struct nvdimm_bus *nvdimm_bus = cxl_nvb->nvdimm_bus;
+ bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, detach_nvdimm);
+
cxl_nvb->nvdimm_bus = NULL;
nvdimm_bus_unregister(nvdimm_bus);
}
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index da4438f3188c..c4c4728a36e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
/**
* dax_holder() - obtain the holder of a dax device
* @dax_dev: a dax_device instance
-
+ *
* Return: the holder's data which represents the holder if registered,
* otherwize NULL.
*/
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 406b4e26f538..0de0482cd36e 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
0, 0);
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &dma_fence_stub.flags);
+ &fence->flags);
dma_fence_signal(fence);
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 878deb4880cd..0689e1510721 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -34,6 +34,9 @@
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
+/* Default workqueue processing interval on this instance, in msecs */
+#define DEFAULT_POLL_INTERVAL 1000
+
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
@@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
* whole one second to save timers firing all over the period
* between integral seconds
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -400,7 +403,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
edac_dev->delay = msecs_to_jiffies(msec);
/* See comment in edac_device_workq_setup() above */
- if (edac_dev->poll_msec == 1000)
+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_mod_work(&edac_dev->work, edac_dev->delay);
@@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
- /*
- * enable workq processing on this instance,
- * default = 1000 msec
- */
- edac_device_workq_setup(edac_dev, 1000);
+ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 97a27e42dd61..c45519f59dc1 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -252,7 +252,7 @@ clear:
static int
dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
{
- struct llcc_drv_data *drv = edev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
int ret;
ret = dump_syn_reg_values(drv, bank, err_type);
@@ -289,7 +289,7 @@ static irqreturn_t
llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
+ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
int ret;
@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
- edev_ctl->pvt_info = llcc_driv_data;
rc = edac_device_add_device(edev_ctl);
if (rc)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9c89f7d53e99..958aa4662ccb 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -819,8 +819,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
r = container_of(resource, struct inbound_transaction_resource,
resource);
- if (is_fcp_request(r->request))
+ if (is_fcp_request(r->request)) {
+ kfree(r->data);
goto out;
+ }
if (a->length != fw_get_response_length(r->request)) {
ret = -EINVAL;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index a2b0cbc8741c..1e0b016fdc2b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -1007,6 +1007,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; ) {
rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index ff2d18c42ee7..4501652e11ab 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
const u8 *type1_family = efi_get_smbios_string(1, family);
/*
- * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
- * has not been called prior.
+ * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+ * SetVirtualAddressMap() has not been called prior.
*/
- if (!type1_family || strcmp(type1_family, "Altra"))
+ if (!type1_family || (
+ strcmp(type1_family, "eMAG") &&
+ strcmp(type1_family, "Altra") &&
+ strcmp(type1_family, "Altra Max")))
return false;
efi_warn("Working around broken SetVirtualAddressMap()\n");
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 0a9aba5f9cef..f178b2984dfb 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -33,7 +33,7 @@ int __init efi_memattr_init(void)
return -ENOMEM;
}
- if (tbl->version > 1) {
+ if (tbl->version > 2) {
pr_warn("Unexpected EFI Memory Attributes table version %d\n",
tbl->version);
goto unmap;
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 79d48852825e..03f1bd81c434 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -574,20 +574,27 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
sec->fw_name_id);
sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
- if (!sec->fw_name)
- return -ENOMEM;
+ if (!sec->fw_name) {
+ ret = -ENOMEM;
+ goto fw_name_fail;
+ }
fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
&m10bmc_ops, sec);
if (IS_ERR(fwl)) {
dev_err(sec->dev, "Firmware Upload driver failed to start\n");
- kfree(sec->fw_name);
- xa_erase(&fw_upload_xa, sec->fw_name_id);
- return PTR_ERR(fwl);
+ ret = PTR_ERR(fwl);
+ goto fw_uploader_fail;
}
sec->fwl = fwl;
return 0;
+
+fw_uploader_fail:
+ kfree(sec->fw_name);
+fw_name_fail:
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return ret;
}
static int m10bmc_sec_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 357cea58ec98..f7f01982a512 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
/* Allocate buffers from the service layer's pool. */
for (i = 0; i < NUM_SVC_BUFS; i++) {
kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
- if (!kbuf) {
+ if (IS_ERR(kbuf)) {
s10_free_buffers(mgr);
- ret = -ENOMEM;
+ ret = PTR_ERR(kbuf);
goto init_done;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index ec7cfd4f52b1..e9917a45b005 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1531,6 +1531,7 @@ config GPIO_MLXBF2
tristate "Mellanox BlueField 2 SoC GPIO"
depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say Y here if you want GPIO support on Mellanox BlueField 2 SoC.
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 2e1779709113..6cedf46efec6 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
+#include <linux/seq_file.h>
#define EP93XX_GPIO_F_INT_STATUS 0x5c
#define EP93XX_GPIO_A_INT_STATUS 0xa0
@@ -40,7 +41,6 @@
#define EP93XX_GPIO_F_IRQ_BASE 80
struct ep93xx_gpio_irq_chip {
- struct irq_chip ic;
u8 irq_offset;
u8 int_unmasked;
u8 int_enabled;
@@ -148,7 +148,7 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
*/
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned int irq = irq_desc_get_irq(desc);
- int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
+ int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */
int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
chained_irq_enter(irqchip, desc);
@@ -185,6 +185,7 @@ static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
ep93xx_gpio_update_int_params(epg, eic);
writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
@@ -195,6 +196,7 @@ static void ep93xx_gpio_irq_mask(struct irq_data *d)
eic->int_unmasked &= ~BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, eic);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
@@ -203,6 +205,7 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
eic->int_unmasked |= BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, eic);
}
@@ -320,15 +323,25 @@ static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
return 0;
}
-static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
+static void ep93xx_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
- ic->irq_ack = ep93xx_gpio_irq_ack;
- ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
- ic->irq_mask = ep93xx_gpio_irq_mask;
- ic->irq_unmask = ep93xx_gpio_irq_unmask;
- ic->irq_set_type = ep93xx_gpio_irq_type;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, dev_name(gc->parent));
}
+static const struct irq_chip gpio_eic_irq_chip = {
+ .name = "ep93xx-gpio-eic",
+ .irq_ack = ep93xx_gpio_irq_ack,
+ .irq_mask = ep93xx_gpio_irq_mask,
+ .irq_unmask = ep93xx_gpio_irq_unmask,
+ .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
+ .irq_set_type = ep93xx_gpio_irq_type,
+ .irq_print_chip = ep93xx_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
struct platform_device *pdev,
struct ep93xx_gpio *epg,
@@ -350,8 +363,6 @@ static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
girq = &gc->irq;
if (bank->has_irq || bank->has_hierarchical_irq) {
- struct irq_chip *ic;
-
gc->set_config = ep93xx_gpio_set_config;
egc->eic = devm_kcalloc(dev, 1,
sizeof(*egc->eic),
@@ -359,12 +370,7 @@ static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
if (!egc->eic)
return -ENOMEM;
egc->eic->irq_offset = bank->irq;
- ic = &egc->eic->ic;
- ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
- if (!ic->name)
- return -ENOMEM;
- ep93xx_init_irq_chip(dev, ic);
- girq->chip = ic;
+ gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip);
}
if (bank->has_irq) {
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 6f673b2f2a1b..9d0cec4b82a3 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -249,10 +249,11 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
} else {
pr_err("mxc: invalid configuration for GPIO %d: %x\n",
gpio, edge);
- return;
+ goto unlock;
}
writel(val | (edge << (bit << 1)), reg);
+unlock:
raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
}
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 9db42f6a2043..9033db00c360 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -30,7 +30,6 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
- struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -207,20 +206,24 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
static void vf610_gpio_irq_mask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
vf610_gpio_writel(0, pcr_base);
+ gpiochip_disable_irq(gc, gpio_num);
}
static void vf610_gpio_irq_unmask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
- vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
+ gpiochip_enable_irq(gc, gpio_num);
+ vf610_gpio_writel(port->irqc[gpio_num] << PORT_PCR_IRQC_OFFSET,
pcr_base);
}
@@ -237,6 +240,17 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
+static const struct irq_chip vf610_irqchip = {
+ .name = "gpio-vf610",
+ .irq_ack = vf610_gpio_irq_ack,
+ .irq_mask = vf610_gpio_irq_mask,
+ .irq_unmask = vf610_gpio_irq_unmask,
+ .irq_set_type = vf610_gpio_irq_set_type,
+ .irq_set_wake = vf610_gpio_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void vf610_gpio_disable_clk(void *data)
{
clk_disable_unprepare(data);
@@ -249,7 +263,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
- struct irq_chip *ic;
int i;
int ret;
@@ -315,14 +328,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
- ic = &port->ic;
- ic->name = "gpio-vf610";
- ic->irq_ack = vf610_gpio_irq_ack;
- ic->irq_mask = vf610_gpio_irq_mask;
- ic->irq_unmask = vf610_gpio_irq_unmask;
- ic->irq_set_type = vf610_gpio_irq_set_type;
- ic->irq_set_wake = vf610_gpio_irq_set_wake;
-
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
@@ -331,7 +336,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
vf610_gpio_writel(~0, port->base + PORT_ISFR);
girq = &gc->irq;
- girq->chip = ic;
+ gpio_irq_chip_set_chip(girq, &vf610_irqchip);
girq->parent_handler = vf610_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9ef0f5641b52..34ff048e70d0 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1104,7 +1104,8 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in
dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
}
- if (wake_capable)
+ /* avoid suspend issues with GPIOs when systems are using S3 */
+ if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
*wake_capable = info.wake_capable;
return irq;
@@ -1636,6 +1637,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "ELAN0415:00@9",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 9475f99a9694..5a08693b8fb1 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -14,7 +14,6 @@
#include <linux/gpio/consumer.h>
-struct acpi_device;
struct device;
struct fwnode_handle;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 315cbdf61979..9abfb482b615 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -53,7 +53,8 @@ config DRM_DEBUG_MM
config DRM_USE_DYNAMIC_DEBUG
bool "use dynamic debug to implement drm.debug"
- default y
+ default n
+ depends on BROKEN
depends on DRM
depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
depends on JUMP_LABEL
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e3e2e6e3b485..d148a1bd85e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -243,6 +243,7 @@ extern int amdgpu_num_kcq;
#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7b5ce00f0602..7af3041ccd0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1220,10 +1220,13 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
* next job actually sees the results from the previous one
* before we start executing on the same scheduler ring.
*/
- if (!s_fence || s_fence->sched != sched)
+ if (!s_fence || s_fence->sched != sched) {
+ dma_fence_put(fence);
continue;
+ }
r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ dma_fence_put(fence);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2f28a8c02f64..fbf2f24169eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4268,6 +4268,9 @@ exit:
}
adev->in_suspend = false;
+ if (adev->enable_mes)
+ amdgpu_mes_self_test(adev);
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index cd4caaa29528..3fe277bc233f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -186,6 +186,7 @@ int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -932,6 +933,16 @@ MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = e
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
+/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.
* id > 0 use the soft pptable with specicfied id.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 00444203220d..faff4a3f96e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
- if (!ring->no_scheduler)
+ /*
+ * Notice we check for sched.ops since there's some
+ * override on the meaning of sched.ready by amdgpu.
+ * The natural check would be sched.ready, which is
+ * set as drm_sched_init() finishes...
+ */
+ if (ring->sched.ops)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f752c7ae7f60..3989e755a5b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -295,7 +295,7 @@ struct amdgpu_ring {
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b5f3bba851db..01e42bdd8e4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
min(nptes, 32u), dst, incr,
upd_flags,
- vm->task_info.pid,
+ vm->task_info.tgid,
vm->immediate.fence_context);
amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index b9b57a66e113..66eb102cd88f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -790,8 +790,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
* zero here */
WARN_ON(simd != 0);
- /* type 2 wave data */
- dst[(*no_fields)++] = 2;
+ /* type 3 wave data */
+ dst[(*no_fields)++] = 3;
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f202b45c413c..5dde6f82a1ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -6877,7 +6877,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 95548c512f4f..077c53c6cc08 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -35,6 +35,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 970b066b37bb..1c4787000a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -40,6 +40,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
static int mes_v11_0_hw_fini(void *handle);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -196,7 +198,6 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
- mes_add_queue_pkt.trap_en = 1;
/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
@@ -1343,7 +1344,7 @@ static int mes_v11_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
+ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 15eb3658d70e..09fdcd20cb91 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
{
- return;
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
+ uint32_t data;
+
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+ data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+ }
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 5562670b7b52..7050238c4c48 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -640,7 +640,10 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
- AMD_CG_SUPPORT_GFX_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4d42033a703f..9c7b69d377bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1184,24 +1184,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
memset(pa_config, 0, sizeof(*pa_config));
- logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
- pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
- else
- logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
-
agp_base = 0;
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;
+ /* AGP aperture is disabled */
+ if (agp_bot == agp_top) {
+ logical_addr_low = adev->gmc.vram_start >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that prevents it from using vram beyond
+ * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
+ * system aperture high address by one to avoid the VM fault and
+ * hardware hang.
+ */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.vram_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that prevents it from using vram beyond
+ * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
+ * system aperture high address by one to avoid the VM fault and
+ * hardware hang.
+ */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
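For readers following the aperture rework above: when the AGP window is disabled (its bottom equals its top), the system aperture only needs to cover the VRAM range; otherwise it must span the union of the framebuffer and AGP ranges, with the Raven2 +1 workaround applied to the high address in either case. A self-contained sketch of that selection logic, using made-up addresses and keeping the same shifts as the code above (the exact register granularity behind >> 18 and >> 24 is hardware specific):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct gmc {
	uint64_t fb_start, fb_end;
	uint64_t vram_start, vram_end;
	uint64_t agp_start, agp_end;
};

static void system_aperture(const struct gmc *g, bool is_raven2,
			    uint64_t *low, uint64_t *high)
{
	uint64_t agp_bot = g->agp_start >> 24;
	uint64_t agp_top = g->agp_end >> 24;

	if (agp_bot == agp_top) {		/* AGP aperture disabled */
		*low = g->vram_start >> 18;
		*high = is_raven2 ? (g->fb_end >> 18) + 1
				  : g->vram_end >> 18;
	} else {				/* cover both FB and AGP ranges */
		*low = MIN(g->fb_start, g->agp_start) >> 18;
		*high = is_raven2
			? MAX((g->fb_end >> 18) + 1, g->agp_end >> 18)
			: MAX(g->fb_end, g->agp_end) >> 18;
	}
}

int main(void)
{
	/* hypothetical layout, not real hardware values */
	struct gmc g = {
		.fb_start   = 0xF400000000ULL, .fb_end   = 0xF5FFFFFFFFULL,
		.vram_start = 0xF400000000ULL, .vram_end = 0xF47FFFFFFFULL,
		.agp_start  = 0x1ULL,          .agp_end  = 0x0ULL, /* disabled */
	};
	uint64_t lo, hi;

	system_aperture(&g, false, &lo, &hi);
	printf("aperture low = 0x%llx, high = 0x%llx\n",
	       (unsigned long long)lo, (unsigned long long)hi);
	return 0;
}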
@@ -1503,6 +1517,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
@@ -1511,6 +1527,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
+ if (init_data.flags.gpu_vm_support &&
+ (amdgpu_sg_display == 0))
+ init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -4501,6 +4520,17 @@ DEVICE_ATTR_WO(s3_debug);
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -8881,6 +8911,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto skip_modeset;
+ /* Unset freesync video if it was active before */
+ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+ }
+
+ /* Now check if we should set freesync video mode */
if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
@@ -9497,6 +9534,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
bool lock_and_validation_needed = false;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
struct dsc_mst_fairness_vars vars[MAX_PIPES];
#endif
@@ -9619,7 +9658,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
* atomic state, so call drm helper to normalize zpos.
*/
- drm_atomic_normalize_zpos(dev, state);
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9745,6 +9788,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->index == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+#endif
+
/**
* Streams and planes are reset when there are changes that affect
* bandwidth. Anything that affects bandwidth needs to go through
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6994c9a1ed85..5cff56bb8f56 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
}
static void
-fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
- struct amdgpu_dm_connector *aconnector,
+fill_dc_mst_payload_table_from_drm(struct dc_link *link,
+ bool enable,
+ struct drm_dp_mst_atomic_payload *target_payload,
struct dc_dp_mst_stream_allocation_table *table)
{
struct dc_dp_mst_stream_allocation_table new_table = { 0 };
struct dc_dp_mst_stream_allocation *sa;
- struct drm_dp_mst_atomic_payload *payload;
+ struct link_mst_stream_allocation_table copy_of_link_table =
+ link->mst_stream_alloc_table;
+
+ int i;
+ int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
+ struct link_mst_stream_allocation *dc_alloc;
+
+ /* TODO: refactor to set link->mst_stream_alloc_table directly if possible. */
+ if (enable) {
+ dc_alloc =
+ &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
+ dc_alloc->vcp_id = target_payload->vcpi;
+ dc_alloc->slot_count = target_payload->time_slots;
+ } else {
+ for (i = 0; i < copy_of_link_table.stream_count; i++) {
+ dc_alloc =
+ &copy_of_link_table.stream_allocations[i];
+
+ if (dc_alloc->vcp_id == target_payload->vcpi) {
+ dc_alloc->vcp_id = 0;
+ dc_alloc->slot_count = 0;
+ break;
+ }
+ }
+ ASSERT(i != copy_of_link_table.stream_count);
+ }
/* Fill payload info*/
- list_for_each_entry(payload, &mst_state->payloads, next) {
- if (payload->delete)
- continue;
-
- sa = &new_table.stream_allocations[new_table.stream_count];
- sa->slot_count = payload->time_slots;
- sa->vcp_id = payload->vcpi;
- new_table.stream_count++;
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dc_alloc =
+ &copy_of_link_table.stream_allocations[i];
+ if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
+ sa = &new_table.stream_allocations[new_table.stream_count];
+ sa->slot_count = dc_alloc->slot_count;
+ sa->vcp_id = dc_alloc->vcp_id;
+ new_table.stream_count++;
+ }
}
/* Overwrite the old table */
@@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
* AUX message. The sequence is slot 1-63 allocated sequence for each
* stream. AMD ASIC stream slot allocation should follow the same
* sequence. copy DRM MST allocation to dc */
- fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
+ fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
return true;
}
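The rewritten helper above no longer walks the DRM topology state at all: it copies the link's current HW allocation table, adds or clears just the payload being changed, and then compacts the still-valid entries into the table handed to DC. A simplified, self-contained sketch of that copy-modify-compact flow (the struct names and the MAX_STREAMS bound are invented for the example):

#include <stdio.h>

#define MAX_STREAMS 6	/* stand-in for MAX_CONTROLLER_NUM */

struct alloc { int vcp_id; int slot_count; };

struct table {
	int stream_count;
	struct alloc allocations[MAX_STREAMS];
};

/* build a new table from the current HW table plus one enable/disable */
static struct table update_table(const struct table *hw, int vcpi,
				 int time_slots, int enable)
{
	struct table copy = *hw;	/* work on a copy of the HW state */
	struct table out = { 0 };
	int i;

	if (enable) {
		/* stage the new payload right after the current entries */
		if (copy.stream_count < MAX_STREAMS) {
			copy.allocations[copy.stream_count].vcp_id = vcpi;
			copy.allocations[copy.stream_count].slot_count = time_slots;
		}
	} else {
		/* clear the matching payload in place */
		for (i = 0; i < copy.stream_count; i++) {
			if (copy.allocations[i].vcp_id == vcpi) {
				copy.allocations[i].vcp_id = 0;
				copy.allocations[i].slot_count = 0;
				break;
			}
		}
	}

	/* compact only the still-valid entries into the output table */
	for (i = 0; i < MAX_STREAMS; i++) {
		if (copy.allocations[i].vcp_id > 0 &&
		    copy.allocations[i].slot_count > 0)
			out.allocations[out.stream_count++] = copy.allocations[i];
	}
	return out;
}

int main(void)
{
	struct table hw = { .stream_count = 1,
			    .allocations = { { .vcp_id = 1, .slot_count = 8 } } };
	struct table t = update_table(&hw, 2, 4, 1);

	printf("streams after enabling vcpi 2: %d\n", t.stream_count);
	return 0;
}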
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d7a044e79730..abdbd4352f6f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -903,11 +903,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
-#endif
-
/* Set up params */
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 342e906ae26e..c88f044666fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
int i;
bool mst_mode = (link->type == dc_connection_mst_branch);
+ /* adjust for DRM MST state changes */
+ bool update_drm_mst_state = true;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
const struct dc_link_settings empty_link_settings = {0};
DC_LOGGER_INIT(link->ctx->logger);
+
/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
* stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&empty_link_settings,
avg_time_slots_per_mtp);
- if (mst_mode) {
+ if (mst_mode || update_drm_mst_state) {
/* when link is in mst mode, reply on mst manager to remove
* payload
*/
@@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
stream->ctx,
stream);
+ if (!update_drm_mst_state)
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+ }
+
+ if (update_drm_mst_state)
dm_helpers_dp_mst_send_payload_allocation(
stream->ctx,
stream,
false);
- }
return DC_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fe2023f18b7d..8f894c1d1d1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3626,7 +3626,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index f9ea1e86707f..79850a68f62a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -874,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {
},
// 6:1 downscaling ratio: 1000/6 = 166.666
+ // 4:1 downscaling ratio for ARGB8888 to prevent underflow during P010 playback: 1000/4 = 250
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -1763,7 +1764,7 @@ static bool dcn314_resource_construct(
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
- dc->caps.max_downscale_ratio = 600;
+ dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index dc4649458567..a4e9fd5307c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
- .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
+ .does_plane_fit_in_mall = NULL,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 950669f2c10d..cb7c0c878423 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3183,7 +3183,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
} else {
v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
}
- v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
+ v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 4a122925c3ae..92c18bfb98b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* reset the cache of the last wptr as well now that hw is reset */
+ dmub->inbox1_last_wptr = 0;
+
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
cw0.region.top = cw0.region.base + inst_fb->size - 1;
@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* mailboxes have been reset in hw, so reset the sw state as well */
+ dmub->inbox1_last_wptr = 0;
+ dmub->inbox1_rb.wrpt = 0;
+ dmub->inbox1_rb.rptr = 0;
+ dmub->outbox0_rb.wrpt = 0;
+ dmub->outbox0_rb.rptr = 0;
+ dmub->outbox1_rb.wrpt = 0;
+ dmub->outbox1_rb.rptr = 0;
+
dmub->hw_init = false;
return DMUB_STATUS_OK;
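Both dmub_srv hunks above apply the same rule: once the hardware mailboxes have been reset, every piece of software state that mirrors them (the cached last write pointer and the ring-buffer read/write pointers) must be cleared as well, otherwise the next submission would be interpreted against stale offsets. A tiny standalone illustration of keeping a software ring in sync with a simulated hardware reset (all names invented):

#include <stdint.h>
#include <stdio.h>

struct rb { uint32_t rptr, wrpt; };

struct srv {
	struct rb inbox;
	uint32_t inbox_last_wptr;	/* cached copy of the last hw write pointer */
};

static void hw_reset(void)
{
	/* pretend the hardware pointers just went back to zero */
}

static void srv_hw_reset(struct srv *s)
{
	hw_reset();

	/* hw pointers are zero again, so the sw mirrors must be zeroed too */
	s->inbox_last_wptr = 0;
	s->inbox.rptr = 0;
	s->inbox.wrpt = 0;
}

int main(void)
{
	struct srv s = { .inbox = { .rptr = 40, .wrpt = 64 }, .inbox_last_wptr = 64 };

	srv_hw_reset(&s);
	printf("rptr=%u wrpt=%u last_wptr=%u\n",
	       s.inbox.rptr, s.inbox.wrpt, s.inbox_last_wptr);
	return 0;
}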
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 236657eece47..2f3e239e623d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2007,14 +2009,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ca3beb5d8f27..6ab155023592 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1500,6 +1500,20 @@ static int smu_disable_dpms(struct smu_context *smu)
}
/*
+ * For SMU 13.0.4/11, the PMFW handles feature disablement properly
+ * in the GPU reset case; driver involvement is unnecessary.
+ */
+ if (amdgpu_in_reset(adev)) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 11):
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ /*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index d6b964cf73bd..4bc7aee4d44f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -123,7 +123,8 @@
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT))
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@@ -522,9 +523,9 @@ typedef enum {
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
- TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
+ TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d6b13933a98f..48a3a3952ceb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -113,20 +113,21 @@
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
-#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
- (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
- (1 << FEATURE_DPM_UCLK_BIT) | \
- (1 << FEATURE_DPM_FCLK_BIT) | \
- (1 << FEATURE_DPM_SOCCLK_BIT) | \
- (1 << FEATURE_DPM_MP0CLK_BIT) | \
- (1 << FEATURE_DPM_LINK_BIT) | \
- (1 << FEATURE_DPM_DCN_BIT) | \
- (1 << FEATURE_DS_GFXCLK_BIT) | \
- (1 << FEATURE_DS_SOCCLK_BIT) | \
- (1 << FEATURE_DS_FCLK_BIT) | \
- (1 << FEATURE_DS_LCLK_BIT) | \
- (1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT)
+#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_MP0CLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
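Both feature-mask hunks (here and in the 13.0.0 header above) make the same two fixes: the OR chain is wrapped in parentheses so the macro expands safely inside larger expressions, and the new bit is built with 1ULL because (1 << n) is computed in a plain 32-bit int, which overflows at bit 31 and has an undefined shift count from bit 32 upward, even when the result is assigned to a 64-bit mask. A short demonstration (the bit index below is a made-up value chosen to be above 31):

#include <stdint.h>
#include <stdio.h>

#define FEATURE_DS_VCN_BIT 40	/* hypothetical: any index >= 32 shows the problem */

int main(void)
{
	/* wrong: the shift happens in 'int' before the widening assignment */
	/* uint64_t bad = 1 << FEATURE_DS_VCN_BIT; */

	uint64_t good = 1ULL << FEATURE_DS_VCN_BIT;	/* shift done in 64 bits */

	printf("mask = 0x%016llx\n", (unsigned long long)good);
	return 0;
}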
//For use with feature control messages
typedef enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e8c6febb8b64..992163e66f7b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,11 +28,11 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 4c20d17e7416..508e392547d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -145,6 +145,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -406,6 +407,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
@@ -1256,6 +1260,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!range)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index e87db7e02e8a..9e1967d8049e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c7443317c747..66a4a41c3fe9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -714,7 +714,7 @@ static int ast_primary_plane_init(struct ast_private *ast)
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
void __iomem *vaddr = ast->vram;
- u64 offset = ast->vram_base;
+ u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
unsigned long size = ast->vram_fb_available - cursor_size;
int ret;
@@ -972,7 +972,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
return -ENOMEM;
vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_base + ast->vram_fb_available - size;
+ offset = ast->vram_fb_available - size;
ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
0x01, &ast_cursor_plane_funcs,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index a2f0860b20bb..d751820c6da6 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -193,6 +193,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
struct hdmi_codec_pdata pdata;
struct platform_device *platform;
+ memset(&pdata, 0, sizeof(pdata));
pdata.ops = &dw_hdmi_i2s_ops;
pdata.i2s = 1;
pdata.max_i2s_channels = 8;
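The added memset() matters because pdata lives on the stack and only a few of its fields are assigned below it; anything left untouched would otherwise be handed on as uninitialized stack garbage. A minimal illustration with a hypothetical platform-data struct (an equivalent fix would be a zero initializer at the declaration):

#include <stdio.h>
#include <string.h>

/* hypothetical codec platform data with optional fields */
struct codec_pdata {
	const void *ops;
	int i2s;
	int max_i2s_channels;
	int spdif;	/* never assigned below: must be zeroed explicitly */
};

int main(void)
{
	struct codec_pdata pdata;

	memset(&pdata, 0, sizeof(pdata));	/* start from a known state */
	pdata.i2s = 1;
	pdata.max_i2s_channels = 8;

	printf("spdif = %d (guaranteed 0, not leftover stack data)\n", pdata.spdif);
	return 0;
}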
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 51a46689cda7..4ca37261584a 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3372,6 +3372,9 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
mgr->payload_count--;
mgr->next_start_slot -= payload->time_slots;
+
+ if (payload->delete)
+ drm_dp_mst_put_port_malloc(payload->port);
}
EXPORT_SYMBOL(drm_dp_remove_payload);
@@ -4327,7 +4330,6 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
if (!payload->delete) {
- drm_dp_mst_put_port_malloc(port);
payload->pbn = 0;
payload->delete = true;
topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fd67efe37c63..056ab9d5f313 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -233,21 +233,17 @@ void drm_client_dev_restore(struct drm_device *dev)
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
- struct drm_device *dev = buffer->client->dev;
-
if (buffer->gem) {
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
- if (buffer->handle)
- drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
-
kfree(buffer);
}
static struct drm_client_buffer *
-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+ u32 format, u32 *handle)
{
const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
@@ -269,16 +265,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
if (ret)
goto err_delete;
- buffer->handle = dumb_args.handle;
- buffer->pitch = dumb_args.pitch;
-
obj = drm_gem_object_lookup(client->file, dumb_args.handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
+ buffer->pitch = dumb_args.pitch;
buffer->gem = obj;
+ *handle = dumb_args.handle;
return buffer;
@@ -365,7 +360,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
}
static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
- u32 width, u32 height, u32 format)
+ u32 width, u32 height, u32 format,
+ u32 handle)
{
struct drm_client_dev *client = buffer->client;
struct drm_mode_fb_cmd fb_req = { };
@@ -377,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
- fb_req.handle = buffer->handle;
+ fb_req.handle = handle;
fb_req.pitch = buffer->pitch;
ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@@ -414,13 +410,24 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
struct drm_client_buffer *buffer;
+ u32 handle;
int ret;
- buffer = drm_client_buffer_create(client, width, height, format);
+ buffer = drm_client_buffer_create(client, width, height, format,
+ &handle);
if (IS_ERR(buffer))
return buffer;
- ret = drm_client_buffer_addfb(buffer, width, height, format);
+ ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+
+ /*
+ * The handle is only needed for creating the framebuffer; destroy it
+ * again to avoid a circular dependency should anybody export the GEM
+ * object as DMA-buf. The framebuffer and our buffer structure still
+ * hold references to the GEM object, preventing its destruction.
+ */
+ drm_mode_destroy_dumb(client->dev, handle, client->file);
+
if (ret) {
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);
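The drm_client rework above turns the dumb-buffer handle from long-lived state on the buffer into a short-lived local: create the dumb buffer, look up the GEM object (which takes its own reference), register the framebuffer, then drop the handle so that only the GEM reference and the framebuffer keep the object alive and no handle remains to form a circular dependency with an exported DMA-buf. A standalone sketch of that ordering with invented helpers and a plain reference counter:

#include <stdio.h>

/* hypothetical GEM-like object with a plain reference counter */
struct object { int refcount; };

static struct object obj_pool = { 0 };

static unsigned int create_dumb(void)		/* returns a handle, takes one ref */
{
	obj_pool.refcount++;
	return 1;
}

static struct object *lookup(unsigned int handle)	/* lookup takes its own ref */
{
	(void)handle;
	obj_pool.refcount++;
	return &obj_pool;
}

static void destroy_dumb(unsigned int handle)	/* drops only the handle's ref */
{
	(void)handle;
	obj_pool.refcount--;
}

int main(void)
{
	unsigned int handle = create_dumb();
	struct object *gem = lookup(handle);	/* buffer now holds its own reference */

	/* ...the framebuffer would be registered against the handle here... */

	destroy_dumb(handle);	/* handle gone, object still pinned via 'gem' */
	printf("refcount after dropping the handle: %d\n", gem->refcount);
	return 0;
}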
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index ab8695669279..593aa3283792 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -171,11 +171,6 @@ static const struct fb_ops drm_fbdev_fb_ops = {
.fb_imageblit = drm_fbdev_fb_imageblit,
};
-static struct fb_deferred_io drm_fbdev_defio = {
- .delay = HZ / 20,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-
/*
* This function uses the client API to create a framebuffer backed by a dumb buffer.
*/
@@ -222,8 +217,14 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
return -ENOMEM;
fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
- fbi->fbdefio = &drm_fbdev_defio;
- fb_deferred_io_init(fbi);
+ /* Set a default deferred I/O handler */
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ fbi->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(fbi);
+ if (ret)
+ return ret;
} else {
/* buffer is mapped for HW framebuffer */
ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
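The fbdev change above replaces one static struct fb_deferred_io, previously shared by every generic-fbdev client, with a copy embedded in each drm_fb_helper, and it now propagates a failure from fb_deferred_io_init() instead of ignoring it. Embedding the configuration per instance means two devices can no longer fight over a single mutable object. A small sketch of the per-instance idea, with invented types:

#include <stdio.h>

struct defio_cfg { int delay; void (*cb)(void); };

static void flush_cb(void) { }

/* per-instance: each helper owns its own copy of the configuration */
struct helper {
	const char *name;
	struct defio_cfg defio;
};

static void helper_init(struct helper *h, const char *name, int delay)
{
	h->name = name;
	h->defio.delay = delay;	/* changing this affects only this helper */
	h->defio.cb = flush_cb;
}

int main(void)
{
	struct helper a, b;

	helper_init(&a, "card0", 50);
	helper_init(&b, "card1", 16);	/* faster flushing on one device only */

	printf("%s delay=%d, %s delay=%d\n",
	       a.name, a.defio.delay, b.name, b.defio.delay);
	return 0;
}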
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 7de37f8c68fd..83229a031af0 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
}
EXPORT_SYMBOL(drm_vma_offset_remove);
-/**
- * drm_vma_node_allow - Add open-file to list of allowed users
- * @node: Node to modify
- * @tag: Tag of file to remove
- *
- * Add @tag to the list of allowed open-files for this node. If @tag is
- * already on this list, the ref-count is incremented.
- *
- * The list of allowed-users is preserved across drm_vma_offset_add() and
- * drm_vma_offset_remove() calls. You may even call it if the node is currently
- * not added to any offset-manager.
- *
- * You must remove all open-files the same number of times as you added them
- * before destroying the node. Otherwise, you will leak memory.
- *
- * This is locked against concurrent access internally.
- *
- * RETURNS:
- * 0 on success, negative error code on internal failure (out-of-mem)
- */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+static int vma_node_allow(struct drm_vma_offset_node *node,
+ struct drm_file *tag, bool ref_counted)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
@@ -282,7 +263,8 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
if (tag == entry->vm_tag) {
- entry->vm_count++;
+ if (ref_counted)
+ entry->vm_count++;
goto unlock;
} else if (tag > entry->vm_tag) {
iter = &(*iter)->rb_right;
@@ -307,9 +289,59 @@ unlock:
kfree(new);
return ret;
}
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to allow
+ *
+ * Add @tag to the list of allowed open-files for this node. If @tag is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+ return vma_node_allow(node, tag, true);
+}
EXPORT_SYMBOL(drm_vma_node_allow);
/**
+ * drm_vma_node_allow_once - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @tag: Tag of file to remove
+ *
+ * Add @tag to the list of allowed open-files for this node.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * Unlike drm_vma_node_allow(), this is not ref-counted, so
+ * drm_vma_node_revoke() should only be called once after this.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
+{
+ return vma_node_allow(node, tag, false);
+}
+EXPORT_SYMBOL(drm_vma_node_allow_once);
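drm_vma_node_allow_once() reuses the existing insert path but skips the vm_count increment when the tag is already present, so a caller that may hit the same node/tag pair repeatedly (as the i915 mmap hunk further down does) needs exactly one drm_vma_node_revoke(). A standalone sketch of a single-entry allow list with both the ref-counted and the once-only behaviour, using invented names:

#include <stdio.h>

/* hypothetical single-entry allow list keyed by an opaque tag */
struct allow_entry { const void *tag; int vm_count; };

static int node_allow(struct allow_entry *e, const void *tag, int ref_counted)
{
	if (e->tag == tag) {		/* tag already on the list */
		if (ref_counted)
			e->vm_count++;
		return 0;
	}
	e->tag = tag;			/* first time: insert with one reference */
	e->vm_count = 1;
	return 0;
}

static void node_revoke(struct allow_entry *e, const void *tag)
{
	if (e->tag == tag && --e->vm_count == 0)
		e->tag = NULL;
}

int main(void)
{
	struct allow_entry e = { 0 };
	int tag;

	node_allow(&e, &tag, 0);	/* "allow_once" flavour */
	node_allow(&e, &tag, 0);	/* repeated call: count stays at 1 */
	node_revoke(&e, &tag);		/* a single revoke fully removes it */

	printf("tag still allowed: %s\n", e.tag ? "yes" : "no");
	return 0;
}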
+
+/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
* @tag: Tag of file to remove
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 572a4e3769f3..a491e6c38875 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2466,6 +2466,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
dvo_port);
}
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ return PORT_A;
+ case DVO_PORT_MIPIC:
+ if (DISPLAY_VER(i915) >= 11)
+ return PORT_B;
+ else
+ return PORT_C;
+ default:
+ return PORT_NONE;
+ }
+}
+
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@@ -3414,19 +3430,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
dvo_port = child->dvo_port;
- if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
- (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
- if (port)
- *port = dvo_port - DVO_PORT_MIPIA;
- return true;
- } else if (dvo_port == DVO_PORT_MIPIB ||
- dvo_port == DVO_PORT_MIPIC ||
- dvo_port == DVO_PORT_MIPID) {
+ if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
+ continue;
}
+
+ if (port)
+ *port = dsi_dvo_port_to_port(i915, dvo_port);
+ return true;
}
return false;
@@ -3511,7 +3524,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b74e36d76013..407a477939e5 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1319,7 +1319,7 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 5575d7abdc09..f76c06b7f1d4 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -328,8 +328,20 @@ out_unlock:
return ret;
}
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
+ .fb_dirty = intelfb_dirty,
};
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index e0766d1be966..11554645e6ee 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1587,7 +1587,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_check_wm_level(&wm->wm[level], ddb);
if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && wm->wm[0].enable) {
+ level == 1 && !wm->wm[level].enable &&
+ wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 6250de9b9196..e4b78ab4773b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1861,11 +1861,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
vm = ctx->vm;
GEM_BUG_ON(!vm);
+ /*
+ * Get a reference for the allocated handle. Once the handle is
+ * visible in the vm_xa table, userspace could try to close it
+ * from under our feet, so we need to hold the extra reference
+ * first.
+ */
+ i915_vm_get(vm);
+
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return err;
-
- i915_vm_get(vm);
+ }
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->value = id;
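The get_ppgtt fix above is a publish-ordering rule: take the extra reference before the object becomes reachable through the xarray, because as soon as it is visible another thread can close the handle and drop what would otherwise be the last reference; if the insert fails, the reference is put back. A simplified single-threaded sketch of the ordering (the racing thread is omitted and all names are invented):

#include <stdio.h>

struct vm { int refcount; };

static void vm_get(struct vm *v) { v->refcount++; }
static void vm_put(struct vm *v) { v->refcount--; }

/* stand-in for the table insert: pretend it can fail */
static int table_insert(struct vm *v, int fail)
{
	(void)v;
	return fail ? -12 /* -ENOMEM */ : 0;
}

static int publish(struct vm *v, int fail)
{
	int err;

	vm_get(v);			/* take the reference first... */
	err = table_insert(v, fail);	/* ...then make the object visible */
	if (err) {
		vm_put(v);		/* undo on failure */
		return err;
	}
	return 0;
}

int main(void)
{
	struct vm v = { .refcount = 1 };

	publish(&v, 0);
	printf("refcount after a successful publish: %d\n", v.refcount);
	return 0;
}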
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f266b68cf012..0f2e056c02dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3483,6 +3483,13 @@ err_request:
eb.composite_fence :
&eb.requests[0]->fence);
+ if (unlikely(eb.gem_context->syncobj)) {
+ drm_syncobj_replace_fence(eb.gem_context->syncobj,
+ eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
+ }
+
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
@@ -3494,13 +3501,6 @@ err_request:
}
}
- if (unlikely(eb.gem_context->syncobj)) {
- drm_syncobj_replace_fence(eb.gem_context->syncobj,
- eb.composite_fence ?
- eb.composite_fence :
- &eb.requests[0]->fence);
- }
-
if (!out_fence && eb.composite_fence)
dma_fence_put(eb.composite_fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0ad44f3868de..c7c252d4d366 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -697,7 +697,7 @@ insert:
GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
- drm_vma_node_allow(&mmo->vma_node, file);
+ drm_vma_node_allow_once(&mmo->vma_node, file);
return mmo;
err:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9c759df700ca..937728840428 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -579,7 +579,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index fd42b89b7162..bc21b1c2350a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -305,10 +305,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
obj->tiling_and_stride = tiling | stride;
- i915_gem_object_unlock(obj);
-
- /* Force the fence to be reacquired for GTT access */
- i915_gem_object_release_mmap_gtt(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -321,6 +317,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->bit_17 = NULL;
}
+ i915_gem_object_unlock(obj);
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_object_release_mmap_gtt(obj);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e94365b08f1e..2aa63ec521b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -528,7 +528,7 @@ retry:
return rq;
}
-struct i915_request *intel_context_find_active_request(struct intel_context *ce)
+struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
struct intel_context *parent = intel_context_to_parent(ce);
struct i915_request *rq, *active = NULL;
@@ -552,6 +552,8 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
active = rq;
}
+ if (active)
+ active = i915_request_get_rcu(active);
spin_unlock_irqrestore(&parent->guc_state.lock, flags);
return active;
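The extra i915_request_get_rcu() above follows the same pattern several later hunks rely on: a pointer found while walking a list under a spinlock is only safe to hand back if a reference is taken before the lock is dropped, and the callers (GuC reset, engine dump, error capture below) then do the matching put. A minimal sketch of find-and-get-under-lock, using a pthread mutex and invented request/refcount helpers:

#include <pthread.h>
#include <stdio.h>

struct req { int refcount; int active; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req requests[3] = { { 1, 0 }, { 1, 1 }, { 1, 0 } };

static struct req *req_get(struct req *r) { r->refcount++; return r; }
static void req_put(struct req *r) { r->refcount--; }

/* return the first active request with a reference held for the caller */
static struct req *get_active_request(void)
{
	struct req *found = NULL;
	int i;

	pthread_mutex_lock(&list_lock);
	for (i = 0; i < 3; i++) {
		if (requests[i].active) {
			found = req_get(&requests[i]);	/* ref taken under the lock */
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return found;
}

int main(void)
{
	struct req *rq = get_active_request();

	if (rq) {
		printf("got request, refcount now %d\n", rq->refcount);
		req_put(rq);	/* the caller drops it when done */
	}
	return 0;
}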
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fb62b7b8cbcd..0a8d553da3f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -268,8 +268,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *intel_context_create_request(struct intel_context *ce);
-struct i915_request *
-intel_context_find_active_request(struct intel_context *ce);
+struct i915_request *intel_context_get_active_request(struct intel_context *ce);
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cbc8b857d5f7..7a4504ea35c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -248,8 +248,8 @@ void intel_engine_dump_active_requests(struct list_head *requests,
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
ktime_t *now);
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq);
u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
struct intel_context *
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index c33e0d72d670..d37931e16fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -2094,17 +2094,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
-static unsigned long list_count(struct list_head *list)
-{
- struct list_head *pos;
- unsigned long count = 0;
-
- list_for_each(pos, list)
- count++;
-
- return count;
-}
-
static unsigned long read_ul(void *p, size_t x)
{
return *(unsigned long *)(p + x);
@@ -2196,11 +2185,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
}
}
-static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
+static void engine_dump_active_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m)
{
+ struct intel_context *hung_ce = NULL;
struct i915_request *hung_rq = NULL;
- struct intel_context *ce;
- bool guc;
/*
* No need for an engine->irq_seqno_barrier() before the seqno reads.
@@ -2209,27 +2198,22 @@ static void engine_dump_active_requests(struct intel_engine_cs *engine, struct d
* But the intention here is just to report an instantaneous snapshot
* so that's fine.
*/
- lockdep_assert_held(&engine->sched_engine->lock);
+ intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
drm_printf(m, "\tRequests:\n");
- guc = intel_uc_uses_guc_submission(&engine->gt->uc);
- if (guc) {
- ce = intel_engine_get_hung_context(engine);
- if (ce)
- hung_rq = intel_context_find_active_request(ce);
- } else {
- hung_rq = intel_engine_execlist_find_hung_request(engine);
- }
-
if (hung_rq)
engine_dump_request(hung_rq, m, "\t\thung");
+ else if (hung_ce)
+ drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
- if (guc)
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
intel_guc_dump_active_requests(engine, hung_rq, m);
else
- intel_engine_dump_active_requests(&engine->sched_engine->requests,
- hung_rq, m);
+ intel_execlists_dump_active_requests(engine, hung_rq, m);
+
+ if (hung_rq)
+ i915_request_put(hung_rq);
}
void intel_engine_dump(struct intel_engine_cs *engine,
@@ -2239,7 +2223,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
- unsigned long flags;
ktime_t dummy;
if (header) {
@@ -2276,13 +2259,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
i915_reset_count(error));
print_properties(engine, m);
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
engine_dump_active_requests(engine, m);
- drm_printf(m, "\tOn hold?: %lu\n",
- list_count(&engine->sched_engine->hold));
- spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
-
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
@@ -2328,8 +2306,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
return siblings[0]->cops->create_virtual(siblings, count, flags);
}
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
@@ -2381,6 +2358,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
return active;
}
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq)
+{
+ unsigned long flags;
+
+ *ce = intel_engine_get_hung_context(engine);
+ if (*ce) {
+ intel_engine_clear_hung_context(engine);
+
+ *rq = intel_context_get_active_request(*ce);
+ return;
+ }
+
+ /*
+ * Getting here with GuC enabled means it is a forced error capture
+ * with no actual hang. So, no need to attempt the execlist search.
+ */
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
+ return;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ *rq = engine_execlist_find_hung_request(engine);
+ if (*rq)
+ *rq = i915_request_get_rcu(*rq);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 2daffa7c7dfd..21cb5b69d82e 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -4148,6 +4148,33 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+static unsigned long list_count(struct list_head *list)
+{
+ struct list_head *pos;
+ unsigned long count = 0;
+
+ list_for_each(pos, list)
+ count++;
+
+ return count;
+}
+
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
+
+ drm_printf(m, "\tOn hold?: %lu\n",
+ list_count(&engine->sched_engine->hold));
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_execlists.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index a1aa92c983a5..d2c7d45ea062 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -32,6 +32,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
int indent),
unsigned int max);
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m);
+
bool
intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7771a19008c6..bbeeb6dde7ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -288,39 +288,6 @@ static const u8 dg2_xcs_offsets[] = {
END
};
-static const u8 mtl_xcs_offsets[] = {
- NOP(1),
- LRI(13, POSTED),
- REG16(0x244),
- REG(0x034),
- REG(0x030),
- REG(0x038),
- REG(0x03c),
- REG(0x168),
- REG(0x140),
- REG(0x110),
- REG(0x1c0),
- REG(0x1c4),
- REG(0x1c8),
- REG(0x180),
- REG16(0x2b4),
- NOP(4),
-
- NOP(1),
- LRI(9, POSTED),
- REG16(0x3a8),
- REG16(0x28c),
- REG16(0x288),
- REG16(0x284),
- REG16(0x280),
- REG16(0x27c),
- REG16(0x278),
- REG16(0x274),
- REG16(0x270),
-
- END
-};
-
static const u8 gen8_rcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
@@ -739,9 +706,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
else
return gen8_rcs_offsets;
} else {
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
- return mtl_xcs_offsets;
- else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_xcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_xcs_offsets;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 949c19339015..a0740308555d 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1355,6 +1355,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
/* Wa_1407352427:icl,ehl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
PSDUNIT_CLKGATE_DIS);
@@ -2540,13 +2547,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_ENABLE_32_PLANE_MODE);
/*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
- VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0a42f1807f52..c10977cb06b9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1702,7 +1702,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
goto next_context;
guilty = false;
- rq = intel_context_find_active_request(ce);
+ rq = intel_context_get_active_request(ce);
if (!rq) {
head = ce->ring->tail;
goto out_replay;
@@ -1715,6 +1715,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
head = intel_ring_wrap(ce->ring, rq->head);
__i915_request_reset(rq, guilty);
+ i915_request_put(rq);
out_replay:
guc_reset_state(ce, head, guilty);
next_context:
@@ -4817,6 +4818,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
+ bool found;
+
if (!kref_get_unless_zero(&ce->ref))
continue;
@@ -4833,10 +4836,18 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
goto next;
}
+ found = false;
+ spin_lock(&ce->guc_state.lock);
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue;
+ found = true;
+ break;
+ }
+ spin_unlock(&ce->guc_state.lock);
+
+ if (found) {
intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */
@@ -4844,6 +4855,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock(&guc->context_lookup);
goto done;
}
+
next:
intel_context_put(ce);
xa_lock(&guc->context_lookup);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9d5d5a397b64..b20bd6365615 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1596,43 +1596,20 @@ capture_engine(struct intel_engine_cs *engine,
{
struct intel_engine_capture_vma *capture = NULL;
struct intel_engine_coredump *ee;
- struct intel_context *ce;
+ struct intel_context *ce = NULL;
struct i915_request *rq = NULL;
- unsigned long flags;
ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
if (!ee)
return NULL;
- ce = intel_engine_get_hung_context(engine);
- if (ce) {
- intel_engine_clear_hung_context(engine);
- rq = intel_context_find_active_request(ce);
- if (!rq || !i915_request_started(rq))
- goto no_request_capture;
- } else {
- /*
- * Getting here with GuC enabled means it is a forced error capture
- * with no actual hang. So, no need to attempt the execlist search.
- */
- if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
- rq = intel_engine_execlist_find_hung_request(engine);
- spin_unlock_irqrestore(&engine->sched_engine->lock,
- flags);
- }
- }
- if (rq)
- rq = i915_request_get_rcu(rq);
-
- if (!rq)
+ intel_engine_get_hung_entity(engine, &ce, &rq);
+ if (!rq || !i915_request_started(rq))
goto no_request_capture;
capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
- if (!capture) {
- i915_request_put(rq);
+ if (!capture)
goto no_request_capture;
- }
if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
intel_guc_capture_get_matching_node(engine->gt, ee, ce);
@@ -1642,6 +1619,8 @@ capture_engine(struct intel_engine_cs *engine,
return ee;
no_request_capture:
+ if (rq)
+ i915_request_put(rq);
kfree(ee);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 310fb83c527e..2990dd4d4a0d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -28,8 +28,7 @@ struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
struct intel_selftest_saved_policy *saved,
- u32 modify_type)
-
+ enum selftest_scheduler_modify modify_type)
{
int err;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 40768373cdd9..c5a4f49ee206 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -97,6 +97,7 @@ int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index fcf2a002f6cb..91fb494d4009 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -151,6 +151,9 @@ nvkm_firmware_mem_page(struct nvkm_memory *memory)
static enum nvkm_memory_target
nvkm_firmware_mem_target(struct nvkm_memory *memory)
{
+ if (nvkm_firmware_mem(memory)->device->func->tegra)
+ return NVKM_MEM_TARGET_NCOH;
+
return NVKM_MEM_TARGET_HOST;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 364fea320cb3..1c81e5b34d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2405,7 +2405,7 @@ nv162_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2440,7 +2440,7 @@ nv164_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2475,7 +2475,7 @@ nv166_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2510,7 +2510,7 @@ nv167_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2545,7 +2545,7 @@ nv168_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
index 393ade9f7e6c..b7da3ab44c27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -48,6 +48,16 @@ gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
img += 4;
len -= 4;
}
+
+ /* Sigh. Tegra PMU FW's init message... */
+ if (len) {
+ u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+ while (len--) {
+ *(u8 *)img++ = data & 0xff;
+ data >>= 8;
+ }
+ }
}
static void
@@ -64,6 +74,8 @@ gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
img += 4;
len -= 4;
}
+
+ WARN_ON(len);
}
static void
@@ -74,7 +86,7 @@ gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 d
const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
- .min = 4,
+ .min = 1,
.max = 0x100,
.wr_init = gm200_flcn_pio_dmem_wr_init,
.wr = gm200_flcn_pio_dmem_wr,
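[editor's note] A tiny standalone sketch of the trailing-byte extraction added in gm200_flcn_pio_dmem_rd() above, with a made-up word value to show that the leftover bytes come out of the 32-bit DMEM word least-significant byte first; this is illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t out[3];
	uint8_t *img = out;
	uint32_t data = 0x00ccbbaa;  /* assumed final word read from the DMEM port */
	int len = 3;                 /* three leftover bytes */

	while (len--) {
		*img++ = data & 0xff;
		data >>= 8;
	}
	printf("%02x %02x %02x\n", out[0], out[1], out[2]);  /* prints: aa bb cc */
	return 0;
}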
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 634f64f88fc8..81a1ad2c88a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
+static int
+tu102_devinit_wait(struct nvkm_device *device)
+{
+ unsigned timeout = 50 + 2000;
+
+ do {
+ if (nvkm_rd32(device, 0x118128) & 0x00000001) {
+ if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
+ return 0;
+ }
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
+ int ret;
+
+ ret = tu102_devinit_wait(init->base.subdev.device);
+ if (ret)
+ return ret;
+
gm200_devinit_preos(init, post);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 5d0bab8ecb43..6ba5120a2ebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,7 @@ nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 8b7c8ea5e8a5..5a21b0ae4595 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -40,12 +40,6 @@ ga102_fb_vpr_scrub(struct nvkm_fb *fb)
return ret;
}
-static bool
-ga102_fb_vpr_scrub_required(struct nvkm_fb *fb)
-{
- return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
-}
-
static const struct nvkm_fb_func
ga102_fb = {
.dtor = gf100_fb_dtor,
@@ -56,7 +50,7 @@ ga102_fb = {
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = ga102_ram_new,
.default_bigpage = 16,
- .vpr.scrub_required = ga102_fb_vpr_scrub_required,
+ .vpr.scrub_required = tu102_fb_vpr_scrub_required,
.vpr.scrub = ga102_fb_vpr_scrub,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 1f0126437c1a..0e3c0a8f5d71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -49,8 +49,3 @@ gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, s
}
MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index ac03eac0f261..f517751f94ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -89,4 +89,6 @@ bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
int gv100_fb_init_page(struct nvkm_fb *);
+
+bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
new file mode 100644
index 000000000000..be82af0364ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+bool
+tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+ return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
+}
+
+static const struct nvkm_fb_func
+tu102_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ .vpr.scrub_required = tu102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
+ .ram_new = gp100_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
+}
+
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index a72403777329..2ed04da3621d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -225,7 +225,7 @@ gm20b_pmu_init(struct nvkm_pmu *pmu)
pmu->initmsg_received = false;
- nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+ nvkm_falcon_pio_wr(falcon, (u8 *)&args, 0, 0, DMEM, addr_args, sizeof(args), 0, false);
nvkm_falcon_start(falcon);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 857a2f0420d7..c924f1124ebc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1193,14 +1193,11 @@ static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
return 0;
}
-static int boe_panel_unprepare(struct drm_panel *panel)
+static int boe_panel_disable(struct drm_panel *panel)
{
struct boe_panel *boe = to_boe_panel(panel);
int ret;
- if (!boe->prepared)
- return 0;
-
ret = boe_panel_enter_sleep_mode(boe);
if (ret < 0) {
dev_err(panel->dev, "failed to set panel off: %d\n", ret);
@@ -1209,6 +1206,16 @@ static int boe_panel_unprepare(struct drm_panel *panel)
msleep(150);
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+
+ if (!boe->prepared)
+ return 0;
+
if (boe->desc->discharge_on_disable) {
regulator_disable(boe->avee);
regulator_disable(boe->avdd);
@@ -1528,6 +1535,7 @@ static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *pa
}
static const struct drm_panel_funcs boe_panel_funcs = {
+ .disable = boe_panel_disable,
.unprepare = boe_panel_unprepare,
.prepare = boe_panel_prepare,
.enable = boe_panel_enable,
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 53464afc2b9a..91f69e62430b 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -656,18 +656,8 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
.atomic_check = drm_crtc_helper_atomic_check,
};
-static void ssd130x_crtc_reset(struct drm_crtc *crtc)
-{
- struct drm_device *drm = crtc->dev;
- struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
-
- ssd130x_init(ssd130x);
-
- drm_atomic_helper_crtc_reset(crtc);
-}
-
static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
- .reset = ssd130x_crtc_reset,
+ .reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -686,6 +676,12 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
if (ret)
return;
+ ret = ssd130x_init(ssd130x);
+ if (ret) {
+ ssd130x_power_off(ssd130x);
+ return;
+ }
+
ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
backlight_enable(ssd130x->bl_dev);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0108613e79d5..7258975331ca 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
- vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
+ vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
mode->clock * 9 / 10) * 1000;
} else {
vc4_state->hvs_load = mode->clock * 1000;
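[editor's note] A minimal userspace C sketch of the bumped HVS load estimate above, using an assumed CEA 1080p60 mode (148.5 MHz pixel clock, 1920 active, 2200 total pixels); the numbers are illustrative, not taken from the driver:

#include <stdio.h>

int main(void)
{
	int clock = 148500, hdisplay = 1920, htotal = 2200;

	int guess    = clock * hdisplay / htotal + 8000;  /* 129600 + 8000 */
	int min_load = clock * 9 / 10;                    /* 133650 */
	long hvs_load = (long)(guess > min_load ? guess : min_load) * 1000;

	printf("hvs_load = %ld Hz\n", hvs_load);          /* 137600000 */
	return 0;
}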
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 12a00d644b61..7546103f1499 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -97,6 +97,10 @@
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK VC4_MASK(7, 0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE BIT(0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE BIT(4)
+
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
@@ -1306,7 +1310,6 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
unsigned char gcp;
- bool gcp_en;
u32 reg;
int idx;
@@ -1341,16 +1344,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
switch (vc4_state->output_bpc) {
case 12:
gcp = 6;
- gcp_en = true;
break;
case 10:
gcp = 5;
- gcp_en = true;
break;
case 8:
default:
- gcp = 4;
- gcp_en = false;
+ gcp = 0;
break;
}
@@ -1359,8 +1359,7 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
* doesn't signal in GCP.
*/
if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) {
- gcp = 4;
- gcp_en = false;
+ gcp = 0;
}
reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
@@ -1373,11 +1372,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
reg = HDMI_READ(HDMI_GCP_WORD_1);
reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+ reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK;
+ reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE;
HDMI_WRITE(HDMI_GCP_WORD_1, reg);
reg = HDMI_READ(HDMI_GCP_CONFIG);
- reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
- reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+ reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
HDMI_WRITE(HDMI_GCP_CONFIG, reg);
reg = HDMI_READ(HDMI_MISC_CONTROL);
@@ -3018,7 +3018,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
}
vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
- vc4_hdmi, "vc4",
+ vc4_hdmi,
+ vc4_hdmi->variant->card_name,
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 8b92a45a3c89..bd5acc4a8687 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (ret)
return ret;
- for (i = 0; i < num_planes; i++)
+ for (i = 0; i < num_planes; i++) {
+ bo = drm_fb_dma_get_gem_obj(fb, i);
vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
+ }
/*
* We don't support subpixel source positioning for scaling,
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9f4a90493aea..da45215a933d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
void __user *user_bo_handles = NULL;
struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
- int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
uint64_t fence_ctx;
@@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ring_idx = exbuf->ring_idx;
}
- exbuf->fence_fd = -1;
-
virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
- in_fence = sync_file_get_fence(in_fence_fd);
+ in_fence = sync_file_get_fence(exbuf->fence_fd);
if (!in_fence)
return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index aa1cd5126a32..4dcf2eb7aa80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
return -ENOMEM;
}
+ /*
+ * vmw_bo_init will delete the *p_bo object if it fails
+ */
ret = vmw_bo_init(vmw, *p_bo, size,
placement, interruptible, pin,
bo_free);
@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
return ret;
out_error:
- kfree(*p_bo);
*p_bo = NULL;
return ret;
}
@@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
ttm_bo_put(&vmw_bo->base);
}
+ drm_gem_object_put(&vmw_bo->base.base);
return ret;
}
@@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
+ drm_gem_object_put(&vbo->base.base);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* struct vmw_buffer_object should be placed.
* Return: Zero on success, Negative error code on error.
*
- * The vmw buffer object pointer will be refcounted.
+ * The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle,
@@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
*out = gem_to_vmw_bo(gobj);
ttm_bo_get(&(*out)->base);
- drm_gem_object_put(gobj);
return 0;
}
@@ -791,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
&vbo);
-
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->base.base);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index a44d53e33cdb..c0686283ffd1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
ttm_bo_put(&vmw_bo->base);
+ drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;
@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
ttm_bo_put(&vmw_bo->base);
+ drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ce609e7d758f..4d2c28e39f4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
&vmw_sys_placement :
&vmw_vram_sys_placement,
true, false, &vmw_gem_destroy, p_vbo);
-
- (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
if (ret != 0)
goto out_no_bo;
+ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&(*p_vbo)->base.base);
out_no_bo:
return ret;
}
@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->base.base);
out_no_bo:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 257f090071f1..445d619e1fdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo)
+ if (bo) {
vmw_bo_unreference(&bo);
+ drm_gem_object_put(&bo->base.base);
+ }
if (surface)
vmw_surface_unreference(&surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
index 4f40167ad61f..4f40167ad61f 100755..100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index e9f5c89b4ca6..b5b311f2a91a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
+ drm_gem_object_put(&buf->base.base);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 108a496b5d18..51e83dfa1cac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
+ drm_gem_object_put(&buffer->base.base);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3bc63ae768f3..dcfb003841b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
- if (base->shareable && res && res->backup)
+ if (res && res->backup)
drm_gem_object_put(&res->backup->base.base);
*p_base = NULL;
@@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
vmw_bo_reference(res->backup);
- drm_gem_object_get(&res->backup->base.base);
+ /*
+ * We don't expose the handle to userspace, and the surface
+ * already holds a gem reference

+ */
+ drm_gem_handle_delete(file_priv, backup_handle);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
- if (user_srf->prime.base.shareable)
- drm_gem_object_get(&res->backup->base.base);
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 1fb0f7105fb2..c751d12f5df8 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -227,6 +227,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
if (cl_data->num_hid_devices == 0)
return -ENODEV;
+ cl_data->is_any_sensor_enabled = false;
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@@ -287,6 +288,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
status = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
if (status == SENSOR_ENABLED) {
+ cl_data->is_any_sensor_enabled = true;
cl_data->sensor_sts[i] = SENSOR_ENABLED;
rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
if (rc) {
@@ -301,19 +303,26 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_sts[i]);
goto cleanup;
}
+ } else {
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i],
+ get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
}
dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
cl_data->sensor_sts[i]);
}
- if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
+ if (!cl_data->is_any_sensor_enabled ||
+ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);
}
- dev_warn(dev, "Failed to discover, sensors not enabled\n");
+ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
return -EOPNOTSUPP;
}
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 3754fb423e3a..528036892c9d 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -32,6 +32,7 @@ struct amd_input_data {
struct amdtp_cl_data {
u8 init_done;
u32 cur_hid_dev;
+ bool is_any_sensor_enabled;
u32 hid_dev_count;
u32 num_hid_devices;
struct device_info *hid_devices;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3e1803592bd4..5c72aef3d3dd 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1202,6 +1202,7 @@ int hid_open_report(struct hid_device *device)
__u8 *end;
__u8 *next;
int ret;
+ int i;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
hid_parser_main,
@@ -1252,6 +1253,8 @@ int hid_open_report(struct hid_device *device)
goto err;
}
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+ for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
+ device->collection[i].parent_idx = -1;
ret = -EINVAL;
while ((next = fetch_item(start, end, &item)) != NULL) {
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e59e9911fc37..4fa45ee77503 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -12,6 +12,7 @@
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
* Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
* Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
+ * Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net>
*/
/*
@@ -89,7 +90,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
case USB_DEVICE_ID_ELECOM_M_DT1URBK:
case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1URBK:
- case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
+ case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D:
/*
* Report descriptor format:
* 12: button bit count
@@ -99,6 +100,16 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
+ case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
+ /*
+ * Report descriptor format:
+ * 22: button bit count
+ * 30: padding bit count
+ * 24: button report size
+ * 16: button usage maximum
+ */
+ mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8);
+ break;
}
return rdesc;
}
@@ -112,7 +123,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0f8c11842a3a..9e36b4cd905e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -413,6 +413,8 @@
#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
+#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
@@ -428,7 +430,8 @@
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
#define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
#define USB_DEVICE_ID_ELECOM_M_HT1URBK 0x010c
-#define USB_DEVICE_ID_ELECOM_M_HT1DRBK 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C 0x011c
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 9b59e436df0a..77c8c49852b5 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -370,6 +370,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -384,6 +386,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index abf2c95e4d0b..9c1ee8e91e0c 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3978,7 +3978,8 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
}
hidpp_initialize_battery(hidpp);
- hidpp_initialize_hires_scroll(hidpp);
+ if (!hid_is_usb(hidpp->hid_dev))
+ hidpp_initialize_hires_scroll(hidpp);
/* forward current battery state */
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index be3ad02573de..5bc91f68b374 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -393,7 +393,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index cbe43e2567a7..64ac5bdee3a6 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1963,7 +1963,7 @@ static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
{
- debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+ debugfs_lookup_and_remove("hv-balloon", NULL);
}
#else
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index bdf3b50de8ad..c1c74ce08407 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -118,7 +118,7 @@
#define SDA_HOLD_TIME 0x90
/**
- * axxia_i2c_dev - I2C device context
+ * struct axxia_i2c_dev - I2C device context
* @base: pointer to register struct
* @msg: pointer to current message
* @msg_r: pointer to current read message (sequence transfer)
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index a3240ece55b2..581e02cc979a 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -351,7 +351,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
*
* If your hardware is free from tHD;STA issue, try this one.
*/
- return DIV_ROUND_CLOSEST(ic_clk * tSYMBOL, MICRO) - 8 + offset;
+ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) -
+ 8 + offset;
else
/*
* Conditional expression:
@@ -367,7 +368,8 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
* The reason why we need to take into account "tf" here,
* is the same as described in i2c_dw_scl_lcnt().
*/
- return DIV_ROUND_CLOSEST(ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset;
+ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) -
+ 3 + offset;
}
u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
@@ -383,7 +385,8 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
* account the fall time of SCL signal (tf). Default tf value
* should be 0.3 us, for safety.
*/
- return DIV_ROUND_CLOSEST(ic_clk * (tLOW + tf), MICRO) - 1 + offset;
+ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) -
+ 1 + offset;
}
int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
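[editor's note] A minimal sketch, with assumed example values rather than driver defaults, of why the multiplication above is widened to 64 bits before the rounding division (ic_clk in kHz times a timing in ns can exceed 32 bits):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed values: a 1 GHz input clock expressed in kHz, tLOW + tf = 5000 ns. */
	uint32_t ic_clk = 1000000, t_ns = 5000;

	uint32_t narrow = ic_clk * t_ns;          /* wraps: 5e9 exceeds UINT32_MAX */
	uint64_t wide = (uint64_t)ic_clk * t_ns;  /* what DIV_ROUND_CLOSEST_ULL now divides */

	printf("32-bit product: %u\n", narrow);
	printf("64-bit product: %llu, lcnt-style result: %llu\n",
	       (unsigned long long)wide,
	       (unsigned long long)((wide + 500000) / 1000000 - 1));  /* 4999 */
	return 0;
}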
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index e499f96506c5..782fe1ef3ca1 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -396,6 +396,8 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(ATI, 0x73a4), navi_amd },
{ PCI_VDEVICE(ATI, 0x73e4), navi_amd },
{ PCI_VDEVICE(ATI, 0x73c4), navi_amd },
+ { PCI_VDEVICE(ATI, 0x7444), navi_amd },
+ { PCI_VDEVICE(ATI, 0x7464), navi_amd },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index ba043b547393..74182db03a88 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -351,13 +351,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
dev_pm_set_driver_flags(&pdev->dev,
- DPM_FLAG_SMART_PREPARE |
- DPM_FLAG_MAY_SKIP_RESUME);
+ DPM_FLAG_SMART_PREPARE);
} else {
dev_pm_set_driver_flags(&pdev->dev,
DPM_FLAG_SMART_PREPARE |
- DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_MAY_SKIP_RESUME);
+ DPM_FLAG_SMART_SUSPEND);
}
device_enable_async_suspend(&pdev->dev);
@@ -419,21 +417,8 @@ static int dw_i2c_plat_prepare(struct device *dev)
*/
return !has_acpi_companion(dev);
}
-
-static void dw_i2c_plat_complete(struct device *dev)
-{
- /*
- * The device can only be in runtime suspend at this point if it has not
- * been resumed throughout the ending system suspend/resume cycle, so if
- * the platform firmware might mess up with it, request the runtime PM
- * framework to resume it.
- */
- if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
- pm_request_resume(dev);
-}
#else
#define dw_i2c_plat_prepare NULL
-#define dw_i2c_plat_complete NULL
#endif
#ifdef CONFIG_PM
@@ -483,7 +468,6 @@ static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
- .complete = dw_i2c_plat_complete,
SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 5af5cffc444e..d113bed79545 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -826,8 +826,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
/* Setup the DMA */
i2c->dmach = dma_request_chan(dev, "rx-tx");
if (IS_ERR(i2c->dmach)) {
- dev_err(dev, "Failed to request dma\n");
- return PTR_ERR(i2c->dmach);
+ return dev_err_probe(dev, PTR_ERR(i2c->dmach),
+ "Failed to request dma\n");
}
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index d1658ed76562..b31cf4f18f85 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -80,7 +80,7 @@ enum {
#define DEFAULT_SCL_RATE (100 * 1000) /* Hz */
/**
- * struct i2c_spec_values:
+ * struct i2c_spec_values - I2C specification values for various modes
* @min_hold_start_ns: min hold time (repeated) START condition
* @min_low_ns: min LOW period of the SCL clock
* @min_high_ns: min HIGH period of the SCL clock
@@ -136,7 +136,7 @@ static const struct i2c_spec_values fast_mode_plus_spec = {
};
/**
- * struct rk3x_i2c_calced_timings:
+ * struct rk3x_i2c_calced_timings - calculated V1 timings
* @div_low: Divider output for low
* @div_high: Divider output for high
* @tuning: Used to adjust setup/hold data time,
@@ -159,7 +159,7 @@ enum rk3x_i2c_state {
};
/**
- * struct rk3x_i2c_soc_data:
+ * struct rk3x_i2c_soc_data - SOC-specific data
* @grf_offset: offset inside the grf regmap for setting the i2c type
* @calc_timings: Callback function for i2c timing information calculated
*/
@@ -239,7 +239,8 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
}
/**
- * Generate a START condition, which triggers a REG_INT_START interrupt.
+ * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt.
+ * @i2c: target controller data
*/
static void rk3x_i2c_start(struct rk3x_i2c *i2c)
{
@@ -258,8 +259,8 @@ static void rk3x_i2c_start(struct rk3x_i2c *i2c)
}
/**
- * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
- *
+ * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
+ * @i2c: target controller data
* @error: Error code to return in rk3x_i2c_xfer
*/
static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
@@ -298,7 +299,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
}
/**
- * Setup a read according to i2c->msg
+ * rk3x_i2c_prepare_read - Setup a read according to i2c->msg
+ * @i2c: target controller data
*/
static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
{
@@ -329,7 +331,8 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
}
/**
- * Fill the transmit buffer with data from i2c->msg
+ * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg
+ * @i2c: target controller data
*/
static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
{
@@ -532,11 +535,10 @@ out:
}
/**
- * Get timing values of I2C specification
- *
+ * rk3x_i2c_get_spec - Get timing values of I2C specification
* @speed: Desired SCL frequency
*
- * Returns: Matched i2c spec values.
+ * Return: Matched i2c_spec_values.
*/
static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
{
@@ -549,13 +551,12 @@ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
}
/**
- * Calculate divider values for desired SCL frequency
- *
+ * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency
* @clk_rate: I2C input clock rate
* @t: Known I2C timing information
* @t_calc: Calculated rk3x private timings that would be written into regs
*
- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
* a best-effort divider value is returned in divs. If the target rate is
* too high, we silently use the highest possible rate.
*/
@@ -710,13 +711,12 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
}
/**
- * Calculate timing values for desired SCL frequency
- *
+ * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency
* @clk_rate: I2C input clock rate
* @t: Known I2C timing information
* @t_calc: Calculated rk3x private timings that would be written into regs
*
- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
* a best-effort divider value is returned in divs. If the target rate is
* too high, we silently use the highest possible rate.
* The following formulas are v1's method to calculate timings.
@@ -960,14 +960,14 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
}
/**
- * Setup I2C registers for an I2C operation specified by msgs, num.
- *
- * Must be called with i2c->lock held.
- *
+ * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num.
+ * @i2c: target controller data
* @msgs: I2C msgs to process
* @num: Number of msgs
*
- * returns: Number of I2C msgs processed or negative in case of error
+ * Must be called with i2c->lock held.
+ *
+ * Return: Number of I2C msgs processed or negative in case of error
*/
static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
{
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index a2def6f9380a..5eac7ea19993 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -280,6 +280,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
hid_sensor_convert_timestamp(
&accel_state->common_attributes,
*(int64_t *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 3d2e8b4db61a..a4e7c7eff5ac 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -298,8 +298,10 @@ static int berlin2_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
- if (!indio_dev)
+ if (!indio_dev) {
+ of_node_put(parent_np);
return -ENOMEM;
+ }
priv = iio_priv(indio_dev);
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index 36777b827165..f5a0fc9e64c5 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -86,6 +86,8 @@
#define IMX8QXP_ADC_TIMEOUT msecs_to_jiffies(100)
+#define IMX8QXP_ADC_MAX_FIFO_SIZE 16
+
struct imx8qxp_adc {
struct device *dev;
void __iomem *regs;
@@ -95,6 +97,7 @@ struct imx8qxp_adc {
/* Serialise ADC channel reads */
struct mutex lock;
struct completion completion;
+ u32 fifo[IMX8QXP_ADC_MAX_FIFO_SIZE];
};
#define IMX8QXP_ADC_CHAN(_idx) { \
@@ -238,8 +241,7 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
- *val = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
- readl(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+ *val = adc->fifo[0];
mutex_unlock(&adc->lock);
return IIO_VAL_INT;
@@ -265,10 +267,15 @@ static irqreturn_t imx8qxp_adc_isr(int irq, void *dev_id)
{
struct imx8qxp_adc *adc = dev_id;
u32 fifo_count;
+ int i;
fifo_count = FIELD_GET(IMX8QXP_ADC_FCTRL_FCOUNT_MASK,
readl(adc->regs + IMX8QXP_ADR_ADC_FCTRL));
+ for (i = 0; i < fifo_count; i++)
+ adc->fifo[i] = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
+ readl_relaxed(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+
if (fifo_count)
complete(&adc->completion);
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 6d21ea84fa82..a428bdb567d5 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1520,6 +1520,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);
static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
{
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index f53e8558b560..32873fb5f367 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -57,6 +57,18 @@
#define TWL6030_GPADCS BIT(1)
#define TWL6030_GPADCR BIT(0)
+#define USB_VBUS_CTRL_SET 0x04
+#define USB_ID_CTRL_SET 0x06
+
+#define TWL6030_MISC1 0xE4
+#define VBUS_MEAS 0x01
+#define ID_MEAS 0x01
+
+#define VAC_MEAS 0x04
+#define VBAT_MEAS 0x02
+#define BB_MEAS 0x01
+
+
/**
* struct twl6030_chnl_calib - channel calibration
* @gain: slope coefficient for ideal curve
@@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
return ret;
}
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0,
+ VBAT_MEAS | BB_MEAS | VAC_MEAS,
+ TWL6030_MISC1);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
indio_dev->name = DRIVER_NAME;
indio_dev->info = &twl6030_gpadc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 5b4bdf3a26bb..a507d2e17079 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1329,7 +1329,7 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
if (!dev_channels)
- ret = -ENOMEM;
+ return -ENOMEM;
indio_dev->channels = dev_channels;
indio_dev->num_channels = num_channels;
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 8f0ad022c7f1..698c50da1f10 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -231,6 +231,7 @@ static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
gyro_state->timestamp =
hid_sensor_convert_timestamp(&gyro_state->common_attributes,
*(s64 *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
index 423cfe526f2a..6d189c4b9ff9 100644
--- a/drivers/iio/imu/fxos8700_core.c
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -10,6 +10,7 @@
#include <linux/regmap.h>
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -144,9 +145,8 @@
#define FXOS8700_NVM_DATA_BNK0 0xa7
/* Bit definitions for FXOS8700_CTRL_REG1 */
-#define FXOS8700_CTRL_ODR_MSK 0x38
#define FXOS8700_CTRL_ODR_MAX 0x00
-#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3)
+#define FXOS8700_CTRL_ODR_MSK GENMASK(5, 3)
/* Bit definitions for FXOS8700_M_CTRL_REG1 */
#define FXOS8700_HMS_MASK GENMASK(1, 0)
@@ -320,7 +320,7 @@ static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
switch (iio_type) {
case IIO_ACCEL:
return FXOS8700_ACCEL;
- case IIO_ANGL_VEL:
+ case IIO_MAGN:
return FXOS8700_MAGN;
default:
return -EINVAL;
@@ -345,15 +345,35 @@ static int fxos8700_set_active_mode(struct fxos8700_data *data,
static int fxos8700_set_scale(struct fxos8700_data *data,
enum fxos8700_sensor t, int uscale)
{
- int i;
+ int i, ret, val;
+ bool active_mode;
static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
struct device *dev = regmap_get_device(data->regmap);
if (t == FXOS8700_MAGN) {
- dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
+ dev_err(dev, "Magnetometer scale is locked at 0.001Gs\n");
return -EINVAL;
}
+ /*
+ * When the device is in active mode, setting an ACCEL
+ * full-scale range (2g/4g/8g) in FXOS8700_XYZ_DATA_CFG fails.
+ * This does not align with the datasheet, but it is how the
+ * fxos8700 chip behaves. Put the device in standby mode before
+ * setting an ACCEL full-scale range.
+ */
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ active_mode = val & FXOS8700_ACTIVE;
+ if (active_mode) {
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ val & ~FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < scale_num; i++)
if (fxos8700_accel_scale[i].uscale == uscale)
break;
@@ -361,8 +381,12 @@ static int fxos8700_set_scale(struct fxos8700_data *data,
if (i == scale_num)
return -EINVAL;
- return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+ ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
fxos8700_accel_scale[i].bits);
+ if (ret)
+ return ret;
+ return regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ active_mode);
}
static int fxos8700_get_scale(struct fxos8700_data *data,
@@ -372,7 +396,7 @@ static int fxos8700_get_scale(struct fxos8700_data *data,
static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
if (t == FXOS8700_MAGN) {
- *uscale = 1200; /* Magnetometer is locked at 1200uT */
+ *uscale = 1000; /* Magnetometer is locked at 0.001Gs */
return 0;
}
@@ -394,22 +418,61 @@ static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
int axis, int *val)
{
u8 base, reg;
+ s16 tmp;
int ret;
- enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
- base = type ? FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB;
+ /*
+ * The register base address varies with the channel type.
+ * This bug went unnoticed before because the enum-based
+ * indexing was hard to read. Use a switch statement instead.
+ */
+ switch (chan_type) {
+ case IIO_ACCEL:
+ base = FXOS8700_OUT_X_MSB;
+ break;
+ case IIO_MAGN:
+ base = FXOS8700_M_OUT_X_MSB;
+ break;
+ default:
+ return -EINVAL;
+ }
/* Block read 6 bytes of device output registers to avoid data loss */
ret = regmap_bulk_read(data->regmap, base, data->buf,
- FXOS8700_DATA_BUF_SIZE);
+ sizeof(data->buf));
if (ret)
return ret;
/* Convert axis to buffer index */
reg = axis - IIO_MOD_X;
+ /*
+ * Convert to native endianness. The accel and magn data
+ * are signed, so a cast to a signed type is needed.
+ */
+ tmp = be16_to_cpu(data->buf[reg]);
+
+ /*
+ * ACCEL output data registers contain the X-axis, Y-axis, and Z-axis
+ * 14-bit left-justified sample data and MAGN output data registers
+ * contain the X-axis, Y-axis, and Z-axis 16-bit sample data. Apply
+ * a signed 2-bit right shift to the raw data read back from the ACCEL
+ * output data registers, and keep the MAGN data unshifted.
+ * The value should be sign-extended to 32 bits.
+ */
+ switch (chan_type) {
+ case IIO_ACCEL:
+ tmp = tmp >> 2;
+ break;
+ case IIO_MAGN:
+ /* Nothing to do */
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* Convert to native endianness */
- *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
+ *val = sign_extend32(tmp, 15);
return 0;
}
@@ -445,10 +508,9 @@ static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
if (i >= odr_num)
return -EINVAL;
- return regmap_update_bits(data->regmap,
- FXOS8700_CTRL_REG1,
- FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE,
- fxos8700_odr[i].bits << 3 | active_mode);
+ val &= ~FXOS8700_CTRL_ODR_MSK;
+ val |= FIELD_PREP(FXOS8700_CTRL_ODR_MSK, fxos8700_odr[i].bits) | FXOS8700_ACTIVE;
+ return regmap_write(data->regmap, FXOS8700_CTRL_REG1, val);
}
static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
@@ -461,7 +523,7 @@ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
if (ret)
return ret;
- val &= FXOS8700_CTRL_ODR_MSK;
+ val = FIELD_GET(FXOS8700_CTRL_ODR_MSK, val);
for (i = 0; i < odr_num; i++)
if (val == fxos8700_odr[i].bits)
@@ -526,7 +588,7 @@ static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
"1.5625 6.25 12.5 50 100 200 400 800");
static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
-static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
+static IIO_CONST_ATTR(in_magn_scale_available, "0.001000");
static struct attribute *fxos8700_attrs[] = {
&iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
@@ -592,14 +654,19 @@ static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
if (ret)
return ret;
- /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
- ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
- FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
+ /*
+ * Set max full-scale range (+/-8G) for ACCEL sensor in chip
+ * initialization then activate the device.
+ */
+ ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
if (ret)
return ret;
- /* Set for max full-scale range (+/-8G) */
- return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+ /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+ return regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE,
+ FIELD_PREP(FXOS8700_CTRL_ODR_MSK, FXOS8700_CTRL_ODR_MAX) |
+ FXOS8700_ACTIVE);
}
static void fxos8700_chip_uninit(void *data)
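[editor's note] A small standalone sketch of the conversion described in the fxos8700_get_data() comment above, using an assumed raw accelerometer sample (the register bytes are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed big-endian OUT_X_MSB/OUT_X_LSB bytes: 14-bit value -16, left-justified. */
	uint8_t raw[2] = { 0xff, 0xc0 };

	int16_t tmp = (int16_t)((raw[0] << 8) | raw[1]);  /* be16 -> cpu, forced to signed */
	tmp >>= 2;                                        /* drop the two padding bits (accel only) */

	printf("accel sample = %d\n", tmp);               /* prints: -16 */
	return 0;
}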
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index f6660847fb58..8c16cdacf2f2 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -4,6 +4,7 @@ config IIO_ST_LSM6DSX
tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors"
depends on (I2C || SPI || I3C)
select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
select IIO_KFIFO_BUF
select IIO_ST_LSM6DSX_I2C if (I2C)
select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 001055d09750..b1674a5bfa36 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -440,6 +440,8 @@ static int cm32181_probe(struct i2c_client *client)
if (!indio_dev)
return -ENOMEM;
+ i2c_set_clientdata(client, indio_dev);
+
/*
* Some ACPI systems list 2 I2C resources for the CM3218 sensor, the
* SMBus Alert Response Address (ARA, 0x0c) and the actual I2C address.
@@ -460,8 +462,6 @@ static int cm32181_probe(struct i2c_client *client)
return PTR_ERR(client);
}
- i2c_set_clientdata(client, indio_dev);
-
cm32181 = iio_priv(indio_dev);
cm32181->client = client;
cm32181->dev = dev;
@@ -490,7 +490,8 @@ static int cm32181_probe(struct i2c_client *client)
static int cm32181_suspend(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+ struct i2c_client *client = cm32181->client;
return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
CM32181_CMD_ALS_DISABLE);
@@ -498,8 +499,8 @@ static int cm32181_suspend(struct device *dev)
static int cm32181_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+ struct i2c_client *client = cm32181->client;
return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 43b26bc12288..39357dc2d229 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
if (umem_dmabuf->sgt)
goto wait_fence;
- sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
- DMA_BIDIRECTIONAL);
+ sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+ DMA_BIDIRECTIONAL);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
@@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
umem_dmabuf->last_sg_trim = 0;
}
- dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
- DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+ DMA_BIDIRECTIONAL);
umem_dmabuf->sgt = NULL;
}
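The umem_dmabuf hunks switch back to dma_buf_map_attachment()/dma_buf_unmap_attachment(), which expect the attachment's dma_resv lock to be held by the caller; ib_umem_dmabuf_map_pages()/unmap_pages() are called that way, so the _unlocked wrappers would try to take the same lock a second time. Roughly, from the caller's point of view (error handling trimmed):

dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);

sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
if (!IS_ERR(sgt)) {
	/* ... use the mapping ... */
	dma_buf_unmap_attachment(umem_dmabuf->attach, sgt, DMA_BIDIRECTIONAL);
}

dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);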
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index f5f9269fdc16..7c5d487ec916 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
- return -EFAULT;
+ ret = -EFAULT;
addr = arg + offsetof(struct hfi1_tid_info, length);
- if (copy_to_user((void __user *)addr, &tinfo.length,
+ if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
+
+ if (ret)
+ hfi1_user_exp_rcv_invalid(fd, &tinfo);
}
return ret;
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index b02f2f0809c8..350884d5f089 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
int pinned;
- unsigned int npages;
+ unsigned int npages = tidbuf->npages;
unsigned long vaddr = tidbuf->vaddr;
struct page **pages = NULL;
struct hfi1_devdata *dd = fd->uctxt->dd;
- /* Get the number of pages the user buffer spans */
- npages = num_user_pages(vaddr, tidbuf->length);
- if (!npages)
- return -EINVAL;
-
if (npages > fd->uctxt->expected_count) {
dd_dev_err(dd, "Expected buffer too big\n");
return -EINVAL;
@@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return pinned;
}
tidbuf->pages = pages;
- tidbuf->npages = npages;
fd->tid_n_pinned += pinned;
return pinned;
}
@@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
mutex_init(&tidbuf->cover_mutex);
tidbuf->vaddr = tinfo->vaddr;
tidbuf->length = tinfo->length;
+ tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
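Computing tidbuf->npages in hfi1_user_exp_rcv_setup() means the page count exists before pin_rcv_pages() runs, so the pinning path and later error unwinding agree on a single value instead of each deriving it. The count itself is the usual "pages spanned by [vaddr, vaddr + length)" calculation; one common way to write it (a sketch, not the driver's helper verbatim):

static unsigned int num_user_pages(unsigned long vaddr, unsigned long length)
{
	unsigned long first = vaddr >> PAGE_SHIFT;
	unsigned long last = (vaddr + length - 1) >> PAGE_SHIFT;

	return length ? last - first + 1 : 0;	/* 0 pages for a zero-length buffer */
}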
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 7b086fe63a24..195aa9ea18b6 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
continue;
idev = in_dev_get(ip_dev);
+ if (!idev)
+ continue;
+
in_dev_for_each_ifa_rtnl(ifa, idev) {
ibdev_dbg(&iwdev->ibdev,
"CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index ea15ec77e321..54b61930a7fd 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
/* IB ports start with 1, MANA Ethernet ports start with 0 */
port = ucmd.port;
- if (ucmd.port > mc->num_ports)
+ if (port < 1 || port > mc->num_ports)
return -EINVAL;
if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index c301b3be9f30..a2857accc427 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -276,8 +276,8 @@ iter_chunk:
size = pa_end - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -293,8 +293,8 @@ iter_chunk:
size = pa - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
- err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ err = iommu_map_atomic(pd->domain, va_start,
+ pa_start, size, flags);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
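usnic builds these IOMMU mappings in a context that must not sleep, and iommu_map() may allocate page-table memory with GFP_KERNEL; iommu_map_atomic() is the variant for atomic callers (in later kernels this became an explicit gfp_t argument to iommu_map() instead). Sketch of the call, with illustrative prot flags:

err = iommu_map_atomic(domain, iova, paddr, size,
		       IOMMU_READ | IOMMU_WRITE);
if (err)
	pr_err("iommu map of 0x%lx failed: %d\n", iova, err);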
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac25fc80fb33..f10d4bcf87d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
}
priv->rn_ops = dev->netdev_ops;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index c76ba29da1e2..5adba0f754b6 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
if (srv_path->kobj.state_in_sysfs) {
sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
- kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
- rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index b0f776448a1c..fa021af8506e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -192,7 +192,6 @@ static const char * const smbus_pnp_ids[] = {
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
- "SYN3286", /* HP Laptop 15-da3001TU */
NULL
};
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 46f8a694291e..efc61736099b 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1240,6 +1240,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
diff --git a/drivers/md/bcache/bcache_ondisk.h b/drivers/md/bcache/bcache_ondisk.h
index 97413586195b..f96034e0ba4f 100644
--- a/drivers/md/bcache/bcache_ondisk.h
+++ b/drivers/md/bcache/bcache_ondisk.h
@@ -106,7 +106,8 @@ static inline unsigned long bkey_bytes(const struct bkey *k)
return bkey_u64s(k) * sizeof(__u64);
}
-#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
+#define bkey_copy(_dest, _src) unsafe_memcpy(_dest, _src, bkey_bytes(_src), \
+ /* bkey is always padded */)
static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
{
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index e5da469a4235..c182c21de2e8 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -149,7 +149,8 @@ add:
bytes, GFP_KERNEL);
if (!i)
return -ENOMEM;
- memcpy(&i->j, j, bytes);
+ unsafe_memcpy(&i->j, j, bytes,
+ /* "bytes" was calculated by set_bytes() above */);
/* Add to the location after 'where' points to */
list_add(&i->list, where);
ret = 1;
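Both bcache hunks swap memcpy() for unsafe_memcpy() because FORTIFY_SOURCE checks the copy length against the compile-time size of the destination, and these copies intentionally extend into space allocated past the declared struct (the variable-length key/journal data). unsafe_memcpy() takes the same three arguments plus a mandatory justification comment; roughly:

/* dst was allocated with room for the full key, not just sizeof(*dst) */
unsafe_memcpy(dst, src, bkey_bytes(src),
	      /* allocation is sized by bkey_bytes(), see the caller */);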
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index fc3758a5bc1c..53e495223ea0 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -2149,8 +2149,6 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
if (ret)
return ret;
- q->streaming = 1;
-
/*
* Tell driver to start streaming provided sufficient buffers
* are available.
@@ -2161,12 +2159,13 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
goto unprepare;
}
+ q->streaming = 1;
+
dprintk(q, 3, "successful\n");
return 0;
unprepare:
call_void_qop(q, unprepare_streaming, q);
- q->streaming = 0;
return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index 3d3b6dc24ca6..002ea6588edf 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -150,8 +150,8 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
* then return an error.
*/
if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
- ctrl->is_new = 1;
return -ERANGE;
+ ctrl->is_new = 1;
}
return ret;
default:
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 4f9b4a18c74c..594094526648 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
- if (d) {
+ if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
index 3585f02575df..57eeb066a945 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -48,6 +48,7 @@ mcp251xfd_ring_set_ringparam(struct net_device *ndev,
priv->rx_obj_num = layout.cur_rx;
priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
return 0;
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index c26755f662c1..f6f3b43dfb06 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -35,12 +35,13 @@ config NET_DSA_LANTIQ_GSWIP
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+ tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
select MEDIATEK_GE_PHY
help
- This enables support for the MediaTek MT7530, MT7531, and MT7621
- Ethernet switch chips.
+ This enables support for the MediaTek MT7530 and MT7531 Ethernet
+ switch chips. Multi-chip module MT7530 in MT7621AT, MT7621DAT,
+ MT7621ST and MT7623AI SoCs is supported.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index c1a633ca1e6d..e315f669ec06 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -104,7 +104,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
},
{
.compatible = "microchip,ksz8563",
- .data = &ksz_switch_chips[KSZ9893]
+ .data = &ksz_switch_chips[KSZ8563]
},
{
.compatible = "microchip,ksz9567",
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 908fa89444c9..338f238f2043 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1309,14 +1309,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
- }
- /* Set the port as a user port which is to be able to recognize VID
- * from incoming packets before fetching entry within the VLAN table.
- */
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
- VLAN_ATTR(MT7530_VLAN_USER) |
- PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ /* Set the port as a user port which is to be able to recognize
+ * VID from incoming packets before fetching entry within the
+ * VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ } else {
+ /* Also set CPU ports to the "user" VLAN port attribute, to
+ * allow VLAN classification, but keep the EG_TAG attribute as
+ * "consistent" (i.o.w. don't change its value) for packets
+ * received by the switch from the CPU, so that tagged packets
+ * are forwarded to user ports as tagged, and untagged as
+ * untagged.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+ }
}
static void
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 0805f249fff2..c26b8597945b 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -356,7 +356,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
(port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
- rxb->offload_fwd_mark = 1;
+ rxb->offload_fwd_mark = port_priv->priv->forwarding;
netif_rx(rxb);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 02bd3cf9a260..6e4f36aaf5db 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
- ci->pkg == BCMA_PKG_ID_BCM47186) {
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (ci->pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 240a7e8a7652..6c32f5c427b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9274,10 +9274,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
- if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+ bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bp->tx_nr_rings_xdp)
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+ else
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
return 0;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 59debdc344a5..58747292521d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11166,7 +11166,7 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
- if (!netif_running(tp->dev)) {
+ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
@@ -18101,6 +18101,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
+ /* Want to make sure that the reset task doesn't run */
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
@@ -18117,9 +18120,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_timer_stop(tp);
- /* Want to make sure that the reset task doesn't run */
- tg3_reset_task_cancel(tp);
-
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72e42820713d..6cda31520c42 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4627,25 +4627,26 @@ static int init_reset_optional(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
- }
- ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
- if (!ret) {
- u32 pm_info[2];
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
- ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
- pm_info, ARRAY_SIZE(pm_info));
- if (ret) {
- dev_err(&pdev->dev, "Failed to read power management information\n");
- goto err_out_phy_exit;
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
}
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
- if (ret)
- goto err_out_phy_exit;
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
- if (ret)
- goto err_out_phy_exit;
}
/* Fully reset controller at hardware level if mapped in device tree */
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index bf0190e1d2ea..00e2108f2ca4 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -450,7 +450,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
/* ring full, shall not happen because queue is stopped if full
* below
*/
- netif_stop_queue(tx->adapter->netdev);
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
spin_unlock_irqrestore(&tx->lock, flags);
@@ -493,7 +493,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
/* ring can get full with next frame */
- netif_stop_queue(tx->adapter->netdev);
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
}
spin_unlock_irqrestore(&tx->lock, flags);
@@ -503,11 +503,14 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
+ struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
unsigned long flags;
int budget = 128;
- struct tsnep_tx_entry *entry;
- int count;
int length;
+ int count;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
spin_lock_irqsave(&tx->lock, flags);
@@ -564,8 +567,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
} while (likely(budget));
if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
- netif_queue_stopped(tx->adapter->netdev)) {
- netif_wake_queue(tx->adapter->netdev);
+ netif_tx_queue_stopped(nq)) {
+ netif_tx_wake_queue(nq);
}
spin_unlock_irqrestore(&tx->lock, flags);
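tsnep registers one TX ring per netdev queue, so flow control has to act on the individual struct netdev_queue; stopping or waking the whole device would stall or wake unrelated rings. The per-queue helpers, sketched with an illustrative descs_free() accounting function:

struct netdev_queue *nq = netdev_get_tx_queue(netdev, queue_index);

/* xmit path: this ring is about to run out of descriptors */
if (descs_free(ring) < MAX_SKB_FRAGS + 1)
	netif_tx_stop_queue(nq);

/* completion path: enough descriptors reclaimed on this ring again */
if (descs_free(ring) >= (MAX_SKB_FRAGS + 1) * 2 && netif_tx_queue_stopped(nq))
	netif_tx_wake_queue(nq);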
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 3f8032947d86..027fff9f7db0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2410,6 +2410,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
cleaned = qman_p_poll_dqrr(np->p, budget);
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
@@ -2417,9 +2420,6 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
- if (np->xdp_act & XDP_REDIRECT)
- xdp_do_flush();
-
return cleaned;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0c35abb7d065..2e79d18fc3c7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1993,10 +1993,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
if (rx_cleaned >= budget ||
txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
goto out;
}
} while (store_cleaned);
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
/* Update NET DIM with the values for this CDAN */
dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
ch->stats.bytes_per_cdan);
@@ -2032,9 +2037,7 @@ out:
txc_fq->dq_bytes = 0;
}
- if (ch->xdp.res & XDP_REDIRECT)
- xdp_do_flush_map();
- else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
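Both dpaa poll loops now call xdp_do_flush() before napi_complete_done(): frames queued by XDP_REDIRECT sit in per-CPU bulk queues that have to be flushed while the driver is still inside its own NAPI poll, i.e. before completion re-enables interrupts. Skeleton of the required ordering (helpers other than the core API are illustrative):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = example_clean_rx(napi, budget);	/* runs the XDP program */

	if (example_xdp_redirected(napi))		/* any XDP_REDIRECT this poll? */
		xdp_do_flush();				/* must precede napi_complete_done() */

	if (work < budget && napi_complete_done(napi, work))
		example_enable_irq(napi);

	return work;
}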
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 644f3c963730..2341597408d1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3191,7 +3191,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9349f841bd06..587ad81a2dc3 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1055,6 +1055,9 @@ static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
return ERR_PTR(-EPROBE_DEFER);
pcs = lynx_pcs_create(mdiodev);
+ if (!pcs)
+ mdio_device_free(mdiodev);
+
return pcs;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 53d0083e35da..52eec0a50492 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_pf *pf = vsi->back;
if (i40e_enabled_xdp_vsi(vsi)) {
- int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
if (frame_size > i40e_max_xdp_frame_size(vsi))
return -EINVAL;
@@ -13167,6 +13167,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 0d1bab4ac1b0..2a9f1eeeb701 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -249,6 +249,7 @@ struct iavf_cloud_filter {
/* board specific private data structure */
struct iavf_adapter {
+ struct workqueue_struct *wq;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
@@ -459,7 +460,6 @@ struct iavf_device {
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
-extern struct workqueue_struct *iavf_wq;
static inline const char *iavf_state_str(enum iavf_state_t state)
{
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index d79ead5e8d0c..6f171d1d85b7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
if (changed_flags & IAVF_FLAG_LEGACY_RX) {
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
}
@@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
return 0;
@@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
@@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
return err;
}
@@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
if (!err)
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
mutex_unlock(&adapter->crit_lock);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index adc02adef83a..4b09785d2147 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
-struct workqueue_struct *iavf_wq;
int iavf_status_to_errno(enum iavf_status status)
{
@@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
if (!(adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
}
@@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
@@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
if (adapter->state != __IAVF_REMOVE)
/* schedule work on the private workqueue */
- queue_work(iavf_wq, &adapter->adminq_task);
+ queue_work(adapter->wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
@@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter,
/* schedule the watchdog task to immediately process the request */
if (f) {
- queue_work(iavf_wq, &adapter->watchdog_task.work);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
return 0;
}
return -ENOMEM;
@@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
@@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
@@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
if (aq_required) {
adapter->aq_required |= aq_required;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
}
@@ -2693,6 +2692,15 @@ static void iavf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
}
+ if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
+ adapter->netdev_registered &&
+ !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
+ rtnl_trylock()) {
+ netdev_update_features(adapter->netdev);
+ rtnl_unlock();
+ adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ }
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2700,7 +2708,7 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
return;
}
@@ -2708,31 +2716,31 @@ static void iavf_watchdog_task(struct work_struct *work)
case __IAVF_STARTUP:
iavf_startup(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_FAILED:
@@ -2751,14 +2759,14 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task, (5 * HZ));
return;
}
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
@@ -2789,13 +2797,14 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ HZ * 2);
return;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
@@ -2834,9 +2843,9 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task, HZ * 2);
return;
}
@@ -2845,12 +2854,13 @@ static void iavf_watchdog_task(struct work_struct *work)
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
if (adapter->state >= __IAVF_DOWN)
- queue_work(iavf_wq, &adapter->adminq_task);
+ queue_work(adapter->wq, &adapter->adminq_task);
if (adapter->aq_required)
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
else
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ HZ * 2);
}
/**
@@ -2952,7 +2962,7 @@ static void iavf_reset_task(struct work_struct *work)
*/
if (!mutex_trylock(&adapter->crit_lock)) {
if (adapter->state != __IAVF_REMOVE)
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
goto reset_finish;
}
@@ -3116,7 +3126,7 @@ continue_reset:
bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
* state here.
@@ -3208,7 +3218,7 @@ static void iavf_adminq_task(struct work_struct *work)
if (adapter->state == __IAVF_REMOVE)
return;
- queue_work(iavf_wq, &adapter->adminq_task);
+ queue_work(adapter->wq, &adapter->adminq_task);
goto out;
}
@@ -3232,24 +3242,6 @@ static void iavf_adminq_task(struct work_struct *work)
} while (pending);
mutex_unlock(&adapter->crit_lock);
- if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
- if (adapter->netdev_registered ||
- !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
- struct net_device *netdev = adapter->netdev;
-
- rtnl_lock();
- netdev_update_features(netdev);
- rtnl_unlock();
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features
- (adapter, 0, netdev->features);
-
- iavf_set_queue_vlan_tag_loc(adapter);
- }
-
- adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
- }
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
adapter->state == __IAVF_RESETTING)
@@ -4349,7 +4341,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
return 0;
@@ -4898,6 +4890,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &adapter->hw;
hw->back = adapter;
+ adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ iavf_driver_name);
+ if (!adapter->wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
iavf_change_state(adapter, __IAVF_STARTUP);
@@ -4942,7 +4941,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
@@ -4954,6 +4953,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_ioremap:
+ destroy_workqueue(adapter->wq);
+err_alloc_wq:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
@@ -5023,7 +5024,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
return err;
}
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
netif_device_attach(adapter->netdev);
@@ -5170,6 +5171,8 @@ static void iavf_remove(struct pci_dev *pdev)
}
spin_unlock_bh(&adapter->adv_rss_lock);
+ destroy_workqueue(adapter->wq);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
@@ -5196,24 +5199,11 @@ static struct pci_driver iavf_driver = {
**/
static int __init iavf_init_module(void)
{
- int ret;
-
pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
- iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
- iavf_driver_name);
- if (!iavf_wq) {
- pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
- return -ENOMEM;
- }
-
- ret = pci_register_driver(&iavf_driver);
- if (ret)
- destroy_workqueue(iavf_wq);
-
- return ret;
+ return pci_register_driver(&iavf_driver);
}
module_init(iavf_init_module);
@@ -5227,7 +5217,6 @@ module_init(iavf_init_module);
static void __exit iavf_exit_module(void)
{
pci_unregister_driver(&iavf_driver);
- destroy_workqueue(iavf_wq);
}
module_exit(iavf_exit_module);
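The iavf changes replace the driver-global workqueue with one ordered workqueue per adapter, allocated in probe and torn down in remove (and on the probe error path), so each adapter's reset/watchdog/adminq work is serialized with itself and can be destroyed independently. Lifecycle sketch; the queue name argument here is illustrative:

/* probe */
adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
				      dev_name(&pdev->dev));
if (!adapter->wq)
	return -ENOMEM;

queue_work(adapter->wq, &adapter->reset_task);
queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);

/* remove, after outstanding work has been cancelled */
destroy_workqueue(adapter->wq);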
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 24a701fd140e..365ca0c710c4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
break;
default:
@@ -2226,6 +2226,14 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features(adapter, 0,
+ netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+
was_mac_changed = !ether_addr_equal(netdev->dev_addr,
adapter->hw.mac.addr);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f0b604abc5e..713069f809ec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -880,7 +880,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index d02b55b6aa9c..3e08847505ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5524,7 +5524,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
* returned by the firmware is a 16 bit * value, but is indexed
* by [fls(speed) - 1]
*/
-static const u32 ice_aq_to_link_speed[15] = {
+static const u32 ice_aq_to_link_speed[] = {
SPEED_10, /* BIT(0) */
SPEED_100,
SPEED_1000,
@@ -5536,10 +5536,6 @@ static const u32 ice_aq_to_link_speed[15] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
- 0,
- 0,
- 0,
- 0 /* BIT(14) */
};
/**
@@ -5550,5 +5546,8 @@ static const u32 ice_aq_to_link_speed[15] = {
*/
u32 ice_get_link_speed(u16 index)
{
+ if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
+ return 0;
+
return ice_aq_to_link_speed[index];
}
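The firmware reports link speed as a single BIT(n) flag; fls() converts that flag back into an index, and bounding it with ARRAY_SIZE() lets the padding entries go away while an unknown bit from newer firmware simply yields 0 instead of an out-of-bounds read (ice_vf_mbx.c below gets the same treatment). Sketch:

static const u32 speed_tbl[] = {
	SPEED_10, SPEED_100, SPEED_1000,	/* ... one entry per AQ speed bit ... */
};

static u32 aq_bit_to_speed(u16 link_speed)	/* link_speed is a BIT(n) value */
{
	u32 index = fls(link_speed) - 1;	/* BIT(n) -> n */

	if (!link_speed || index >= ARRAY_SIZE(speed_tbl))
		return 0;			/* unknown or future speed bit */

	return speed_tbl[index];
}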
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 4f24d441c35e..0a55c552189a 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -441,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto out;
}
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
out:
/* enable previously downed VSIs */
@@ -731,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
/**
* ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
* @pf: pointer to the PF struct
+ * @locked: is adev device lock held
*
* Assumed caller has already disabled all VSIs before
* calling this function. Reconfiguring DCB based on
* local_dcbx_cfg.
*/
-void ice_pf_dcb_recfg(struct ice_pf *pf)
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct iidc_event *event;
@@ -783,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- /* Notify the AUX drivers that TC change is finished */
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return;
+ if (!locked) {
+ /* Notify the AUX drivers that TC change is finished */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
}
/**
@@ -1044,7 +1047,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
/* changes in configuration update VSI */
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
/* enable previously downed VSIs */
ice_dcb_ena_dis_vsi(pf, true, true);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 4c421c842a13..800879a88c5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
-void ice_pf_dcb_recfg(struct ice_pf *pf);
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
return 0;
}
-static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 8286e47b4bae..0fae0186bd85 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -899,7 +899,7 @@ static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched
{
int status;
- if (node->tx_priority >= 8) {
+ if (priority >= 8) {
NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
return -EINVAL;
}
@@ -929,7 +929,7 @@ static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_n
{
int status;
- if (node->tx_weight > 200 || node->tx_weight < 1) {
+ if (weight > 200 || weight < 1) {
NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 4191994d8f3a..a359f1610fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3641,7 +3641,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
+ bool locked = false;
u32 curr_combined;
+ int ret = 0;
/* do not support changing channels in Safe Mode */
if (ice_is_safe_mode(pf)) {
@@ -3705,15 +3707,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+ if (pf->adev) {
+ mutex_lock(&pf->adev_mutex);
+ device_lock(&pf->adev->dev);
+ locked = true;
+ if (pf->adev->dev.driver) {
+ netdev_err(dev, "Cannot change channels when RDMA is active\n");
+ ret = -EBUSY;
+ goto adev_unlock;
+ }
+ }
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
- if (!netif_is_rxfh_configured(dev))
- return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ if (!netif_is_rxfh_configured(dev)) {
+ ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ goto adev_unlock;
+ }
/* Update rss_size due to change in Rx queues */
vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
- return 0;
+adev_unlock:
+ if (locked) {
+ device_unlock(&pf->adev->dev);
+ mutex_unlock(&pf->adev_mutex);
+ }
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 94aa834cd9a6..a596e07b3ce9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3235,9 +3235,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (vsi->type == ICE_VSI_PF)
- ice_devlink_destroy_pf_port(pf);
-
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index a9a7f8b52140..8ec24f6cf6be 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -275,6 +275,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (status && status != -EEXIST)
return status;
+ netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
return 0;
}
@@ -300,6 +302,8 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
promisc_m, 0);
}
+ netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
return status;
}
@@ -414,6 +418,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
err = 0;
vlan_ops->dis_rx_filtering(vsi);
+
+ /* promiscuous mode implies allmulticast so
+ * that VSIs that are in promiscuous mode are
+ * subscribed to multicast packets coming to
+ * the port
+ */
+ err = ice_set_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err)
+ goto out_promisc;
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -430,6 +444,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_ops->ena_rx_filtering(vsi);
}
+
+ /* disable allmulti here, but only if allmulti is not
+ * still enabled for the netdev
+ */
+ if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ err = ice_clear_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err) {
+ netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
+ err, vsi->vsi_num);
+ }
+ }
}
}
goto exit;
@@ -4195,12 +4221,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)
* @vsi: VSI being changed
* @new_rx: new number of Rx queues
* @new_tx: new number of Tx queues
+ * @locked: is adev device_lock held
*
* Only change the number of queues if new_tx, or new_rx is non-0.
*
* Returns 0 on success.
*/
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
int err = 0, timeout = 50;
@@ -4229,7 +4256,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
ice_vsi_close(vsi);
ice_vsi_rebuild(vsi, false);
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
@@ -4590,7 +4617,7 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
- * ice_register_netdev - register netdev and devlink port
+ * ice_register_netdev - register netdev
* @pf: pointer to the PF struct
*/
static int ice_register_netdev(struct ice_pf *pf)
@@ -4602,11 +4629,6 @@ static int ice_register_netdev(struct ice_pf *pf)
if (!vsi || !vsi->netdev)
return -EIO;
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
err = register_netdev(vsi->netdev);
if (err)
goto err_register_netdev;
@@ -4617,8 +4639,6 @@ static int ice_register_netdev(struct ice_pf *pf)
return 0;
err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4636,6 +4656,7 @@ static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ struct ice_vsi *vsi;
struct ice_pf *pf;
struct ice_hw *hw;
int i, err;
@@ -4918,6 +4939,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pcie_print_link_status(pf->pdev);
probe_done:
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_create_pf_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi || !vsi->netdev) {
+ err = -EINVAL;
+ goto err_netdev_reg;
+ }
+
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
err = ice_register_netdev(pf);
if (err)
goto err_netdev_reg;
@@ -4955,6 +4988,8 @@ err_init_aux_unroll:
err_devlink_reg_param:
ice_devlink_unregister_params(pf);
err_netdev_reg:
+ ice_devlink_destroy_pf_port(pf);
+err_create_pf_port:
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
@@ -5083,6 +5118,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_setup_mc_magic_wake(pf);
ice_vsi_release_all(pf);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+ ice_devlink_destroy_pf_port(pf);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -5531,7 +5567,7 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
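Dropping WQ_MEM_RECLAIM from ice_wq avoids the check_flush_dependency() warning the workqueue core raises when a WQ_MEM_RECLAIM queue flushes work running on an ordinary queue, which can happen once ice work synchronizes with the irdma auxiliary driver's workqueue; the memory-reclaim rescuer guarantee is not needed here. As applied:

/* ice work ends up flushing into !WQ_MEM_RECLAIM workqueues (irdma), so
 * ice_wq must not claim the memory-reclaim guarantee itself.
 */
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq)
	return -ENOMEM;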
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9b762f7972ce..61f844d22512 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
- goto err_free_lkup_exts;
+ goto err_unroll;
/* Group match words into recipes using preferred recipe grouping
* criteria.
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index faba0f857cd9..95f392ab9670 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -1681,7 +1681,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
struct ice_vsi *ch_vsi = NULL;
u16 queue = act->rx_queue;
- if (queue > vsi->num_rxq) {
+ if (queue >= vsi->num_rxq) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because specified queue is invalid");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index d4a4001b6e5d..f56fa94ff3d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
-static const u32 ice_legacy_aq_to_vc_speed[15] = {
+static const u32 ice_legacy_aq_to_vc_speed[] = {
VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
VIRTCHNL_LINK_SPEED_100MB,
VIRTCHNL_LINK_SPEED_1GB,
@@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
@@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
- u32 speed;
+ /* convert a BIT() value into an array index */
+ u32 index = fls(link_speed) - 1;
- if (adv_link_support) {
- /* convert a BIT() value into an array index */
- speed = ice_get_link_speed(fls(link_speed) - 1);
- } else {
+ if (adv_link_support)
+ return ice_get_link_speed(index);
+ else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
- speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
- }
+ return ice_legacy_aq_to_vc_speed[index];
- return speed;
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
/* The mailbox overflow detection algorithm helps to check if there
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 5ecc0ee9a78e..b1ffb81893d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
/* outer VLAN ops regardless of port VLAN config */
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
if (ice_vf_is_port_vlan_ena(vf)) {
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
@@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 7105de6fb344..374b7f10b549 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -800,6 +800,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
struct ice_tx_desc *tx_desc;
u16 cnt = xdp_ring->count;
struct ice_tx_buf *tx_buf;
+ u16 completed_frames = 0;
u16 xsk_frames = 0;
u16 last_rs;
int i;
@@ -809,19 +810,21 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
if ((tx_desc->cmd_type_offset_bsz &
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
if (last_rs >= ntc)
- xsk_frames = last_rs - ntc + 1;
+ completed_frames = last_rs - ntc + 1;
else
- xsk_frames = last_rs + cnt - ntc + 1;
+ completed_frames = last_rs + cnt - ntc + 1;
}
- if (!xsk_frames)
+ if (!completed_frames)
return;
- if (likely(!xdp_ring->xdp_tx_active))
+ if (likely(!xdp_ring->xdp_tx_active)) {
+ xsk_frames = completed_frames;
goto skip;
+ }
ntc = xdp_ring->next_to_clean;
- for (i = 0; i < xsk_frames; i++) {
+ for (i = 0; i < completed_frames; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
if (tx_buf->raw_buf) {
@@ -837,7 +840,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
}
skip:
tx_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_to_clean += xsk_frames;
+ xdp_ring->next_to_clean += completed_frames;
if (xdp_ring->next_to_clean >= cnt)
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3c0c35ecea10..b5b443883da9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2256,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)
}
}
+#ifdef CONFIG_IGB_HWMON
+/**
+ * igb_set_i2c_bb - Init I2C interface
+ * @hw: pointer to hardware structure
+ **/
+static void igb_set_i2c_bb(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 i2cctl;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ wrfl();
+
+ i2cctl = rd32(E1000_I2CPARAMS);
+ i2cctl |= E1000_I2CBB_EN
+ | E1000_I2C_CLK_OE_N
+ | E1000_I2C_DATA_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+#endif
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -2400,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter)
* interface.
*/
if (adapter->ets)
- mac->ops.init_thermal_sensor_thresh(hw);
+ igb_set_i2c_bb(hw);
+ mac->ops.init_thermal_sensor_thresh(hw);
}
}
#endif
@@ -3117,21 +3142,12 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
s32 status = 0;
- s32 i2cctl;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
return 0;
- i2cctl = rd32(E1000_I2CPARAMS);
- i2cctl |= E1000_I2CBB_EN
- | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
- | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
- wr32(E1000_I2CPARAMS, i2cctl);
- wrfl();
-
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
@@ -3521,6 +3537,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = true;
else
adapter->ets = false;
+ /* Only enable I2C bit banging if an external thermal
+ * sensor is supported.
+ */
+ if (adapter->ets)
+ igb_set_i2c_bb(hw);
+ hw->mac.ops.init_thermal_sensor_thresh(hw);
if (igb_sysfs_init(adapter))
dev_err(&pdev->dev,
"failed to allocate sysfs resources\n");
@@ -6794,7 +6816,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
struct timespec64 ts;
u32 tsauxc;
- if (pin < 0 || pin >= IGB_N_PEROUT)
+ if (pin < 0 || pin >= IGB_N_SDP)
return;
spin_lock(&adapter->tmreg_lock);
@@ -6802,7 +6824,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
if (hw->mac.type == e1000_82580 ||
hw->mac.type == e1000_i354 ||
hw->mac.type == e1000_i350) {
- s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
+ s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
u32 systiml, systimh, level_mask, level, rem;
u64 systim, now;
@@ -6850,8 +6872,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
ts.tv_nsec = (u32)systim;
ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
} else {
- ts = timespec64_add(adapter->perout[pin].start,
- adapter->perout[pin].period);
+ ts = timespec64_add(adapter->perout[tsintr_tt].start,
+ adapter->perout[tsintr_tt].period);
}
/* u32 conversion of tv_sec is safe until y2106 */
@@ -6860,7 +6882,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT0;
wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[pin].start = ts;
+ adapter->perout[tsintr_tt].start = ts;
spin_unlock(&adapter->tmreg_lock);
}
@@ -6874,7 +6896,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
struct ptp_clock_event event;
struct timespec64 ts;
- if (pin < 0 || pin >= IGB_N_EXTTS)
+ if (pin < 0 || pin >= IGB_N_SDP)
return;
if (hw->mac.type == e1000_82580 ||
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 44b1740dc098..1dd2a7fee8d4 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
- !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
+ (rd32(IGC_TDH(tx_ring->reg_idx)) !=
+ readl(tx_ring->tail))) {
/* detected Tx unit hang */
netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n"
@@ -5069,6 +5071,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * igc_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void igc_tx_timeout(struct net_device *netdev,
+ unsigned int __always_unused txqueue)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ wr32(IGC_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+/**
* igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
@@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
case SPEED_100:
case SPEED_1000:
case SPEED_2500:
- adapter->tx_timeout_factor = 7;
+ adapter->tx_timeout_factor = 1;
break;
}
@@ -6320,6 +6340,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
+ .ndo_tx_timeout = igc_tx_timeout,
.ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
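
The igc hunks above only report a Tx hang when the hardware head pointer still trails the software tail, i.e. descriptors remain posted but unprocessed. A hedged sketch of that predicate, with plain parameters standing in for the TDH register read and tail pointer:

#include <stdbool.h>
#include <stdio.h>

/* A Tx queue is considered hung only if all of the following hold (mirrors
 * the extended check in igc_clean_tx_irq()):
 *  - a descriptor has been pending longer than the timeout,
 *  - the link is not paused by flow control (TXOFF clear),
 *  - the hardware head (TDH) has not caught up with the software tail,
 *    i.e. there really is outstanding work the hardware stopped on.
 */
static bool tx_queue_hung(bool timed_out, bool tx_paused,
			  unsigned int hw_head, unsigned int sw_tail)
{
	return timed_out && !tx_paused && hw_head != sw_tail;
}

int main(void)
{
	/* Head == tail: hardware drained the ring, so no hang is reported
	 * even if a stale timestamp made the queue look timed out.
	 */
	printf("head==tail: %d\n", tx_queue_hung(true, false, 42, 42));
	/* Head != tail with a timeout and no pause: report a hang. */
	printf("head!=tail: %d\n", tx_queue_hung(true, false, 40, 42));
	return 0;
}
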
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index c34734d432e0..4e10ced736db 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -417,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
*
* We need to convert the system time value stored in the RX/TXSTMP registers
* into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * Returns 0 on success.
**/
-static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim)
+static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
{
switch (adapter->hw.mac.type) {
case igc_i225:
@@ -430,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
systim & 0xFFFFFFFF);
break;
default:
- break;
+ return -EINVAL;
}
+ return 0;
}
/**
@@ -652,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
- igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
+ return;
switch (adapter->link_speed) {
case SPEED_10:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index bc68b8f2176d..8736ca4b2628 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -73,6 +73,8 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ab8370c413f3..4507fba8747a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6778,6 +6778,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @adapter: device handle, pointer to adapter
+ */
+static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
+{
+ if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ return IXGBE_RXBUFFER_2K;
+ else
+ return IXGBE_RXBUFFER_3K;
+}
+
+/**
* ixgbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (adapter->xdp_prog) {
- int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (ixgbe_enabled_xdp_adapter(adapter)) {
+ int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
- if (new_frame_size > ixgbe_rx_bufsz(ring)) {
- e_warn(probe, "Requested MTU size is not supported with XDP\n");
- return -EINVAL;
- }
+ if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
+ e_warn(probe, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
}
}
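
With the ixgbe rewrite above, the MTU check no longer walks every Rx ring; it compares the padded frame size against a single per-adapter limit. A small sketch of the same arithmetic, assuming a plain buffer-size parameter in place of the driver's IXGBE_RXBUFFER_2K/3K selection:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
/* Mirrors IXGBE_PKT_HDR_PAD: header + FCS + room for two VLAN tags = 26. */
#define PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

/* The requested MTU fits under XDP if the padded frame still fits the
 * largest Rx buffer the adapter can use.
 */
static bool xdp_mtu_ok(int new_mtu, int max_frame_buf)
{
	return new_mtu + PKT_HDR_PAD <= max_frame_buf;
}

int main(void)
{
	/* A 1500-byte MTU easily fits a 3K buffer. */
	printf("mtu 1500 vs 3072: %d\n", xdp_mtu_ok(1500, 3072));
	/* A 2048-byte MTU exceeds a 2K buffer once the 26-byte pad is added. */
	printf("mtu 2048 vs 2048: %d\n", xdp_mtu_ok(2048, 2048));
	return 0;
}
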
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bda1a6fa2ec4..e4407f09c9d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1500,6 +1500,9 @@ static const struct devlink_param rvu_af_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+};
+
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
"npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1556,7 +1559,6 @@ int rvu_register_dl(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
struct devlink *dl;
- size_t size;
int err;
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1578,21 +1580,32 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
+ err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
/* Register exact match devlink only for CN10K-B */
- size = ARRAY_SIZE(rvu_af_dl_params);
if (!rvu_npc_exact_has_match_table(rvu))
- size -= 1;
+ goto done;
- err = devlink_params_register(dl, rvu_af_dl_params, size);
+ err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
if (err) {
dev_err(rvu->dev,
- "devlink params register failed with error %d", err);
- goto err_dl_health;
+ "devlink exact match params register failed with error %d", err);
+ goto err_dl_exact_match;
}
+done:
devlink_register(dl);
return 0;
+err_dl_exact_match:
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
err_dl_health:
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
@@ -1605,8 +1618,14 @@ void rvu_unregister_dl(struct rvu *rvu)
struct devlink *dl = rvu_dl->dl;
devlink_unregister(dl);
- devlink_params_unregister(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+ /* Unregister exact match devlink only for CN10K-B */
+ if (rvu_npc_exact_has_match_table(rvu))
+ devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
}
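
The rvu_devlink.c refactor above replaces the "shrink the array by one" trick with two separate parameter tables, so the conditional exact-match parameters can be registered and unregistered independently. A hedged, self-contained sketch of the same register/unwind ordering, with stub tables and functions (illustrative names, not the devlink API):

#include <stdio.h>

/* Stub "registration" that fails on demand, to exercise the unwind path. */
static int register_params(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "registering %s failed\n", name);
		return -1;
	}
	printf("registered %s\n", name);
	return 0;
}

static void unregister_params(const char *name)
{
	printf("unregistered %s\n", name);
}

/* Mirrors the rvu_register_dl() flow: always register the base table,
 * register the exact-match table only when supported, and on failure
 * unwind only what was actually registered.
 */
static int register_dl(int has_exact_match, int fail_exact_match)
{
	int err;

	err = register_params("base params", 0);
	if (err)
		return err;

	if (!has_exact_match)
		return 0;

	err = register_params("exact-match params", fail_exact_match);
	if (err) {
		unregister_params("base params");
		return err;
	}
	return 0;
}

int main(void)
{
	register_dl(1, 0);	/* CN10K-B-like: both tables registered   */
	register_dl(1, 1);	/* failure path: base params are unwound  */
	register_dl(0, 0);	/* no exact-match table: base params only */
	return 0;
}
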
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e3de9a53b2d9..e3123723522e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1570,8 +1570,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
- err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
- id, PAGE_SIZE);
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -1870,7 +1870,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
while (done < budget) {
unsigned int pktlen, *rxdcsum;
+ bool has_hwaccel_tag = false;
struct net_device *netdev;
+ u16 vlan_proto, vlan_tci;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2010,27 +2012,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- if (trxd.rxd3 & RX_DMA_VTAG_V2)
- __vlan_hwaccel_put_tag(skb,
- htons(RX_DMA_VPID(trxd.rxd4)),
- RX_DMA_VID(trxd.rxd4));
+ if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+ vlan_proto = RX_DMA_VPID(trxd.rxd4);
+ vlan_tci = RX_DMA_VID(trxd.rxd4);
+ has_hwaccel_tag = true;
+ }
} else if (trxd.rxd2 & RX_DMA_VTAG) {
- __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
- RX_DMA_VID(trxd.rxd3));
+ vlan_proto = RX_DMA_VPID(trxd.rxd3);
+ vlan_tci = RX_DMA_VID(trxd.rxd3);
+ has_hwaccel_tag = true;
}
}
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
- unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
+ if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
+ unsigned int port = vlan_proto & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
-
- __vlan_hwaccel_clear_tag(skb);
+ } else if (has_hwaccel_tag) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}
skb_record_rx_queue(skb, 0);
@@ -3111,7 +3115,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
- if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 18a50529ce7b..2d9186d32bc0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -519,7 +519,7 @@
#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
-#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_DUPLEX_HALF BIT(4)
#define SGMII_IF_MODE_BIT5 BIT(5)
#define SGMII_REMOTE_FAULT_DIS BIT(8)
#define SGMII_CODE_SYNC_SET_VAL BIT(9)
@@ -1036,11 +1036,13 @@ struct mtk_soc_data {
* @regmap: The register map pointing at the range used to setup
* SGMII modes
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ * @interface: Currently configured interface mode
* @pcs: Phylink PCS structure
*/
struct mtk_pcs {
struct regmap *regmap;
u32 ana_rgc3;
+ phy_interface_t interface;
struct phylink_pcs pcs;
};
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 269208a841c7..1ff024f42444 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -615,8 +615,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
- flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
- GFP_ATOMIC);
+ flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
if (!flow_info)
return;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index ea64fac1d425..b5e432031340 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -279,7 +279,6 @@ struct mtk_flow_entry {
struct {
struct mtk_flow_entry *base_flow;
struct hlist_node list;
- struct {} end;
} l2_data;
};
struct rhash_head node;
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 5c286f2c9418..bb00de1003ac 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -43,11 +43,6 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
int advertise, link_timer;
bool changed, use_an;
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- rgc3 = RG_PHY_SPEED_3_125G;
- else
- rgc3 = 0;
-
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
if (advertise < 0)
@@ -88,9 +83,22 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
bmcr = 0;
}
- /* Configure the underlying interface speed */
- regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
- RG_PHY_SPEED_3_125G, rgc3);
+ if (mpcs->interface != interface) {
+ /* PHYA power down */
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ rgc3 = RG_PHY_SPEED_3_125G;
+ else
+ rgc3 = 0;
+
+ /* Configure the underlying interface speed */
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);
+
+ mpcs->interface = interface;
+ }
/* Update the advertisement, noting whether it has changed */
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
@@ -108,9 +116,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
- /* Release PHYA power down state */
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, 0);
+ /* Release PHYA power down state.
+ * Clearing only the SGMII_PHYA_PWD bit isn't enough.
+ * There are cases when the SGMII_PHYA_PWD register contains 0x9, which
+ * prevents SGMII from working. The SGMII still shows link but no traffic
+ * can flow. Writing 0x0 to the PHYA_PWD register fixes the issue. 0x0 was
+ * taken from a good working state of the SGMII interface.
+ * It is unknown how much settling time the QPHY needs, but the sequence is
+ * racy without a sleep. Tested on mt7622 & mt7986.
+ */
+ usleep_range(50, 100);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return changed;
}
@@ -138,11 +154,11 @@ static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
else
sgm_mode = SGMII_SPEED_1000;
- if (duplex == DUPLEX_FULL)
- sgm_mode |= SGMII_DUPLEX_FULL;
+ if (duplex != DUPLEX_FULL)
+ sgm_mode |= SGMII_DUPLEX_HALF;
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_DUPLEX_FULL | SGMII_SPEED_MASK,
+ SGMII_DUPLEX_HALF | SGMII_SPEED_MASK,
sgm_mode);
}
}
@@ -171,6 +187,8 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
return PTR_ERR(ss->pcs[i].regmap);
ss->pcs[i].pcs.ops = &mtk_pcs_ops;
+ ss->pcs[i].pcs.poll = true;
+ ss->pcs[i].interface = PHY_INTERFACE_MODE_NA;
}
return 0;
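
The mtk_sgmii.c hunks above cache the currently configured interface mode and only power the PHYA down and reprogram the speed registers when that mode actually changes. A user-space sketch of that cache-and-compare pattern (hypothetical types, not the phylink API):

#include <stdio.h>

/* Stand-ins for the PCS state and the two interface modes the hunk cares
 * about.
 */
enum mode { MODE_NA, MODE_SGMII, MODE_2500BASEX };

struct pcs_state {
	enum mode interface;	/* currently configured mode, MODE_NA at init */
	int reconfig_count;	/* how many expensive reprogrammings happened */
};

/* Mirrors the mtk_pcs_config() change: the speed-related analog registers
 * are only touched (with the PHYA powered down around the write) when the
 * requested interface mode differs from the cached one.
 */
static void pcs_config(struct pcs_state *pcs, enum mode want)
{
	if (pcs->interface != want) {
		/* power down PHYA, reprogram rate, power back up ... */
		pcs->reconfig_count++;
		pcs->interface = want;
	}
	/* advertisement / AN restart handling would follow unconditionally */
}

int main(void)
{
	struct pcs_state pcs = { .interface = MODE_NA };

	pcs_config(&pcs, MODE_SGMII);	/* first call reprograms */
	pcs_config(&pcs, MODE_SGMII);	/* repeat is a no-op     */
	pcs_config(&pcs, MODE_2500BASEX);
	printf("reconfigurations: %d\n", pcs.reconfig_count); /* prints 2 */
	return 0;
}
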
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 3e232a65a0c3..bb95b40d25eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
pages = dev->priv.dbg.pages_debugfs;
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
- debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
- debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+ debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 21831386b26e..5b05b884b5fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
MLX5_GET(mtrc_cap, out, num_string_trace);
tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+ tracer->str_db.loaded = false;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+ tracer->buff.consumer_index = 0;
return err;
}
@@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
tracer->owner = false;
- tracer->buff.consumer_index = 0;
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 464eb3a18450..cdc87ecae5d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_host_pf_cleanup(dev);
- err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+ err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 8099a21e674c..ce85b48d327d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
- /* only handle the event on native eswtich of representor */
- if (!mlx5_esw_bridge_is_local(dev, rep, esw))
- break;
-
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1892ccb889b3..7cd36f4ac3ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -443,7 +443,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (fs->vlan->cvlan_filter_disabled)
+ if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
return;
fs->vlan->cvlan_filter_disabled = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index abcc614b6191..6c24f33a5ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -591,7 +591,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->ix = c->ix;
rq->channel = c;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->hw_mtu =
+ MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
@@ -1014,35 +1015,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
return mlx5e_rq_to_ready(rq, curr_state);
}
-static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
-{
- struct mlx5_core_dev *mdev = rq->mdev;
-
- void *in;
- void *rqc;
- int inlen;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-
- MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
- MLX5_SET64(modify_rq_in, in, modify_bitmask,
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
- MLX5_SET(rqc, rqc, scatter_fcs, enable);
- MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
-
- err = mlx5_core_modify_rq(mdev, rq->rqn, in);
-
- kvfree(in);
-
- return err;
-}
-
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -3314,20 +3286,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
-static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
int err;
@@ -3903,41 +3861,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
+static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool enable = *(bool *)ctx;
+
+ return mlx5e_set_rx_port_ts(mdev, enable);
+}
+
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *chs = &priv->channels;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_params new_params;
int err;
mutex_lock(&priv->state_lock);
- if (enable) {
- err = mlx5e_set_rx_port_ts(mdev, false);
- if (err)
- goto out;
-
- chs->params.scatter_fcs_en = true;
- err = mlx5e_modify_channels_scatter_fcs(chs, true);
- if (err) {
- chs->params.scatter_fcs_en = false;
- mlx5e_set_rx_port_ts(mdev, true);
- }
- } else {
- chs->params.scatter_fcs_en = false;
- err = mlx5e_modify_channels_scatter_fcs(chs, false);
- if (err) {
- chs->params.scatter_fcs_en = true;
- goto out;
- }
- err = mlx5e_set_rx_port_ts(mdev, true);
- if (err) {
- mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
- err = 0;
- }
- }
-
-out:
+ new_params = chs->params;
+ new_params.scatter_fcs_en = enable;
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
+ &new_params.scatter_fcs_en, true);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -4074,6 +4018,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+
return features;
}
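
The en_main.c change folds the scatter-FCS toggle into the generic safe-switch-params path and, as the first hunk shows, shrinks the reported hardware MTU by the 4-byte FCS whenever FCS scattering is disabled. A tiny sketch of that MTU arithmetic with hypothetical names (the header-length term is simplified):

#include <stdbool.h>
#include <stdio.h>

#define ETH_FCS_LEN 4

/* Hypothetical SW->HW MTU conversion; the real MLX5E_SW2HW_MTU() adds the
 * hard header length, here reduced to a fixed 14-byte Ethernet header.
 */
static int sw2hw_mtu(int sw_mtu)
{
	return sw_mtu + 14;
}

/* Mirrors the rq->hw_mtu assignment: when the FCS is not scattered into the
 * buffer, the device must not count those 4 bytes against the MTU.
 */
static int rq_hw_mtu(int sw_mtu, bool scatter_fcs_en)
{
	return sw2hw_mtu(sw_mtu) - ETH_FCS_LEN * !scatter_fcs_en;
}

int main(void)
{
	printf("scatter on : %d\n", rq_hw_mtu(1500, true));   /* 1514 */
	printf("scatter off: %d\n", rq_hw_mtu(1500, false));  /* 1510 */
	return 0;
}
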
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index b176648d1343..3cdcb0e0b20f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct mlx5_esw_bridge *bridge;
port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
+ if (!port)
return;
bridge = port->bridge;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index eff92dc0927c..e09518f887a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -189,16 +189,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
}
}
-static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
+static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
if (rate < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
if (width < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
return rate * width;
}
@@ -221,16 +221,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
- if (speed < 0)
- return -EINVAL;
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
- link_ksettings->base.duplex = DUPLEX_FULL;
link_ksettings->base.port = PORT_OTHER;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
- link_ksettings->base.speed = speed;
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d5f2a4b1fed..4e1b5757528a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -2110,7 +2110,7 @@ static int __init mlx5_init(void)
mlx5_core_verify_params();
mlx5_register_debugfs();
- err = pci_register_driver(&mlx5_core_driver);
+ err = mlx5e_init();
if (err)
goto err_debug;
@@ -2118,16 +2118,16 @@ static int __init mlx5_init(void)
if (err)
goto err_sf;
- err = mlx5e_init();
+ err = pci_register_driver(&mlx5_core_driver);
if (err)
- goto err_en;
+ goto err_pci;
return 0;
-err_en:
+err_pci:
mlx5_sf_driver_unregister();
err_sf:
- pci_unregister_driver(&mlx5_core_driver);
+ mlx5e_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -2135,9 +2135,9 @@ err_debug:
static void __exit mlx5_cleanup(void)
{
- mlx5e_cleanup();
- mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
+ mlx5_sf_driver_unregister();
+ mlx5e_cleanup();
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 60596357bfc7..0eb50be175cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
return (u32)func_id | (ec_function << 16);
}
+static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
+{
+ if (!func_id)
+ return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
+
+ return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
+}
+
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
@@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
int notify_fail = event;
+ u16 func_type;
u64 addr;
int err;
u32 *in;
@@ -383,11 +392,9 @@ retry:
goto out_dropped;
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] += npages;
dev->priv.fw_pages += npages;
- if (func_id)
- dev->priv.vfs_pages += npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
@@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
struct rb_root *root;
struct rb_node *p;
int npages = 0;
+ u16 func_type;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
@@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
free_fwp(dev, fwp, fwp->free_count);
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= npages;
dev->priv.fw_pages -= npages;
- if (func_id)
- dev->priv.vfs_pages -= npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
@@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
+ u16 func_type;
u32 *out;
int err;
int i;
@@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (nclaimed)
*nclaimed = num_claimed;
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= num_claimed;
dev->priv.fw_pages -= num_claimed;
- if (func_id)
- dev->priv.vfs_pages -= num_claimed;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= num_claimed;
out_free:
kvfree(out);
@@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
- WARN(dev->priv.vfs_pages,
+ WARN(dev->priv.page_counters[MLX5_VF],
"VFs FW pages counter is %d after reclaiming all pages\n",
- dev->priv.vfs_pages);
- WARN(dev->priv.host_pf_pages,
+ dev->priv.page_counters[MLX5_VF]);
+ WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
- dev->priv.host_pf_pages);
+ dev->priv.page_counters[MLX5_HOST_PF]);
return 0;
}
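
The pagealloc.c hunks above collapse the old per-field bookkeeping (vfs_pages, host_pf_pages) into one counter array indexed by the function type that func_id_to_type() derives. A user-space sketch of that classification, with plain parameters standing in for mlx5_core_is_ecpf() and mlx5_core_max_vfs():

#include <stdio.h>

enum func_type { TYPE_PF, TYPE_VF, TYPE_SF, TYPE_HOST_PF, TYPE_MAX };

static const char * const type_name[TYPE_MAX] = {
	[TYPE_PF] = "PF", [TYPE_VF] = "VF", [TYPE_SF] = "SF",
	[TYPE_HOST_PF] = "HOST_PF",
};

/* Mirrors func_id_to_type(): function id 0 is the (host) PF, ids up to the
 * VF limit are VFs, anything above that is a sub-function (SF).
 */
static enum func_type func_id_to_type(unsigned int func_id, int is_ecpf,
				      int ec_function, unsigned int max_vfs)
{
	if (!func_id)
		return (is_ecpf && !ec_function) ? TYPE_HOST_PF : TYPE_PF;
	return func_id <= max_vfs ? TYPE_VF : TYPE_SF;
}

int main(void)
{
	unsigned int counters[TYPE_MAX] = { 0 };

	counters[func_id_to_type(0, 0, 0, 8)] += 64;	/* PF pages      */
	counters[func_id_to_type(3, 0, 0, 8)] += 32;	/* VF pages      */
	counters[func_id_to_type(9, 0, 0, 8)] += 16;	/* SF pages      */
	counters[func_id_to_type(0, 1, 0, 8)] += 8;	/* host PF pages */

	for (int i = 0; i < TYPE_MAX; i++)
		printf("%-7s %u\n", type_name[i], counters[i]);
	return 0;
}
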
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index c0e6c487c63c..3008e9ce2bbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
- if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
+ if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b851141e03de..042ca0349124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
+ mlx5dr_domain_nic_lock(nic_dmn);
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- return ret;
+ goto err_unlock;
hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
if (likely(hw_ste_arr_is_opt)) {
@@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr)
- return -ENOMEM;
+ if (!hw_ste_arr) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
}
- mlx5dr_domain_nic_lock(nic_dmn);
-
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
@@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
- goto out;
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
+
+ return 0;
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,12 +1243,12 @@ remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
- mlx5dr_domain_nic_unlock(nic_dmn);
-
-out:
- if (unlikely(!hw_ste_arr_is_opt))
+ if (!hw_ste_arr_is_opt)
kfree(hw_ste_arr);
+err_unlock:
+ mlx5dr_domain_nic_unlock(nic_dmn);
+
return ret;
}
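
The dr_rule.c hunks move the NIC domain lock in front of the builder selection and the STE array allocation, so every failure funnels through a single err_unlock label. A hedged sketch of that acquire-early/unwind-once structure, with stub lock and allocation helpers:

#include <stdio.h>
#include <stdlib.h>

static int locked;

static void nic_lock(void)   { locked = 1; printf("lock taken\n");    }
static void nic_unlock(void) { locked = 0; printf("lock released\n"); }

/* Mirrors the reworked dr_rule_create_rule_nic() flow: take the domain lock
 * first, then do the fallible steps, and send every error through one unlock
 * label so the lock can never leak.  fail_alloc forces the error path.
 */
static int create_rule(int fail_alloc)
{
	void *ste_arr;
	int ret = 0;

	nic_lock();

	ste_arr = fail_alloc ? NULL : malloc(64);
	if (!ste_arr) {
		ret = -1;
		goto err_unlock;
	}

	/* ... build and insert the rule while holding the lock ... */

	nic_unlock();
	free(ste_arr);
	return 0;

err_unlock:
	nic_unlock();
	return ret;
}

int main(void)
{
	create_rule(0);
	create_rule(1);
	printf("lock still held? %s\n", locked ? "yes (bug)" : "no");
	return 0;
}
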
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 5314c064ceae..55b484b10562 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -608,12 +608,12 @@ allocate_new:
lan966x_fdma_rx_reload(rx);
}
- if (counter < weight && napi_complete_done(napi, counter))
- lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
-
if (redirect)
xdp_do_flush();
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
return counter;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 0ed1ea7727c5..69e76634f9aa 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -633,7 +633,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@@ -649,7 +649,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index e708c2d04983..f9b8f372ec8a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_irq_context *gic;
- unsigned int max_irqs;
- u16 *cpus;
- cpumask_var_t req_mask;
+ unsigned int max_irqs, cpu;
int nvec, irq;
int err, i = 0, j;
@@ -1240,39 +1238,31 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq_vector;
}
- if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
- err = -ENOMEM;
- goto free_irq;
- }
-
- cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
- if (!cpus) {
- err = -ENOMEM;
- goto free_mask;
- }
- for (i = 0; i < nvec; i++)
- cpus[i] = cpumask_local_spread(i, gc->numa_node);
-
for (i = 0; i < nvec; i++) {
- cpumask_set_cpu(cpus[i], req_mask);
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
+ if (!i)
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
+ pci_name(pdev));
+ else
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
+
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
- goto free_mask;
+ goto free_irq;
}
- err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
if (err)
- goto free_mask;
- irq_set_affinity_and_hint(irq, req_mask);
- cpumask_clear(req_mask);
+ goto free_irq;
+
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
- free_cpumask_var(req_mask);
- kfree(cpus);
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
@@ -1283,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
return 0;
-free_mask:
- free_cpumask_var(req_mask);
- kfree(cpus);
free_irq:
for (j = i - 1; j >= 0; j--) {
irq = pci_irq_vector(pdev, j);
gic = &gc->irq_contexts[j];
+
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
@@ -1317,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
continue;
gic = &gc->irq_contexts[i];
+
+ /* Need to clear the hint before free_irq */
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
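
The mana change above gives every MSI-X vector its own name (the hardware channel for vector 0, one per queue after that) and pins it with a plain cpumask_of() instead of a heap-allocated mask. A small sketch of the naming scheme, using a hypothetical buffer size in place of MANA_IRQ_NAME_SZ:

#include <stdio.h>

#define IRQ_NAME_SZ 40	/* hypothetical, stands in for MANA_IRQ_NAME_SZ */

/* Mirrors the snprintf() calls added in mana_gd_setup_irqs(): vector 0 is
 * the hardware channel interrupt, every later vector i names queue i - 1.
 */
static void irq_name(char *buf, int vec, const char *pci_name)
{
	if (!vec)
		snprintf(buf, IRQ_NAME_SZ, "mana_hwc@pci:%s", pci_name);
	else
		snprintf(buf, IRQ_NAME_SZ, "mana_q%d@pci:%s", vec - 1, pci_name);
}

int main(void)
{
	char name[IRQ_NAME_SZ];

	for (int vec = 0; vec < 3; vec++) {
		irq_name(name, vec, "0000:3b:00.0");
		printf("%s\n", name);
	}
	return 0;
}
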
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 7c0897e779dc..ee052404eb55 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
flow_rule_match_control(rule, &match);
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->vlan.vid.value = match.key->vlan_id;
+ filter->vlan.vid.mask = match.mask->vlan_id;
+ filter->vlan.pcp.value[0] = match.key->vlan_priority;
+ filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
match_protocol = false;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_match_vlan match;
-
- flow_rule_match_vlan(rule, &match);
- filter->key_type = OCELOT_VCAP_KEY_ANY;
- filter->vlan.vid.value = match.key->vlan_id;
- filter->vlan.vid.mask = match.mask->vlan_id;
- filter->vlan.pcp.value[0] = match.key->vlan_priority;
- filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
- match_protocol = false;
- }
-
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
if (filter->block_id == VCAP_ES0) {
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 1a82f10c8853..2180ae94c744 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -335,8 +335,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -355,8 +355,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 4632268695cb..063cd371033a 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -129,26 +129,31 @@ struct nfp_ipsec_cfg_mssg {
};
};
-static int nfp_ipsec_cfg_cmd_issue(struct nfp_net *nn, int type, int saidx,
- struct nfp_ipsec_cfg_mssg *msg)
+static int nfp_net_ipsec_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
+ unsigned int offset = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct nfp_ipsec_cfg_mssg *msg = (struct nfp_ipsec_cfg_mssg *)entry->msg;
int i, msg_size, ret;
- msg->cmd = type;
- msg->sa_idx = saidx;
- msg->rsp = 0;
- msg_size = ARRAY_SIZE(msg->raw);
+ ret = nfp_net_mbox_lock(nn, sizeof(*msg));
+ if (ret)
+ return ret;
+ msg_size = ARRAY_SIZE(msg->raw);
for (i = 0; i < msg_size; i++)
- nn_writel(nn, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
+ nn_writel(nn, offset + 4 * i, msg->raw[i]);
- ret = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_IPSEC);
- if (ret < 0)
+ ret = nfp_net_mbox_reconfig(nn, entry->cmd);
+ if (ret < 0) {
+ nn_ctrl_bar_unlock(nn);
return ret;
+ }
/* For now we always read the whole message response back */
for (i = 0; i < msg_size; i++)
- msg->raw[i] = nn_readl(nn, NFP_NET_CFG_MBOX_VAL + 4 * i);
+ msg->raw[i] = nn_readl(nn, offset + 4 * i);
+
+ nn_ctrl_bar_unlock(nn);
switch (msg->rsp) {
case NFP_IPSEC_CFG_MSSG_OK:
@@ -477,7 +482,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
}
/* Allocate saidx and commit the SA */
- err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_ADD_SA, saidx, &msg);
+ msg.cmd = NFP_IPSEC_CFG_MSSG_ADD_SA;
+ msg.sa_idx = saidx;
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
if (err) {
xa_erase(&nn->xa_ipsec, saidx);
nn_err(nn, "Failed to issue IPsec command err ret=%d\n", err);
@@ -491,14 +499,17 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
static void nfp_net_xfrm_del_state(struct xfrm_state *x)
{
+ struct nfp_ipsec_cfg_mssg msg = {
+ .cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+ .sa_idx = x->xso.offload_handle - 1,
+ };
struct net_device *netdev = x->xso.dev;
- struct nfp_ipsec_cfg_mssg msg;
struct nfp_net *nn;
int err;
nn = netdev_priv(netdev);
- err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_INV_SA,
- x->xso.offload_handle - 1, &msg);
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
if (err)
nn_warn(nn, "Failed to invalidate SA in hardware\n");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a8678d5612ee..060a77f2265d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -460,6 +460,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
sizeof(struct nfp_tun_neigh_v4);
unsigned long cookie = (unsigned long)neigh;
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_neigh_lag lag_info;
struct nfp_neigh_entry *nn_entry;
u32 port_id;
u8 mtype;
@@ -468,6 +469,11 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (!port_id)
return;
+ if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
+ memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
+ nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
+ }
+
spin_lock_bh(&priv->predt_lock);
nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
neigh_table_params);
@@ -515,7 +521,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(common->dst_addr, neigh, netdev);
if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
- nfp_flower_lag_get_info_from_netdev(app, netdev, lag);
+ memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
common->port_id = cpu_to_be32(port_id);
if (rhashtable_insert_fast(&priv->neigh_table,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 432d79d691c2..939cfce15830 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -617,9 +617,10 @@ struct nfp_net_dp {
* @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
* -EOPNOTSUPP to keep backwards compatibility (set by app)
* @port: Pointer to nfp_port structure if vNIC is a port
- * @mc_lock: Protect mc_addrs list
- * @mc_addrs: List of mc addrs to add/del to HW
- * @mc_work: Work to update mc addrs
+ * @mbox_amsg: Messages processed asynchronously via mailbox
+ * @mbox_amsg.lock: Protect message list
+ * @mbox_amsg.list: List of messages to process
+ * @mbox_amsg.work: Work to process messages asynchronously
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -721,13 +722,25 @@ struct nfp_net {
struct nfp_port *port;
- spinlock_t mc_lock;
- struct list_head mc_addrs;
- struct work_struct mc_work;
+ struct {
+ spinlock_t lock;
+ struct list_head list;
+ struct work_struct work;
+ } mbox_amsg;
void *app_priv;
};
+struct nfp_mbox_amsg_entry {
+ struct list_head list;
+ int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
+ u32 cmd;
+ char msg[];
+};
+
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));
+
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18fc9971f1c8..70d7484c82af 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1334,14 +1334,54 @@ err_unlock:
return err;
}
-struct nfp_mc_addr_entry {
- u8 addr[ETH_ALEN];
- u32 cmd;
- struct list_head list;
-};
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
+{
+ struct nfp_mbox_amsg_entry *entry;
+
+ entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->msg, data, len);
+ entry->cmd = cmd;
+ entry->cfg = cb;
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_add_tail(&entry->list, &nn->mbox_amsg.list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ schedule_work(&nn->mbox_amsg.work);
+
+ return 0;
+}
+
+static void nfp_net_mbox_amsg_work(struct work_struct *work)
+{
+ struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
+ struct nfp_mbox_amsg_entry *entry, *tmp;
+ struct list_head tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_splice_init(&nn->mbox_amsg.list, &tmp_list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+ int err = entry->cfg(nn, entry);
+
+ if (err)
+ nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
-static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
+static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
+ unsigned char *addr = entry->msg;
int ret;
ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
@@ -1353,26 +1393,7 @@ static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
get_unaligned_be16(addr + 4));
- return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
-}
-
-static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
-{
- struct nfp_mc_addr_entry *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return -ENOMEM;
-
- ether_addr_copy(entry->addr, addr);
- entry->cmd = cmd;
- spin_lock_bh(&nn->mc_lock);
- list_add_tail(&entry->list, &nn->mc_addrs);
- spin_unlock_bh(&nn->mc_lock);
-
- schedule_work(&nn->mc_work);
-
- return 0;
+ return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
}
static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
@@ -1385,35 +1406,16 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
return -EINVAL;
}
- return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}
static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
struct nfp_net *nn = netdev_priv(netdev);
- return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
-}
-
-static void nfp_net_mc_addr_config(struct work_struct *work)
-{
- struct nfp_net *nn = container_of(work, struct nfp_net, mc_work);
- struct nfp_mc_addr_entry *entry, *tmp;
- struct list_head tmp_list;
-
- INIT_LIST_HEAD(&tmp_list);
-
- spin_lock_bh(&nn->mc_lock);
- list_splice_init(&nn->mc_addrs, &tmp_list);
- spin_unlock_bh(&nn->mc_lock);
-
- list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
- if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd))
- nn_err(nn, "Config mc address to HW failed.\n");
-
- list_del(&entry->list);
- kfree(entry);
- }
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}
static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2681,9 +2683,9 @@ int nfp_net_init(struct nfp_net *nn)
if (!nn->dp.netdev)
return 0;
- spin_lock_init(&nn->mc_lock);
- INIT_LIST_HEAD(&nn->mc_addrs);
- INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config);
+ spin_lock_init(&nn->mbox_amsg.lock);
+ INIT_LIST_HEAD(&nn->mbox_amsg.list);
+ INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
return register_netdev(nn->dp.netdev);
@@ -2704,6 +2706,6 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
- flush_work(&nn->mc_work);
+ flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
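
The nfp rework generalizes the old multicast-only list into a queue of arbitrary mailbox messages, each carrying its command and a callback, drained by one work item. A single-threaded user-space sketch of that queue-and-drain pattern (ordinary linked list and function pointers, not the kernel work-queue or mailbox API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct amsg_entry {
	struct amsg_entry *next;
	int (*cfg)(struct amsg_entry *entry);	/* per-message handler */
	unsigned int cmd;
	char msg[];				/* payload follows     */
};

static struct amsg_entry *head, **tail = &head;

/* Mirrors nfp_net_sched_mbox_amsg_work(): copy the payload, remember the
 * command and callback, and append the entry to the pending list.
 */
static int sched_amsg(unsigned int cmd, const void *data, size_t len,
		      int (*cb)(struct amsg_entry *))
{
	struct amsg_entry *entry = malloc(sizeof(*entry) + len);

	if (!entry)
		return -1;
	memcpy(entry->msg, data, len);
	entry->cmd = cmd;
	entry->cfg = cb;
	entry->next = NULL;
	*tail = entry;
	tail = &entry->next;
	return 0;
}

/* Mirrors nfp_net_mbox_amsg_work(): detach the list and run each callback. */
static void drain_amsg(void)
{
	struct amsg_entry *entry = head, *next;

	head = NULL;
	tail = &head;
	for (; entry; entry = next) {
		next = entry->next;
		if (entry->cfg(entry))
			fprintf(stderr, "cmd %u failed\n", entry->cmd);
		free(entry);
	}
}

static int print_cfg(struct amsg_entry *entry)
{
	printf("cmd %u payload \"%s\"\n", entry->cmd, entry->msg);
	return 0;
}

int main(void)
{
	sched_amsg(1, "add mac", sizeof("add mac"), print_cfg);
	sched_amsg(2, "del mac", sizeof("del mac"), print_cfg);
	drain_amsg();
	return 0;
}
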
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 51124309ae1f..f03dcadff738 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -403,7 +403,6 @@
*/
#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
-#define NFP_NET_CFG_MBOX_VAL 0x1808
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a4a89ef3f18b..cc97b3d00414 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -293,35 +293,131 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
}
}
-static const u16 nfp_eth_media_table[] = {
- [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
- [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
- [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
- [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
- [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+static const struct nfp_eth_media_link_mode {
+ u16 ethtool_link_mode;
+ u16 speed;
+} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
+ [NFP_MEDIA_1000BASE_CX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_1000BASE_KX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_10GBASE_KX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_25GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_KR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_40GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_LR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_50GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_FR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_100GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_KP4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR10] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+};
+
+static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
+ [NFP_SPEED_1G] = SPEED_1000,
+ [NFP_SPEED_10G] = SPEED_10000,
+ [NFP_SPEED_25G] = SPEED_25000,
+ [NFP_SPEED_40G] = SPEED_40000,
+ [NFP_SPEED_50G] = SPEED_50000,
+ [NFP_SPEED_100G] = SPEED_100000,
};
static void nfp_add_media_link_mode(struct nfp_port *port,
@@ -334,8 +430,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
};
struct nfp_cpp *cpp = port->app->cpp;
- if (nfp_eth_read_media(cpp, &ethm))
+ if (nfp_eth_read_media(cpp, &ethm)) {
+ bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
return;
+ }
+
+ bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
for (u32 i = 0; i < 2; i++) {
supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
@@ -344,20 +444,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
if (i < 64) {
- if (supported_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[0] & BIT_ULL(i)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
} else {
- if (supported_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[1] & BIT_ULL(i - 64)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
}
}
@@ -468,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ bool is_supported = false;
+
+ for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
+ if (cmd->base.speed == nfp_eth_speed_map[i] &&
+ test_bit(i, port->speed_bitmap)) {
+ is_supported = true;
+ break;
+ }
+ }
+
+ if (!is_supported) {
+ netdev_err(netdev, "Speed %u is not supported.\n",
+ cmd->base.speed);
+ err = -EINVAL;
+ goto err_bad_set;
+ }
if (req_aneg) {
netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index f8cd157ca1d7..9c04f9f0e2c9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -38,6 +38,16 @@ enum nfp_port_flags {
NFP_PORT_CHANGED = 0,
};
+enum {
+ NFP_SPEED_1G,
+ NFP_SPEED_10G,
+ NFP_SPEED_25G,
+ NFP_SPEED_40G,
+ NFP_SPEED_50G,
+ NFP_SPEED_100G,
+ NFP_SUP_SPEED_NUMBER
+};
+
/**
* struct nfp_port - structure representing NFP port
* @netdev: backpointer to associated netdev
@@ -52,6 +62,7 @@ enum nfp_port_flags {
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
+ * @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
@@ -78,6 +89,7 @@ struct nfp_port {
bool eth_forced;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
+ DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {
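The speed_bitmap added above is filled from the media table when link modes are read and consulted on the set path before honouring a user-requested speed. A minimal, hypothetical illustration of that consumer side (local names only, not the nfp code):

#include <linux/bitmap.h>
#include <linux/ethtool.h>

enum { SUP_1G, SUP_10G, SUP_25G, SUP_SPEED_NUMBER };

static const unsigned int sup_speed_map[SUP_SPEED_NUMBER] = {
	[SUP_1G]  = SPEED_1000,
	[SUP_10G] = SPEED_10000,
	[SUP_25G] = SPEED_25000,
};

/* True if @speed (an ethtool SPEED_* value) is marked supported in @bitmap. */
static bool speed_is_supported(const unsigned long *bitmap, u32 speed)
{
	for (unsigned int i = 0; i < SUP_SPEED_NUMBER; i++)
		if (speed == sup_speed_map[i] && test_bit(i, bitmap))
			return true;

	return false;
}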
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 626b9113e7c4..d911f4fd9af6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -708,9 +708,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
q->lif->index, q->name, q->hw_type, q->hw_index,
q->head_idx, ring_doorbell);
- if (ring_doorbell)
+ if (ring_doorbell) {
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
+ q->dbell_jiffies = jiffies;
+
+ if (q_to_qcq(q)->napi_qcq)
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+ }
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2a1d7b9c07e7..bce3ca38669b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -25,6 +25,12 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
+#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
+#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
+#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
+
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -216,6 +222,8 @@ struct ionic_queue {
struct ionic_lif *lif;
struct ionic_desc_info *info;
u64 dbval;
+ unsigned long dbell_deadline;
+ unsigned long dbell_jiffies;
u16 head_idx;
u16 tail_idx;
unsigned int index;
@@ -361,4 +369,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
+bool ionic_txq_poke_doorbell(struct ionic_queue *q);
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
+
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 4dd16c487f2b..63a78a9ac241 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -16,6 +16,7 @@
#include "ionic.h"
#include "ionic_bus.h"
+#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
@@ -200,6 +201,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
}
+static void ionic_napi_deadline(struct timer_list *timer)
+{
+ struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
+
+ napi_schedule(&qcq->napi);
+}
+
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@@ -269,6 +277,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
.oper = IONIC_Q_ENABLE,
},
};
+ int ret;
idev = &lif->ionic->idev;
dev = lif->ionic->dev;
@@ -276,16 +285,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ if (qcq->flags & IONIC_QCQ_F_INTR)
+ ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+
+ ret = ionic_adminq_post_wait(lif, &ctx);
+ if (ret)
+ return ret;
+
+ if (qcq->napi.poll)
+ napi_enable(&qcq->napi);
+
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
- napi_enable(&qcq->napi);
- ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
@@ -316,6 +333,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
synchronize_irq(qcq->intr.vector);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
+ del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communication error, don't bother with
@@ -451,6 +469,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
+ n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -564,13 +583,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
- int q_size, cq_size;
+ int q_size;
- /* q & cq need to be contiguous in case of notifyq */
+ /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
+ * and don't alloc qc. We leave new->qc_size and new->qc_base
+ * as 0 to be sure we don't try to free it later.
+ */
q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
- cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
-
- new->q_size = PAGE_SIZE + q_size + cq_size;
+ new->q_size = PAGE_SIZE + q_size +
+ ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
new->q_base = dma_alloc_coherent(dev, new->q_size,
&new->q_base_pa, GFP_KERNEL);
if (!new->q_base) {
@@ -773,8 +794,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -828,11 +855,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -1150,6 +1183,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
+ bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@@ -1187,6 +1221,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
+ if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
+ resched = true;
+ if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
+ resched = true;
+ if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&lif->adminqcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -3245,8 +3289,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index a53984bf3544..734519895614 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -74,8 +74,10 @@ struct ionic_qcq {
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
+ struct timer_list napi_deadline;
struct napi_struct napi;
unsigned int flags;
+ struct ionic_qcq *napi_qcq;
struct dentry *dentry;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index a13530ec4dd8..08c42b039d92 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q,
complete_all(&ctx->work);
}
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
+{
+ struct ionic_lif *lif = q->lif;
+ unsigned long now, then, dif;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
+
+ if (q->tail_idx == q->head_idx) {
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+
+ return true;
+}
+
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_desc_info *desc_info;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 0c3977416cd1..f761780f0162 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
+bool ionic_txq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+ struct netdev_queue *netdev_txq;
+ struct net_device *netdev;
+
+ netdev = q->lif->netdev;
+ netdev_txq = netdev_get_tx_queue(netdev, q->index);
+
+ HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
+
+ if (q->tail_idx == q->head_idx) {
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+
+ return true;
+}
+
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+
+ /* no lock, called from rx napi or txrx napi, nothing else can fill */
+
+ if (q->tail_idx == q->head_idx)
+ return false;
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+
+ dif = 2 * q->dbell_deadline;
+ if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
+ dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
+
+ q->dbell_deadline = dif;
+ }
+
+ return true;
+}
+
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
return netdev_get_tx_queue(q->lif->netdev, q->index);
@@ -424,6 +485,12 @@ void ionic_rx_fill(struct ionic_queue *q)
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
@@ -511,6 +578,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -544,23 +614,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
- struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_qcq *rxqcq = napi_to_qcq(napi);
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
+ struct ionic_qcq *txqcq;
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
+ txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
@@ -572,7 +648,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq, 0);
+ ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
@@ -583,6 +659,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
+ if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
+ resched = true;
+ if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return rx_work_done;
}
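All three poke helpers above reduce to the same check: if descriptors are outstanding and more than dbell_deadline jiffies have elapsed since the doorbell was last rung, ring it again and restamp dbell_jiffies. A hedged, self-contained sketch of just that core logic (the struct and ring() callback are stand-ins, not the driver's types):

#include <linux/jiffies.h>
#include <linux/types.h>

struct dbell_state {
	unsigned long	dbell_jiffies;	/* last time the doorbell was rung */
	unsigned long	dbell_deadline;	/* max jiffies to wait before re-ringing */
	u16		head_idx;
	u16		tail_idx;
};

/* Returns true if work is outstanding and the caller should re-arm its timer. */
static bool poke_if_stale(struct dbell_state *s, void (*ring)(struct dbell_state *))
{
	unsigned long now, elapsed;

	if (s->tail_idx == s->head_idx)		/* queue empty, nothing missed */
		return false;

	now = READ_ONCE(jiffies);
	elapsed = now - s->dbell_jiffies;	/* unsigned math is wrap-safe */

	if (elapsed > s->dbell_deadline) {
		ring(s);			/* stand-in for ionic_dbell_ring() */
		s->dbell_jiffies = now;
	}

	return true;
}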
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 7c2af482192d..cb1746bc0e0c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1438,6 +1438,10 @@ int qede_poll(struct napi_struct *napi, int budget)
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
+
+ if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+ xdp_do_flush();
+
/* Handle case where we are called by netpoll with a budget of 0 */
if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
@@ -1457,9 +1461,6 @@ int qede_poll(struct napi_struct *napi, int budget)
qede_update_tx_producer(fp->xdp_tx);
}
- if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
- xdp_do_flush_map();
-
return rx_work_done;
}
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b4e0fc7f65bd..0f54849a3823 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1101,14 +1101,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
priv->stats[RAVB_BE].rx_over_errors++;
- /* Receive Descriptor Empty int */
+ /* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF1)
priv->stats[RAVB_NC].rx_over_errors++;
@@ -2973,6 +2973,9 @@ static int __maybe_unused ravb_suspend(struct device *dev)
else
ret = ravb_close(ndev);
+ if (priv->info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
return ret;
}
@@ -3011,6 +3014,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Restore descriptor base address table */
ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ if (priv->info->ccc_gac)
+ ravb_ptp_init(ndev, priv->pdev);
+
if (netif_running(ndev)) {
if (priv->wol_enabled) {
ret = ravb_wol_restore(ndev);
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 6441892636db..2370c7797a0a 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1074,8 +1074,11 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
port = NULL;
goto out;
}
- if (index == rdev->etha->index)
+ if (index == rdev->etha->index) {
+ if (!of_device_is_available(port))
+ port = NULL;
break;
+ }
}
out:
@@ -1106,7 +1109,7 @@ static int rswitch_etha_get_params(struct rswitch_device *rdev)
port = rswitch_get_port_node(rdev);
if (!port)
- return -ENODEV;
+ return 0; /* ignored */
err = of_get_phy_mode(port, &rdev->etha->phy_interface);
of_node_put(port);
@@ -1324,13 +1327,13 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
int i, err;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
err = rswitch_ether_port_init_one(priv->rdev[i]);
if (err)
goto err_init_one;
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
err = rswitch_serdes_init(priv->rdev[i]);
if (err)
goto err_serdes;
@@ -1339,12 +1342,12 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
return 0;
err_serdes:
- for (i--; i >= 0; i--)
+ rswitch_for_each_enabled_port_continue_reverse(priv, i)
rswitch_serdes_deinit(priv->rdev[i]);
i = RSWITCH_NUM_PORTS;
err_init_one:
- for (i--; i >= 0; i--)
+ rswitch_for_each_enabled_port_continue_reverse(priv, i)
rswitch_ether_port_deinit_one(priv->rdev[i]);
return err;
@@ -1608,6 +1611,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
port = rswitch_get_port_node(rdev);
+ rdev->disabled = !port;
err = of_get_ethdev_address(port, ndev);
of_node_put(port);
if (err) {
@@ -1707,16 +1711,16 @@ static int rswitch_init(struct rswitch_private *priv)
if (err)
goto err_ether_port_init_all;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
err = register_netdev(priv->rdev[i]->ndev);
if (err) {
- for (i--; i >= 0; i--)
+ rswitch_for_each_enabled_port_continue_reverse(priv, i)
unregister_netdev(priv->rdev[i]->ndev);
goto err_register_netdev;
}
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ rswitch_for_each_enabled_port(priv, i)
netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
priv->rdev[i]->ndev->dev_addr);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index edbdd1b98d3d..49efb0f31c77 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -13,6 +13,17 @@
#define RSWITCH_MAX_NUM_QUEUES 128
#define RSWITCH_NUM_PORTS 3
+#define rswitch_for_each_enabled_port(priv, i) \
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
+ if (priv->rdev[i]->disabled) \
+ continue; \
+ else
+
+#define rswitch_for_each_enabled_port_continue_reverse(priv, i) \
+ for (i--; i >= 0; i--) \
+ if (priv->rdev[i]->disabled) \
+ continue; \
+ else
#define TX_RING_SIZE 1024
#define RX_RING_SIZE 1024
@@ -938,6 +949,7 @@ struct rswitch_device {
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
u8 ts_tag;
+ bool disabled;
int port;
struct rswitch_etha *etha;
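The two iteration macros above use a small but easy-to-misread C idiom: the loop body is an if/continue with a dangling else, so the single statement written after the macro invocation binds to that else and only executes for enabled ports. A generic sketch of the same construct, with hypothetical names:

#include <linux/types.h>

struct item {
	bool	enabled;
	int	value;
};

#define for_each_enabled_item(arr, n, i)	\
	for ((i) = 0; (i) < (n); (i)++)		\
		if (!(arr)[(i)].enabled)	\
			continue;		\
		else

static int sum_enabled(const struct item *items, int n)
{
	int i, sum = 0;

	for_each_enabled_item(items, n, i)
		sum += items[i].value;	/* binds to the macro's trailing else */

	return sum;
}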
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0556542d7a6b..3a86f1213a05 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1003,8 +1003,11 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
/* Determine netdevice features */
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
net_dev->features |= NETIF_F_TSO6;
+ if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
+ net_dev->hw_enc_features |= NETIF_F_TSO6;
+ }
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 835caa15d55f..732774645c1a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -560,6 +560,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
+ plat_dat->rx_clk_runs_in_lpi = 1;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 413f66017219..e95d35f1e5a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -541,9 +541,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
- val |= PPSCMDx(index, 0x2);
val |= TRGTMODSELx(index, 0x2);
val |= PPSEN0;
+ writel(val, ioaddr + MAC_PPS_CONTROL);
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
@@ -568,6 +568,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
/* Finally, activate it */
+ val |= PPSCMDx(index, 0x2);
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b7e5af58ab75..1a5b8dab5e9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1080,7 +1080,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
- priv->eee_active = phy_init_eee(phy, 1) >= 0;
+ priv->eee_active =
+ phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index eb6d9cd8e93f..0046a4ee6e64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
- if (plat->force_thresh_dma_mode) {
+ if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
plat->force_sf_dma_mode = 0;
dev_warn(&pdev->dev,
"force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index ecbde83b5243..6cda4b7c10cb 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -501,7 +501,15 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
}
+ reinit_completion(&common->tdown_complete);
k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
napi_disable(&common->napi_rx);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
@@ -721,6 +729,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
+ complete(&common->tdown_complete);
return 0;
}
@@ -2672,7 +2682,7 @@ static const struct am65_cpsw_pdata j721e_pdata = {
};
static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
- .quirks = 0,
+ .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 4b75620f8d28..e5f1c44788c1 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -90,6 +90,7 @@ struct am65_cpsw_rx_chn {
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
+#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
struct am65_cpsw_pdata {
u32 quirks;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9352dad58996..79f4e13620a4 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -987,9 +987,6 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
void netvsc_dma_unmap(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
int i;
if (!hv_is_isolation_supported())
@@ -998,7 +995,7 @@ void netvsc_dma_unmap(struct hv_device *hv_dev,
if (!packet->dma_range)
return;
- for (i = 0; i < page_count; i++)
+ for (i = 0; i < packet->page_buf_cnt; i++)
dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
packet->dma_range[i].mapping_size,
DMA_TO_DEVICE);
@@ -1028,9 +1025,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->page_buf_cnt;
dma_addr_t dma;
int i;
@@ -1039,7 +1034,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
packet->dma_range = kcalloc(page_count,
sizeof(*packet->dma_range),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!packet->dma_range)
return -ENOMEM;
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index 4a2e94faf57e..c4542ecf5623 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
{
+ u32 value;
int ret;
/* Enable the phy clock */
@@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
/* Initialize ephy control */
writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
- writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
- FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
- FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
- PHY_CNTL1_CLK_EN |
- PHY_CNTL1_CLKFREQ |
- PHY_CNTL1_PHY_ENB,
- priv->regs + ETH_PHY_CNTL1);
+
+ /* Make sure we get a 0 -> 1 transition on the enable bit */
+ value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+ PHY_CNTL1_CLK_EN |
+ PHY_CNTL1_CLKFREQ;
+ writel(value, priv->regs + ETH_PHY_CNTL1);
writel(PHY_CNTL2_USE_INTERNAL |
PHY_CNTL2_SMI_SRC_MAC |
PHY_CNTL2_RX_CLK_EPHY,
priv->regs + ETH_PHY_CNTL2);
+ value |= PHY_CNTL1_PHY_ENB;
+ writel(value, priv->regs + ETH_PHY_CNTL1);
+
+ /* The phy needs a bit of time to power up */
+ mdelay(10);
+
return 0;
}
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index a6f05e35d91f..b7cb71817780 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -233,7 +233,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN;
@@ -253,7 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index c49062ad72c6..a6015cd03bff 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -261,6 +261,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x01803301),
.name = "Meson G12A Internal PHY",
@@ -271,6 +273,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 716870a4499c..607aa786c8cb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1517,7 +1517,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* another mac interface, so we should create a device link between
* phy dev and mac dev.
*/
- if (phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
+ if (dev && phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
phydev->devlink = device_link_add(dev->dev.parent, &phydev->mdio.dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 09cc65c0da93..4d2519cdb801 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1812,10 +1812,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
- if (ret) {
- phy_device_free(phy_dev);
+ phy_device_free(phy_dev);
+ if (ret)
return ret;
- }
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 9f2b70ef39aa..613fc6910f14 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
if (status != 0) {
netdev_err(dev->net,
- "Error sending init packet. Status %i, length %i\n",
- status, act_len);
+ "Error sending init packet. Status %i\n",
+ status);
return status;
}
else if (act_len != init_msg_len) {
@@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
if (status != 0)
netdev_err(dev->net,
- "Error receiving init result. Status %i, length %i\n",
- status, act_len);
+ "Error receiving init result. Status %i\n",
+ status);
else if (act_len != expected_len)
netdev_err(dev->net, "Unexpected init result length: %i\n",
act_len);
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 2c82fbcaab22..7a2b0094de51 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -57,9 +57,7 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
- return usbnet_read_cmd(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE,
+ return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
val, index, NULL, 0);
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 18b3de854aeb..61e33e4dd0cd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1677,13 +1677,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
received = virtnet_receive(rq, budget, &xdp_xmit);
+ if (xdp_xmit & VIRTIO_XDP_REDIR)
+ xdp_do_flush();
+
/* Out of packets? */
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit & VIRTIO_XDP_REDIR)
- xdp_do_flush();
-
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_get_sq(vi);
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
@@ -2158,8 +2158,8 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
- xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
napi_disable(&vi->rq[i].napi);
+ xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 56267c327f0b..682987040ea8 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1546,31 +1546,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rxd->len = rbi->len;
}
-#ifdef VMXNET3_RSS
- if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
- (adapter->netdev->features & NETIF_F_RXHASH)) {
- enum pkt_hash_types hash_type;
-
- switch (rcd->rssType) {
- case VMXNET3_RCD_RSS_TYPE_IPV4:
- case VMXNET3_RCD_RSS_TYPE_IPV6:
- hash_type = PKT_HASH_TYPE_L3;
- break;
- case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
- case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
- case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
- case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
- hash_type = PKT_HASH_TYPE_L4;
- break;
- default:
- hash_type = PKT_HASH_TYPE_L3;
- break;
- }
- skb_set_hash(ctx->skb,
- le32_to_cpu(rcd->rssHash),
- hash_type);
- }
-#endif
skb_record_rx_queue(ctx->skb, rq->qid);
skb_put(ctx->skb, rcd->len);
@@ -1653,6 +1628,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
u32 mtu = adapter->netdev->mtu;
skb->len += skb->data_len;
+#ifdef VMXNET3_RSS
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+ (adapter->netdev->features & NETIF_F_RXHASH)) {
+ enum pkt_hash_types hash_type;
+
+ switch (rcd->rssType) {
+ case VMXNET3_RCD_RSS_TYPE_IPV4:
+ case VMXNET3_RCD_RSS_TYPE_IPV6:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+ hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ }
+ skb_set_hash(skb,
+ le32_to_cpu(rcd->rssHash),
+ hash_type);
+ }
+#endif
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
index 7eff3531b9a5..7ff33c1d6ac7 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
- t7xx_dev->intr_thread[int_type] = NULL;
+ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index aa2174a10437..f4ff2198b5ef 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
return work_done;
}
- if (!rxq->sleep_lock_pending) {
- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ if (!rxq->sleep_lock_pending)
t7xx_pci_disable_sleep(t7xx_dev);
- }
ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
if (!ret) {
@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
napi_complete_done(napi, work_done);
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
} else {
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
}
- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
- atomic_set(&rxq->rx_processing, 0);
-
return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
struct dpmaif_rx_queue *rxq;
- int qno;
+ struct dpmaif_ctrl *ctrl;
+ int qno, ret;
qno = ffs(que_mask) - 1;
if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int
}
rxq = &dpmaif_ctrl->rxq[qno];
+ ctrl = rxq->dpmaif_ctrl;
+ /* We need to make sure that the modem has been resumed before
+ * scheduling napi. This can't be done inside the polling
+ * function, as we could block waiting for the device to
+ * resume, which is not allowed in the softirq context the
+ * poll function runs in.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
+ return;
+ }
napi_schedule(&rxq->napi);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 494a28e386a3..3ef4a8a4f8fd 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
@@ -45,12 +46,25 @@
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
- int i;
+ struct dpmaif_ctrl *ctrl;
+ int i, ret;
+
+ ctrl = ctlb->hif_ctrl;
if (ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
+ /* The usage count has to be bumped every time before calling
+ * napi_schedule. It will be decreased in the poll routine,
+ * right after napi_complete_done is called.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n",
+ ret);
+ return;
+ }
napi_enable(ctlb->napi[i]);
napi_schedule(ctlb->napi[i]);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 871f2a27a398..226fc1703e90 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -121,6 +121,8 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
+ pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}
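The t7xx changes above (together with the matching put in t7xx_dpmaif_napi_rx_poll()) follow a common runtime-PM/NAPI pairing: take a usage-count reference and resume the device in a context that may sleep before napi_schedule(), and drop the reference in the poll routine only once napi_complete_done() succeeds. A condensed, hypothetical sketch of the pattern (struct my_dev and my_rx_service() are placeholders):

#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

struct my_dev {
	struct device		*dev;
	struct napi_struct	napi;
};

static int my_rx_service(struct my_dev *md, int budget);	/* placeholder RX processing */

/* Kick side: runs in a context that may sleep (IRQ thread, workqueue, ...). */
static void my_rx_kick(struct my_dev *md)
{
	int ret = pm_runtime_resume_and_get(md->dev);

	if (ret < 0 && ret != -EACCES) {
		dev_err(md->dev, "failed to resume device: %d\n", ret);
		return;
	}

	napi_schedule(&md->napi);
}

/* Poll side: the reference taken above is released after napi_complete_done(). */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *md = container_of(napi, struct my_dev, napi);
	int done = my_rx_service(md, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		pm_runtime_mark_last_busy(md->dev);
		pm_runtime_put_autosuspend(md->dev);
	}

	return done;
}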
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 79d93126453d..77b06d54cc62 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -102,6 +102,25 @@ config NVDIMM_KEYS
depends on ENCRYPTED_KEYS
depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
+config NVDIMM_KMSAN
+ bool
+ depends on KMSAN
+ help
+ KMSAN, and other memory debug facilities, increase the size of
+ 'struct page' to contain extra metadata. This collides with
+ the NVDIMM capability to store a potentially
+ larger-than-"System RAM" size 'struct page' array in a
+ reservation of persistent memory rather than limited /
+ precious DRAM. However, that reservation needs to persist for
+ the life of the given NVDIMM namespace. If you are using KMSAN
+ to debug an issue unrelated to NVDIMMs or DAX then say N to this
+ option. Otherwise, say Y but understand that any namespaces
+ (with the page array stored in pmem) created with this build of
+ the kernel will permanently reserve and strand excess
+ capacity compared to the CONFIG_KMSAN=n case.
+
+ Select N if unsure.
+
config NVDIMM_TEST_BUILD
tristate "Build the unit test core"
depends on m
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 85ca5b4da3cf..ec5219680092 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 128
+#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 61af072ac98f..af7d9301520c 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -13,6 +13,8 @@
#include "pfn.h"
#include "nd.h"
+static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
+
static void nd_pfn_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -758,12 +760,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- /*
- * Note, we use 64 here for the standard size of struct page,
- * debugging options may cause it to be larger in which case the
- * implementation will limit the pfns advertised through
- * ->direct_access() to those that are included in the memmap.
- */
start = nsio->res.start;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
@@ -782,20 +778,33 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
}
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
+ unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
+
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
- * Also make sure size of struct page is less than 128. We
- * want to make sure we use large enough size here so that
- * we don't have a dynamic reserve space depending on
- * struct page size. But we also want to make sure we notice
- * when we end up adding new elements to struct page.
+ * Also make sure size of struct page is less than
+ * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
+ * face of production kernel configurations that reduce the
+ * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
+ * kernel configurations that increase the 'struct page' size
+ * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
+ * for continuing with the capacity that will be wasted when
+ * reverting to a production kernel configuration. Otherwise,
+ * those configurations are blocked by default.
*/
- BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
- offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
- - start;
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
+ if (page_struct_override)
+ page_map_size = sizeof(struct page) * npfns;
+ else {
+ dev_err(&nd_pfn->dev,
+ "Memory debug options prevent using pmem for the page map\n");
+ return -EINVAL;
+ }
+ }
+ offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K, align) - start;
else
@@ -818,7 +827,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
pfn_sb->version_minor = cpu_to_le16(4);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
- pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
+ pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
+ else
+ pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
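The sizing change above boils down to simple arithmetic: always budget MAX_STRUCT_PAGE_SIZE (now 64) bytes per pfn so a namespace created on a production config keeps working, and only grow the reservation to the real sizeof(struct page) when CONFIG_NVDIMM_KMSAN explicitly opts in. A hedged, standalone restatement of that computation (not the driver function itself):

#include <linux/align.h>
#include <linux/sizes.h>

#define PAGE_MAP_STRUCT_PAGE_MAX	64	/* mirrors MAX_STRUCT_PAGE_SIZE */

/* Returns the data offset, or 0 when the configuration must be rejected (-EINVAL). */
static unsigned long page_map_offset(unsigned long start, unsigned long npfns,
				     unsigned long align, size_t struct_page_size,
				     bool kmsan_override)
{
	unsigned long map_size = PAGE_MAP_STRUCT_PAGE_MAX * npfns;

	if (struct_page_size > PAGE_MAP_STRUCT_PAGE_MAX) {
		if (!kmsan_override)
			return 0;
		map_size = struct_page_size * npfns;
	}

	/* SZ_8K of info-block space precedes the map; round the whole thing up. */
	return ALIGN(start + SZ_8K + map_size, align) - start;
}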
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 4424f53a8a0a..bdb97496ba2d 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -45,6 +45,8 @@ struct nvme_dhchap_queue_context {
int sess_key_len;
};
+static struct workqueue_struct *nvme_auth_wq;
+
#define nvme_auth_flags_from_qid(qid) \
(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
#define nvme_auth_queue_from_qid(ctrl, qid) \
@@ -866,7 +868,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
chap = &ctrl->dhchap_ctxs[qid];
cancel_work_sync(&chap->auth_work);
- queue_work(nvme_wq, &chap->auth_work);
+ queue_work(nvme_auth_wq, &chap->auth_work);
return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
@@ -1008,10 +1010,15 @@ EXPORT_SYMBOL_GPL(nvme_auth_free);
int __init nvme_init_auth(void)
{
+ nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_auth_wq)
+ return -ENOMEM;
+
nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!nvme_chap_buf_cache)
- return -ENOMEM;
+ goto err_destroy_workqueue;
nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
mempool_free_slab, nvme_chap_buf_cache);
@@ -1021,6 +1028,8 @@ int __init nvme_init_auth(void)
return 0;
err_destroy_chap_buf_cache:
kmem_cache_destroy(nvme_chap_buf_cache);
+err_destroy_workqueue:
+ destroy_workqueue(nvme_auth_wq);
return -ENOMEM;
}
@@ -1028,4 +1037,5 @@ void __exit nvme_exit_auth(void)
{
mempool_destroy(nvme_chap_buf_pool);
kmem_cache_destroy(nvme_chap_buf_cache);
+ destroy_workqueue(nvme_auth_wq);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7be562a4e1aa..8b6421141162 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1093,7 +1093,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
if (ns) {
if (ns->head->effects)
effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
- if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
+ if (ns->head->ids.csi == NVME_CSI_NVM)
effects |= nvme_known_nvm_effects(opcode);
if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
dev_warn_once(ctrl->device,
@@ -4921,7 +4921,9 @@ out_cleanup_admin_q:
blk_mq_destroy_queue(ctrl->admin_q);
blk_put_queue(ctrl->admin_q);
out_free_tagset:
- blk_mq_free_tag_set(ctrl->admin_tagset);
+ blk_mq_free_tag_set(set);
+ ctrl->admin_q = NULL;
+ ctrl->fabrics_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
@@ -4983,6 +4985,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
out_free_tag_set:
blk_mq_free_tag_set(set);
+ ctrl->connect_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 4564f16a0b20..456ee42a6133 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3521,13 +3521,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
nvme_fc_init_queue(ctrl, 0);
- ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
- &nvme_fc_admin_mq_ops,
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz));
- if (ret)
- goto out_free_queues;
-
/*
* Would have been nice to init io queues tag set as well.
* However, we require interaction from the controller
@@ -3537,10 +3530,17 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
if (ret)
- goto out_cleanup_tagset;
+ goto out_free_queues;
/* at this point, teardown path changes to ref counting on nvme ctrl */
+ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_fc_admin_mq_ops,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
+ if (ret)
+ goto fail_ctrl;
+
spin_lock_irqsave(&rport->lock, flags);
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
@@ -3592,8 +3592,6 @@ fail_ctrl:
return ERR_PTR(-EIO);
-out_cleanup_tagset:
- nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_queues:
kfree(ctrl->queues);
out_free_ida:
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1ff8843bc4b3..6d88ddd35565 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2509,18 +2509,12 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int dma_address_bits = 64;
if (pci_enable_device_mem(pdev))
return result;
pci_set_master(pdev);
- if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
- dma_address_bits = 48;
- if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
- goto disable;
-
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
result = -ENODEV;
goto disable;
@@ -2970,7 +2964,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
mutex_init(&dev->shutdown_lock);
@@ -2998,7 +2992,11 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
quirks);
if (ret)
goto out_put_device;
-
+
+ if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ else
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
dma_set_max_seg_size(&pdev->dev, 0xffffffff);
@@ -3031,8 +3029,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int result = -ENOMEM;
dev = nvme_pci_alloc_dev(pdev, id);
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
result = nvme_dev_map(dev);
if (result)
@@ -3102,6 +3100,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_start_ctrl(&dev->ctrl);
nvme_put_ctrl(&dev->ctrl);
+ flush_work(&dev->ctrl.scan_work);
return 0;
out_disable:
@@ -3422,6 +3421,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ab2627e17bb9..1ab6601fdd5c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
else {
queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
be16_to_cpu(rqst->assoc_cmd.sqsize));
- if (!queue)
+ if (!queue) {
ret = VERR_QUEUE_ALLOC_FAIL;
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
}
}
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 34130449f2d2..39aa27942f28 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -98,6 +98,9 @@ static int brcm_nvram_parse(struct brcm_nvram *priv)
len = le32_to_cpu(header.len);
data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy_fromio(data, priv->base, len);
data[len - 1] = '\0';
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 321d7d63e068..34ee9d36ee7b 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -770,31 +770,32 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
return ERR_PTR(rval);
}
- if (config->wp_gpio)
- nvmem->wp_gpio = config->wp_gpio;
- else if (!config->ignore_wp)
+ nvmem->id = rval;
+
+ nvmem->dev.type = &nvmem_provider_type;
+ nvmem->dev.bus = &nvmem_bus_type;
+ nvmem->dev.parent = config->dev;
+
+ device_initialize(&nvmem->dev);
+
+ if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
- ida_free(&nvmem_ida, nvmem->id);
rval = PTR_ERR(nvmem->wp_gpio);
- kfree(nvmem);
- return ERR_PTR(rval);
+ nvmem->wp_gpio = NULL;
+ goto err_put_device;
}
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
- nvmem->id = rval;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride ?: 1;
nvmem->word_size = config->word_size ?: 1;
nvmem->size = config->size;
- nvmem->dev.type = &nvmem_provider_type;
- nvmem->dev.bus = &nvmem_bus_type;
- nvmem->dev.parent = config->dev;
nvmem->root_only = config->root_only;
nvmem->priv = config->priv;
nvmem->type = config->type;
@@ -822,11 +823,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
break;
}
- if (rval) {
- ida_free(&nvmem_ida, nvmem->id);
- kfree(nvmem);
- return ERR_PTR(rval);
- }
+ if (rval)
+ goto err_put_device;
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
@@ -835,28 +833,22 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->dev.groups = nvmem_dev_groups;
#endif
- dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
-
- rval = device_register(&nvmem->dev);
- if (rval)
- goto err_put_device;
-
if (nvmem->nkeepout) {
rval = nvmem_validate_keepouts(nvmem);
if (rval)
- goto err_device_del;
+ goto err_put_device;
}
if (config->compat) {
rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
- goto err_device_del;
+ goto err_put_device;
}
if (config->cells) {
rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
if (rval)
- goto err_teardown_compat;
+ goto err_remove_cells;
}
rval = nvmem_add_cells_from_table(nvmem);
@@ -867,17 +859,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
+ dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+ rval = device_add(&nvmem->dev);
+ if (rval)
+ goto err_remove_cells;
+
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
-err_teardown_compat:
if (config->compat)
nvmem_sysfs_remove_compat(nvmem, config);
-err_device_del:
- device_del(&nvmem->dev);
err_put_device:
put_device(&nvmem->dev);
@@ -1242,16 +1237,21 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
if (!cell_np)
return ERR_PTR(-ENOENT);
- nvmem_np = of_get_next_parent(cell_np);
- if (!nvmem_np)
+ nvmem_np = of_get_parent(cell_np);
+ if (!nvmem_np) {
+ of_node_put(cell_np);
return ERR_PTR(-EINVAL);
+ }
nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
- if (IS_ERR(nvmem))
+ if (IS_ERR(nvmem)) {
+ of_node_put(cell_np);
return ERR_CAST(nvmem);
+ }
cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
+ of_node_put(cell_np);
if (!cell_entry) {
__nvmem_device_put(nvmem);
return ERR_PTR(-ENOENT);
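
Note: most of the nvmem_register() churn above is a switch to the two-step device_initialize()/device_add() pattern: once the device is initialized, every failure can funnel into a single put_device(), whose release callback can then free the structure (and, in nvmem's case, the IDA id), and the device only becomes visible in sysfs after the cells and compat attributes are wired up. A hedged skeleton of that pattern, with made-up foo_* names:

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/slab.h>

        struct foo {
                struct device dev;
        };

        static void foo_release(struct device *dev)
        {
                /* final teardown: everything put_device() must be able to undo */
                kfree(container_of(dev, struct foo, dev));
        }

        static int foo_setup(struct foo *foo)
        {
                return 0;       /* hypothetical: anything that can fail before device_add() */
        }

        static struct foo *foo_register(struct device *parent)
        {
                struct foo *foo;
                int rval;

                foo = kzalloc(sizeof(*foo), GFP_KERNEL);
                if (!foo)
                        return ERR_PTR(-ENOMEM);

                foo->dev.parent = parent;
                foo->dev.release = foo_release;
                device_initialize(&foo->dev);           /* refcounting is live from here */

                rval = foo_setup(foo);
                if (rval)
                        goto err_put_device;

                rval = device_add(&foo->dev);           /* only now visible to userspace */
                if (rval)
                        goto err_put_device;

                return foo;

        err_put_device:
                put_device(&foo->dev);                  /* single error leg; release() frees foo */
                return ERR_PTR(rval);
        }
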
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 4fcb63507ecd..8499892044b7 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -166,6 +166,7 @@ static const struct of_device_id sdam_match_table[] = {
{ .compatible = "qcom,spmi-sdam" },
{},
};
+MODULE_DEVICE_TABLE(of, sdam_match_table);
static struct platform_driver sdam_driver = {
.driver = {
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 5750e1f4bcdb..92dfe4cb10e3 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -41,8 +41,21 @@ static int sunxi_sid_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
+ u32 word;
+
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ __ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4);
- memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes);
+ val += round_down(bytes, 4);
+ offset += round_down(bytes, 4);
+ bytes = bytes % 4;
+
+ if (!bytes)
+ return 0;
+
+ /* Handle any trailing bytes */
+ word = readl_relaxed(sid->base + sid->value_offset + offset);
+ memcpy(val, &word, bytes);
return 0;
}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c34ac33b7338..67763e5b8c0e 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -965,8 +965,19 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
}
of_dma_range_parser_init(&parser, node);
- for_each_of_range(&parser, &range)
+ for_each_of_range(&parser, &range) {
+ if (range.cpu_addr == OF_BAD_ADDR) {
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ range.bus_addr, node);
+ continue;
+ }
num_ranges++;
+ }
+
+ if (!num_ranges) {
+ ret = -EINVAL;
+ goto out;
+ }
r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
if (!r) {
@@ -975,18 +986,16 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
}
/*
- * Record all info in the generic DMA ranges array for struct device.
+ * Record all info in the generic DMA ranges array for struct device,
+ * returning an error if we don't find any parsable ranges.
*/
*map = r;
of_dma_range_parser_init(&parser, node);
for_each_of_range(&parser, &range) {
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
range.bus_addr, range.cpu_addr, range.size);
- if (range.cpu_addr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
- range.bus_addr, node);
+ if (range.cpu_addr == OF_BAD_ADDR)
continue;
- }
r->cpu_start = range.cpu_addr;
r->dma_start = range.bus_addr;
r->size = range.size;
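
Note: the of_dma_get_range() hunks above move the OF_BAD_ADDR filtering into the counting pass and return -EINVAL when no range translates, so the kcalloc()'d map can never end up empty; the fill pass then just skips the same entries. The count-then-fill shape in isolation, over a plain array (everything here is illustrative, not part of the patch):

        #include <linux/err.h>
        #include <linux/slab.h>
        #include <linux/types.h>

        struct sample_range {
                u64 cpu_addr;
                u64 bus_addr;
                u64 size;
        };

        #define SAMPLE_BAD_ADDR ((u64)-1)       /* stand-in for OF_BAD_ADDR */

        static struct sample_range *build_map(const struct sample_range *in, int n_in)
        {
                struct sample_range *map;
                int i, n = 0;

                /* Pass 1: count only the entries that translated. */
                for (i = 0; i < n_in; i++)
                        if (in[i].cpu_addr != SAMPLE_BAD_ADDR)
                                n++;

                if (!n)
                        return ERR_PTR(-EINVAL);        /* nothing usable: fail early */

                /* One extra zeroed element terminates the map. */
                map = kcalloc(n + 1, sizeof(*map), GFP_KERNEL);
                if (!map)
                        return ERR_PTR(-ENOMEM);

                /* Pass 2: fill, skipping the same untranslatable entries. */
                for (i = 0, n = 0; i < n_in; i++)
                        if (in[i].cpu_addr != SAMPLE_BAD_ADDR)
                                map[n++] = in[i];

                return map;
        }
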
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index f08b25195ae7..d1a68b6d03b3 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,7 +26,6 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
-#include <linux/kmemleak.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -525,12 +524,9 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory(base, size, nomap) == 0) {
+ early_init_dt_reserve_memory(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
- if (!nomap)
- kmemleak_alloc_phys(base, size, 0);
- }
else
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 65f3b02a0e4e..f90975e00446 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
err = memblock_mark_nomap(base, size);
if (err)
memblock_phys_free(base, size);
- kmemleak_ignore_phys(base);
}
+ kmemleak_ignore_phys(base);
+
return err;
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 81c8c227ab6b..b3878a98d27f 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -525,6 +525,7 @@ static int __init of_platform_default_populate_init(void)
if (IS_ENABLED(CONFIG_PPC)) {
struct device_node *boot_display = NULL;
struct platform_device *dev;
+ int display_number = 0;
int ret;
/* Check if we have a MacOS display without a node spec */
@@ -555,16 +556,23 @@ static int __init of_platform_default_populate_init(void)
if (!of_get_property(node, "linux,opened", NULL) ||
!of_get_property(node, "linux,boot-display", NULL))
continue;
- dev = of_platform_device_create(node, "of-display", NULL);
+ dev = of_platform_device_create(node, "of-display.0", NULL);
+ of_node_put(node);
if (WARN_ON(!dev))
return -ENOMEM;
boot_display = node;
+ display_number++;
break;
}
for_each_node_by_type(node, "display") {
+ char buf[14];
+ const char *of_display_format = "of-display.%d";
+
if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
continue;
- of_platform_device_create(node, "of-display", NULL);
+ ret = snprintf(buf, sizeof(buf), of_display_format, display_number++);
+ if (ret < sizeof(buf))
+ of_platform_device_create(node, buf, NULL);
}
} else {
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index d6af5726ddf3..2a18f7ba2398 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -274,8 +274,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Let's clean up the target. 0xff is a blank pattern */
memset(&hwpath, 0xff, sizeof(hwpath));
@@ -388,8 +387,7 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Let's clean up the target. 0 is a blank pattern */
memset(&layers, 0, sizeof(layers));
@@ -756,8 +754,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Current flags are stored in primary boot path entry */
pathentry = &pdcspath_entry_primary;
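
Note: the three pdc_stable hunks above are the same substitution: strscpy() copies at most size - 1 bytes, always writes the terminating NUL, and returns the copied length (or -E2BIG on truncation), so the manual in[count] = '\0' is no longer needed. A small self-contained sketch of the sysfs-store idiom, with a hypothetical helper name:

        #include <linux/minmax.h>
        #include <linux/string.h>
        #include <linux/types.h>

        /* Hypothetical: make a bounded, NUL-terminated copy of a sysfs write. */
        static ssize_t copy_store_buf(char *dst, size_t dst_sz,
                                      const char *buf, size_t count)
        {
                /* Clamp first so the copy below can never report truncation. */
                count = min_t(size_t, count, dst_sz - 1);

                /* Copies up to count bytes and always NUL-terminates dst. */
                return strscpy(dst, buf, count + 1);
        }
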
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fba95486caaf..5641786bd020 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1665,7 +1665,6 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
- pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1772,7 +1771,6 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
- pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -3465,11 +3463,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
- error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
- 2 * sizeof(u32));
- if (error)
- pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
pci_allocate_vc_save_buffers(dev);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9ed3b5550043..9049d07d3aae 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -566,14 +566,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 53a1fa306e1e..4b4184563a92 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -470,31 +470,6 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
pci_write_config_dword(pdev, pos, val);
}
-static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
-{
- u16 l1ss = dev->l1ss;
- u32 l1_2_enable;
-
- /*
- * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
- * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
- */
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
-
- /*
- * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
- * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
- * enable bits, even though they're all in PCI_L1SS_CTL1.
- */
- l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
- ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
-
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
- if (l1_2_enable)
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
- ctl1 | l1_2_enable);
-}
-
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -504,6 +479,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
+ u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
@@ -552,21 +528,39 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
ctl2 == pctl2 && ctl2 == cctl2)
return;
- pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(parent, pctl1, ctl2);
-
- cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(child, cctl1, ctl2);
+ /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
+ pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ }
+
+ /* Program T_POWER_ON times in both ports */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
+ pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /* Program Common_Mode_Restore_Time in upstream device */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
+ }
}
static void aspm_l1ss_init(struct pcie_link_state *link)
@@ -757,43 +751,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u16 l1ss = dev->l1ss;
- u32 *cap;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u32 *cap, ctl1, ctl2;
- u16 l1ss = dev->l1ss;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- ctl2 = *cap++;
- ctl1 = *cap;
- aspm_program_l1ss(dev, ctl1, ctl2);
-}
-
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
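
Note: the rewritten aspm_calc_l1ss_info() above does all of its reprogramming through pci_clear_and_set_dword() and brackets it with an explicit "clear the L1.2 enables, program CTL2/CTL1, then restore the enables" sequence, which is the ordering the cited PCIe sections require. The helper itself appears only in part in this hunk; roughly, it is a plain read-modify-write of one config dword, sketched here under a hypothetical name:

        #include <linux/pci.h>

        /* Roughly what the static pci_clear_and_set_dword() helper does. */
        static void rmw_config_dword(struct pci_dev *pdev, int pos, u32 clear, u32 set)
        {
                u32 val;

                pci_read_config_dword(pdev, pos, &val);
                val &= ~clear;          /* drop the field(s) being reprogrammed */
                val |= set;             /* merge in the new value */
                pci_write_config_dword(pdev, pos, val);
        }
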
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index b80a9b74662b..1deb61b22bc7 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1576,7 +1576,6 @@ static int arm_cmn_event_init(struct perf_event *event)
hw->dn++;
continue;
}
- hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
hw->num_dns++;
if (bynodeid)
break;
@@ -1589,6 +1588,12 @@ static int arm_cmn_event_init(struct perf_event *event)
nodeid, nid.x, nid.y, nid.port, nid.dev, type);
return -EINVAL;
}
+ /*
+ * Keep assuming non-cycles events count in all DTC domains; turns out
+ * it's hard to make a worthwhile optimisation around this, short of
+ * going all-in with domain-local counter allocation as well.
+ */
+ hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
return arm_cmn_validate_group(cmn, event);
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 3945612900e6..9c6ee46ac7a0 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -93,10 +93,19 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
+ int ret;
+
pr_debug("Disabling signal %s for %s\n", expr->signal,
expr->function);
- return aspeed_sig_expr_set(ctx, expr, false);
+ ret = aspeed_sig_expr_eval(ctx, expr, true);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return aspeed_sig_expr_set(ctx, expr, false);
+
+ return 0;
}
/**
@@ -114,7 +123,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
int ret = 0;
if (!exprs)
- return true;
+ return -EINVAL;
while (*exprs && !ret) {
ret = aspeed_sig_expr_disable(ctx, *exprs);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index cc3aaba24188..e49f271de936 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1709,6 +1709,12 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
#ifdef CONFIG_PM_SLEEP
+static bool __intel_gpio_is_direct_irq(u32 value)
+{
+ return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
+}
+
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
@@ -1742,8 +1748,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
* See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
*/
value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
- if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
- (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
+ if (__intel_gpio_is_direct_irq(value))
return true;
return false;
@@ -1873,7 +1878,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- if (!intel_pinctrl_should_save(pctrl, desc->number))
+ if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
+ /*
+ * If the firmware mangled the register contents too much,
+ * check the saved value for the Direct IRQ mode.
+ */
+ __intel_gpio_is_direct_irq(pads[i].padcfg0)))
continue;
intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
index 89557c7ed2ab..09c4dcef9338 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -659,7 +659,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
- PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
@@ -708,7 +708,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
- PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 9bc6e3922e78..32c3edaf9038 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -365,6 +365,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
} else {
debounce_enable = " ∅";
+ time = 0;
}
snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 99c3745da456..190923757cda 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
if (!pcs->fmask)
return 0;
function = pinmux_generic_get_function(pctldev, fselector);
+ if (!function)
+ return -EINVAL;
func = function->data;
if (!func)
return -EINVAL;
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
index c3c8c34148f1..e22d03ce292e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -105,7 +105,7 @@ static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
static const char * const swr_tx_clk_groups[] = { "gpio0" };
static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
static const char * const swr_rx_clk_groups[] = { "gpio3" };
-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
static const char * const dmic1_clk_groups[] = { "gpio6" };
static const char * const dmic1_data_groups[] = { "gpio7" };
static const char * const dmic2_clk_groups[] = { "gpio8" };
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index a825af8126c8..2ce8cb2170df 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -8,6 +8,7 @@ source "drivers/platform/x86/amd/pmf/Kconfig"
config AMD_PMC
tristate "AMD SoC PMC driver"
depends on ACPI && PCI && RTC_CLASS
+ select SERIO
help
The driver provides support for AMD Power Management Controller
primarily responsible for S2Idle transactions that are driven from
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index 8d924986381b..3cbb01ec10e3 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
+#include <linux/serio.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -160,6 +161,10 @@ static bool enable_stb;
module_param(enable_stb, bool, 0644);
MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
+static bool disable_workarounds;
+module_param(disable_workarounds, bool, 0644);
+MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
+
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
@@ -653,6 +658,33 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
return -EINVAL;
}
+static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
+{
+ struct device *d;
+ int rc;
+
+ if (!pdev->major) {
+ rc = amd_pmc_get_smu_version(pdev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
+ return 0;
+
+ d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
+ if (!d)
+ return 0;
+ if (device_may_wakeup(d)) {
+ dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
+ disable_irq_wake(1);
+ device_set_wakeup_enable(d, false);
+ }
+ put_device(d);
+
+ return 0;
+}
+
static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
{
struct rtc_device *rtc_device;
@@ -715,8 +747,8 @@ static void amd_pmc_s2idle_prepare(void)
/* Reset and Start SMU logging - to monitor the s0i3 stats */
amd_pmc_setup_smu_logging(pdev);
- /* Activate CZN specific RTC functionality */
- if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+ /* Activate CZN specific platform bug workarounds */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
rc = amd_pmc_verify_czn_rtc(pdev, &arg);
if (rc) {
dev_err(pdev->dev, "failed to set RTC: %d\n", rc);
@@ -782,6 +814,25 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
.check = amd_pmc_s2idle_check,
.restore = amd_pmc_s2idle_restore,
};
+
+static int __maybe_unused amd_pmc_suspend_handler(struct device *dev)
+{
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
+ int rc = amd_pmc_czn_wa_irq1(pdev);
+
+ if (rc) {
+ dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
+
#endif
static const struct pci_device_id pmc_pci_ids[] = {
@@ -980,6 +1031,9 @@ static struct platform_driver amd_pmc_driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
.dev_groups = pmc_groups,
+#ifdef CONFIG_SUSPEND
+ .pm = &amd_pmc_pm,
+#endif
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
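
Note: the amd_pmc changes above add a suspend-time hook through a dev_pm_ops table; SIMPLE_DEV_PM_OPS() builds the ops structure from a suspend and a resume callback for the ordinary system-sleep transitions. A stripped-down, hypothetical driver showing the same wiring (illustrative only):

        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include <linux/pm.h>

        static int __maybe_unused foo_suspend(struct device *dev)
        {
                /* quiesce the hypothetical device before system suspend */
                dev_dbg(dev, "suspending\n");
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, NULL);

        static struct platform_driver foo_driver = {
                .driver = {
                        .name = "foo",
        #ifdef CONFIG_SUSPEND
                        .pm = &foo_pm,
        #endif
                },
        };
        module_platform_driver(foo_driver);
        MODULE_LICENSE("GPL");
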
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
index 644af42e07cf..96a8e1832c05 100644
--- a/drivers/platform/x86/amd/pmf/auto-mode.c
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -275,13 +275,8 @@ int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
*/
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
- int mode = amd_pmf_get_pprof_modes(dev);
-
- if (mode < 0)
- return mode;
-
dev_dbg(dev->dev, "resetting AMT thermals\n");
- amd_pmf_update_slider(dev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(dev);
}
return 0;
}
@@ -299,7 +294,5 @@ void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
{
amd_pmf_load_defaults_auto_mode(dev);
- /* update the thermal limits for Automode */
- amd_pmf_set_automode(dev, config_store.current_mode, NULL);
amd_pmf_init_metrics_table(dev);
}
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index 3f9731a2ac28..4beb22a19466 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -103,7 +103,7 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l
src = amd_pmf_cnqf_get_power_source(dev);
- if (dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (is_pprof_balanced(dev)) {
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
} else {
/*
@@ -307,13 +307,9 @@ static ssize_t cnqf_enable_store(struct device *dev,
const char *buf, size_t count)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- int mode, result, src;
+ int result, src;
bool input;
- mode = amd_pmf_get_pprof_modes(pdev);
- if (mode < 0)
- return mode;
-
result = kstrtobool(buf, &input);
if (result)
return result;
@@ -321,11 +317,11 @@ static ssize_t cnqf_enable_store(struct device *dev,
src = amd_pmf_cnqf_get_power_source(pdev);
pdev->cnqf_enabled = input;
- if (pdev->cnqf_enabled && pdev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
} else {
if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
- amd_pmf_update_slider(pdev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(pdev);
}
dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
@@ -386,7 +382,7 @@ int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
dev->cnqf_enabled = amd_pmf_check_flags(dev);
/* update the thermal for CnQF */
- if (dev->cnqf_enabled && dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
src = amd_pmf_cnqf_get_power_source(dev);
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index a5f5a4bcff6d..da23639071d7 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -58,6 +58,25 @@ static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
+
+ if (event != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
+ if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
+ return NOTIFY_DONE;
+ }
+
+ amd_pmf_set_sps_power_limits(pmf);
+
+ return NOTIFY_OK;
+}
+
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
struct amd_pmf_dev *dev = seq->private;
@@ -366,14 +385,18 @@ static int amd_pmf_probe(struct platform_device *pdev)
if (!dev->regbase)
return -ENOMEM;
+ mutex_init(&dev->lock);
+ mutex_init(&dev->update_mutex);
+
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
amd_pmf_dbgfs_register(dev);
- mutex_init(&dev->lock);
- mutex_init(&dev->update_mutex);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
+
dev_info(dev->dev, "registered PMF device successfully\n");
return 0;
@@ -383,11 +406,12 @@ static int amd_pmf_remove(struct platform_device *pdev)
{
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
- mutex_destroy(&dev->lock);
- mutex_destroy(&dev->update_mutex);
+ power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_features(dev);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
+ mutex_destroy(&dev->update_mutex);
kfree(dev->buf);
return 0;
}
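
Note: the core.c hunks above register a power-supply notifier so a source change (AC to DC or back) re-applies the static slider limits, and unregister it first thing in remove so no notification can race with teardown. The registration pattern on its own, with hypothetical foo_* names:

        #include <linux/kernel.h>
        #include <linux/notifier.h>
        #include <linux/power_supply.h>

        struct foo_dev {
                struct notifier_block pwr_src_notifier;
        };

        static void foo_refresh_power_limits(struct foo_dev *fdev)
        {
                /* hypothetical: re-apply limits for the new power source */
        }

        static int foo_pwr_src_notify(struct notifier_block *nb,
                                      unsigned long event, void *data)
        {
                struct foo_dev *fdev = container_of(nb, struct foo_dev, pwr_src_notifier);

                if (event != PSY_EVENT_PROP_CHANGED)
                        return NOTIFY_OK;

                foo_refresh_power_limits(fdev);
                return NOTIFY_OK;
        }

        static void foo_register_notifier(struct foo_dev *fdev)
        {
                fdev->pwr_src_notifier.notifier_call = foo_pwr_src_notify;
                power_supply_reg_notifier(&fdev->pwr_src_notifier);
        }

        static void foo_unregister_notifier(struct foo_dev *fdev)
        {
                /* do this before any other teardown, mirroring the hunk above */
                power_supply_unreg_notifier(&fdev->pwr_src_notifier);
        }
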
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 84bbe2c6ea61..06c30cdc0573 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -169,6 +169,7 @@ struct amd_pmf_dev {
struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
bool cnqf_enabled;
bool cnqf_supported;
+ struct notifier_block pwr_src_notifier;
};
struct apmf_sps_prop_granular {
@@ -391,9 +392,11 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev);
void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
/* Auto Mode Layer */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index dba7e36962dc..bed762d47a14 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -70,6 +70,24 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
}
}
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+
+ return 0;
+}
+
+bool is_pprof_balanced(struct amd_pmf_dev *pmf)
+{
+ return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
+}
+
static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
enum platform_profile_option *profile)
{
@@ -105,15 +123,10 @@ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
- int mode;
pmf->current_profile = profile;
- mode = amd_pmf_get_pprof_modes(pmf);
- if (mode < 0)
- return mode;
- amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
- return 0;
+ return amd_pmf_set_sps_power_limits(pmf);
}
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
@@ -123,6 +136,9 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
dev->current_profile = PLATFORM_PROFILE_BALANCED;
amd_pmf_load_defaults_sps(dev);
+ /* update SPS balanced power mode thermals */
+ amd_pmf_set_sps_power_limits(dev);
+
dev->pprof.profile_get = amd_pmf_profile_get;
dev->pprof.profile_set = amd_pmf_profile_set;
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index ca33df7ea550..9333f82cfa8a 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -64,29 +64,6 @@ struct apple_gmux_data {
static struct apple_gmux_data *apple_gmux_data;
-/*
- * gmux port offsets. Many of these are not yet used, but may be in the
- * future, and it's useful to have them documented here anyhow.
- */
-#define GMUX_PORT_VERSION_MAJOR 0x04
-#define GMUX_PORT_VERSION_MINOR 0x05
-#define GMUX_PORT_VERSION_RELEASE 0x06
-#define GMUX_PORT_SWITCH_DISPLAY 0x10
-#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
-#define GMUX_PORT_INTERRUPT_ENABLE 0x14
-#define GMUX_PORT_INTERRUPT_STATUS 0x16
-#define GMUX_PORT_SWITCH_DDC 0x28
-#define GMUX_PORT_SWITCH_EXTERNAL 0x40
-#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
-#define GMUX_PORT_DISCRETE_POWER 0x50
-#define GMUX_PORT_MAX_BRIGHTNESS 0x70
-#define GMUX_PORT_BRIGHTNESS 0x74
-#define GMUX_PORT_VALUE 0xc2
-#define GMUX_PORT_READ 0xd0
-#define GMUX_PORT_WRITE 0xd4
-
-#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
-
#define GMUX_INTERRUPT_ENABLE 0xff
#define GMUX_INTERRUPT_DISABLE 0x00
@@ -249,23 +226,6 @@ static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
gmux_pio_write32(gmux_data, port, val);
}
-static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
-{
- u16 val;
-
- outb(0xaa, gmux_data->iostart + 0xcc);
- outb(0x55, gmux_data->iostart + 0xcd);
- outb(0x00, gmux_data->iostart + 0xce);
-
- val = inb(gmux_data->iostart + 0xcc) |
- (inb(gmux_data->iostart + 0xcd) << 8);
-
- if (val == 0x55aa)
- return true;
-
- return false;
-}
-
/**
* DOC: Backlight control
*
@@ -605,60 +565,43 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
int ret = -ENXIO;
acpi_status status;
unsigned long long gpe;
+ bool indexed = false;
+ u32 version;
if (apple_gmux_data)
return -EBUSY;
+ if (!apple_gmux_detect(pnp, &indexed)) {
+ pr_info("gmux device not present\n");
+ return -ENODEV;
+ }
+
gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
if (!gmux_data)
return -ENOMEM;
pnp_set_drvdata(pnp, gmux_data);
res = pnp_get_resource(pnp, IORESOURCE_IO, 0);
- if (!res) {
- pr_err("Failed to find gmux I/O resource\n");
- goto err_free;
- }
-
gmux_data->iostart = res->start;
gmux_data->iolen = resource_size(res);
- if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
- pr_err("gmux I/O region too small (%lu < %u)\n",
- gmux_data->iolen, GMUX_MIN_IO_LEN);
- goto err_free;
- }
-
if (!request_region(gmux_data->iostart, gmux_data->iolen,
"Apple gmux")) {
pr_err("gmux I/O already in use\n");
goto err_free;
}
- /*
- * Invalid version information may indicate either that the gmux
- * device isn't present or that it's a new one that uses indexed
- * io
- */
-
- ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
- ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
- ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
- if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
- if (gmux_is_indexed(gmux_data)) {
- u32 version;
- mutex_init(&gmux_data->index_lock);
- gmux_data->indexed = true;
- version = gmux_read32(gmux_data,
- GMUX_PORT_VERSION_MAJOR);
- ver_major = (version >> 24) & 0xff;
- ver_minor = (version >> 16) & 0xff;
- ver_release = (version >> 8) & 0xff;
- } else {
- pr_info("gmux device not present\n");
- ret = -ENODEV;
- goto err_release;
- }
+ if (indexed) {
+ mutex_init(&gmux_data->index_lock);
+ gmux_data->indexed = true;
+ version = gmux_read32(gmux_data, GMUX_PORT_VERSION_MAJOR);
+ ver_major = (version >> 24) & 0xff;
+ ver_minor = (version >> 16) & 0xff;
+ ver_release = (version >> 8) & 0xff;
+ } else {
+ ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
+ ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
+ ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
}
pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
ver_release, (gmux_data->indexed ? "indexed" : "classic"));
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 104188d70988..1038dfdcdd32 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -225,6 +225,7 @@ struct asus_wmi {
int tablet_switch_event_code;
u32 tablet_switch_dev_id;
+ bool tablet_switch_inverted;
enum fan_type fan_type;
enum fan_type gpu_fan_type;
@@ -493,6 +494,13 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
}
/* Input **********************************************************************/
+static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value)
+{
+ input_report_switch(asus->inputdev, SW_TABLET_MODE,
+ asus->tablet_switch_inverted ? !value : value);
+ input_sync(asus->inputdev);
+}
+
static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
{
struct device *dev = &asus->platform_device->dev;
@@ -501,7 +509,7 @@ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event
result = asus_wmi_get_devstate_simple(asus, dev_id);
if (result >= 0) {
input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
+ asus_wmi_tablet_sw_report(asus, result);
asus->tablet_switch_dev_id = dev_id;
asus->tablet_switch_event_code = event_code;
} else if (result == -ENODEV) {
@@ -534,6 +542,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
case asus_wmi_no_tablet_switch:
break;
case asus_wmi_kbd_dock_devid:
+ asus->tablet_switch_inverted = true;
asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
break;
case asus_wmi_lid_flip_devid:
@@ -573,10 +582,8 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
return;
result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
- if (result >= 0) {
- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
- input_sync(asus->inputdev);
- }
+ if (result >= 0)
+ asus_wmi_tablet_sw_report(asus, result);
}
/* dGPU ********************************************************************/
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
index 0a259a27459f..502783a7adb1 100644
--- a/drivers/platform/x86/dell/dell-wmi-base.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -261,6 +261,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
{ KE_KEY, 0x57, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x58, { KEY_BRIGHTNESSUP } },
+ /* Speaker Mute */
+ { KE_KEY, 0x109, { KEY_MUTE } },
+
/* Mic mute */
{ KE_KEY, 0x150, { KEY_MICMUTE } },
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 5e7e6659a849..322cfaeda17b 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 0a99058be813..2ef201b625b3 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -90,6 +90,7 @@ enum hp_wmi_event_ids {
HPWMI_PEAKSHIFT_PERIOD = 0x0F,
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
HPWMI_SANITIZATION_MODE = 0x17,
+ HPWMI_OMEN_KEY = 0x1D,
HPWMI_SMART_EXPERIENCE_APP = 0x21,
};
@@ -216,6 +217,8 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
{ KE_KEY, 0x216a, { KEY_SETUP } },
+ { KE_KEY, 0x21a5, { KEY_PROG2 } }, /* HP Omen Key */
+ { KE_KEY, 0x21a7, { KEY_FN_ESC } },
{ KE_KEY, 0x21a9, { KEY_TOUCHPAD_OFF } },
{ KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
{ KE_KEY, 0x231b, { KEY_HELP } },
@@ -548,7 +551,7 @@ static int __init hp_wmi_enable_hotkeys(void)
static int hp_wmi_set_block(void *data, bool blocked)
{
- enum hp_wmi_radio r = (enum hp_wmi_radio) data;
+ enum hp_wmi_radio r = (long)data;
int query = BIT(r + 8) | ((!blocked) << r);
int ret;
@@ -810,6 +813,7 @@ static void hp_wmi_notify(u32 value, void *context)
case HPWMI_SMART_ADAPTER:
break;
case HPWMI_BEZEL_BUTTON:
+ case HPWMI_OMEN_KEY:
key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
if (key_code < 0)
break;
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index bb81b8b1f7e9..89c5374e33b3 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -408,14 +408,23 @@ static const struct intel_vsec_platform_info dg1_info = {
.quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};
+/* MTL info */
+static const struct intel_vsec_platform_info mtl_info = {
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG,
+};
+
#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a95946800ae9..32c10457399e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5563,7 +5563,7 @@ static int light_sysfs_set(struct led_classdev *led_cdev,
static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
{
- return (light_get_status() == 1) ? LED_FULL : LED_OFF;
+ return (light_get_status() == 1) ? LED_ON : LED_OFF;
}
static struct tpacpi_led_classdev tpacpi_led_thinklight = {
@@ -10496,8 +10496,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
if (err)
goto unlock;
}
- }
- if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
if (err)
goto unlock;
@@ -10525,14 +10524,16 @@ static void dytc_profile_refresh(void)
err = dytc_command(DYTC_CMD_MMC_GET, &output);
else
err = dytc_cql_command(DYTC_CMD_GET, &output);
- } else if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ funcmode = DYTC_FUNCTION_MMC;
+ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
err = dytc_command(DYTC_CMD_GET, &output);
-
+ /* Check if we are in PSC mode, or have AMT enabled */
+ funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
+ }
mutex_unlock(&dytc_mutex);
if (err)
return;
- funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
convert_dytc_to_profile(funcmode, perfmode, &profile);
if (profile != dytc_current_profile) {
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index f00995390fdf..13802a3c3591 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -1098,6 +1098,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 (CWI501) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index e991cccdb6e9..1e8bc6cc1e12 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -188,9 +188,10 @@ static int efi_set_time(struct device *dev, struct rtc_time *tm)
static int efi_procfs(struct device *dev, struct seq_file *seq)
{
- efi_time_t eft, alm;
- efi_time_cap_t cap;
- efi_bool_t enabled, pending;
+ efi_time_t eft, alm;
+ efi_time_cap_t cap;
+ efi_bool_t enabled, pending;
+ struct rtc_device *rtc = dev_get_drvdata(dev);
memset(&eft, 0, sizeof(eft));
memset(&alm, 0, sizeof(alm));
@@ -213,23 +214,25 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
/* XXX fixme: convert to string? */
seq_printf(seq, "Timezone\t: %u\n", eft.timezone);
- seq_printf(seq,
- "Alarm Time\t: %u:%u:%u.%09u\n"
- "Alarm Date\t: %u-%u-%u\n"
- "Alarm Daylight\t: %u\n"
- "Enabled\t\t: %s\n"
- "Pending\t\t: %s\n",
- alm.hour, alm.minute, alm.second, alm.nanosecond,
- alm.year, alm.month, alm.day,
- alm.daylight,
- enabled == 1 ? "yes" : "no",
- pending == 1 ? "yes" : "no");
-
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(seq, "Timezone\t: unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
+ if (test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ seq_printf(seq,
+ "Alarm Time\t: %u:%u:%u.%09u\n"
+ "Alarm Date\t: %u-%u-%u\n"
+ "Alarm Daylight\t: %u\n"
+ "Enabled\t\t: %s\n"
+ "Pending\t\t: %s\n",
+ alm.hour, alm.minute, alm.second, alm.nanosecond,
+ alm.year, alm.month, alm.day,
+ alm.daylight,
+ enabled == 1 ? "yes" : "no",
+ pending == 1 ? "yes" : "no");
+
+ if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+ seq_puts(seq, "Timezone\t: unspecified\n");
+ else
+ /* XXX fixme: convert to string? */
+ seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
+ }
/*
* now prints the capabilities
@@ -269,7 +272,10 @@ static int __init efi_rtc_probe(struct platform_device *dev)
rtc->ops = &efi_rtc_ops;
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
- set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_WAKEUP_SERVICES))
+ set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+ else
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
device_init_wakeup(&dev->dev, true);
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
index e8e2ab1103fc..4b578e4d44f6 100644
--- a/drivers/rtc/rtc-sunplus.c
+++ b/drivers/rtc/rtc-sunplus.c
@@ -240,8 +240,8 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
if (IS_ERR(sp_rtc->reg_base))
return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base),
"%s devm_ioremap_resource fail\n", RTC_REG_NAME);
- dev_dbg(&plat_dev->dev, "res = 0x%x, reg_base = 0x%lx\n",
- sp_rtc->res->start, (unsigned long)sp_rtc->reg_base);
+ dev_dbg(&plat_dev->dev, "res = %pR, reg_base = %p\n",
+ sp_rtc->res, sp_rtc->reg_base);
sp_rtc->irq = platform_get_irq(plat_dev, 0);
if (sp_rtc->irq < 0)
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 49cc18a87473..29a2865b8e2e 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -981,6 +981,9 @@ queue_rtpg:
*
* Returns true if and only if alua_rtpg_work() will be called asynchronously.
* That function is responsible for calling @qdata->fn().
+ *
+ * Context: may be called from atomic context (alua_check()) only if the caller
+ * holds an sdev reference.
*/
static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
@@ -989,8 +992,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
int start_queue = 0;
unsigned long flags;
- might_sleep();
-
if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
return false;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4dbf51e2623a..f6da34850af9 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
struct Scsi_Host *sh;
- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
if (sh == NULL) {
dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 1d1cf641937c..0454d94e8cf0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -849,7 +849,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
- struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_session *session;
struct iscsi_conn *conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
@@ -859,6 +859,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
+ session = tcp_sw_host->session;
if (!session)
return -ENOTCONN;
@@ -959,11 +960,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
- tcp_sw_host = iscsi_host_priv(shost);
- tcp_sw_host->session = session;
if (iscsi_tcp_r2tpool_alloc(session))
goto remove_session;
+
+ /* We are now fully set up, so expose the session to sysfs. */
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
return cls_session;
remove_session:
@@ -983,10 +986,17 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
if (WARN_ON_ONCE(session->leadconn))
return;
+ iscsi_session_remove(cls_session);
+ /*
+ * Our get_host_param needs to access the session, so remove the
+ * host from sysfs before freeing the session to make sure userspace
+ * is no longer accessing the callout.
+ */
+ iscsi_host_remove(shost, false);
+
iscsi_tcp_r2tpool_free(cls_session->dd_data);
- iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost, false);
+ iscsi_session_free(cls_session);
iscsi_host_free(shost);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ef2fc860257e..127f3d7f19dc 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3104,17 +3104,32 @@ dec_session_count:
}
EXPORT_SYMBOL_GPL(iscsi_session_setup);
-/**
- * iscsi_session_teardown - destroy session, host, and cls_session
- * @cls_session: iscsi session
+/*
+ * issi_session_remove - Remove session from iSCSI class.
*/
-void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+void iscsi_session_remove(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
- struct module *owner = cls_session->transport->owner;
struct Scsi_Host *shost = session->host;
iscsi_remove_session(cls_session);
+ /*
+ * host removal only has to wait for its children to be removed from
+ * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
+ * the session, so drop the session count here.
+ */
+ iscsi_host_dec_session_cnt(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_remove);
+
+/**
+ * iscsi_session_free - Free iscsi session and its resources
+ * @cls_session: iscsi session
+ */
+void iscsi_session_free(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct module *owner = cls_session->transport->owner;
iscsi_pool_free(&session->cmdpool);
kfree(session->password);
@@ -3132,10 +3147,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->discovery_parent_type);
iscsi_free_session(cls_session);
-
- iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
+EXPORT_SYMBOL_GPL(iscsi_session_free);
+
+/**
+ * iscsi_session_teardown - destroy session and cls_session
+ * @cls_session: iscsi session
+ */
+void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_remove(cls_session);
+ iscsi_session_free(cls_session);
+}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
/**
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1426b9b03612..9feb0323bc44 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -588,8 +588,6 @@ void scsi_device_put(struct scsi_device *sdev)
{
struct module *mod = sdev->host->hostt->module;
- might_sleep();
-
put_device(&sdev->sdev_gendev);
module_put(mod);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 7a6904a3928e..f9b18fdc7b3c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1232,8 +1232,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* that no LUN is present, so don't add sdev in these cases.
* Two specific examples are:
* 1) NetApp targets: return PQ=1, PDT=0x1f
- * 2) IBM/2145 targets: return PQ=1, PDT=0
- * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+ * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
* in the UFI 1.0 spec (we cannot rely on reserved bits).
*
* References:
@@ -1247,8 +1246,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* PDT=00h Direct-access device (floppy)
* PDT=1Fh none (no FDD connected to the requested logical unit)
*/
- if (((result[0] >> 5) == 1 ||
- (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
+ if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
+ (result[0] & 0x1f) == 0x1f &&
!scsi_is_wlun(lun)) {
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: peripheral device type"
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 981d1bab2120..8ef9a5494340 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -451,6 +451,8 @@ static void scsi_device_dev_release(struct device *dev)
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
+ might_sleep();
+
scsi_dh_release_device(sdev);
parent = sdev->sdev_gendev.parent;
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 99edddf9958b..c3bfb6c84cab 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -366,7 +366,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
* will be adjusted at the final stage of the IRQ-based SPI transfer
* execution so not to lose the leftover of the incoming data.
*/
- level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
+ level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 15f174f4e056..3f33934f5429 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2220,11 +2220,26 @@ void spi_flush_queue(struct spi_controller *ctlr)
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
+static void of_spi_parse_dt_cs_delay(struct device_node *nc,
+ struct spi_delay *delay, const char *prop)
+{
+ u32 value;
+
+ if (!of_property_read_u32(nc, prop, &value)) {
+ if (value > U16_MAX) {
+ delay->value = DIV_ROUND_UP(value, 1000);
+ delay->unit = SPI_DELAY_UNIT_USECS;
+ } else {
+ delay->value = value;
+ delay->unit = SPI_DELAY_UNIT_NSECS;
+ }
+ }
+}
+
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
u32 value;
- u16 cs_setup;
int rc;
/* Mode (clock phase/polarity/etc.) */
@@ -2310,10 +2325,8 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
- if (!of_property_read_u16(nc, "spi-cs-setup-delay-ns", &cs_setup)) {
- spi->cs_setup.value = cs_setup;
- spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
- }
+ /* Device CS delays */
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
return 0;
}
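
Note: the new of_spi_parse_dt_cs_delay() above widens the property read to u32 and, when the nanosecond value no longer fits the 16-bit spi_delay field, stores it rounded up in microseconds instead; for example 70000 ns becomes DIV_ROUND_UP(70000, 1000) = 70 with SPI_DELAY_UNIT_USECS, where the old u16 read would simply have dropped it. The conversion on its own, as an illustrative standalone helper:

        #include <linux/kernel.h>
        #include <linux/limits.h>
        #include <linux/spi/spi.h>

        /* Illustrative mirror of the unit selection above. */
        static void fill_cs_delay(struct spi_delay *delay, u32 ns)
        {
                if (ns > U16_MAX) {
                        /* e.g. 70000 ns -> DIV_ROUND_UP(70000, 1000) = 70 µs */
                        delay->value = DIV_ROUND_UP(ns, 1000);
                        delay->unit = SPI_DELAY_UNIT_USECS;
                } else {
                        delay->value = ns;
                        delay->unit = SPI_DELAY_UNIT_NSECS;
                }
        }
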
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1935ca613447..a1ea093795cf 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -90,9 +90,21 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
/*-------------------------------------------------------------------------*/
static ssize_t
+spidev_sync_unlocked(struct spi_device *spi, struct spi_message *message)
+{
+ ssize_t status;
+
+ status = spi_sync(spi, message);
+ if (status == 0)
+ status = message->actual_length;
+
+ return status;
+}
+
+static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
- int status;
+ ssize_t status;
struct spi_device *spi;
mutex_lock(&spidev->spi_lock);
@@ -101,12 +113,10 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
if (spi == NULL)
status = -ESHUTDOWN;
else
- status = spi_sync(spi, message);
-
- if (status == 0)
- status = message->actual_length;
+ status = spidev_sync_unlocked(spi, message);
mutex_unlock(&spidev->spi_lock);
+
return status;
}
@@ -294,7 +304,7 @@ static int spidev_message(struct spidev_data *spidev,
spi_message_add_tail(k_tmp, &msg);
}
- status = spidev_sync(spidev, &msg);
+ status = spidev_sync_unlocked(spidev->spi, &msg);
if (status < 0)
goto done;
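Note: the switch to spidev_sync_unlocked() here presumably reflects that spidev_message() is reached with spidev->spi_lock already held by its callers, so calling the locking spidev_sync() would re-acquire a non-recursive mutex and deadlock. A minimal sketch of that hazard (hypothetical names, not driver code):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    static void inner_locked(void)
    {
        mutex_lock(&example_lock);    /* blocks forever if the caller holds it */
        /* ... do work ... */
        mutex_unlock(&example_lock);
    }

    static void outer(void)
    {
        mutex_lock(&example_lock);
        inner_locked();               /* self-deadlock on a non-recursive mutex */
        mutex_unlock(&example_lock);
    }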
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index bac111456fa1..2b95b4550a63 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -73,8 +73,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
{
struct se_session *sess = se_cmd->se_sess;
- assert_spin_locked(&sess->sess_cmd_lock);
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_held(&sess->sess_cmd_lock);
+
/*
* If command already reached CMD_T_COMPLETE state within
* target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
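Note: lockdep_assert_held() documents the caller's locking requirement, is only checked when lockdep is enabled, and verifies that the current context actually holds the lock, whereas assert_spin_locked() only checks that the lock is locked by someone. A minimal sketch of the pattern (hypothetical structure and helper):

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    struct foo {
        spinlock_t lock;
        int count;
    };

    /* Caller must hold f->lock; lockdep verifies this when enabled. */
    static void foo_inc(struct foo *f)
    {
        lockdep_assert_held(&f->lock);
        f->count++;
    }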
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 62c0aa5d0783..0a4eaa307156 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -44,11 +44,13 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
int trip, int *temp)
{
struct int34x_thermal_zone *d = zone->devdata;
- int i;
+ int i, ret = 0;
if (d->override_ops && d->override_ops->get_trip_temp)
return d->override_ops->get_trip_temp(zone, trip, temp);
+ mutex_lock(&d->trip_mutex);
+
if (trip < d->aux_trip_nr)
*temp = d->aux_trips[trip];
else if (trip == d->crt_trip_id)
@@ -66,10 +68,12 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
}
}
if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ mutex_unlock(&d->trip_mutex);
+
+ return ret;
}
static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
@@ -77,11 +81,13 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
enum thermal_trip_type *type)
{
struct int34x_thermal_zone *d = zone->devdata;
- int i;
+ int i, ret = 0;
if (d->override_ops && d->override_ops->get_trip_type)
return d->override_ops->get_trip_type(zone, trip, type);
+ mutex_lock(&d->trip_mutex);
+
if (trip < d->aux_trip_nr)
*type = THERMAL_TRIP_PASSIVE;
else if (trip == d->crt_trip_id)
@@ -99,10 +105,12 @@ static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
}
}
if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ mutex_unlock(&d->trip_mutex);
+
+ return ret;
}
static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
@@ -180,6 +188,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
int trip_cnt = int34x_zone->aux_trip_nr;
int i;
+ mutex_lock(&int34x_zone->trip_mutex);
+
int34x_zone->crt_trip_id = -1;
if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
&int34x_zone->crt_temp))
@@ -207,6 +217,8 @@ int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
int34x_zone->act_trips[i].valid = true;
}
+ mutex_unlock(&int34x_zone->trip_mutex);
+
return trip_cnt;
}
EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
@@ -230,6 +242,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
if (!int34x_thermal_zone)
return ERR_PTR(-ENOMEM);
+ mutex_init(&int34x_thermal_zone->trip_mutex);
+
int34x_thermal_zone->adev = adev;
int34x_thermal_zone->override_ops = override_ops;
@@ -281,6 +295,7 @@ err_thermal_zone:
acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
kfree(int34x_thermal_zone->aux_trips);
err_trip_alloc:
+ mutex_destroy(&int34x_thermal_zone->trip_mutex);
kfree(int34x_thermal_zone);
return ERR_PTR(ret);
}
@@ -292,6 +307,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone
thermal_zone_device_unregister(int34x_thermal_zone->zone);
acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
kfree(int34x_thermal_zone->aux_trips);
+ mutex_destroy(&int34x_thermal_zone->trip_mutex);
kfree(int34x_thermal_zone);
}
EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
index 3b4971df1b33..8f9872afd0d3 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
@@ -32,6 +32,7 @@ struct int34x_thermal_zone {
struct thermal_zone_device_ops *override_ops;
void *priv_data;
struct acpi_lpat_conversion_table *lpat_table;
+ struct mutex trip_mutex;
};
struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 37d6af2ec427..7fa66501792d 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -43,15 +43,23 @@ static void __dma_rx_complete(struct uart_8250_port *p)
struct uart_8250_dma *dma = p->dma;
struct tty_port *tty_port = &p->port.state->port;
struct dma_tx_state state;
+ enum dma_status dma_status;
int count;
- dma->rx_running = 0;
- dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ /*
+ * New DMA Rx can be started during the completion handler before it
+ * could acquire the port's lock and it might still be ongoing. Don't do
+ * anything in that case.
+ */
+ dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ if (dma_status == DMA_IN_PROGRESS)
+ return;
count = dma->rx_size - state.residue;
tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += count;
+ dma->rx_running = 0;
tty_flip_buffer_push(tty_port);
}
@@ -62,9 +70,14 @@ static void dma_rx_complete(void *param)
struct uart_8250_dma *dma = p->dma;
unsigned long flags;
- __dma_rx_complete(p);
-
spin_lock_irqsave(&p->port.lock, flags);
+ if (dma->rx_running)
+ __dma_rx_complete(p);
+
+ /*
+ * Cannot be combined with the previous check because __dma_rx_complete()
+ * changes dma->rx_running.
+ */
if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
p->dma->rx_dma(p);
spin_unlock_irqrestore(&p->port.lock, flags);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index a1490033aa16..409e91d6829a 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -797,25 +797,11 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
spin_unlock(&port->lock);
}
- if (stm32_usart_rx_dma_enabled(port))
- return IRQ_WAKE_THREAD;
- else
- return IRQ_HANDLED;
-}
-
-static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
-{
- struct uart_port *port = ptr;
- struct tty_port *tport = &port->state->port;
- struct stm32_port *stm32_port = to_stm32_port(port);
- unsigned int size;
- unsigned long flags;
-
/* Receiver timeout irq for DMA RX */
- if (!stm32_port->throttled) {
- spin_lock_irqsave(&port->lock, flags);
+ if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) {
+ spin_lock(&port->lock);
size = stm32_usart_receive_chars(port, false);
- uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
}
@@ -1015,10 +1001,8 @@ static int stm32_usart_startup(struct uart_port *port)
u32 val;
int ret;
- ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
- stm32_usart_threaded_interrupt,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- name, port);
+ ret = request_irq(port->irq, stm32_usart_interrupt,
+ IRQF_NO_SUSPEND, name, port);
if (ret)
return ret;
@@ -1601,13 +1585,6 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
struct dma_slave_config config;
int ret;
- /*
- * Using DMA and threaded handler for the console could lead to
- * deadlocks.
- */
- if (uart_console(port))
- return -ENODEV;
-
stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
&stm32port->rx_dma_buf,
GFP_KERNEL);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 1850bacdb5b0..f566eb1839dc 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -386,10 +386,6 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
uni_mode = use_unicode(inode);
attr = use_attributes(inode);
- ret = -ENXIO;
- vc = vcs_vc(inode, &viewed);
- if (!vc)
- goto unlock_out;
ret = -EINVAL;
if (pos < 0)
@@ -407,6 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned int this_round, skip = 0;
int size;
+ ret = -ENXIO;
+ vc = vcs_vc(inode, &viewed);
+ if (!vc)
+ goto unlock_out;
+
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index bda61be5f035..3a1c4d31e010 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1234,12 +1234,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
+ mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
ufshcd_scsi_unblock_requests(hba);
goto out;
}
@@ -1251,12 +1253,16 @@ out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
- if (writelock)
- up_write(&hba->clk_scaling_lock);
- else
- up_read(&hba->clk_scaling_lock);
+ up_write(&hba->clk_scaling_lock);
+
+ /* Enable Write Booster if we have scaled up else disable it */
+ if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
+ ufshcd_wb_toggle(hba, scale_up);
+
+ mutex_unlock(&hba->wb_mutex);
+
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
}
@@ -1273,7 +1279,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- bool is_writelock = true;
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
@@ -1302,15 +1307,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
}
- /* Enable Write Booster if we have scaled up else disable it */
- if (ufshcd_enable_wb_if_scaling_up(hba)) {
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
- }
-
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, is_writelock);
+ ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
return ret;
}
@@ -6066,9 +6064,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
+ mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
hba->clk_scaling.is_allowed = allow;
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
}
static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
@@ -9793,6 +9793,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize mutex for exception event control */
mutex_init(&hba->ee_ctrl_mutex);
+ mutex_init(&hba->wb_mutex);
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
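Note: wb_mutex is taken outside clk_scaling_lock everywhere it appears in this change, which keeps one lock order and lets the Write Booster toggle run after the rwsem is dropped while still serialized, replacing the earlier downgrade_write() trick. A simplified, non-compilable sketch of the ordering used above:

    /* Sketch only: wb_mutex (outer, may sleep) then clk_scaling_lock (inner
     * rwsem); the WB toggle happens after the rwsem is released but still
     * under wb_mutex. */
    static void scale_sketch(struct ufs_hba *hba, bool scale_up, int err)
    {
        mutex_lock(&hba->wb_mutex);
        down_write(&hba->clk_scaling_lock);
        /* ... frequency/gear scaling happens here ... */
        up_write(&hba->clk_scaling_lock);
        if (!err)
            ufshcd_wb_toggle(hba, scale_up);
        mutex_unlock(&hba->wb_mutex);
    }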
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 079e183cf3bf..934b3d997702 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -526,6 +526,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
+ { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index b0a0351d2d8b..959fc925ca7c 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -901,7 +901,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
/* enable vbus override for device mode */
- if (qcom->mode == USB_DR_MODE_PERIPHERAL)
+ if (qcom->mode != USB_DR_MODE_HOST)
dwc3_qcom_vbus_override_enable(qcom, true);
/* register extcon to override sw_vbus on Vbus change later */
diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
index 87cca81bf4ac..eb076746f032 100644
--- a/drivers/usb/fotg210/fotg210-udc.c
+++ b/drivers/usb/fotg210/fotg210-udc.c
@@ -1014,7 +1014,6 @@ static int fotg210_udc_start(struct usb_gadget *g,
int ret;
/* hook up the driver */
- driver->driver.bus = NULL;
fotg210->driver = driver;
if (!IS_ERR_OR_NULL(fotg210->phy)) {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 523a961b910b..8ad354741380 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -279,8 +279,10 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
struct usb_request *req = ffs->ep0req;
int ret;
- if (!req)
+ if (!req) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
return -EINVAL;
+ }
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 08726e4c68a5..0219cd79493a 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1142,6 +1142,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_out_if0_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bInterfaceNumber = ret;
+ std_as_out_if1_desc.bNumEndpoints = 1;
uac2->as_out_intf = ret;
uac2->as_out_alt = 0;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 8f12f3f8f6ee..e06022873df1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -798,6 +798,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
SET_NETDEV_DEVTYPE(net, &gadget_type);
status = register_netdev(net);
@@ -872,6 +873,8 @@ int gether_register_netdev(struct net_device *net)
struct usb_gadget *g;
int status;
+ if (!net->dev.parent)
+ return -EINVAL;
dev = netdev_priv(net);
g = dev->gadget;
@@ -902,6 +905,7 @@ void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
dev = netdev_priv(net);
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 2cdb07905bde..d04d72f5816e 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -1830,7 +1830,6 @@ static int bcm63xx_udc_start(struct usb_gadget *gadget,
bcm63xx_select_phy_mode(udc, true);
udc->driver = driver;
- driver->driver.bus = NULL;
udc->gadget.dev.of_node = udc->dev->of_node;
spin_unlock_irqrestore(&udc->lock, flags);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index bf745358e28e..3b1cc8fa30c8 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2285,7 +2285,6 @@ static int fsl_qe_start(struct usb_gadget *gadget,
/* a lock is needed, but it is unclear whether this lock or another should be used */
spin_lock_irqsave(&udc->lock, flags);
- driver->driver.bus = NULL;
/* hook up the driver */
udc->driver = driver;
udc->gadget.speed = driver->max_speed;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 50435e804118..a67873a074b7 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1943,7 +1943,6 @@ static int fsl_udc_start(struct usb_gadget *g,
/* a lock is needed, but it is unclear whether this lock or another should be used */
spin_lock_irqsave(&udc_controller->lock, flags);
- driver->driver.bus = NULL;
/* hook up the driver */
udc_controller->driver = driver;
spin_unlock_irqrestore(&udc_controller->lock, flags);
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 9af8b415f303..5954800d652c 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1311,7 +1311,6 @@ static int fusb300_udc_start(struct usb_gadget *g,
struct fusb300 *fusb300 = to_fusb300(g);
/* hook up the driver */
- driver->driver.bus = NULL;
fusb300->driver = driver;
return 0;
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index bdc56b24b5c9..5ffb3d5c635b 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1375,7 +1375,6 @@ static int goku_udc_start(struct usb_gadget *g,
struct goku_udc *dev = to_goku_udc(g);
/* hook up the driver */
- driver->driver.bus = NULL;
dev->driver = driver;
/*
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 22096f8505de..85cdc0af3bf9 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1906,7 +1906,6 @@ static int gr_udc_start(struct usb_gadget *gadget,
spin_lock(&dev->lock);
/* Hook up the driver */
- driver->driver.bus = NULL;
dev->driver = driver;
/* Get ready for host detection */
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index c7e421b449f3..06e21cee431b 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1454,7 +1454,6 @@ static int m66592_udc_start(struct usb_gadget *g,
struct m66592 *m66592 = to_m66592(g);
/* hook up the driver */
- driver->driver.bus = NULL;
m66592->driver = driver;
m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 3074da00c3df..ddf0ed3eb4f2 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1108,7 +1108,6 @@ static int max3420_udc_start(struct usb_gadget *gadget,
spin_lock_irqsave(&udc->lock, flags);
/* hook up the driver */
- driver->driver.bus = NULL;
udc->driver = driver;
udc->gadget.speed = USB_SPEED_FULL;
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 598654a3cb41..411b6179782c 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1243,7 +1243,6 @@ static int mv_u3d_start(struct usb_gadget *g,
}
/* hook up the driver ... */
- driver->driver.bus = NULL;
u3d->driver = driver;
u3d->ep0_dir = USB_DIR_OUT;
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index fdb17d86cd65..b397f3a848cf 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1359,7 +1359,6 @@ static int mv_udc_start(struct usb_gadget *gadget,
spin_lock_irqsave(&udc->lock, flags);
/* hook up the driver ... */
- driver->driver.bus = NULL;
udc->driver = driver;
udc->usb_state = USB_STATE_ATTACHED;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 84605a4d0715..538c1b9a2883 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1451,7 +1451,6 @@ static int net2272_start(struct usb_gadget *_gadget,
dev->ep[i].irqs = 0;
/* hook up the driver ... */
dev->softconnect = 1;
- driver->driver.bus = NULL;
dev->driver = driver;
/* ... then enable host detection and ep0; and we're ready
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index d6a68631354a..1b929c519cd7 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2423,7 +2423,6 @@ static int net2280_start(struct usb_gadget *_gadget,
dev->ep[i].irqs = 0;
/* hook up the driver ... */
- driver->driver.bus = NULL;
dev->driver = driver;
retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index bea346e362b2..f660ebfa1379 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2066,7 +2066,6 @@ static int omap_udc_start(struct usb_gadget *g,
udc->softconnect = 1;
/* hook up the driver */
- driver->driver.bus = NULL;
udc->driver = driver;
spin_unlock_irqrestore(&udc->lock, flags);
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 9bb7a9d7a2fb..4f8617210d85 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -2908,7 +2908,6 @@ static int pch_udc_start(struct usb_gadget *g,
{
struct pch_udc_dev *dev = to_pch_udc(g);
- driver->driver.bus = NULL;
dev->driver = driver;
/* get ready for ep0 traffic */
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 52ea4dcf6a92..2fc5d4d277bc 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -1933,7 +1933,6 @@ static int amd5536_udc_start(struct usb_gadget *g,
struct udc *dev = to_amd5536_udc(g);
u32 tmp;
- driver->driver.bus = NULL;
dev->driver = driver;
/* Some gadget drivers use both ep0 directions.
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 9a6860285fbe..50b24096eb7f 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -535,10 +535,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
/* FIXME: Port can only be DFP_U. */
/* Make sure we have compatible pin configurations */
- if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
- !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
+ if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
+ !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1292241d581a..1cf8947c6d66 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1269,6 +1269,9 @@ err_unregister:
con->port = NULL;
}
+ kfree(ucsi->connector);
+ ucsi->connector = NULL;
+
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
@@ -1300,7 +1303,8 @@ static void ucsi_resume_work(struct work_struct *work)
int ucsi_resume(struct ucsi *ucsi)
{
- queue_work(system_long_wq, &ucsi->resume_work);
+ if (ucsi->connector)
+ queue_work(system_long_wq, &ucsi->resume_work);
return 0;
}
EXPORT_SYMBOL_GPL(ucsi_resume);
@@ -1420,6 +1424,9 @@ void ucsi_unregister(struct ucsi *ucsi)
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ if (!ucsi->connector)
+ return;
+
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
ucsi_unregister_partner(&ucsi->connector[i]);
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index f9c0044c6442..44b29289aa19 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -849,7 +849,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = ifcvf_init_hw(vf, pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- return ret;
+ goto err;
}
for (i = 0; i < vf->nr_vring; i++)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 23c24fe98c00..2209372f236d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1856,24 +1856,33 @@ unwind:
* significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
* hugetlbfs is in use.
*/
-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
{
- struct page *pages;
int ret, order = get_order(PAGE_SIZE * 2);
+ struct vfio_iova *region;
+ struct page *pages;
+ dma_addr_t start;
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!pages)
return;
- ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
- if (!ret) {
- size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+ list_for_each_entry(region, regions, list) {
+ start = ALIGN(region->start, PAGE_SIZE * 2);
+ if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
+ continue;
- if (unmapped == PAGE_SIZE)
- iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
- else
- domain->fgsp = true;
+ ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+ if (!ret) {
+ size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
+
+ if (unmapped == PAGE_SIZE)
+ iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
+ else
+ domain->fgsp = true;
+ }
+ break;
}
__free_pages(pages, order);
@@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
}
- vfio_test_domain_fgsp(domain);
+ vfio_test_domain_fgsp(domain, &iova_copy);
/* replay mappings on new domains */
ret = vfio_iommu_replay(iommu, domain);
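Note: instead of always test-mapping at IOVA 0, which may fall in a reserved region on some IOMMUs, the probe now looks for a two-page-aligned window inside one of the valid IOVA regions; ALIGN() rounds the region start up to the requested power-of-two boundary before the size check. A small sketch of that selection (hypothetical helper, made-up bounds):

    #include <linux/align.h>
    #include <linux/mm.h>
    #include <linux/types.h>

    /* Hypothetical helper mirroring the loop above: pick a PAGE_SIZE * 2
     * aligned window inside [start, end), or report the region as too small. */
    static bool pick_fgsp_window(dma_addr_t start, dma_addr_t end, dma_addr_t *out)
    {
        dma_addr_t aligned = ALIGN(start, PAGE_SIZE * 2);

        if (aligned >= end || end - aligned < PAGE_SIZE * 2)
            return false;    /* caller moves on to the next region */

        *out = aligned;      /* e.g. start 0x1000 -> 0x2000 with 4K pages */
        return true;
    }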
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9af19b0cf3b7..4c538b30fd76 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1511,6 +1511,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
nvq = &n->vqs[index];
mutex_lock(&vq->mutex);
+ if (fd == -1)
+ vhost_clear_msg(&n->dev);
+
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(vq)) {
r = -EFAULT;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dca6346d75b3..d5ecb8876fc9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -80,7 +80,7 @@ struct vhost_scsi_cmd {
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec tvc_resp_iov;
+ struct iovec *tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -563,7 +563,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
- iov_iter_init(&iov_iter, ITER_DEST, &cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
@@ -594,6 +594,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
struct vhost_scsi_cmd *cmd;
struct vhost_scsi_nexus *tv_nexus;
struct scatterlist *sg, *prot_sg;
+ struct iovec *tvc_resp_iov;
struct page **pages;
int tag;
@@ -613,6 +614,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
+ tvc_resp_iov = cmd->tvc_resp_iov;
memset(cmd, 0, sizeof(*cmd));
cmd->tvc_sgl = sg;
cmd->tvc_prot_sgl = prot_sg;
@@ -625,6 +627,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_data_direction = data_direction;
cmd->tvc_nexus = tv_nexus;
cmd->inflight = vhost_scsi_get_inflight(vq);
+ cmd->tvc_resp_iov = tvc_resp_iov;
memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
@@ -935,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- int ret, prot_bytes, c = 0;
+ int ret, prot_bytes, i, c = 0;
u16 lun;
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -1092,7 +1095,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = vq->iov[vc.out];
+ for (i = 0; i < vc.in ; i++)
+ cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
cmd->tvc_in_iovs = vc.in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
@@ -1461,6 +1465,7 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
kfree(tv_cmd->tvc_sgl);
kfree(tv_cmd->tvc_prot_sgl);
kfree(tv_cmd->tvc_upages);
+ kfree(tv_cmd->tvc_resp_iov);
}
sbitmap_free(&svq->scsi_tags);
@@ -1508,6 +1513,14 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
goto out;
}
+ tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
+ sizeof(struct iovec),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_resp_iov) {
+ pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
+ goto out;
+ }
+
tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
sizeof(struct scatterlist),
GFP_KERNEL);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index cbe72bfd2f1f..43c9770b86e5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -661,7 +661,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
-static void vhost_clear_msg(struct vhost_dev *dev)
+void vhost_clear_msg(struct vhost_dev *dev)
{
struct vhost_msg_node *node, *n;
@@ -679,6 +679,7 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
+EXPORT_SYMBOL_GPL(vhost_clear_msg);
void vhost_dev_cleanup(struct vhost_dev *dev)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d9109107af08..790b296271f1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -181,6 +181,7 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
+void vhost_clear_msg(struct vhost_dev *dev);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 1fc8de4ecbeb..8187a7c4f910 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -49,7 +49,6 @@ struct atmel_lcdfb_info {
struct clk *lcdc_clk;
struct backlight_device *backlight;
- u8 bl_power;
u8 saved_lcdcon;
u32 pseudo_palette[16];
@@ -109,22 +108,7 @@ static u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
static int atmel_bl_update_status(struct backlight_device *bl)
{
struct atmel_lcdfb_info *sinfo = bl_get_data(bl);
- int power = sinfo->bl_power;
- int brightness = bl->props.brightness;
-
- /* REVISIT there may be a meaningful difference between
- * fb_blank and power ... there seem to be some cases
- * this doesn't handle correctly.
- */
- if (bl->props.fb_blank != sinfo->bl_power)
- power = bl->props.fb_blank;
- else if (bl->props.power != sinfo->bl_power)
- power = bl->props.power;
-
- if (brightness < 0 && power == FB_BLANK_UNBLANK)
- brightness = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
- else if (power != FB_BLANK_UNBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
if (contrast_ctr & ATMEL_LCDC_POL_POSITIVE)
@@ -133,8 +117,6 @@ static int atmel_bl_update_status(struct backlight_device *bl)
else
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
- bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
-
return 0;
}
@@ -155,8 +137,6 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
struct backlight_properties props;
struct backlight_device *bl;
- sinfo->bl_power = FB_BLANK_UNBLANK;
-
if (sinfo->backlight)
return;
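Note: this and the following fbdev backlight patches drop the open-coded power/fb_blank checks in favour of backlight_get_brightness(), which returns 0 when the device is blanked and props.brightness otherwise. Roughly (the authoritative definition lives in include/linux/backlight.h):

    #include <linux/backlight.h>

    /* Rough paraphrase of the helper these drivers now use. */
    static inline int backlight_get_brightness_sketch(const struct backlight_device *bd)
    {
        if (backlight_is_blank(bd))    /* powered down or fb-blanked */
            return 0;
        return bd->props.brightness;
    }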
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index dd31b9d7d337..36a9ac05a340 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -1766,12 +1766,10 @@ static int aty128_bl_update_status(struct backlight_device *bd)
unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL);
int level;
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK ||
- !par->lcd_on)
+ if (!par->lcd_on)
level = 0;
else
- level = bd->props.brightness;
+ level = backlight_get_brightness(bd);
reg |= LVDS_BL_MOD_EN | LVDS_BLON;
if (level > 0) {
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index d59215a4992e..b02e4e645035 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2219,13 +2219,7 @@ static int aty_bl_update_status(struct backlight_device *bd)
{
struct atyfb_par *par = bl_get_data(bd);
unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
- int level;
-
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ int level = backlight_get_brightness(bd);
reg |= (BLMOD_EN | BIASMOD_EN);
if (level > 0) {
diff --git a/drivers/video/fbdev/aty/radeon_backlight.c b/drivers/video/fbdev/aty/radeon_backlight.c
index d2c1263ad260..427adc838f77 100644
--- a/drivers/video/fbdev/aty/radeon_backlight.c
+++ b/drivers/video/fbdev/aty/radeon_backlight.c
@@ -57,11 +57,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
* backlight. This provides some greater power saving and the display
* is useless without backlight anyway.
*/
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ level = backlight_get_brightness(bd);
del_timer_sync(&rinfo->lvds_timer);
radeon_engine_idle();
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index c730253ab85c..583cbcf09446 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info *info,
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
-void fb_deferred_io_cleanup(struct fb_info *info)
+void fb_deferred_io_release(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
@@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_info *info)
page = fb_deferred_io_page(info, i);
page->mapping = NULL;
}
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_release);
+
+void fb_deferred_io_cleanup(struct fb_info *info)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
+ fb_deferred_io_release(info);
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 14a7d404062c..1b14c21af2b7 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2495,9 +2495,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
return -EINVAL;
+ if (font->width > 32 || font->height > 32)
+ return -EINVAL;
+
/* Make sure drawing engine can handle the font */
- if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
- !(info->pixmap.blit_y & (1 << (font->height - 1))))
+ if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
+ !(info->pixmap.blit_y & BIT(font->height - 1)))
return -EINVAL;
/* Make sure driver can handle the font length */
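Note: blit_x and blit_y are 32-bit capability masks, so they can only describe font sizes 1..32; without the explicit limit above, a larger font made the old 1 << (width - 1) test shift a 32-bit value out of range (a UBSAN shift-out-of-bounds). A small sketch of the combined check (hypothetical helper):

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical helper: a 32-bit capability mask can only describe sizes
     * 1..32, and BIT(n - 1) needs n <= 32, hence the explicit limit first. */
    static int font_size_supported(u32 blit_mask, unsigned int size)
    {
        if (size == 0 || size > 32)
            return -EINVAL;
        return (blit_mask & BIT(size - 1)) ? 0 : -EINVAL;
    }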
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 3a6c8458eb8d..ab3545a00abc 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1454,6 +1454,10 @@ __releases(&info->lock)
struct fb_info * const info = file->private_data;
lock_fb_info(info);
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+ if (info->fbdefio)
+ fb_deferred_io_release(info);
+#endif
if (info->fbops->fb_release)
info->fbops->fb_release(info,1);
module_put(info->fbops->owner);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index b0e690f41025..79e5bfbdd34c 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1050,7 +1050,7 @@ static u32 fb_get_vblank(u32 hfreq)
}
/**
- * fb_get_hblank_by_freq - get horizontal blank time given hfreq
+ * fb_get_hblank_by_hfreq - get horizontal blank time given hfreq
* @hfreq: horizontal freq
* @xres: horizontal resolution in pixels
*
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index b945b68984b9..76771e126d0a 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -283,12 +283,7 @@ static int mx3fb_bl_get_brightness(struct backlight_device *bl)
static int mx3fb_bl_update_status(struct backlight_device *bl)
{
struct mx3fb_data *fbd = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
fbd->backlight_level = (fbd->backlight_level & ~0xFF) | brightness;
diff --git a/drivers/video/fbdev/nvidia/nv_backlight.c b/drivers/video/fbdev/nvidia/nv_backlight.c
index 2ce53529f636..503a7a683855 100644
--- a/drivers/video/fbdev/nvidia/nv_backlight.c
+++ b/drivers/video/fbdev/nvidia/nv_backlight.c
@@ -49,17 +49,11 @@ static int nvidia_bl_update_status(struct backlight_device *bd)
{
struct nvidia_par *par = bl_get_data(bd);
u32 tmp_pcrt, tmp_pmc, fpcontrol;
- int level;
+ int level = backlight_get_brightness(bd);
if (!par->FlatPanel)
return 0;
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
-
tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->PCRTC0, 0x081C) & 0xFFFFFFFC;
fpcontrol = NV_RD32(par->PRAMDAC, 0x0848) & 0xCFFFFFCC;
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index 1960916098d4..e60a276b4855 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -1197,17 +1197,17 @@ static int nvidia_set_fbinfo(struct fb_info *info)
return nvidiafb_check_var(&info->var, info);
}
-static u32 nvidia_get_chipset(struct fb_info *info)
+static u32 nvidia_get_chipset(struct pci_dev *pci_dev,
+ volatile u32 __iomem *REGS)
{
- struct nvidia_par *par = info->par;
- u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
+ u32 id = (pci_dev->vendor << 16) | pci_dev->device;
printk(KERN_INFO PFX "Device ID: %x \n", id);
if ((id & 0xfff0) == 0x00f0 ||
(id & 0xfff0) == 0x02e0) {
/* pci-e */
- id = NV_RD32(par->REGS, 0x1800);
+ id = NV_RD32(REGS, 0x1800);
if ((id & 0x0000ffff) == 0x000010DE)
id = 0x10DE0000 | (id >> 16);
@@ -1220,12 +1220,11 @@ static u32 nvidia_get_chipset(struct fb_info *info)
return id;
}
-static u32 nvidia_get_arch(struct fb_info *info)
+static u32 nvidia_get_arch(u32 Chipset)
{
- struct nvidia_par *par = info->par;
u32 arch = 0;
- switch (par->Chipset & 0x0ff0) {
+ switch (Chipset & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
@@ -1278,16 +1277,44 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
struct fb_info *info;
unsigned short cmd;
int ret;
+ volatile u32 __iomem *REGS;
+ int Chipset;
+ u32 Architecture;
NVTRACE_ENTER();
assert(pd != NULL);
+ if (pci_enable_device(pd)) {
+ printk(KERN_ERR PFX "cannot enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /* enable IO and mem if not already done */
+ pci_read_config_word(pd, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pd, PCI_COMMAND, cmd);
+
+ nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
+ nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
+
+ REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
+ if (!REGS) {
+ printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
+ return -ENODEV;
+ }
+
+ Chipset = nvidia_get_chipset(pd, REGS);
+ Architecture = nvidia_get_arch(Chipset);
+ if (Architecture == 0) {
+ printk(KERN_ERR PFX "unknown NV_ARCH\n");
+ goto err_out;
+ }
+
ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
if (ret)
- return ret;
+ goto err_out;
info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
-
if (!info)
goto err_out;
@@ -1298,11 +1325,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
if (info->pixmap.addr == NULL)
goto err_out_kfree;
- if (pci_enable_device(pd)) {
- printk(KERN_ERR PFX "cannot enable PCI device\n");
- goto err_out_enable;
- }
-
if (pci_request_regions(pd, "nvidiafb")) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
goto err_out_enable;
@@ -1318,34 +1340,17 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
par->paneltweak = paneltweak;
par->reverse_i2c = reverse_i2c;
- /* enable IO and mem if not already done */
- pci_read_config_word(pd, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pd, PCI_COMMAND, cmd);
-
- nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
nvidiafb_fix.smem_start = pci_resource_start(pd, 1);
- nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
-
- par->REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
- if (!par->REGS) {
- printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
- goto err_out_free_base0;
- }
+ par->REGS = REGS;
- par->Chipset = nvidia_get_chipset(info);
- par->Architecture = nvidia_get_arch(info);
-
- if (par->Architecture == 0) {
- printk(KERN_ERR PFX "unknown NV_ARCH\n");
- goto err_out_arch;
- }
+ par->Chipset = Chipset;
+ par->Architecture = Architecture;
sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
if (NVCommonSetup(info))
- goto err_out_arch;
+ goto err_out_free_base0;
par->FbAddress = nvidiafb_fix.smem_start;
par->FbMapSize = par->RamAmountKBytes * 1024;
@@ -1401,7 +1406,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
goto err_out_iounmap_fb;
}
-
printk(KERN_INFO PFX
"PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
info->fix.id,
@@ -1415,15 +1419,14 @@ err_out_iounmap_fb:
err_out_free_base1:
fb_destroy_modedb(info->monspecs.modedb);
nvidia_delete_i2c_busses(par);
-err_out_arch:
- iounmap(par->REGS);
- err_out_free_base0:
+err_out_free_base0:
pci_release_regions(pd);
err_out_enable:
kfree(info->pixmap.addr);
err_out_kfree:
framebuffer_release(info);
err_out:
+ iounmap(REGS);
return -ENODEV;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 4fc4b26a8d30..ba94a0a7bd4f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -331,13 +331,7 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
struct omap_dss_device *in = ddata->in;
int r;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
+ int level = backlight_get_brightness(dev);
dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index bc5a44c2a144..ae937854403b 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -10,6 +10,7 @@
#define DSS_SUBSYS_NAME "DISPLAY"
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
@@ -36,7 +37,7 @@ static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
int r;
bool enable;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -73,7 +74,7 @@ static ssize_t display_tear_store(struct omap_dss_device *dssdev,
if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
return -ENOENT;
- r = strtobool(buf, &te);
+ r = kstrtobool(buf, &te);
if (r)
return r;
@@ -183,7 +184,7 @@ static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
return -ENOENT;
- r = strtobool(buf, &mirror);
+ r = kstrtobool(buf, &mirror);
if (r)
return r;
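Note: strtobool() is a legacy alias for kstrtobool() that is being phased out, so these sysfs handlers switch to the canonical name together with the linux/kstrtox.h include; kstrtobool() accepts inputs such as "1"/"0" and "y"/"n". A minimal sketch of the store pattern used above (hypothetical function and flag):

    #include <linux/kstrtox.h>
    #include <linux/types.h>

    /* Hypothetical sysfs-style store helper using kstrtobool(). */
    static ssize_t example_enabled_store(const char *buf, size_t count, bool *flag)
    {
        bool enable;
        int r;

        r = kstrtobool(buf, &enable);
        if (r)
            return r;

        *flag = enable;
        return count;
    }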
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index ba21c4a2633d..1b644be5fe2e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -10,6 +10,7 @@
#define DSS_SUBSYS_NAME "MANAGER"
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -246,7 +247,7 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
bool enable;
int r;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -290,7 +291,7 @@ static ssize_t manager_alpha_blending_enabled_store(
if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return -ENODEV;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -329,7 +330,7 @@ static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
if (!dss_has_feature(FEAT_CPR))
return -ENODEV;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 601c0beb6de9..1da4fb1c77b4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
+#include <linux/kstrtox.h>
#include <linux/platform_device.h>
#include <video/omapfb_dss.h>
@@ -210,7 +211,7 @@ static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
int r;
bool enable;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 06dc41aa0354..831b2c2fbdf9 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -15,6 +15,7 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/omapfb.h>
@@ -96,7 +97,7 @@ static ssize_t store_mirror(struct device *dev,
int r;
struct fb_var_screeninfo new_var;
- r = strtobool(buf, &mirror);
+ r = kstrtobool(buf, &mirror);
if (r)
return r;
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 644278146d3b..41edc6e79460 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -293,13 +293,7 @@ static int riva_bl_update_status(struct backlight_device *bd)
{
struct riva_par *par = bl_get_data(bd);
U032 tmp_pcrt, tmp_pmc;
- int level;
-
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ int level = backlight_get_brightness(bd);
tmp_pmc = NV_RD32(par->riva.PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->riva.PCRTC0, 0x081C) & 0xFFFFFFFC;
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 4cb10877017c..6ca5d9515d85 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -86,7 +86,7 @@ static int __diag288(unsigned int func, unsigned int timeout,
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (err) : "d"(__func), "d"(__timeout),
- "d"(__action), "d"(__len) : "1", "cc");
+ "d"(__action), "d"(__len) : "1", "cc", "memory");
return err;
}
@@ -268,12 +268,21 @@ static int __init diag288_init(void)
char ebc_begin[] = {
194, 197, 199, 201, 213
};
+ char *ebc_cmd;
watchdog_set_nowayout(&wdt_dev, nowayout_info);
if (MACHINE_IS_VM) {
- if (__diag288_vm(WDT_FUNC_INIT, 15,
- ebc_begin, sizeof(ebc_begin)) != 0) {
+ ebc_cmd = kmalloc(sizeof(ebc_begin), GFP_KERNEL);
+ if (!ebc_cmd) {
+ pr_err("The watchdog cannot be initialized\n");
+ return -ENOMEM;
+ }
+ memcpy(ebc_cmd, ebc_begin, sizeof(ebc_begin));
+ ret = __diag288_vm(WDT_FUNC_INIT, 15,
+ ebc_cmd, sizeof(ebc_begin));
+ kfree(ebc_cmd);
+ if (ret != 0) {
pr_err("The watchdog cannot be initialized\n");
return -EINVAL;
}