Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_video.c | 49
-rw-r--r-- drivers/acpi/resource.c | 24
-rw-r--r-- drivers/ata/ahci.h | 2
-rw-r--r-- drivers/ata/pata_macio.c | 2
-rw-r--r-- drivers/ata/sata_mv.c | 2
-rw-r--r-- drivers/ata/sata_nv.c | 4
-rw-r--r-- drivers/ata/sata_sil24.c | 1
-rw-r--r-- drivers/base/topology.c | 24
-rw-r--r-- drivers/block/amiflop.c | 1
-rw-r--r-- drivers/block/aoe/aoeblk.c | 1
-rw-r--r-- drivers/block/ataflop.c | 1
-rw-r--r-- drivers/block/floppy.c | 1
-rw-r--r-- drivers/block/loop.c | 178
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 1
-rw-r--r-- drivers/block/nbd.c | 116
-rw-r--r-- drivers/block/null_blk/main.c | 31
-rw-r--r-- drivers/block/null_blk/null_blk.h | 1
-rw-r--r-- drivers/block/ps3disk.c | 7
-rw-r--r-- drivers/block/rbd.c | 1
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 3
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 2
-rw-r--r-- drivers/block/rnull.rs | 30
-rw-r--r-- drivers/block/sunvdc.c | 2
-rw-r--r-- drivers/block/swim.c | 2
-rw-r--r-- drivers/block/swim3.c | 3
-rw-r--r-- drivers/block/ublk_drv.c | 1
-rw-r--r-- drivers/block/virtio_blk.c | 9
-rw-r--r-- drivers/block/xen-blkfront.c | 1
-rw-r--r-- drivers/block/z2ram.c | 1
-rw-r--r-- drivers/block/zram/zram_drv.c | 1
-rw-r--r-- drivers/bluetooth/btmtk.c | 7
-rw-r--r-- drivers/bluetooth/btnxpuart.c | 1
-rw-r--r-- drivers/bus/mhi/host/pci_generic.c | 2
-rw-r--r-- drivers/cdrom/cdrom.c | 2
-rw-r--r-- drivers/cdrom/gdrom.c | 2
-rw-r--r-- drivers/clk/imx/clk-imx8mp-audiomix.c | 3
-rw-r--r-- drivers/clk/thead/clk-th1520-ap.c | 13
-rw-r--r-- drivers/cpufreq/Kconfig | 4
-rw-r--r-- drivers/cpuidle/cpuidle-riscv-sbi.c | 4
-rw-r--r-- drivers/cpuidle/governors/teo.c | 91
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r-- drivers/edac/Kconfig | 16
-rw-r--r-- drivers/edac/Makefile | 3
-rw-r--r-- drivers/edac/cell_edac.c | 281
-rw-r--r-- drivers/edac/edac_mc.c | 2
-rw-r--r-- drivers/edac/edac_mc_sysfs.c | 6
-rw-r--r-- drivers/edac/i10nm_base.c | 12
-rw-r--r-- drivers/edac/i5000_edac.c | 8
-rw-r--r-- drivers/edac/loongson_edac.c | 157
-rw-r--r-- drivers/edac/skx_base.c | 9
-rw-r--r-- drivers/edac/skx_common.c | 47
-rw-r--r-- drivers/edac/skx_common.h | 3
-rw-r--r-- drivers/gpio/gpio-loongson-64bit.c | 6
-rw-r--r-- drivers/gpio/gpio-sim.c | 48
-rw-r--r-- drivers/gpio/gpio-virtuser.c | 91
-rw-r--r-- drivers/gpio/gpio-xilinx.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 76
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 48
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 35
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 12
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 12
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7533.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6263.c | 4
-rw-r--r-- drivers/gpu/drm/display/drm_bridge_connector.c | 8
-rw-r--r-- drivers/gpu/drm/drm_bridge.c | 4
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_crtc.c | 25
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 69
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dp.c | 46
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 13
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 49
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c | 1
-rw-r--r-- drivers/gpu/drm/tests/drm_connector_test.c | 60
-rw-r--r-- drivers/gpu/drm/tests/drm_kunit_helpers.c | 3
-rw-r--r-- drivers/gpu/drm/v3d/v3d_irq.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 20
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 5
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_bo.c | 7
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_mocs.c | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.c | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_devcoredump.c | 15
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue.c | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.h | 25
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_idle.c | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_mcr.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_mcr.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ads.c | 15
-rw-r--r-- drivers/gpu/drm/xe/xe_hw_engine.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_oa.c | 135
-rw-r--r-- drivers/gpu/drm/xe/xe_ring_ops.c | 5
-rw-r--r-- drivers/gpu/drm/xe/xe_sched_job_types.h | 2
-rw-r--r-- drivers/hwmon/acpi_power_meter.c | 2
-rw-r--r-- drivers/hwmon/drivetemp.c | 8
-rw-r--r-- drivers/hwmon/ltc2991.c | 2
-rw-r--r-- drivers/hwmon/tmp513.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-rcar.c | 20
-rw-r--r-- drivers/i2c/i2c-atr.c | 2
-rw-r--r-- drivers/i2c/i2c-core-base.c | 1
-rw-r--r-- drivers/i2c/i2c-slave-testunit.c | 19
-rw-r--r-- drivers/i2c/muxes/i2c-demux-pinctrl.c | 6
-rw-r--r-- drivers/iio/adc/ad4695.c | 100
-rw-r--r-- drivers/iio/adc/ad7124.c | 3
-rw-r--r-- drivers/iio/adc/ad7173.c | 10
-rw-r--r-- drivers/iio/adc/ad9467.c | 15
-rw-r--r-- drivers/iio/adc/at91_adc.c | 2
-rw-r--r-- drivers/iio/adc/rockchip_saradc.c | 2
-rw-r--r-- drivers/iio/adc/stm32-dfsdm-adc.c | 13
-rw-r--r-- drivers/iio/adc/ti-ads1119.c | 4
-rw-r--r-- drivers/iio/adc/ti-ads124s08.c | 4
-rw-r--r-- drivers/iio/adc/ti-ads1298.c | 2
-rw-r--r-- drivers/iio/adc/ti-ads8688.c | 2
-rw-r--r-- drivers/iio/dummy/iio_simple_dummy_buffer.c | 2
-rw-r--r-- drivers/iio/gyro/fxas21002c_core.c | 9
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600.h | 1
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 22
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c | 3
-rw-r--r-- drivers/iio/imu/kmx61.c | 2
-rw-r--r-- drivers/iio/inkern.c | 2
-rw-r--r-- drivers/iio/light/bh1745.c | 2
-rw-r--r-- drivers/iio/light/vcnl4035.c | 2
-rw-r--r-- drivers/iio/pressure/zpa2326.c | 2
-rw-r--r-- drivers/iio/temperature/tmp006.c | 2
-rw-r--r-- drivers/iio/test/Kconfig | 2
-rw-r--r-- drivers/iio/test/iio-test-rescale.c | 4
-rw-r--r-- drivers/infiniband/core/cma.c | 16
-rw-r--r-- drivers/infiniband/core/nldev.c | 2
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 16
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 50
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 8
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 79
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 5
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.h | 5
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 18
-rw-r--r-- drivers/infiniband/hw/bnxt_re/roce_hsi.h | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.c | 43
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_mr.c | 5
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 8
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.c | 23
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.h | 3
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_mcast.c | 22
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 24
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_verbs.c | 26
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_verbs.h | 11
-rw-r--r-- drivers/infiniband/sw/siw/siw.h | 7
-rw-r--r-- drivers/infiniband/sw/siw/siw_cm.c | 27
-rw-r--r-- drivers/infiniband/sw/siw/siw_main.c | 15
-rw-r--r-- drivers/infiniband/sw/siw/siw_verbs.c | 35
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-srv.c | 2
-rw-r--r-- drivers/interconnect/icc-clk.c | 10
-rw-r--r-- drivers/interconnect/qcom/icc-rpm.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r-- drivers/irqchip/irq-sunxi-nmi.c | 3
-rw-r--r-- drivers/irqchip/irqchip.c | 4
-rw-r--r-- drivers/macintosh/mac_hid.c | 2
-rw-r--r-- drivers/md/Kconfig | 13
-rw-r--r-- drivers/md/Makefile | 2
-rw-r--r-- drivers/md/bcache/movinggc.c | 2
-rw-r--r-- drivers/md/bcache/writeback.c | 2
-rw-r--r-- drivers/md/dm-ebs-target.c | 2
-rw-r--r-- drivers/md/dm-rq.c | 2
-rw-r--r-- drivers/md/dm-thin.c | 5
-rw-r--r-- drivers/md/dm-verity-fec.c | 61
-rw-r--r-- drivers/md/dm-verity-target.c | 4
-rw-r--r-- drivers/md/md-autodetect.c | 8
-rw-r--r-- drivers/md/md-bitmap.c | 116
-rw-r--r-- drivers/md/md-bitmap.h | 7
-rw-r--r-- drivers/md/md-linear.c | 354
-rw-r--r-- drivers/md/md.c | 31
-rw-r--r-- drivers/md/md.h | 5
-rw-r--r-- drivers/md/persistent-data/dm-array.c | 19
-rw-r--r-- drivers/md/raid0.c | 2
-rw-r--r-- drivers/md/raid1.c | 36
-rw-r--r-- drivers/md/raid1.h | 1
-rw-r--r-- drivers/md/raid10.c | 28
-rw-r--r-- drivers/md/raid10.h | 1
-rw-r--r-- drivers/md/raid5-cache.c | 20
-rw-r--r-- drivers/md/raid5.c | 111
-rw-r--r-- drivers/md/raid5.h | 4
-rw-r--r-- drivers/memstick/core/ms_block.c | 3
-rw-r--r-- drivers/memstick/core/mspro_block.c | 3
-rw-r--r-- drivers/misc/cxl/Kconfig | 6
-rw-r--r-- drivers/misc/cxl/of.c | 2
-rw-r--r-- drivers/misc/cxl/pci.c | 2
-rw-r--r-- drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c | 4
-rw-r--r-- drivers/mmc/core/queue.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-msm.c | 16
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 2
-rw-r--r-- drivers/mtd/spi-nor/core.c | 2
-rw-r--r-- drivers/mtd/ubi/block.c | 2
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 47
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_reg.h | 4
-rw-r--r-- drivers/net/dsa/microchip/lan937x_main.c | 62
-rw-r--r-- drivers/net/dsa/microchip/lan937x_reg.h | 9
-rw-r--r-- drivers/net/ethernet/amd/pds_core/devlink.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 63
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 19
-rw-r--r-- drivers/net/ethernet/google/gve/gve.h | 1
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 77
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx.c | 46
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 96
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 45
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 41
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 51
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.c | 35
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 23
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 263
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_base.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wc.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 3
-rw-r--r-- drivers/net/ethernet/meta/fbnic/Makefile | 1
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic.h | 5
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.c | 2
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 7
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c | 81
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 22
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.h | 7
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 3
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 3
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 44
-rw-r--r-- drivers/net/ethernet/realtek/rtase/rtase_main.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/tc_conntrack.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 43
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 14
-rw-r--r-- drivers/net/ethernet/ti/icssg/icss_iep.c | 8
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 25
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_config.c | 41
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_config.h | 1
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 261
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 5
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 24
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 24
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6
-rw-r--r-- drivers/net/gtp.c | 26
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 6
-rw-r--r-- drivers/net/mctp/mctp-i3c.c | 4
-rw-r--r-- drivers/net/pcs/pcs-xpcs.c | 4
-rw-r--r-- drivers/net/pfcp.c | 15
-rw-r--r-- drivers/net/phy/micrel.c | 114
-rw-r--r-- drivers/net/pse-pd/tps23881.c | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 14
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 37
-rw-r--r-- drivers/net/wireless/st/cw1200/cw1200_spi.c | 2
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_mmio.c | 2
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_state_monitor.c | 26
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_state_monitor.h | 5
-rw-r--r-- drivers/nvme/host/apple.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 67
-rw-r--r-- drivers/nvme/host/fc.c | 1
-rw-r--r-- drivers/nvme/host/nvme.h | 44
-rw-r--r-- drivers/nvme/host/pci.c | 26
-rw-r--r-- drivers/nvme/host/tcp.c | 88
-rw-r--r-- drivers/nvme/target/Kconfig | 11
-rw-r--r-- drivers/nvme/target/Makefile | 2
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 397
-rw-r--r-- drivers/nvme/target/configfs.c | 72
-rw-r--r-- drivers/nvme/target/core.c | 374
-rw-r--r-- drivers/nvme/target/discovery.c | 17
-rw-r--r-- drivers/nvme/target/fabrics-cmd-auth.c | 14
-rw-r--r-- drivers/nvme/target/fabrics-cmd.c | 101
-rw-r--r-- drivers/nvme/target/io-cmd-bdev.c | 5
-rw-r--r-- drivers/nvme/target/nvmet.h | 117
-rw-r--r-- drivers/nvme/target/passthru.c | 18
-rw-r--r-- drivers/nvme/target/pci-epf.c | 2591
-rw-r--r-- drivers/nvme/target/pr.c | 8
-rw-r--r-- drivers/nvme/target/zns.c | 3
-rw-r--r-- drivers/of/address.c | 18
-rw-r--r-- drivers/of/unittest-data/tests-platform.dtsi | 13
-rw-r--r-- drivers/of/unittest.c | 14
-rw-r--r-- drivers/pci/pci-driver.c | 14
-rw-r--r-- drivers/pci/pcie/bwctrl.c | 25
-rw-r--r-- drivers/perf/apple_m1_cpu_pmu.c | 2
-rw-r--r-- drivers/perf/arm-cmn.c | 4
-rw-r--r-- drivers/perf/arm_cspmu/nvidia_cspmu.c | 75
-rw-r--r-- drivers/perf/arm_spe_pmu.c | 22
-rw-r--r-- drivers/perf/dwc_pcie_pmu.c | 72
-rw-r--r-- drivers/perf/fsl_imx9_ddr_perf.c | 33
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c | 42
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 61
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 48
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 44
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 53
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.c | 160
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.h | 49
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c | 43
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_uc_pmu.c | 45
-rw-r--r-- drivers/perf/marvell_cn10k_ddr_pmu.c | 530
-rw-r--r-- drivers/perf/marvell_cn10k_tad_pmu.c | 66
-rw-r--r-- drivers/perf/riscv_pmu_sbi.c | 22
-rw-r--r-- drivers/pinctrl/pinctrl-mcp23s08.c | 6
-rw-r--r-- drivers/platform/x86/amd/pmc/pmc.c | 8
-rw-r--r-- drivers/platform/x86/dell/dell-uart-backlight.c | 5
-rw-r--r-- drivers/platform/x86/hp/hp-wmi.c | 4
-rw-r--r-- drivers/platform/x86/intel/pmc/core_ssram.c | 4
-rw-r--r-- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 1
-rw-r--r-- drivers/platform/x86/intel/tpmi_power_domains.c | 1
-rw-r--r-- drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c | 5
-rw-r--r-- drivers/platform/x86/mlx-platform.c | 2
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 4
-rw-r--r-- drivers/pmdomain/core.c | 6
-rw-r--r-- drivers/pmdomain/imx/gpcv2.c | 4
-rw-r--r-- drivers/pmdomain/imx/imx8mp-blk-ctrl.c | 2
-rw-r--r-- drivers/reset/reset-rzg2l-usbphy-ctrl.c | 1
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 1
-rw-r--r-- drivers/s390/block/scm_blk.c | 1
-rw-r--r-- drivers/s390/char/sclp.h | 18
-rw-r--r-- drivers/s390/char/sclp_config.c | 4
-rw-r--r-- drivers/s390/char/sclp_early.c | 3
-rw-r--r-- drivers/s390/char/sclp_pci.c | 19
-rw-r--r-- drivers/s390/char/sclp_sd.c | 4
-rw-r--r-- drivers/s390/cio/device_ops.c | 2
-rw-r--r-- drivers/s390/cio/qdio.h | 9
-rw-r--r-- drivers/s390/cio/qdio_setup.c | 21
-rw-r--r-- drivers/s390/crypto/pkey_sysfs.c | 128
-rw-r--r-- drivers/scsi/cxlflash/Kconfig | 6
-rw-r--r-- drivers/scsi/cxlflash/main.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 3
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 1
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 6
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 3
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr.h | 1
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 3
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r-- drivers/scsi/scsi_lib.c | 8
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r-- drivers/scsi/sd.c | 22
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 7
-rw-r--r-- drivers/scsi/sr.c | 5
-rw-r--r-- drivers/scsi/virtio_scsi.c | 3
-rw-r--r-- drivers/staging/gpib/Kconfig | 8
-rw-r--r-- drivers/staging/gpib/agilent_82350b/Makefile | 2
-rw-r--r-- drivers/staging/gpib/agilent_82350b/agilent_82350b.c | 4
-rw-r--r-- drivers/staging/gpib/agilent_82357a/Makefile | 2
-rw-r--r-- drivers/staging/gpib/cb7210/Makefile | 2
-rw-r--r-- drivers/staging/gpib/cb7210/cb7210.c | 12
-rw-r--r-- drivers/staging/gpib/cb7210/cb7210.h | 4
-rw-r--r-- drivers/staging/gpib/cec/Makefile | 2
-rw-r--r-- drivers/staging/gpib/cec/cec_gpib.c | 4
-rw-r--r-- drivers/staging/gpib/common/gpib_os.c | 54
-rw-r--r-- drivers/staging/gpib/eastwood/Makefile | 2
-rw-r--r-- drivers/staging/gpib/eastwood/fluke_gpib.c | 12
-rw-r--r-- drivers/staging/gpib/eastwood/fluke_gpib.h | 4
-rw-r--r-- drivers/staging/gpib/fmh_gpib/fmh_gpib.c | 27
-rw-r--r-- drivers/staging/gpib/fmh_gpib/fmh_gpib.h | 4
-rw-r--r-- drivers/staging/gpib/gpio/Makefile | 2
-rw-r--r-- drivers/staging/gpib/gpio/gpib_bitbang.c | 2
-rw-r--r-- drivers/staging/gpib/hp_82335/Makefile | 2
-rw-r--r-- drivers/staging/gpib/hp_82335/hp82335.c | 21
-rw-r--r-- drivers/staging/gpib/hp_82341/Makefile | 2
-rw-r--r-- drivers/staging/gpib/hp_82341/hp_82341.c | 16
-rw-r--r-- drivers/staging/gpib/include/gpibP.h | 12
-rw-r--r-- drivers/staging/gpib/include/gpib_types.h | 3
-rw-r--r-- drivers/staging/gpib/include/nec7210.h | 5
-rw-r--r-- drivers/staging/gpib/include/tms9914.h | 5
-rw-r--r-- drivers/staging/gpib/ines/Makefile | 2
-rw-r--r-- drivers/staging/gpib/ines/ines.h | 4
-rw-r--r-- drivers/staging/gpib/ines/ines_gpib.c | 22
-rw-r--r-- drivers/staging/gpib/lpvo_usb_gpib/Makefile | 2
-rw-r--r-- drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c | 18
-rw-r--r-- drivers/staging/gpib/nec7210/nec7210.c | 16
-rw-r--r-- drivers/staging/gpib/ni_usb/Makefile | 2
-rw-r--r-- drivers/staging/gpib/pc2/Makefile | 2
-rw-r--r-- drivers/staging/gpib/pc2/pc2_gpib.c | 16
-rw-r--r-- drivers/staging/gpib/tms9914/Makefile | 2
-rw-r--r-- drivers/staging/gpib/tms9914/tms9914.c | 8
-rw-r--r-- drivers/staging/gpib/tnt4882/Makefile | 2
-rw-r--r-- drivers/staging/gpib/tnt4882/mite.c | 69
-rw-r--r-- drivers/staging/gpib/tnt4882/mite.h | 13
-rw-r--r-- drivers/staging/gpib/tnt4882/tnt4882_gpib.c | 67
-rw-r--r-- drivers/staging/iio/frequency/ad9832.c | 2
-rw-r--r-- drivers/staging/iio/frequency/ad9834.c | 2
-rw-r--r-- drivers/target/target_core_pscsi.c | 6
-rw-r--r-- drivers/thermal/thermal_of.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 3
-rw-r--r-- drivers/tty/serial/imx.c | 4
-rw-r--r-- drivers/tty/serial/stm32-usart.c | 4
-rw-r--r-- drivers/ufs/core/ufshcd-priv.h | 6
-rw-r--r-- drivers/ufs/core/ufshcd.c | 11
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 31
-rw-r--r-- drivers/ufs/host/ufs-qcom.h | 5
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_imx.c | 25
-rw-r--r-- drivers/usb/class/usblp.c | 7
-rw-r--r-- drivers/usb/core/hub.c | 6
-rw-r--r-- drivers/usb/core/port.c | 7
-rw-r--r-- drivers/usb/dwc3/core.h | 1
-rw-r--r-- drivers/usb/dwc3/dwc3-am62.c | 1
-rw-r--r-- drivers/usb/dwc3/gadget.c | 4
-rw-r--r-- drivers/usb/gadget/Kconfig | 4
-rw-r--r-- drivers/usb/gadget/configfs.c | 6
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 1
-rw-r--r-- drivers/usb/gadget/function/u_serial.c | 8
-rw-r--r-- drivers/usb/host/xhci-plat.c | 3
-rw-r--r-- drivers/usb/serial/cp210x.c | 1
-rw-r--r-- drivers/usb/serial/option.c | 4
-rw-r--r-- drivers/usb/storage/scsiglue.c | 5
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 7
-rw-r--r-- drivers/usb/typec/tcpm/maxim_contaminant.c | 4
-rw-r--r-- drivers/usb/typec/tcpm/tcpci.c | 25
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_ccg.c | 4
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_glink.c | 5
-rw-r--r-- drivers/vfio/pci/vfio_pci_core.c | 17
-rw-r--r-- drivers/virt/coco/arm-cca-guest/arm-cca-guest.c | 8
-rw-r--r-- drivers/virt/vboxguest/Kconfig | 2
-rw-r--r-- drivers/virtio/virtio.c | 19
-rw-r--r-- drivers/watchdog/stm32_iwdg.c | 2
-rw-r--r-- drivers/zorro/zorro-sysfs.c | 10
499 files changed, 8896 insertions, 3664 deletions
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8274a17872ed..a972831dbd66 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -610,16 +610,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return: the length of the EDID on success, or a negative error code on
+ * failure.
+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +648,17 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(obj);
+ return ret;
}
/* bus */
@@ -1435,9 +1448,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1488,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
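
[The rework above changes the ownership contract: acpi_video_get_edid() now hands back a kmemdup()'d buffer rather than a pointer into a live ACPI object, so the caller frees it with kfree(). A minimal caller sketch follows; the display-type constant, device_id value, and function name are illustrative, not part of this patch.]

#include <acpi/video.h>
#include <linux/slab.h>

static int example_fetch_edid(struct acpi_device *adev)
{
	void *edid;
	int len;

	/* type/device_id values here are illustrative */
	len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
	if (len < 0)
		return len;	/* negative errno; no buffer was allocated */

	pr_info("EDID is %d bytes\n", len);
	kfree(edid);		/* the caller owns the duplicated buffer */
	return 0;
}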
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 821867de43be..90aaec923889 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -441,6 +441,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook X1504VAP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
+ },
+ },
+ {
/* Asus Vivobook X1704VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -646,6 +653,17 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
},
},
+ {
+ /*
+ * TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
+ * board-name is changed, so check OEM strings instead. Note
+ * OEM string matches are always exact matches.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
+ },
+ },
{ }
};
@@ -671,11 +689,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
- if (dmi_check_system(entry->system) &&
- entry->irq == gsi &&
+ if (entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
- entry->shareable == shareable)
+ entry->shareable == shareable &&
+ dmi_check_system(entry->system))
return entry->override;
}
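
[The condition reordering in acpi_dev_irq_override() exploits &&'s short-circuit evaluation: the four cheap field comparisons now reject non-matching entries before the comparatively expensive DMI table scan runs. Restated as a standalone predicate, helper name hypothetical:]

#include <linux/dmi.h>

static bool entry_matches(const struct irq_override_cmp *e, u32 gsi,
			  u8 triggering, u8 polarity, u8 shareable)
{
	return e->irq == gsi &&			/* cheap comparisons first */
	       e->triggering == triggering &&
	       e->polarity == polarity &&
	       e->shareable == shareable &&
	       dmi_check_system(e->system);	/* expensive table walk last */
}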
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8f40f75ba08c..06781bdde0d2 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -396,7 +396,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .tag_alloc_policy_rr = true, \
.device_configure = ata_scsi_device_configure
extern struct ata_port_operations ahci_ops;
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index f2f36e55a1f4..4b01bb6880b0 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -935,7 +935,7 @@ static const struct scsi_host_template pata_macio_sht = {
.device_configure = pata_macio_device_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static struct ata_port_operations pata_macio_ops = {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b8f363370e1a..21c72650f9cc 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -672,7 +672,7 @@ static const struct scsi_host_template mv6_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.device_configure = ata_scsi_device_configure
};
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 36d99043ef50..823cce5ea1e9 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -385,7 +385,7 @@ static const struct scsi_host_template nv_adma_sht = {
.device_configure = nv_adma_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static const struct scsi_host_template nv_swncq_sht = {
@@ -396,7 +396,7 @@ static const struct scsi_host_template nv_swncq_sht = {
.device_configure = nv_swncq_device_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
/*
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 72c03cbdaff4..935b13e79dec 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -378,7 +378,6 @@ static const struct scsi_host_template sil24_sht = {
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
- .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.device_configure = ata_scsi_device_configure
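
[Across the ata hunks above, the enum-valued tag_alloc_policy field gives way to a boolean, and sil24 drops its FIFO request outright, which suggests FIFO is now the default and only round-robin needs opting in. A hedged sketch of a host template under the new field, with illustrative values:]

#include <scsi/scsi_host.h>

static const struct scsi_host_template example_sht = {
	.name			= "example",
	.can_queue		= 31,
	/* formerly: .tag_alloc_policy = BLK_TAG_ALLOC_RR */
	.tag_alloc_policy_rr	= true,
};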
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index cf160dd2c27b..b962da263eee 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -27,9 +27,17 @@ static ssize_t name##_read(struct file *file, struct kobject *kobj, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
\
- return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
+ \
+ return n; \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
@@ -37,9 +45,17 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
+ \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_list_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
\
- return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ return n; \
}
define_id_show_func(physical_package_id, "%d");
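
[The macro rewrite above snapshots the shared topology mask into a privately allocated cpumask before printing, so the chunked sysfs read cannot observe the mask changing (for example across CPU hotplug) between calls. The same pattern as a standalone sketch, function name hypothetical:]

#include <linux/cpumask.h>

static ssize_t example_mask_read(char *buf, const struct cpumask *shared,
				 loff_t off, size_t count)
{
	cpumask_var_t snapshot;
	ssize_t n;

	if (!alloc_cpumask_var(&snapshot, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(snapshot, shared);		/* stable private copy */
	n = cpumap_print_bitmask_to_buf(buf, snapshot, off, count);
	free_cpumask_var(snapshot);

	return n;
}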
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 49ced65bef4c..9edd4468f755 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1819,7 +1819,6 @@ static int fd_alloc_drive(int drive)
unit[drive].tag_set.nr_maps = 1;
unit[drive].tag_set.queue_depth = 2;
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
- unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
goto out_cleanup_trackbuf;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2028795ec61c..00b74a845328 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -368,7 +368,6 @@ aoeblk_gdalloc(void *vp)
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4ba98c6654be..110f9aca2667 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2088,7 +2088,6 @@ static int __init atari_floppy_init (void)
unit[i].tag_set.nr_maps = 1;
unit[i].tag_set.queue_depth = 2;
unit[i].tag_set.numa_node = NUMA_NO_NODE;
- unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
if (ret)
goto err;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3affb538b989..abf0486f0d4f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4596,7 +4596,6 @@ static int __init do_floppy_init(void)
tag_sets[drive].nr_maps = 1;
tag_sets[drive].queue_depth = 2;
tag_sets[drive].numa_node = NUMA_NO_NODE;
- tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
if (err)
goto out_put_disk;
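
[This is the first of many hunks below that delete BLK_MQ_F_SHOULD_MERGE with no replacement (aoe, ataflop, mtip32xx, nbd, null_blk, rbd, rnbd and ps3disk do the same), which suggests blk-mq now merges requests unconditionally and the flag is simply gone. A minimal tag-set initialization under that assumption, function name hypothetical:]

#include <linux/blk-mq.h>
#include <linux/string.h>

static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *ops)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 2;
	set->numa_node = NUMA_NO_NODE;
	/* no merge flag to set: merging no longer needs opting in */
	return blk_mq_alloc_tag_set(set);
}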
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8f6761c27c68..1ec7417c7f00 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,7 +68,6 @@ struct loop_device {
struct list_head idle_worker_list;
struct rb_root worker_tree;
struct timer_list timer;
- bool use_dio;
bool sysfs_inited;
struct request_queue *lo_queue;
@@ -182,41 +181,44 @@ static bool lo_bdev_can_use_dio(struct loop_device *lo,
return true;
}
-static void __loop_update_dio(struct loop_device *lo, bool dio)
+static bool lo_can_use_dio(struct loop_device *lo)
{
- struct file *file = lo->lo_backing_file;
- struct inode *inode = file->f_mapping->host;
- struct block_device *backing_bdev = NULL;
- bool use_dio;
+ struct inode *inode = lo->lo_backing_file->f_mapping->host;
- if (S_ISBLK(inode->i_mode))
- backing_bdev = I_BDEV(inode);
- else if (inode->i_sb->s_bdev)
- backing_bdev = inode->i_sb->s_bdev;
+ if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
+ return false;
- use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
- (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
+ if (S_ISBLK(inode->i_mode))
+ return lo_bdev_can_use_dio(lo, I_BDEV(inode));
+ if (inode->i_sb->s_bdev)
+ return lo_bdev_can_use_dio(lo, inode->i_sb->s_bdev);
+ return true;
+}
- if (lo->use_dio == use_dio)
- return;
+/*
+ * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
+ * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
+ * disabled when the device block size is too small or the offset is unaligned.
+ *
+ * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
+ * not the originally passed in one.
+ */
+static inline void loop_update_dio(struct loop_device *lo)
+{
+ bool dio_in_use = lo->lo_flags & LO_FLAGS_DIRECT_IO;
- /* flush dirty pages before changing direct IO */
- vfs_fsync(file, 0);
+ lockdep_assert_held(&lo->lo_mutex);
+ WARN_ON_ONCE(lo->lo_state == Lo_bound &&
+ lo->lo_queue->mq_freeze_depth == 0);
- /*
- * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
- * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
- * will get updated by ioctl(LOOP_GET_STATUS)
- */
- if (lo->lo_state == Lo_bound)
- blk_mq_freeze_queue(lo->lo_queue);
- lo->use_dio = use_dio;
- if (use_dio)
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- if (lo->lo_state == Lo_bound)
- blk_mq_unfreeze_queue(lo->lo_queue);
+
+ /* flush dirty pages before starting to issue direct I/O */
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !dio_in_use)
+ vfs_fsync(lo->lo_backing_file, 0);
}
/**
@@ -311,6 +313,13 @@ static void loop_clear_limits(struct loop_device *lo, int mode)
lim.discard_granularity = 0;
}
+ /*
+ * XXX: this updates the queue limits without freezing the queue, which
+ * is against the locking protocol and dangerous. But we can't just
+ * freeze the queue as we're inside the ->queue_rq method here. So this
+ * should move out into a workqueue unless we get the file operations to
+ * advertise if they support specific fallocate operations.
+ */
queue_limits_commit_update(lo->lo_queue, &lim);
}
@@ -520,12 +529,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
}
}
-static inline void loop_update_dio(struct loop_device *lo)
-{
- __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
- lo->use_dio);
-}
-
static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
@@ -964,7 +967,6 @@ loop_set_status_from_info(struct loop_device *lo,
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_flags = info->lo_flags;
return 0;
}
@@ -977,12 +979,12 @@ static unsigned int loop_default_blocksize(struct loop_device *lo,
return SECTOR_SIZE;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
+static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
+ unsigned int bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct block_device *backing_bdev = NULL;
- struct queue_limits lim;
u32 granularity = 0, max_discard_sectors = 0;
if (S_ISBLK(inode->i_mode))
@@ -995,22 +997,20 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
loop_get_discard_config(lo, &granularity, &max_discard_sectors);
- lim = queue_limits_start_update(lo->lo_queue);
- lim.logical_block_size = bsize;
- lim.physical_block_size = bsize;
- lim.io_min = bsize;
- lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ lim->logical_block_size = bsize;
+ lim->physical_block_size = bsize;
+ lim->io_min = bsize;
+ lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
- lim.features |= BLK_FEAT_WRITE_CACHE;
+ lim->features |= BLK_FEAT_WRITE_CACHE;
if (backing_bdev && !bdev_nonrot(backing_bdev))
- lim.features |= BLK_FEAT_ROTATIONAL;
- lim.max_hw_discard_sectors = max_discard_sectors;
- lim.max_write_zeroes_sectors = max_discard_sectors;
+ lim->features |= BLK_FEAT_ROTATIONAL;
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
if (max_discard_sectors)
- lim.discard_granularity = granularity;
+ lim->discard_granularity = granularity;
else
- lim.discard_granularity = 0;
- return queue_limits_commit_update(lo->lo_queue, &lim);
+ lim->discard_granularity = 0;
}
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
@@ -1019,6 +1019,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
{
struct file *file = fget(config->fd);
struct address_space *mapping;
+ struct queue_limits lim;
int error;
loff_t size;
bool partscan;
@@ -1063,6 +1064,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
+ lo->lo_flags = config->info.lo_flags;
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
@@ -1084,13 +1086,15 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- error = loop_reconfigure_limits(lo, config->block_size);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, config->block_size);
+ /* No need to freeze the queue as the device isn't bound yet. */
+ error = queue_limits_commit_update(lo->lo_queue, &lim);
if (error)
goto out_unlock;
@@ -1150,7 +1154,12 @@ static void __loop_clr_fd(struct loop_device *lo)
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- /* reset the block size to the default */
+ /*
+ * Reset the block size to the default.
+ *
+ * No queue freezing needed because this is called from the final
+ * ->release call only, so there can't be any outstanding I/O.
+ */
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = SECTOR_SIZE;
lim.physical_block_size = SECTOR_SIZE;
@@ -1244,7 +1253,6 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
@@ -1263,21 +1271,19 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
invalidate_bdev(lo->lo_device);
}
- /* I/O need to be drained during transfer transition */
+ /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
blk_mq_freeze_queue(lo->lo_queue);
- prev_lo_flags = lo->lo_flags;
-
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- /* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For those flags, use the previous values instead */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For flags that can't be cleared, use previous values too */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ (info->lo_flags & LO_FLAGS_PARTSCAN);
+
+ lo->lo_flags &= ~(LOOP_SET_STATUS_SETTABLE_FLAGS |
+ LOOP_SET_STATUS_CLEARABLE_FLAGS);
+ lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
if (size_changed) {
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
@@ -1285,17 +1291,13 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
loop_set_size(lo, new_size);
}
- /* update dio if lo_offset or transfer is changed */
- __loop_update_dio(lo, lo->use_dio);
+ /* update the direct I/O flag if lo_offset changed */
+ loop_update_dio(lo);
out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
-
- if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
+ if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- partscan = true;
- }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
@@ -1444,20 +1446,32 @@ static int loop_set_capacity(struct loop_device *lo)
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
- int error = -ENXIO;
- if (lo->lo_state != Lo_bound)
- goto out;
+ bool use_dio = !!arg;
- __loop_update_dio(lo, !!arg);
- if (lo->use_dio == !!arg)
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+ if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
return 0;
- error = -EINVAL;
- out:
- return error;
+
+ if (use_dio) {
+ if (!lo_can_use_dio(lo))
+ return -EINVAL;
+ /* flush dirty pages before starting to use direct I/O */
+ vfs_fsync(lo->lo_backing_file, 0);
+ }
+
+ blk_mq_freeze_queue(lo->lo_queue);
+ if (use_dio)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ else
+ lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ blk_mq_unfreeze_queue(lo->lo_queue);
+ return 0;
}
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
+ struct queue_limits lim;
int err = 0;
if (lo->lo_state != Lo_bound)
@@ -1469,8 +1483,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, arg);
+
blk_mq_freeze_queue(lo->lo_queue);
- err = loop_reconfigure_limits(lo, arg);
+ err = queue_limits_commit_update(lo->lo_queue, &lim);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
@@ -1854,7 +1871,7 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->use_aio = false;
break;
default:
- cmd->use_aio = lo->use_dio;
+ cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
break;
}
@@ -2023,8 +2040,7 @@ static int loop_add(int i)
lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
- BLK_MQ_F_NO_SCHED_BY_DEFAULT;
+ lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
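
[For context on the LO_FLAGS_DIRECT_IO handling above: userspace toggles direct I/O on a bound loop device with the long-standing LOOP_SET_DIRECT_IO ioctl (what losetup --direct-io uses); under the reworked loop_set_dio() it returns -EINVAL when lo_can_use_dio() rejects the configuration. A minimal userspace sketch, assuming /dev/loop0 is already bound:]

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0)
		return 1;
	/* 1 enables direct I/O, 0 disables it */
	if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL))
		perror("LOOP_SET_DIRECT_IO");
	close(fd);
	return 0;
}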
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 43701b7b10a7..95361099a2dc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3416,7 +3416,6 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b852050d8a96..b63a0f29a54a 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,6 +62,7 @@ struct nbd_sock {
bool dead;
int fallback_index;
int cookie;
+ struct work_struct work;
};
struct recv_thread_args {
@@ -141,6 +142,9 @@ struct nbd_device {
*/
#define NBD_CMD_INFLIGHT 2
+/* Only part of the request header or data payload has been sent */
+#define NBD_CMD_PARTIAL_SEND 3
+
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
@@ -327,8 +331,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
{
struct queue_limits lim;
int error;
@@ -368,7 +371,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
- error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
if (error)
return error;
@@ -379,18 +382,6 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
-{
- int error;
-
- blk_mq_freeze_queue(nbd->disk->queue);
- error = __nbd_set_size(nbd, bytesize, blksize);
- blk_mq_unfreeze_queue(nbd->disk->queue);
-
- return error;
-}
-
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -466,6 +457,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ /* partial send is handled in nbd_sock's work function */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_RESET_TIMER;
+ }
+
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
@@ -615,6 +612,30 @@ static inline int was_interrupted(int result)
}
/*
+ * We've already sent the header or part of the data payload, so we have
+ * no choice but to mark the command pending and finish it from a work item.
+ *
+ * We also have to return BLK_STS_OK to the block core; otherwise the same
+ * request may be re-dispatched with a different tag, while the header has
+ * already gone out with the old tag, which would confuse reply handling.
+ */
+static void nbd_sched_pending_work(struct nbd_device *nbd,
+ struct nbd_sock *nsock,
+ struct nbd_cmd *cmd, int sent)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ /* pending work should be scheduled only once */
+ WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
+
+ nsock->pending = req;
+ nsock->sent = sent;
+ set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+ refcount_inc(&nbd->config_refs);
+ schedule_work(&nsock->work);
+}
+
+/*
* Returns BLK_STS_RESOURCE if the caller should retry after a delay.
* Returns BLK_STS_IOERR if sending failed.
*/
@@ -699,8 +720,8 @@ static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
* completely done.
*/
if (sent) {
- nsock->pending = req;
- nsock->sent = sent;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
@@ -737,14 +758,8 @@ send_pages:
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result < 0) {
if (was_interrupted(result)) {
- /* We've already sent the header, we
- * have no choice but to set pending and
- * return BUSY.
- */
- nsock->pending = req;
- nsock->sent = sent;
- set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -770,6 +785,14 @@ out:
return BLK_STS_OK;
requeue:
+	/*
+	 * Can't requeue in case we are dealing with a partial send.
+	 *
+	 * The remainder must be sent from the pending work function.
+	 */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
+ return BLK_STS_OK;
+
/* retry on a different socket */
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
@@ -778,6 +801,44 @@ requeue:
return BLK_STS_OK;
}
+/* handle partial sending */
+static void nbd_pending_cmd_work(struct work_struct *work)
+{
+ struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
+ struct request *req = nsock->pending;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ unsigned long deadline = READ_ONCE(req->deadline);
+ unsigned int wait_ms = 2;
+
+ mutex_lock(&cmd->lock);
+
+ WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
+ if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
+ goto out;
+
+ mutex_lock(&nsock->tx_lock);
+ while (true) {
+ nbd_send_cmd(nbd, cmd, cmd->index);
+ if (!nsock->pending)
+ break;
+
+ /* don't bother timeout handler for partial sending */
+ if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
+ cmd->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+ break;
+ }
+ msleep(wait_ms);
+ wait_ms *= 2;
+ }
+ mutex_unlock(&nsock->tx_lock);
+ clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+out:
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+}
+
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
@@ -1224,6 +1285,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->pending = NULL;
nsock->sent = 0;
nsock->cookie = 0;
+ INIT_WORK(&nsock->work, nbd_pending_cmd_work);
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
blk_mq_unfreeze_queue(nbd->disk->queue);
@@ -1841,8 +1903,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_BLOCKING;
+ nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->backend = NULL;
@@ -2180,6 +2241,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
+ clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
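
[The nbd_set_size() change above folds the open-coded freeze/commit/unfreeze sequence into queue_limits_commit_update_frozen(), which the hunk shows taking the queue and limits and returning an error code. A sketch of the before/after pattern; the function and field choices here are illustrative:]

#include <linux/blkdev.h>

static int example_set_block_size(struct request_queue *q, unsigned int bs)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.logical_block_size = bs;
	lim.physical_block_size = bs;

	/*
	 * Previously: blk_mq_freeze_queue(q);
	 *             err = queue_limits_commit_update(q, &lim);
	 *             blk_mq_unfreeze_queue(q);
	 */
	return queue_limits_commit_update_frozen(q, &lim);
}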
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 3c3d8d200abb..d94ef37480bd 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -266,6 +266,10 @@ static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
+static bool g_rotational;
+module_param_named(rotational, g_rotational, bool, S_IRUGO);
+MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -468,6 +472,7 @@ NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
+NULLB_DEVICE_ATTR(rotational, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -621,6 +626,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
&nullb_device_attr_fua,
+ &nullb_device_attr_rotational,
NULL,
};
@@ -706,7 +712,8 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"shared_tags,size,submit_queues,use_per_node_hctx,"
"virt_boundary,zoned,zone_capacity,zone_max_active,"
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full\n");
+ "zone_size,zone_append_max_sectors,zone_full,"
+ "rotational\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -793,6 +800,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
dev->fua = g_fua;
+ dev->rotational = g_rotational;
return dev;
}
@@ -899,7 +907,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
if (radix_tree_insert(root, idx, t_page)) {
null_free_page(t_page);
t_page = radix_tree_lookup(root, idx);
- WARN_ON(!t_page || t_page->page->index != idx);
+ WARN_ON(!t_page || t_page->page->private != idx);
} else if (is_cache)
nullb->dev->curr_cache += PAGE_SIZE;
@@ -922,7 +930,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
(void **)t_pages, pos, FREE_BATCH);
for (i = 0; i < nr_pages; i++) {
- pos = t_pages[i]->page->index;
+ pos = t_pages[i]->page->private;
ret = radix_tree_delete_item(root, pos, t_pages[i]);
WARN_ON(ret != t_pages[i]);
null_free_page(ret);
@@ -948,7 +956,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
t_page = radix_tree_lookup(root, idx);
- WARN_ON(t_page && t_page->page->index != idx);
+ WARN_ON(t_page && t_page->page->private != idx);
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
return t_page;
@@ -991,7 +999,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_lock_irq(&nullb->lock);
idx = sector >> PAGE_SECTORS_SHIFT;
- t_page->page->index = idx;
+ t_page->page->private = idx;
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
radix_tree_preload_end();
@@ -1011,7 +1019,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
struct nullb_page *t_page, *ret;
void *dst, *src;
- idx = c_page->page->index;
+ idx = c_page->page->private;
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
@@ -1070,7 +1078,7 @@ again:
* avoid race, we don't allow page free
*/
for (i = 0; i < nr_pages; i++) {
- nullb->cache_flush_pos = c_pages[i]->page->index;
+ nullb->cache_flush_pos = c_pages[i]->page->private;
/*
* We found the page which is being flushed to disk by other
* threads
@@ -1783,9 +1791,8 @@ static int null_init_global_tag_set(void)
tag_set.nr_hw_queues = g_submit_queues;
tag_set.queue_depth = g_hw_queue_depth;
tag_set.numa_node = g_home_node;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
- tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (g_shared_tag_bitmap)
tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (g_blocking)
@@ -1809,9 +1816,8 @@ static int null_setup_tagset(struct nullb *nullb)
nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
nullb->tag_set->numa_node = nullb->dev->home_node;
- nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
if (nullb->dev->no_sched)
- nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (nullb->dev->shared_tag_bitmap)
nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (nullb->dev->blocking)
@@ -1938,6 +1944,9 @@ static int null_add_dev(struct nullb_device *dev)
lim.features |= BLK_FEAT_FUA;
}
+ if (dev->rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index a7bb32f73ec3..6f9fe6171087 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -107,6 +107,7 @@ struct nullb_device {
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
bool fua; /* Support FUA */
+ bool rotational; /* Fake rotational device */
};
struct nullb {
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index ff45ed766469..dc9e4a14b885 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
unsigned int devidx;
struct queue_limits lim = {
.logical_block_size = dev->blk_size,
- .max_hw_sectors = dev->bounce_size >> 9,
+ .max_hw_sectors = BOUNCE_SIZE >> 9,
.max_segments = -1,
- .max_segment_size = dev->bounce_size,
+ .max_segment_size = BOUNCE_SIZE,
.dma_alignment = dev->blk_size - 1,
.features = BLK_FEAT_WRITE_CACHE |
BLK_FEAT_ROTATIONAL,
@@ -434,8 +434,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
+ error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
if (error)
goto fail_teardown;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ac421dbeeb11..5b393e4a1ddf 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4964,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
- rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c34695d2eea7..82467ecde7ec 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->ops = &rnbd_mq_ops;
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_QUEUE_SHARED;
+ tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 08ce6d96d04c..2ee6e9bd4e28 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -167,7 +167,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
- bio_set_prio(bio, prio);
+ bio->bi_ioprio = prio;
submit_bio(bio);
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
index 9cca05dcf772..ddf3629d8894 100644
--- a/drivers/block/rnull.rs
+++ b/drivers/block/rnull.rs
@@ -32,25 +32,31 @@ module! {
license: "GPL v2",
}
+#[pin_data]
struct NullBlkModule {
- _disk: Pin<KBox<Mutex<GenDisk<NullBlkDevice>>>>,
+ #[pin]
+ _disk: Mutex<GenDisk<NullBlkDevice>>,
}
-impl kernel::Module for NullBlkModule {
- fn init(_module: &'static ThisModule) -> Result<Self> {
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
pr_info!("Rust null_blk loaded\n");
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)?;
+	// Use an immediately-called closure as a stable `try` block
+ let disk = /* try */ (|| {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = KBox::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(4096 << 11)
+ .logical_block_size(4096)?
+ .physical_block_size(4096)?
+ .rotational(false)
+ .build(format_args!("rnullb{}", 0), tagset)
+ })();
- Ok(Self { _disk: disk })
+ try_pin_init!(Self {
+ _disk <- new_mutex!(disk?, "nullb:disk"),
+ })
}
}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 2d38331ee667..88dcae6ec575 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -829,7 +829,7 @@ static int probe_disk(struct vdc_port *port)
}
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
- VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+ VDC_TX_RING_SIZE, 0);
if (err)
return err;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index be4ac58afe41..eda33c5eb5e2 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -818,7 +818,7 @@ static int swim_floppy_init(struct swim_priv *swd)
for (drive = 0; drive < swd->floppy_count; drive++) {
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
- &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+ &swim_mq_ops, 2, 0);
if (err)
goto exit_put_disks;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 90be1017f7bf..9914153b365b 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1208,8 +1208,7 @@ static int swim3_attach(struct macio_dev *mdev,
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
- rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
if (rc)
goto out_unregister;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 934ab9332c80..529085181f35 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2213,7 +2213,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
- ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3efe378f1386..bbaa26b523b8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -13,7 +13,6 @@
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
@@ -1106,9 +1105,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim.features |= BLK_FEAT_WRITE_CACHE;
else
lim.features &= ~BLK_FEAT_WRITE_CACHE;
- blk_mq_freeze_queue(disk->queue);
- i = queue_limits_commit_update(disk->queue, &lim);
- blk_mq_unfreeze_queue(disk->queue);
+ i = queue_limits_commit_update_frozen(disk->queue, &lim);
if (i)
return i;
return count;
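
Reviewer note: queue_limits_commit_update_frozen() folds the freeze/commit/unfreeze sequence being deleted above into a single call. A sketch of the intent only, not the exact block-layer implementation (which also carries the freeze bookkeeping):

	static int demo_commit_frozen(struct request_queue *q,
				      struct queue_limits *lim)
	{
		int ret;

		blk_mq_freeze_queue(q);			/* quiesce the queue */
		ret = queue_limits_commit_update(q, lim);
		blk_mq_unfreeze_queue(q);
		return ret;
	}
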
@@ -1181,7 +1178,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
- blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ blk_mq_map_hw_queues(&set->map[i],
+ &vblk->vdev->dev, 0);
}
}
@@ -1481,7 +1479,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 59ce113b882a..edcd08a9dcef 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1131,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
} else
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
- info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
info->tag_set.cmd_size = sizeof(struct blkif_req);
info->tag_set.driver_data = info;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 4b7219be1bb8..8c1c7f4211eb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -354,7 +354,6 @@ static int __init z2_init(void)
tag_set.nr_maps = 1;
tag_set.queue_depth = 16;
tag_set.numa_node = NUMA_NO_NODE;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&tag_set);
if (ret)
goto out_unregister_blkdev;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 45df5eeabc5e..7903a4da40ac 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1468,6 +1468,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 7fd9d5ddce02..224eafc27dbe 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -1472,10 +1472,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
int btmtk_usb_shutdown(struct hci_dev *hdev)
{
+ struct btmtk_data *data = hci_get_priv(hdev);
struct btmtk_hci_wmt_params wmt_params;
u8 param = 0;
int err;
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
/* Disable the device */
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
@@ -1486,9 +1491,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ usb_autopm_put_interface(data->intf);
return err;
}
+ usb_autopm_put_interface(data->intf);
return 0;
}
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
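
Reviewer note: the fix brackets the WMT shutdown command with usb_autopm_get_interface()/usb_autopm_put_interface() so the interface cannot autosuspend while the command is in flight; note the put on the error path as well. The shape of the pattern, with a hypothetical demo_send_command() helper:

	/* Sketch: hold a PM reference on a USB interface across one transaction. */
	static int demo_do_io(struct usb_interface *intf)
	{
		int err = usb_autopm_get_interface(intf);	/* resume if suspended */

		if (err < 0)
			return err;

		err = demo_send_command(intf);	/* hypothetical command helper */

		usb_autopm_put_interface(intf);	/* drop the PM ref on all paths */
		return err;
	}
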
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 569f5b7d6e46..1230045d78a5 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1381,6 +1381,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
+ serdev_device_wait_until_sent(serdev, 0);
hdev->stat.byte_tx += len;
skb_pull(skb, len);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 07645ce2119a..56ba4192c89c 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -917,7 +917,7 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
return err;
}
- mhi_cntrl->regs = pcim_iomap_region(pdev, 1 << bar_num, pci_name(pdev));
+ mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
if (IS_ERR(mhi_cntrl->regs)) {
err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 6a99a459b80b..51745ed1bbab 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1106,7 +1106,7 @@ int open_for_data(struct cdrom_device_info *cdi)
}
}
- cd_dbg(CD_OPEN, "all seems well, opening the devicen");
+ cd_dbg(CD_OPEN, "all seems well, opening the device\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 64b097e830d4..85aceab5eac6 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -777,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_gdrom_setupcd();
err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (err)
goto probe_fail_free_cd_info;
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index b2cb157703c5..c409fc7e0618 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
#else /* !CONFIG_RESET_CONTROLLER */
-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
{
return 0;
}
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 17e32ae08720..1015fab95251 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -779,6 +779,13 @@ static struct ccu_div dpu1_clk = {
},
};
+static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
+ &video_pll_clk.common.hw, 4, 1, 0);
+
+static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
+ { .hw = &emmc_sdio_ref_clk.hw },
+};
+
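
Reviewer note: CLK_FIXED_FACTOR_HW(_struct, _name, _parent_hw, _div, _mult, _flags) models rate = parent * mult / div, so with div = 4 and mult = 1 the new emmc-sdio-ref clock is video_pll / 4. For a hypothetical 2376 MHz video PLL that works out to 594 MHz; the actual parent rate depends on the PLL configuration.
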
static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
@@ -798,7 +805,7 @@ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", p
0x150, BIT(12), 0);
static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
@@ -1059,6 +1066,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
return ret;
priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
return ret;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 92a83a9bb2e1..26e98fea991a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -325,8 +325,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
-endif
-
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
@@ -355,4 +353,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
+endif
+
endmenu
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 14462c092039..0c92a628bbd4 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -504,12 +504,12 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- struct device_node *np, *pds_node;
+ struct device_node *pds_node;
/* Detect OSI support based on CPU DT nodes */
sbi_cpuidle_use_osi = true;
for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (np &&
of_property_present(np, "power-domains") &&
of_property_present(np, "power-domain-names")) {
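
Reviewer note: the __free(device_node) annotation (from <linux/cleanup.h> together with the device_node cleanup class declared in <linux/of.h>) runs of_node_put() automatically when np goes out of scope, which is why no explicit put is needed in the loop. Minimal sketch of the pattern:

	/* Sketch: np is put automatically at scope exit via __free(device_node). */
	static bool demo_cpu_has_power_domain(int cpu)
	{
		struct device_node *np __free(device_node) =
			of_cpu_device_node_get(cpu);

		return np && of_property_present(np, "power-domains");
	}
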
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index f2992f92d8db..173ddcac540a 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
- * timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant cause of CPU
- * wakeups from idle states. Moreover, information about what happened in the
- * (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the (known) time till the closest
- * timer event, referred to as the sleep length, is likely to be suitable for
- * the upcoming CPU idle period and, if not, then which of the shallower idle
- * states to choose instead of it.
+ * timer interrupts are two or more orders of magnitude more frequent than any
+ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
+ * Moreover, in principle, the time when the next timer event is going to occur
+ * can be determined at the idle state selection time, although doing that may
+ * be costly, so it can be regarded as the most reliable source of information
+ * for idle state selection.
*
- * Of course, non-timer wakeup sources are more important in some use cases
- * which can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that context it is not necessary to
- * consider idle duration values greater than the sleep length, because the
- * closest timer will ultimately wake up the CPU anyway unless it is woken up
- * earlier.
+ * Of course, non-timer wakeup sources are more important in some use cases,
+ * but even then it is generally unnecessary to consider idle duration values
+ * greater than the time till the next timer event, referred to as the sleep
+ * length in what follows, because the closest timer will ultimately wake up the
+ * CPU anyway unless it is woken up earlier.
*
- * Thus this governor estimates whether or not the prospective idle duration of
- * a CPU is likely to be significantly shorter than the sleep length and selects
- * an idle state for it accordingly.
+ * However, since obtaining the sleep length may be costly, the governor first
+ * checks if it can select a shallow idle state using wakeup pattern information
+ * from recent times, in which case it can do without knowing the sleep length
+ * at all. For this purpose, it counts CPU wakeup events and looks for an idle
+ * state whose target residency has not exceeded the idle duration (measured
+ * after wakeup) in the majority of relevant recent cases. If the target
+ * residency of that state is small enough, it may be used right away and the
+ * sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU
@@ -39,7 +41,11 @@
* idle state 2, the third bin spans from the target residency of idle state 2
* up to, but not including, the target residency of idle state 3 and so on.
* The last bin spans from the target residency of the deepest idle state
- * supplied by the driver to infinity.
+ * supplied by the driver to the scheduler tick period length or to infinity if
+ * the tick period length is less than the target residency of that state. In
+ * the latter case, the governor also counts events with the measured idle
+ * duration between the tick period length and the target residency of the
+ * deepest idle state.
*
* Two metrics called "hits" and "intercepts" are associated with each bin.
* They are updated every time before selecting an idle state for the given CPU
@@ -49,47 +55,46 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
- * situations in which the measured idle duration is so much shorter than the
- * sleep length that the bin it falls into corresponds to an idle state
- * shallower than the one whose bin is fallen into by the sleep length (these
- * situations are referred to as "intercepts" below).
+ * non-timer wakeup events for which the measured idle duration falls into a bin
+ * that corresponds to an idle state shallower than the one whose bin is fallen
+ * into by the sleep length (these events are also referred to as "intercepts"
+ * below).
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
- * 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 2 sums as
- * follows:
+ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
+ * compute 2 sums as follows:
*
- * - The sum of the "hits" and "intercepts" metrics for the candidate state
- * and all of the deeper idle states (it represents the cases in which the
- * CPU was idle long enough to avoid being intercepted if the sleep length
- * had been equal to the current one).
+ * - The sum of the "hits" metric for all of the idle states shallower than
+ * the candidate one (it represents the cases in which the CPU was likely
+ * woken up by a timer).
*
- * - The sum of the "intercepts" metrics for all of the idle states shallower
- * than the candidate one (it represents the cases in which the CPU was not
- * idle long enough to avoid being intercepted if the sleep length had been
- * equal to the current one).
+ * - The sum of the "intercepts" metric for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was
+ * likely woken up by a non-timer wakeup source).
*
- * 2. If the second sum is greater than the first one the CPU is likely to wake
- * up early, so look for an alternative idle state to select.
+ * 2. If the second sum computed in step 1 is greater than a half of the sum of
+ *    both metrics for the candidate state bin and all subsequent bins (if any),
+ * a shallower idle state is likely to be more suitable, so look for it.
*
- * - Traverse the idle states shallower than the candidate one in the
+ * - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
- * - If each of these sums that needs to be taken into account (because the
- * check related to it has indicated that the CPU is likely to wake up
- * early) is greater than a half of the corresponding sum computed in step
- * 1 (which means that the target residency of the state in question had
- * not exceeded the idle duration in over a half of the relevant cases),
- * select the given idle state instead of the candidate one.
+ * - If this sum is greater than a half of the second sum computed in step 1,
+ * use the given idle state as the new candidate one.
*
- * 3. By default, select the candidate state.
+ * 3. If the current candidate state is state 0 or its target residency is short
+ * enough, return it and prevent the scheduler tick from being stopped.
+ *
+ * 4. Obtain the sleep length value and check if it is below the target
+ * residency of the current candidate state, in which case a new shallower
+ * candidate state needs to be found, so look for it.
*/
#include <linux/cpuidle.h>
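
Reviewer note: the rewritten description above condenses to a few lines of arithmetic. A rough sketch of steps 1-2, under the simplifying assumptions that every state is enabled and that the step-2 threshold is folded into a single shallower-bins test (demo_teo_select() is illustrative only, not the governor's code):

	static int demo_teo_select(const unsigned int *hits,
				   const unsigned int *intercepts,
				   int candidate)
	{
		unsigned int hit_sum = 0, icpt_sum = 0, tail = 0;
		int i, best = candidate;

		/* Step 1: sums over all bins shallower than the candidate. */
		for (i = 0; i < candidate; i++) {
			hit_sum += hits[i];
			icpt_sum += intercepts[i];
		}

		/* Step 2 (simplified): keep the candidate unless intercepts dominate. */
		if (2 * icpt_sum <= hit_sum + icpt_sum)
			return best;

		/* Walk the shallower states, deepest first. */
		for (i = candidate - 1; i >= 0; i--) {
			tail += intercepts[i];	/* intercepts in bins i..candidate-1 */
			if (2 * tail > icpt_sum)	/* >50% of step-1 intercepts */
				best = i;
		}
		return best;
	}
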
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index af018afd9cd7..2e87ca0e292a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -249,7 +249,7 @@ static struct file *open_file_as_root(const char *filename, int flags, umode_t m
fp = file_open_root(&root, filename, flags, mode);
path_put(&root);
- revert_creds(old_cred);
+ put_cred(revert_creds(old_cred));
return fp;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 06f7b43a6f78..b0a39761847a 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -303,14 +303,6 @@ config EDAC_PASEMI
Support for error detection and correction on PA Semi
PWRficient.
-config EDAC_CELL
- tristate "Cell Broadband Engine memory controller"
- depends on PPC_CELL_COMMON
- help
- Support for error detection and correction on the
- Cell Broadband Engine internal memory controller
- on platform without a hypervisor
-
config EDAC_CPC925
tristate "IBM CPC925 Memory Controller (PPC970FX)"
depends on PPC64
@@ -546,5 +538,13 @@ config EDAC_VERSAL
Support injecting both correctable and uncorrectable errors
for debugging purposes.
+config EDAC_LOONGSON
+ tristate "Loongson Memory Controller"
+ depends on LOONGARCH && ACPI
+ help
+ Support for error detection and correction on the Loongson
+ family memory controller. This driver reports single bit
+ errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
+ are compatible.
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9cf19d8d13d..89789ba8275f 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -62,8 +62,6 @@ obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
-obj-$(CONFIG_EDAC_CELL) += cell_edac.o
-
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
@@ -86,3 +84,4 @@ obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
+obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
deleted file mode 100644
index c2420e2287ff..000000000000
--- a/drivers/edac/cell_edac.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Cell MIC driver for ECC counting
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-#undef DEBUG
-
-#include <linux/edac.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/stop_machine.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "edac_module.h"
-
-struct cell_edac_priv
-{
- struct cbe_mic_tm_regs __iomem *regs;
- int node;
- int chanmask;
-#ifdef DEBUG
- u64 prev_fir;
-#endif
-};
-
-static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset, syndrome;
-
- dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
- syndrome = (ar & 0x000000001fe00000ul) >> 21;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- csrow->first_page + pfn, offset, syndrome,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset;
-
- dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- csrow->first_page + pfn, offset, 0,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_check(struct mem_ctl_info *mci)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- u64 fir, addreg, clear = 0;
-
- fir = in_be64(&priv->regs->mic_fir);
-#ifdef DEBUG
- if (fir != priv->prev_fir) {
- dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
- priv->prev_fir = fir;
- }
-#endif
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
- cell_edac_count_ce(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
- cell_edac_count_ce(mci, 1, addreg);
- }
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
- cell_edac_count_ue(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
- cell_edac_count_ue(mci, 1, addreg);
- }
-
- /* The procedure for clearing FIR bits is a bit ... weird */
- if (clear) {
- fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
- fir |= CBE_MIC_FIR_ECC_RESET_MASK;
- fir &= ~clear;
- out_be64(&priv->regs->mic_fir, fir);
- (void)in_be64(&priv->regs->mic_fir);
-
- mb(); /* sync up */
-#ifdef DEBUG
- fir = in_be64(&priv->regs->mic_fir);
- dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
-#endif
- }
-}
-
-static void cell_edac_init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow = mci->csrows[0];
- struct dimm_info *dimm;
- struct cell_edac_priv *priv = mci->pvt_info;
- struct device_node *np;
- int j;
- u32 nr_pages;
-
- for_each_node_by_name(np, "memory") {
- struct resource r;
-
- /* We "know" that the Cell firmware only creates one entry
- * in the "memory" nodes. If that changes, this code will
- * need to be adapted.
- */
- if (of_address_to_resource(np, 0, &r))
- continue;
- if (of_node_to_nid(np) != priv->node)
- continue;
- csrow->first_page = r.start >> PAGE_SHIFT;
- nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + nr_pages - 1;
-
- for (j = 0; j < csrow->nr_channels; j++) {
- dimm = csrow->channels[j]->dimm;
- dimm->mtype = MEM_XDR;
- dimm->edac_mode = EDAC_SECDED;
- dimm->nr_pages = nr_pages / csrow->nr_channels;
- }
- dev_dbg(mci->pdev,
- "Initialized on node %d, chanmask=0x%x,"
- " first_page=0x%lx, nr_pages=0x%x\n",
- priv->node, priv->chanmask,
- csrow->first_page, nr_pages);
- break;
- }
- of_node_put(np);
-}
-
-static int cell_edac_probe(struct platform_device *pdev)
-{
- struct cbe_mic_tm_regs __iomem *regs;
- struct mem_ctl_info *mci;
- struct edac_mc_layer layers[2];
- struct cell_edac_priv *priv;
- u64 reg;
- int rc, chanmask, num_chans;
-
- regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
- if (regs == NULL)
- return -ENODEV;
-
- edac_op_state = EDAC_OPSTATE_POLL;
-
- /* Get channel population */
- reg = in_be64(&regs->mic_mnt_cfg);
- dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
- chanmask = 0;
- if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
- chanmask |= 0x1;
- if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
- chanmask |= 0x2;
- if (chanmask == 0) {
- dev_warn(&pdev->dev,
- "Yuck ! No channel populated ? Aborting !\n");
- return -ENODEV;
- }
- dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
- in_be64(&regs->mic_fir));
-
- /* Allocate & init EDAC MC data structure */
- num_chans = chanmask == 3 ? 2 : 1;
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = num_chans;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
- sizeof(struct cell_edac_priv));
- if (mci == NULL)
- return -ENOMEM;
- priv = mci->pvt_info;
- priv->regs = regs;
- priv->node = pdev->id;
- priv->chanmask = chanmask;
- mci->pdev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_XDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->mod_name = "cell_edac";
- mci->ctl_name = "MIC";
- mci->dev_name = dev_name(&pdev->dev);
- mci->edac_check = cell_edac_check;
- cell_edac_init_csrows(mci);
-
- /* Register with EDAC core */
- rc = edac_mc_add_mc(mci);
- if (rc) {
- dev_err(&pdev->dev, "failed to register with EDAC core\n");
- edac_mc_free(mci);
- return rc;
- }
-
- return 0;
-}
-
-static void cell_edac_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
- if (mci)
- edac_mc_free(mci);
-}
-
-static struct platform_driver cell_edac_driver = {
- .driver = {
- .name = "cbe-mic",
- },
- .probe = cell_edac_probe,
- .remove = cell_edac_remove,
-};
-
-static int __init cell_edac_init(void)
-{
- /* Sanity check registers data structure */
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_0) != 0xf8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_1) != 0x1b8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_config) != 0x218);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_fir) != 0x230);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_mnt_cfg) != 0x210);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_exc) != 0x208);
-
- return platform_driver_register(&cell_edac_driver);
-}
-
-static void __exit cell_edac_exit(void)
-{
- platform_driver_unregister(&cell_edac_driver);
-}
-
-module_init(cell_edac_init);
-module_exit(cell_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d6eed727b0cd..0959320fe51c 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -214,7 +214,7 @@ static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
unsigned int row, chn;
/*
- * Alocate and fill the csrow/channels structs
+ * Allocate and fill the csrow/channels structs
*/
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4200aec04831..0f338adf7d93 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -422,7 +422,7 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
return nr_pages;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
@@ -449,7 +449,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
return 0;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
int err, i;
@@ -636,7 +636,7 @@ static void dimm_release(struct device *dev)
*/
}
-/* Create a DIMM object under specifed memory controller device */
+/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
struct dimm_info *dimm)
{
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 51556c72a967..70dff342c1b9 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -948,6 +948,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_DARKMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -1010,7 +1011,7 @@ static struct notifier_block i10nm_mce_dec = {
static int __init i10nm_init(void)
{
- u8 mc = 0, src_id = 0, node_id = 0;
+ u8 mc = 0, src_id = 0;
const struct x86_cpu_id *id;
struct res_config *cfg;
const char *owner;
@@ -1070,19 +1071,14 @@ static int __init i10nm_init(void)
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
-
- edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
- d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
+ d->imc[i].src_id = src_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
d->imc[i].num_channels = cfg->hbm_chan_num;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 4b5a71f8739d..4a1bebc1ff14 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -338,11 +338,11 @@ struct i5000_pvt {
u16 mir0, mir1, mir2;
- u16 b0_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
- u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
+ u16 b0_ambpresent1; /* Branch 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
@@ -1210,7 +1210,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
&pvt->b0_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
- /* Only if we have 2 branchs (4 channels) */
+ /* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
diff --git a/drivers/edac/loongson_edac.c b/drivers/edac/loongson_edac.c
new file mode 100644
index 000000000000..38745800ed01
--- /dev/null
+++ b/drivers/edac/loongson_edac.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/edac.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "edac_module.h"
+
+#define ECC_CS_COUNT_REG 0x18
+
+struct loongson_edac_pvt {
+ void __iomem *ecc_base;
+
+ /*
+ * The ECC register in this controller records the number of errors
+	 * encountered since reset and cannot be zeroed, so in order to be able
+ * to report the error count at each check, this records the previous
+ * register state.
+ */
+ int last_ce_count;
+};
+
+static int read_ecc(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ u64 ecc;
+ int cs;
+
+ ecc = readq(pvt->ecc_base + ECC_CS_COUNT_REG);
+ /* cs0 -- cs3 */
+ cs = ecc & 0xff;
+ cs += (ecc >> 8) & 0xff;
+ cs += (ecc >> 16) & 0xff;
+ cs += (ecc >> 24) & 0xff;
+
+ return cs;
+}
+
+static void edac_check(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ int new, add;
+
+ new = read_ecc(mci);
+ add = new - pvt->last_ce_count;
+ pvt->last_ce_count = new;
+ if (add <= 0)
+ return;
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add,
+ 0, 0, 0, 0, 0, -1, "error", "");
+}
+
+static void dimm_config_init(struct mem_ctl_info *mci)
+{
+ struct dimm_info *dimm;
+ u32 size, npages;
+
+ /* size not used */
+ size = -1;
+ npages = MiB_TO_PAGES(size);
+
+ dimm = edac_get_dimm(mci, 0, 0, 0);
+ dimm->nr_pages = npages;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "MC#%uChannel#%u_DIMM#%u", mci->mc_idx, 0, 0);
+ dimm->grain = 8;
+}
+
+static void pvt_init(struct mem_ctl_info *mci, void __iomem *vbase)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+
+ pvt->ecc_base = vbase;
+ pvt->last_ce_count = read_ecc(mci);
+}
+
+static int edac_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ void __iomem *vbase;
+ int ret;
+
+ vbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vbase))
+ return PTR_ERR(vbase);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct loongson_edac_pvt));
+ if (mci == NULL)
+ return -ENOMEM;
+
+ mci->mc_idx = edac_device_alloc_index();
+ mci->mtype_cap = MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "loongson_edac.c";
+ mci->ctl_name = "loongson_edac_ctl";
+ mci->dev_name = "loongson_edac_dev";
+ mci->ctl_page_to_phys = NULL;
+ mci->pdev = &pdev->dev;
+ mci->error_desc.grain = 8;
+ mci->edac_check = edac_check;
+
+ pvt_init(mci, vbase);
+ dimm_config_init(mci);
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ edac_mc_free(mci);
+ return ret;
+ }
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+}
+
+static void edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+ if (mci)
+ edac_mc_free(mci);
+}
+
+static const struct acpi_device_id loongson_edac_acpi_match[] = {
+ {"LOON0010", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, loongson_edac_acpi_match);
+
+static struct platform_driver loongson_edac_driver = {
+ .probe = edac_probe,
+ .remove = edac_remove,
+ .driver = {
+ .name = "loongson-mc-edac",
+ .acpi_match_table = loongson_edac_acpi_match,
+ },
+};
+module_platform_driver(loongson_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhao Qunqin <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("EDAC driver for Loongson memory controller");
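
Reviewer note: ECC_CS_COUNT_REG packs one cumulative 8-bit correctable-error counter per chip select (cs0-cs3) into the low 32 bits, and read_ecc() simply sums the four fields. Worked example: ecc = 0x04030201 gives 1 + 2 + 3 + 4 = 10; if last_ce_count was 7, edac_check() reports add = 10 - 7 = 3 new errors and stores 10 for the next poll.
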
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 14cfd394b469..93f7c05faccc 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -600,7 +600,7 @@ static int __init skx_init(void)
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
- u8 mc = 0, src_id, node_id;
+ u8 mc = 0, src_id;
struct skx_dev *d;
edac_dbg(2, "\n");
@@ -650,15 +650,12 @@ static int __init skx_init(void)
rc = skx_get_src_id(d, 0xf0, &src_id);
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
- edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < SKX_NUM_IMC; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 6cf17af7d911..f7bd930e058f 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -19,6 +19,7 @@
#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
+#include <asm/uv/uv.h>
#include "edac_module.h"
#include "skx_common.h"
@@ -221,33 +222,51 @@ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
}
EXPORT_SYMBOL_GPL(skx_set_decode);
-int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
{
- u32 reg;
+ int node;
+ int cpu;
- if (pci_read_config_dword(d->util_all, off, &reg)) {
- skx_printk(KERN_ERR, "Failed to read src id\n");
- return -ENODEV;
+ node = pcibus_to_node(d->util_all->bus);
+ if (numa_valid_node(node)) {
+ for_each_cpu(cpu, cpumask_of_pcibus(d->util_all->bus)) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && cpu_to_node(cpu) == node) {
+ *id = c->topo.pkg_id;
+ return 0;
+ }
+ }
}
- *id = GET_BITFIELD(reg, 12, 14);
- return 0;
+ skx_printk(KERN_ERR, "Failed to get package ID from NUMA information\n");
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(skx_get_src_id);
-int skx_get_node_id(struct skx_dev *d, u8 *id)
+int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
u32 reg;
- if (pci_read_config_dword(d->util_all, 0xf4, &reg)) {
- skx_printk(KERN_ERR, "Failed to read node id\n");
+ /*
+ * The 3-bit source IDs in PCI configuration space registers are limited
+ * to 8 unique IDs, and each ID is local to a UPI/QPI domain.
+ *
+ * Source IDs cannot be used to map devices to sockets on UV systems
+ * because they can exceed 8 sockets and have multiple UPI/QPI domains
+ * with identical, repeating source IDs.
+ */
+ if (is_uv_system())
+ return skx_get_pkg_id(d, id);
+
+ if (pci_read_config_dword(d->util_all, off, &reg)) {
+ skx_printk(KERN_ERR, "Failed to read src id\n");
return -ENODEV;
}
- *id = GET_BITFIELD(reg, 0, 2);
+ *id = GET_BITFIELD(reg, 12, 14);
return 0;
}
-EXPORT_SYMBOL_GPL(skx_get_node_id);
+EXPORT_SYMBOL_GPL(skx_get_src_id);
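
Reviewer note: the source ID sits in bits 14:12 of the config register, hence the 3-bit/8-ID limit the new comment calls out. GET_BITFIELD is the usual Intel EDAC helper; assuming the common definition, the extraction amounts to:

	/* Sketch, assuming skx_common.h's usual GET_BITFIELD definition. */
	#define DEMO_GET_BITFIELD(v, lo, hi) \
		(((v) & GENMASK_ULL((hi), (lo))) >> (lo))

	/* src_id = DEMO_GET_BITFIELD(reg, 12, 14): 3 bits, values 0-7 only. */
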
static int get_width(u32 mtr)
{
@@ -507,7 +526,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
pvt->imc = imc;
mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name,
- imc->node_id, imc->lmc);
+ imc->src_id, imc->lmc);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail0;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 54bba8a62f72..b0845bdd4516 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -103,7 +103,7 @@ struct skx_dev {
bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
- u8 src_id, node_id;
+ u8 src_id;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -244,7 +244,6 @@ void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
-int skx_get_node_id(struct skx_dev *d, u8 *id);
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 6749d4dd6d64..7f4d78fd800e 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -237,9 +237,9 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
.label = "ls2k2000_gpio",
.mode = BIT_CTRL_MODE,
- .conf_offset = 0x84,
- .in_offset = 0x88,
- .out_offset = 0x80,
+ .conf_offset = 0x4,
+ .in_offset = 0x8,
+ .out_offset = 0x0,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index f387dad81f29..686ae3d11ba3 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -1027,6 +1027,30 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
dev->pdev = NULL;
}
+static void
+gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_sim_bank *bank;
+ struct gpio_sim_line *line;
+
+ /*
+ * The device only needs to depend on leaf line entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(bank, &dev->bank_list, siblings) {
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(
+ subsys, &line->group.cg_item));
+ else
+ configfs_undepend_item_unlocked(
+ &line->group.cg_item);
+ }
+ }
+}
+
static ssize_t
gpio_sim_device_config_live_store(struct config_item *item,
const char *page, size_t count)
@@ -1039,14 +1063,24 @@ gpio_sim_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_sim_device_lockup_configfs(dev, true);
- if (live == gpio_sim_device_is_live(dev))
- ret = -EPERM;
- else if (live)
- ret = gpio_sim_device_activate(dev);
- else
- gpio_sim_device_deactivate(dev);
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_sim_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_sim_device_activate(dev);
+ else
+ gpio_sim_device_deactivate(dev);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_sim_device_lockup_configfs(dev, false);
return ret ?: count;
}
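
Reviewer note: the final `live == !!ret` test is the comment's rule in compact form. The four cases, worked out:

	live == 1, activate succeeds:  ret == 0, !!ret == 0 -> 1 != 0, keep dependency
	live == 1, activate fails:     ret < 0,  !!ret == 1 -> 1 == 1, undepend
	live == 0, deactivate runs:    ret == 0, !!ret == 0 -> 0 == 0, undepend
	live == 0, already inactive:   ret == -EPERM, !!ret == 1 -> 0 != 1, keep (nothing was held)
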
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 91b6352c957c..e89f299f2140 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -1410,7 +1410,7 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
size_t num_entries = gpio_virtuser_get_lookup_count(dev);
struct gpio_virtuser_lookup_entry *entry;
struct gpio_virtuser_lookup *lookup;
- unsigned int i = 0;
+ unsigned int i = 0, idx;
lockdep_assert_held(&dev->lock);
@@ -1424,12 +1424,12 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return -ENOMEM;
list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ idx = 0;
list_for_each_entry(entry, &lookup->entry_list, siblings) {
- table->table[i] =
+ table->table[i++] =
GPIO_LOOKUP_IDX(entry->key,
entry->offset < 0 ? U16_MAX : entry->offset,
- lookup->con_id, i, entry->flags);
- i++;
+ lookup->con_id, idx++, entry->flags);
}
}
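
Reviewer note: the bug fixed here is that the GPIO_LOOKUP_IDX index kept running across con_ids: with two lookups of two entries each, the old code emitted indices 0, 1, 2, 3, so the second con_id's lines could never match at index 0. The new per-lookup `idx` restarts at 0 for each con_id while `i` keeps addressing the flat table.
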
@@ -1439,6 +1439,15 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return 0;
}
+static void
+gpio_virtuser_remove_lookup_table(struct gpio_virtuser_device *dev)
+{
+ gpiod_remove_lookup_table(dev->lookup_table);
+ kfree(dev->lookup_table->dev_id);
+ kfree(dev->lookup_table);
+ dev->lookup_table = NULL;
+}
+
static struct fwnode_handle *
gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev)
{
@@ -1487,10 +1496,8 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
pdevinfo.fwnode = swnode;
ret = gpio_virtuser_make_lookup_table(dev);
- if (ret) {
- fwnode_remove_software_node(swnode);
- return ret;
- }
+ if (ret)
+ goto err_remove_swnode;
reinit_completion(&dev->probe_completion);
dev->driver_bound = false;
@@ -1498,23 +1505,31 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
- fwnode_remove_software_node(swnode);
- return PTR_ERR(pdev);
+ goto err_remove_lookup_table;
}
wait_for_completion(&dev->probe_completion);
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
if (!dev->driver_bound) {
- platform_device_unregister(pdev);
- fwnode_remove_software_node(swnode);
- return -ENXIO;
+ ret = -ENXIO;
+ goto err_unregister_pdev;
}
dev->pdev = pdev;
return 0;
+
+err_unregister_pdev:
+ platform_device_unregister(pdev);
+err_remove_lookup_table:
+ gpio_virtuser_remove_lookup_table(dev);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
+
+ return ret;
}
static void
@@ -1526,10 +1541,33 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
swnode = dev_fwnode(&dev->pdev->dev);
platform_device_unregister(dev->pdev);
+ gpio_virtuser_remove_lookup_table(dev);
fwnode_remove_software_node(swnode);
dev->pdev = NULL;
- gpiod_remove_lookup_table(dev->lookup_table);
- kfree(dev->lookup_table);
+}
+
+static void
+gpio_virtuser_device_lockup_configfs(struct gpio_virtuser_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_virtuser_lookup_entry *entry;
+ struct gpio_virtuser_lookup *lookup;
+
+ /*
+ * The device only needs to depend on leaf lookup entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ list_for_each_entry(entry, &lookup->entry_list, siblings) {
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(
+ subsys, &entry->group.cg_item));
+ else
+ configfs_undepend_item_unlocked(
+ &entry->group.cg_item);
+ }
+ }
}
static ssize_t
@@ -1544,15 +1582,24 @@ gpio_virtuser_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_virtuser_device_lockup_configfs(dev, true);
- if (live == gpio_virtuser_device_is_live(dev))
- return -EPERM;
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_virtuser_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_virtuser_device_activate(dev);
+ else
+ gpio_virtuser_device_deactivate(dev);
+ }
- if (live)
- ret = gpio_virtuser_device_activate(dev);
- else
- gpio_virtuser_device_deactivate(dev);
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_virtuser_device_lockup_configfs(dev, false);
return ret ?: count;
}
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index c6a8f2c82680..792d94c49077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -65,7 +65,7 @@ struct xgpio_instance {
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
- spinlock_t gpio_lock; /* For serializing operations */
+ raw_spinlock_t gpio_lock; /* For serializing operations */
int irq;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
@@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
__assign_bit(bit, chip->state, val);
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
@@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
__set_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
__assign_bit(bit, chip->state, val);
@@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
__clear_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
int bit = xgpio_to_bit(chip, irq_offset);
u32 mask = BIT(bit / 32), temp;
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
@@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
gpiochip_disable_irq(&chip->gc, irq_offset);
}
@@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
gpiochip_enable_irq(&chip->gc, irq_offset);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__set_bit(bit, chip->enable);
@@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
- spin_lock(&chip->gpio_lock);
+ raw_spin_lock(&chip->gpio_lock);
xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
@@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
bitmap_copy(chip->last_irq_read, all, 64);
bitmap_or(all, rising, falling, 64);
- spin_unlock(&chip->gpio_lock);
+ raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
@@ -620,7 +620,7 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_set(chip->hw_map, 0, width[0]);
bitmap_set(chip->hw_map, 32, width[1]);
- spin_lock_init(&chip->gpio_lock);
+ raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3afcd1e8aa54..c4e733c2e75e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -715,8 +715,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 69a6b6dba0a5..1d155463d044 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1989,6 +1989,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2007,15 +2008,19 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2031,9 +2036,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
}
/*
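Both enforce-isolation hunks apply the same fix: the decision is recorded while enforce_isolation_mutex is held, but amdgpu_gfx_kfd_sch_ctrl(), which takes further locks of its own, is only called after the mutex is dropped, removing a lock-nesting dependency. A sketch of the "decide under the lock, act after it" pattern, with illustrative names:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_ctx {
        struct mutex lock;
        bool enforce_isolation[4];
    };

    /* May take other locks or sleep; supplied elsewhere. */
    void demo_sch_ctrl(struct demo_ctx *ctx, u32 idx, bool enable);

    static void demo_ring_begin_use(struct demo_ctx *ctx, u32 idx)
    {
        bool sched_work = false;

        mutex_lock(&ctx->lock);
        if (ctx->enforce_isolation[idx])
            sched_work = true;          /* only record the decision here */
        mutex_unlock(&ctx->lock);

        if (sched_work)
            demo_sch_ctrl(ctx, idx, false);  /* act without ctx->lock held */
    }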
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8b512dc28df8..071f187f5e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -193,8 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
need_pipe_sync = true;
if (tmp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7d26a962f811..ff5e52025266 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -567,7 +567,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
else
remaining_size -= size;
}
- mutex_unlock(&mgr->lock);
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
@@ -584,6 +583,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
(u64)vres->base.size,
&vres->blocks);
}
+ mutex_unlock(&mgr->lock);
vres->base.start = 0;
size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
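The amdgpu_vram_mgr hunk is a pure lock-scope fix: the contiguous-DCC trim path still walks and modifies the buddy allocator's block list, so mgr->lock must not be dropped until after it. In outline, with illustrative names:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_mgr { struct mutex lock; };
    struct demo_res { struct list_head blocks; };

    int demo_alloc_blocks(struct demo_mgr *mgr, u64 size,
                          struct list_head *blocks);       /* supplied elsewhere */
    void demo_trim_to_dcc(struct demo_mgr *mgr, struct list_head *blocks);

    static int demo_vram_new(struct demo_mgr *mgr, struct demo_res *vres,
                             u64 size, bool contiguous)
    {
        int r;

        mutex_lock(&mgr->lock);
        r = demo_alloc_blocks(mgr, size, &vres->blocks);
        if (!r && contiguous)
            demo_trim_to_dcc(mgr, &vres->blocks);  /* still touches mgr state */
        mutex_unlock(&mgr->lock);   /* moved past the last allocator access */

        return r;
    }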
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 312dfa84f29f..a8abc3091801 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -350,10 +350,27 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
uint32_t flags = pdd->process->dbg_flags;
+ struct amdgpu_device *adev = pdd->dev->adev;
+ int r;
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
return 0;
+ if (!pdd->proc_ctx_cpu_ptr) {
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate process context bo\n");
+ return r;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
pdd->watch_points, flags, sq_trap_en);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d0ee173acf82..edfe0b4788f4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1160,7 +1160,8 @@ static void kfd_process_wq_release(struct work_struct *work)
*/
synchronize_rcu();
ef = rcu_access_pointer(p->ef);
- dma_fence_signal(ef);
+ if (ef)
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 48be917e7bc5..5f216d626cbb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8400,16 +8400,6 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
{
- /*
- * We have no guarantee that the frontend index maps to the same
- * backend index - some even map to more than one.
- *
- * TODO: Use a different interrupt or check DC itself for the mapping.
- */
- int irq_type =
- amdgpu_display_crtc_idx_to_irq_type(
- adev,
- acrtc->crtc_id);
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8435,28 +8425,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
-
- amdgpu_irq_get(
- adev,
- &adev->pageflip_irq,
- irq_type);
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_get(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
} else {
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_put(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
- amdgpu_irq_put(
- adev,
- &adev->pageflip_irq,
- irq_type);
drm_crtc_vblank_off(&acrtc->base);
}
}
@@ -8927,6 +8896,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
@@ -8953,14 +8923,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
- if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
- if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -9131,7 +9102,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9297,11 +9268,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
}
mutex_unlock(&dm->dc_lock);
@@ -11155,8 +11126,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
int plane_src_w, plane_src_h;
dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
- *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
- *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+ *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
+ *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
}
/*
@@ -11410,6 +11381,25 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_plane_state(state, plane);
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11607,10 +11597,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11905,9 +11891,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
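The atomic-check hunks stop folding a memory-type change on any plane into the global lock_and_validation_needed flag and instead evaluate it per CRTC when an async flip is requested: flipping between VRAM- and GTT-backed framebuffers needs the full validation that async commits skip. A sketch of the per-plane walk such a helper performs, written with the old/new state accessors and illustrative names:

    #include <drm/drm_atomic.h>
    #include <drm/drm_framebuffer.h>
    #include <drm/drm_plane.h>

    /* VRAM or GTT placement of a framebuffer; supplied elsewhere. */
    unsigned int demo_mem_type(struct drm_framebuffer *fb);

    static bool demo_mem_type_changed(struct drm_device *dev,
                                      struct drm_atomic_state *state,
                                      struct drm_crtc_state *crtc_state)
    {
        struct drm_plane *plane;

        drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
            struct drm_plane_state *old_s =
                drm_atomic_get_old_plane_state(state, plane);
            struct drm_plane_state *new_s =
                drm_atomic_get_new_plane_state(state, plane);

            if (old_s && new_s && old_s->fb && new_s->fb &&
                demo_mem_type(old_s->fb) != demo_mem_type(new_s->fb))
                return true;    /* placement changed: no async flip */
        }

        return false;
    }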
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6464a8378387..2227cd8e4a89 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -697,6 +697,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index f936a35fa9eb..0f6ba7b1575d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,6 +30,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -224,6 +225,10 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
@@ -357,6 +362,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -367,16 +383,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
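The CRC hunks fix an ordering problem: the vblank reference is now taken before the secure-display window reset and the CRC source programming, so the interrupt needed to read CRCs is guaranteed alive by the time generation can start; PSR1 panels are additionally waited out of self-refresh first, since CRC capture requires live scanout. The acquire-before-configure shape, in outline with an illustrative helper:

    #include <drm/drm_vblank.h>

    int demo_configure_crc(struct drm_crtc *crtc, int source);  /* illustrative */

    static int demo_set_crc_source(struct drm_crtc *crtc, int source)
    {
        int ret;

        ret = drm_crtc_vblank_get(crtc);    /* hold vblank irq for CRC reads */
        if (ret)
            return ret;

        ret = demo_configure_crc(crtc, source);
        if (ret)
            drm_crtc_vblank_put(crtc);      /* release on failure */

        return ret;
    }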
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 64a041c2af05..36a830a7440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -142,7 +142,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
} else if (link->psr_settings.psr_feature_enabled &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 6a97bb2d9160..8b7c1a428f64 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3606,7 +3606,7 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e4359490613..1080075ccb17 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -155,6 +155,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +193,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -504,9 +513,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -1819,9 +1826,18 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
+ /* Get last DP link BW capability. Mode shall be supported by Legacy peer */
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 && stream_kbps > end_link_bw) {
DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1835,11 +1851,15 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (immediate_upstream_port) {
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+ /* For topology LCT 1 case - only one mstb*/
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..45858bf1523d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -201,14 +201,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +250,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (link == NULL)
+ return false;
+
+ for (retry_count = 0; retry_count <= 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count == 1000)
+ return false;
+
+ return true;
+}
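amdgpu_dm_psr_wait_disable() is a classic bounded poll: re-read the PSR state, back off in 500 µs steps, and give up after roughly 500 ms. A standalone sketch of the same idea, with illustrative names and the loop bound written so exhaustion is detected unambiguously:

    #include <linux/delay.h>

    struct demo_link;
    int demo_read_psr_state(struct demo_link *link);    /* supplied elsewhere */

    #define DEMO_PSR_IDLE   0
    #define DEMO_POLL_US    500
    #define DEMO_MAX_TRIES  1000    /* 1000 * 500 us ~= 500 ms budget */

    static bool demo_wait_psr_idle(struct demo_link *link)
    {
        int i;

        if (!link)
            return false;

        for (i = 0; i < DEMO_MAX_TRIES; i++) {
            if (demo_read_psr_state(link) == DEMO_PSR_IDLE)
                return true;    /* exited within the budget */
            udelay(DEMO_POLL_US);
        }

        return false;           /* still in PSR after ~500 ms */
    }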
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..e2366321a3c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -34,8 +34,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 49fe7dcf9372..2723558049d6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -4510,7 +4510,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
struct pipe_split_policy_backup policy;
struct dc_state *intermediate_context;
struct dc_state *old_current_state = dc->current_state;
- struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
int surface_count;
/*
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index e006f816ff2f..1b2cce127981 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -483,9 +483,9 @@ bool dc_state_add_plane(
if (stream_status == NULL) {
dm_error("Existing stream not found; failed to attach surface!\n");
goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ } else if (stream_status->plane_count == MAX_SURFACES) {
dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
+ plane_state, MAX_SURFACES);
goto out;
} else if (!otg_master_pipe) {
goto out;
@@ -600,7 +600,7 @@ bool dc_state_rem_all_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == stream) {
@@ -875,7 +875,7 @@ bool dc_state_rem_all_phantom_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == phantom_stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e9b9126c0401..08c5a315b3a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -57,7 +57,7 @@ struct dmub_notification;
#define DC_VER "3.2.310"
-#define MAX_SURFACES 3
+#define MAX_SURFACES 4
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
@@ -1398,7 +1398,7 @@ struct dc_scratch_space {
* store current value in plane states so we can still recover
* a valid current state during dc update.
*/
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state plane_states[MAX_SURFACES];
struct dc_stream_state stream_state;
};
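The MAX_SURFACE_NUM to MAX_SURFACES conversions across dc.c, dc_state.c, dc.h, dc_stream.h and dml2_mall_phantom.c retire a duplicate sizing macro: plane-state arrays were declared with one constant (MAX_SURFACES, now raised to 4) while related code used another (MAX_SURFACE_NUM, 6), a mismatch that invites out-of-bounds indexing. With a single macro, the bound check and the array length cannot drift apart. Illustratively:

    #include <linux/types.h>

    #define MAX_SURFACES 4      /* one constant sizes and bounds everything */

    struct demo_plane;

    struct demo_stream_status {
        int plane_count;        /* invariant: 0 <= plane_count <= MAX_SURFACES */
        struct demo_plane *plane_states[MAX_SURFACES];
    };

    static bool demo_add_plane(struct demo_stream_status *s,
                               struct demo_plane *p)
    {
        if (s->plane_count == MAX_SURFACES)
            return false;       /* full: the check matches the array size */

        s->plane_states[s->plane_count++] = p;
        return true;
    }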
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 413970588a26..860506c6bda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,7 +56,7 @@ struct dc_stream_status {
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
- struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state *plane_states[MAX_SURFACES];
bool is_abm_supported;
struct mall_stream_config mall_stream_config;
bool fpo_in_use;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index edf4df1d03b5..9466b63644d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -76,7 +76,6 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define MAX_SURFACE_NUM 6
#define NUM_PIXEL_FORMATS 10
enum tiling_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..5bb8b78bf250 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,7 +63,8 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_1)
return true;
if (link->replay_settings.replay_feature_enabled)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index beed7adbbd43..47d785204f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 072bd0539605..6b2ab4ec2b5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e)
static inline double dml_ceil(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(a, granularity);
}
static inline double dml_floor(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(a, granularity);
}
@@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f)
static inline double dml_ceil_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(x, granularity);
}
static inline double dml_floor_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(x, granularity);
}
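The dml_inline_defs.h guards close a divide-by-zero in the bandwidth formulas: rounding a value to a granularity of zero now yields zero instead of trapping deep inside the mode calculations. A minimal user-space model of the guarded helpers, with dcn_bw_ceil2()/dcn_bw_floor2() approximated via libm:

    #include <math.h>

    static double demo_ceil(double a, double granularity)
    {
        if (granularity == 0)
            return 0;   /* avoid dividing by zero below */
        return ceil(a / granularity) * granularity;
    }

    static double demo_floor(double a, double granularity)
    {
        if (granularity == 0)
            return 0;
        return floor(a / granularity) * granularity;
    }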
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 601320b1be81..ecfa3c898e09 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -579,8 +579,8 @@ static void CalculateBytePerPixelAndBlockSizes(
{
*BytePerPixelDETY = 0;
*BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 1;
if (SourcePixelFormat == dml2_444_64) {
*BytePerPixelDETY = 8;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 3d29169dd6bb..6b3b8803e0ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -813,7 +813,7 @@ static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struc
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index fa11f075d1f9..d9136d252ee1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1398,12 +1398,12 @@ void dcn32_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_power_control &&
- !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index ae3563d71fa0..356d9422b411 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -303,5 +303,7 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value);
+
+void smu_v13_0_interrupt_work(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 2bfea740dace..2d1e7ebd1bac 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1320,11 +1320,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
-static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
+void smu_v13_0_interrupt_work(struct smu_context *smu)
{
- return smu_cmn_send_smc_msg(smu,
- SMU_MSG_ReenableAcDcInterrupt,
- NULL);
+ smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
@@ -1377,12 +1377,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
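The smu_v13_0 hunks stop sending SMU_MSG_ReenableAcDcInterrupt directly from the interrupt handler; the handler now only queues smu->interrupt_work, and the potentially sleeping SMC message goes out from process context, with both the 13.0.0 and 13.0.7 tables wiring up the new .interrupt_work callback. The deferral pattern, with illustrative names:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct demo_smu {
        struct work_struct interrupt_work;  /* INIT_WORK() at init time */
    };

    void demo_send_smc_msg(struct demo_smu *smu);  /* may sleep; supplied elsewhere */

    static void demo_interrupt_work(struct work_struct *work)
    {
        struct demo_smu *smu = container_of(work, struct demo_smu,
                                            interrupt_work);

        demo_send_smc_msg(smu);     /* process context: sleeping is fine */
    }

    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
        struct demo_smu *smu = data;

        schedule_work(&smu->interrupt_work);    /* cheap and atomic-safe */
        return IRQ_HANDLED;
    }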
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 3aa705aae4c0..0551a3311217 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2643,11 +2643,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
&backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
+ if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
+ ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500))) {
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
@@ -3219,6 +3220,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index aabb94796005..55ef18517b0f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2797,6 +2797,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 61f4a38e7d2b..8f786592143b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
ADV7511_AUDIO_CFG3_LEN_MASK, len);
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
- regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ /* send current Audio infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
+
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
return 0;
}
@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
/* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), 0);
+
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index eb5919b38263..a13b3d8ab6ac 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
return ret;
ret = adv7511_init_regulators(adv7511);
- if (ret)
- return dev_err_probe(dev, ret, "failed to init regulators\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init regulators\n");
+ goto err_of_node_put;
+ }
/*
* The power down GPIO is optional. If present, toggle it from active to
@@ -1363,6 +1365,8 @@ err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
adv7511_uninit_regulators(adv7511);
+err_of_node_put:
+ of_node_put(adv7511->host_node);
return ret;
}
@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ of_node_put(adv7511->host_node);
+
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 4481489aaf5e..122ad91e8a32 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
- if (num_lanes < 1 || num_lanes > 4)
+ if (num_lanes < 2 || num_lanes > 4)
return -EINVAL;
adv->num_dsi_lanes = num_lanes;
@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
if (!adv->host_node)
return -ENODEV;
- of_node_put(adv->host_node);
-
adv->use_timing_gen = !of_property_read_bool(np,
"adi,disable-timing-generator");
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cbabd4e20d3e..5f138a5692c7 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -845,8 +845,8 @@ static int it6263_probe(struct i2c_client *client)
it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
LVDS_INPUT_CTRL_I2C_ADDR);
if (IS_ERR(it->lvds_i2c))
- dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
- "failed to allocate I2C device for LVDS\n");
+ return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
+ "failed to allocate I2C device for LVDS\n");
it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
&it6263_lvds_regmap_config);
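The it6263 fix adds a missing return: dev_err_probe() logs (or records the probe-deferral reason) and hands back the error code, but only if the caller actually returns it; without the return, probe continued with an ERR_PTR() value in it->lvds_i2c. The intended idiom, sketched with an illustrative helper:

    #include <linux/device.h>
    #include <linux/err.h>

    void *demo_acquire(struct device *dev);     /* illustrative */

    static int demo_probe_step(struct device *dev)
    {
        void *res = demo_acquire(dev);

        if (IS_ERR(res))
            return dev_err_probe(dev, PTR_ERR(res),
                                 "failed to acquire resource\n");

        return 0;
    }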
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 320c297008aa..3a5a4f92c979 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -459,7 +459,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- if (bridge_connector->bridge_hdmi)
+ if (bridge_connector->bridge_hdmi) {
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
+
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -468,10 +471,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc,
supported_formats,
max_bpc);
- else
+ } else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
+ }
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c6af46dd02bf..241a384ebce3 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -207,6 +207,10 @@ void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
+ BIT(HDMI_COLORSPACE_YUV420));
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index fc35f47e2849..ca7f43c8d6f1 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -507,6 +507,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;
+ if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
+ return -EINVAL;
+
if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
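Three hunks cooperate here: drm_bridge_add() derives an HDMI bridge's ycbcr_420_allowed from its supported_formats bitmask, drm_bridge_connector_init() masks YUV420 out of the formats when the connector does not allow it, and drmm_connector_hdmi_init() rejects any caller whose two capability flags disagree. The enforced invariant, as an illustrative check:

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/hdmi.h>
    #include <linux/types.h>

    /* "YUV420 advertised" and "YUV420 allowed" must agree. */
    static int demo_check_yuv420(bool ycbcr_420_allowed,
                                 unsigned long supported_formats)
    {
        bool fmt_420 = supported_formats & BIT(HDMI_COLORSPACE_YUV420);

        if (ycbcr_420_allowed != fmt_420)
            return -EINVAL;     /* capability flags out of sync */

        return 0;
    }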
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 71dc659228ab..0c7aee13495a 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -2115,14 +2115,6 @@ static void intel_c10_pll_program(struct intel_display *display,
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
- /* Custom width needs to be programmed to 0 for both the phy lanes */
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
- C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
- MB_WRITE_COMMITTED);
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
- 0, C10_VDR_CTRL_UPDATE_CFG,
- MB_WRITE_COMMITTED);
-
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
@@ -2132,6 +2124,10 @@ static void intel_c10_pll_program(struct intel_display *display,
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ /* Custom width needs to be programmed to 0 for both the phy lanes */
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+ MB_WRITE_COMMITTED);
intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 6a7060889f40..223c4218c019 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1694,7 +1694,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
+ if (IS_ALIGNED(fb->base.offsets[i], 64))
continue;
else
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index f57e4dba2873..8fee26d791f4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1158,9 +1158,15 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- intel_hdcp_update_value(connector,
- DRM_MODE_CONTENT_PROTECTION_DESIRED,
- true);
+ ret = intel_hdcp1_enable(connector);
+ if (ret) {
+ drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index c864d101faf9..9378d5901c49 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
- if (GRAPHICS_VER(gt->i915) >= 12) {
+ if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
if (HAS_ENGINE(gt, _VCS(i)))
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index f496e6cfdfe0..e47debd60619 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -14,9 +14,6 @@ config DRM_MEDIATEK
select DRM_BRIDGE_CONNECTOR
select DRM_MIPI_DSI
select DRM_PANEL
- select MEMORY
- select MTK_SMI
- select PHY_MTK_MIPI_DSI
select VIDEOMODE_HELPERS
help
Choose this option if you have a Mediatek SoC.
@@ -27,7 +24,6 @@ config DRM_MEDIATEK
config DRM_MEDIATEK_DP
tristate "DRM DPTX Support for MediaTek SoCs"
depends on DRM_MEDIATEK
- select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_DP_AUX_BUS
@@ -38,6 +34,5 @@ config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
select SND_SOC_HDMI_CODEC if SND_SOC
- select PHY_MTK_HDMI
help
DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index eb0e1233ad04..5674f5707cca 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -112,6 +112,11 @@ static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
drm_crtc_handle_vblank(&mtk_crtc->base);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (mtk_crtc->cmdq_client.chan)
+ return;
+#endif
+
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_crtc_finish_page_flip(mtk_crtc);
@@ -284,10 +289,8 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
state = to_mtk_crtc_state(mtk_crtc->base.state);
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
- if (mtk_crtc->config_updating) {
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ if (mtk_crtc->config_updating)
goto ddp_cmdq_cb_out;
- }
state->pending_config = false;
@@ -315,10 +318,15 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
mtk_crtc->pending_async_planes = false;
}
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
-
ddp_cmdq_cb_out:
+ if (mtk_crtc->pending_needs_vblank) {
+ mtk_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc->pending_needs_vblank = false;
+ }
+
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
@@ -606,13 +614,18 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
*/
mtk_crtc->cmdq_vblank_cnt = 3;
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = false;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
-#endif
+#else
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+#endif
mutex_unlock(&mtk_crtc->hw_lock);
}
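The mtk_crtc hunks rework page-flip completion when CMDQ drives the hardware: the vblank interrupt no longer completes flips on CMDQ-backed CRTCs, the CMDQ callback completes them instead, and it does so while still holding config_lock so the pending flags are tested and cleared atomically; config_updating is likewise cleared before the CMDQ packet is sent. The completion path, in outline with illustrative names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_crtc {
        spinlock_t config_lock;
        bool pending_needs_vblank;
    };

    void demo_finish_page_flip(struct demo_crtc *crtc);  /* emits the event */

    static void demo_cmdq_cb(struct demo_crtc *crtc)
    {
        unsigned long flags;

        spin_lock_irqsave(&crtc->config_lock, flags);
        if (crtc->pending_needs_vblank) {   /* flag and event move together */
            demo_finish_page_flip(crtc);    /* sends the flip event once */
            crtc->pending_needs_vblank = false;
        }
        spin_unlock_irqrestore(&crtc->config_lock, flags);
    }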
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index f731d4fbe8b6..df82cea4bb79 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -460,6 +460,29 @@ static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
}
}
+static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
+ unsigned int idx,
+ struct mtk_plane_pending_state *pending,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ unsigned int pitch_msb = pending->pitch >> 16;
+ unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int hdr_addr = pending->hdr_addr;
+
+ if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_ADDR(ovl, idx));
+ mtk_ddp_write_relaxed(cmdq_pkt,
+ OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_PITCH(ovl, idx));
+ } else {
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ }
+}
+
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
@@ -467,25 +490,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
- unsigned int hdr_addr = pending->hdr_addr;
- unsigned int pitch = pending->pitch;
- unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
unsigned int fmt = pending->format;
+ unsigned int rotation = pending->rotation;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
- bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
- union overlay_pitch {
- struct split_pitch {
- u16 lsb;
- u16 msb;
- } split_pitch;
- u32 pitch;
- } overlay_pitch;
-
- overlay_pitch.pitch = pitch;
if (!pending->enable) {
mtk_ovl_layer_off(dev, idx, cmdq_pkt);
@@ -513,22 +525,30 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
ignore_pixel_alpha = OVL_CONST_BLEND;
}
- if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ /*
+ * Treat rotate 180 as flip x + flip y, and XOR the original rotation value
+ * to flip x + flip y to support both at the same time.
+ */
+ if (rotation & DRM_MODE_ROTATE_180)
+ rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+
+ if (rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
addr += (pending->height - 1) * pending->pitch;
}
- if (pending->rotation & DRM_MODE_REFLECT_X) {
+ if (rotation & DRM_MODE_REFLECT_X) {
con |= OVL_CON_HORZ_FLIP;
addr += pending->pitch - 1;
}
if (ovl->data->supports_afbc)
- mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, is_afbc);
+ mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
+ pending->modifier != DRM_FORMAT_MOD_LINEAR);
mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha,
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));
@@ -537,19 +557,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));
- if (is_afbc) {
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_ADDR(ovl, idx));
- mtk_ddp_write_relaxed(cmdq_pkt,
- OVL_PITCH_MSB_2ND_SUBBUF | overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_PITCH(ovl, idx));
- } else {
- mtk_ddp_write_relaxed(cmdq_pkt,
- overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- }
+ if (ovl->data->supports_afbc)
+ mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);
mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
mtk_ovl_layer_on(dev, idx, cmdq_pkt);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 36713c176cfc..3b0993abfdc3 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -543,18 +543,16 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
enum dp_pixelformat color_format)
{
u32 val;
-
- /* update MISC0 */
- mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
- color_format << DP_TEST_COLOR_FORMAT_SHIFT,
- DP_TEST_COLOR_FORMAT_MASK);
+ u32 misc0_color;
switch (color_format) {
case DP_PIXELFORMAT_YUV422:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
+ misc0_color = DP_COLOR_FORMAT_YCbCr422;
break;
case DP_PIXELFORMAT_RGB:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
+ misc0_color = DP_COLOR_FORMAT_RGB;
break;
default:
drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
@@ -562,6 +560,11 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
return -EINVAL;
}
+ /* update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ misc0_color,
+ DP_TEST_COLOR_FORMAT_MASK);
+
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
return 0;
@@ -2100,7 +2103,6 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
bool enabled = mtk_dp->enabled;
- u8 sink_count = 0;
if (!mtk_dp->train_info.cable_plugged_in)
return ret;
@@ -2115,8 +2117,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
* function, we just need to check the HPD connection to check
* whether we connect to a sink device.
*/
- drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
- if (DP_GET_SINK_COUNT(sink_count))
+
+ if (drm_dp_read_sink_count(&mtk_dp->aux) > 0)
ret = connector_status_connected;
if (!enabled)
@@ -2408,12 +2410,19 @@ mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
- if (rate < mode->clock * bpp / 8)
+ /*
+ * FEC overhead is approximately 2.4% from DP 1.4a spec 2.2.1.4.2.
+ * The down-spread amplitude shall either be disabled (0.0%) or up
+ * to 0.5% from 1.4a 3.5.2.6. Add up to approximately 3% total overhead.
+ *
+ * Because rate is already divided by 10,
+ * mode->clock does not need to be multiplied by 10.
+ */
+ if ((rate * 97 / 100) < (mode->clock * bpp / 8))
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -2454,10 +2463,9 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_info *display_info =
&conn_state->connector->display_info;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
*num_input_fmts = 0;
@@ -2466,8 +2474,8 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
* datarate of YUV422 and sink device supports YUV422, we output YUV422
* format. Use this condition, we can support more resolution.
*/
- if ((rate < (mode->clock * 24 / 8)) &&
- (rate > (mode->clock * 16 / 8)) &&
+ if (((rate * 97 / 100) < (mode->clock * 24 / 8)) &&
+ ((rate * 97 / 100) > (mode->clock * 16 / 8)) &&
(display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
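The mtk_dp hunks change bandwidth validation from the sink's maximum capabilities to the actually trained link (rate times lane count) and reserve roughly 3% of it, about 2.4% for FEC plus up to 0.5% down-spread, hence the factor 97/100, before comparing against the mode's payload. Since drm_dp_bw_code_to_link_rate() already reports the rate divided by ten, the mode clock needs no extra factor of ten. A standalone sketch of the arithmetic:

    #include <linux/types.h>

    /* Illustrative: rate is the per-lane link rate in the kernel's kHz
     * convention (which works out to kB/s of payload after 8b/10b),
     * clock is the pixel clock in kHz, bpp the bits per pixel. */
    static bool demo_mode_fits(u32 rate, u32 lanes, u32 clock, u32 bpp)
    {
        u64 available = (u64)rate * lanes * 97 / 100;   /* ~3% overhead */
        u64 required = (u64)clock * bpp / 8;

        return available >= required;
    }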
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0829ceb9967c..b037616d3342 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -373,11 +373,12 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
struct mtk_drm_private *temp_drm_priv;
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
+ struct device_node *node;
struct device *drm_dev;
unsigned int cnt = 0;
int i, j;
- for_each_child_of_node_scoped(phandle->parent, node) {
+ for_each_child_of_node(phandle->parent, node) {
struct platform_device *pdev;
of_id = of_match_node(mtk_drm_of_ids, node);
@@ -406,8 +407,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;
- if (cnt == MAX_CRTC)
+ if (cnt == MAX_CRTC) {
+ of_node_put(node);
break;
+ }
}
if (drm_priv->data->mmsys_dev_num == cnt) {
@@ -673,6 +676,8 @@ err_deinit:
err_free:
private->drm = NULL;
drm_dev_put(drm);
+ for (i = 0; i < private->data->mmsys_dev_num; i++)
+ private->all_drm_private[i]->drm = NULL;
return ret;
}
@@ -900,7 +905,7 @@ static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path
const unsigned int **out_path,
unsigned int *out_path_len)
{
- struct device_node *next, *prev, *vdo = dev->parent->of_node;
+ struct device_node *next = NULL, *prev, *vdo = dev->parent->of_node;
unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 };
unsigned int *final_ddp_path;
unsigned short int idx = 0;
@@ -1089,7 +1094,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
/* No devicetree graphs support: go with hardcoded paths if present */
dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id);
private->data = mtk_drm_data;
- };
+ }
private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num,
sizeof(*private->all_drm_private),
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index e61b9bc68e9a..40752f232054 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -139,11 +139,11 @@
#define CLK_HS_POST GENMASK(15, 8)
#define CLK_HS_EXIT GENMASK(23, 16)
-#define DSI_VM_CMD_CON 0x130
+/* DSI_VM_CMD_CON */
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_SHADOW_DEBUG 0x190U
+/* DSI_SHADOW_DEBUG */
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)
@@ -187,6 +187,8 @@ struct phy;
struct mtk_dsi_driver_data {
const u32 reg_cmdq_off;
+ const u32 reg_vm_cmd_off;
+ const u32 reg_shadow_dbg_off;
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;
@@ -246,23 +248,22 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
- timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
- timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
- timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
- timing->ta_go = 4 * timing->lpx;
- timing->ta_sure = 3 * timing->lpx / 2;
- timing->ta_get = 5 * timing->lpx;
- timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
- timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
- timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
- timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
- timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
- timing->clk_hs_prepare;
- timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
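[Editor's note: to sanity-check the new formulas, a worked example assuming a 1000 Mbps-per-lane link (data_rate_mhz = 1000); all divisions are C integer division and results are in byte-clock cycles.]

lpx            = 60 * 1000 / 8000 + 1;                 /* = 8  */
da_hs_prepare  = (80 * 1000 + 4000) / 8000;            /* = 10 */
da_hs_zero     = (170 * 1000 + 10000) / 8000 + 1 - 10; /* = 13 */
da_hs_trail    = da_hs_prepare + 1;                    /* = 11 */
clk_hs_prepare = 70 * 1000 / 8000;                     /* = 8  */
clk_hs_zero    = clk_hs_prepare * 4;                   /* = 32 */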
@@ -367,8 +368,8 @@ static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, VM_CMD_EN, VM_CMD_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, TS_VFP_EN, TS_VFP_EN);
}
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
@@ -714,7 +715,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
if (dsi->driver_data->has_shadow_ctl)
writel(FORCE_COMMIT | BYPASS_SHADOW,
- dsi->regs + DSI_SHADOW_DEBUG);
+ dsi->regs + dsi->driver_data->reg_shadow_dbg_off);
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
@@ -1263,26 +1264,36 @@ static void mtk_dsi_remove(struct platform_device *pdev)
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
.reg_cmdq_off = 0x180,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
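[Editor's note: the motivation for the two new fields is that MT8186/MT8188 moved these registers, so fixed #defines no longer describe every SoC. A generic sketch of the lookup pattern, with illustrative names:]

/* Per-SoC register layout lives in driver data, not in #defines. */
struct my_dsi_driver_data {
	u32 reg_vm_cmd_off;	/* offset of VM_CMD_CON on this SoC */
};

static void my_dsi_enable_vm_cmd(struct my_dsi *dsi)
{
	/* same bit semantics everywhere, SoC-specific offset */
	writel(VM_CMD_EN, dsi->regs + dsi->driver_data->reg_vm_cmd_off);
}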
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8f0c69aad248..21b56cc7605c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -384,7 +384,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return NULL;
- return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ return edid;
}
bool nouveau_acpi_video_backlight_use_native(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 09686d038d60..7cc84472cece 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
+ bool local;
rcu_read_lock();
prev = rcu_dereference(f->channel);
- if (prev && (prev == chan ||
- fctx->sync(f, prev, chan) == 0))
+ local = prev && prev->cli->drm == chan->cli->drm;
+ if (local && (prev == chan ||
+ fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index 841e3b69fcaf..5a0c9b8a79f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -31,6 +31,7 @@ mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 6bba97d0be88..3d48d676e001 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -635,6 +635,64 @@ static void drm_test_connector_hdmi_init_formats_no_rgb(struct kunit *test)
KUNIT_EXPECT_LT(test, ret, 0);
}
+struct drm_connector_hdmi_init_formats_yuv420_allowed_test {
+ unsigned long supported_formats;
+ bool yuv420_allowed;
+ int expected_result;
+};
+
+#define YUV420_ALLOWED_TEST(_formats, _allowed, _result) \
+ { \
+ .supported_formats = BIT(HDMI_COLORSPACE_RGB) | (_formats), \
+ .yuv420_allowed = _allowed, \
+ .expected_result = _result, \
+ }
+
+static const struct drm_connector_hdmi_init_formats_yuv420_allowed_test
+drm_connector_hdmi_init_formats_yuv420_allowed_tests[] = {
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), true, 0),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), false, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), true, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), false, 0),
+};
+
+static void
+drm_connector_hdmi_init_formats_yuv420_allowed_desc(const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *t,
+ char *desc)
+{
+ sprintf(desc, "supported_formats=0x%lx yuv420_allowed=%d",
+ t->supported_formats, t->yuv420_allowed);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_tests,
+ drm_connector_hdmi_init_formats_yuv420_allowed_desc);
+
+/*
+ * Test that the registration of an HDMI connector succeeds only when
+ * the presence of YUV420 in the supported formats matches the value
+ * of the ycbcr_420_allowed flag.
+ */
+static void drm_test_connector_hdmi_init_formats_yuv420_allowed(struct kunit *test)
+{
+ const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *params;
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ params = test->param_value;
+ priv->connector.ycbcr_420_allowed = params->yuv420_allowed;
+
+ ret = drmm_connector_hdmi_init(&priv->drm, &priv->connector,
+ "Vendor", "Product",
+ &dummy_funcs,
+ &dummy_hdmi_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &priv->ddc,
+ params->supported_formats,
+ 8);
+ KUNIT_EXPECT_EQ(test, ret, params->expected_result);
+}
+
/*
* Test that the registration of an HDMI connector with an HDMI
* connector type succeeds.
@@ -726,6 +784,8 @@ static struct kunit_case drmm_connector_hdmi_init_tests[] = {
KUNIT_CASE(drm_test_connector_hdmi_init_bpc_null),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_empty),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_no_rgb),
+ KUNIT_CASE_PARAM(drm_test_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_gen_params),
KUNIT_CASE(drm_test_connector_hdmi_init_null_ddc),
KUNIT_CASE(drm_test_connector_hdmi_init_null_product),
KUNIT_CASE(drm_test_connector_hdmi_init_null_vendor),
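[Editor's note: the test above uses the standard KUnit parameterization plumbing; a minimal, self-contained sketch of that pattern, with a hypothetical test not from the patch:]

struct my_case { int in, want; };

static const struct my_case my_cases[] = {
	{ .in = 1, .want = 2 },
	{ .in = 3, .want = 6 },
};

static void my_case_desc(const struct my_case *c, char *desc)
{
	sprintf(desc, "in=%d", c->in);
}

/* generates my_gen_params() for use in KUNIT_CASE_PARAM() below */
KUNIT_ARRAY_PARAM(my, my_cases, my_case_desc);

static void my_test(struct kunit *test)
{
	const struct my_case *c = test->param_value;

	KUNIT_EXPECT_EQ(test, c->in * 2, c->want);
}

static struct kunit_case my_test_cases[] = {
	KUNIT_CASE_PARAM(my_test, my_gen_params),
	{}
};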
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 04a6b8cc62ac..3c0b7824c0be 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -320,8 +320,7 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
- * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC
- for a KUnit test
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
* @video_code: CEA VIC of the mode
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 20bf33702c3c..da203045df9b 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -108,6 +108,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->bin_job = NULL;
status = IRQ_HANDLED;
}
@@ -118,6 +119,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->render_job = NULL;
status = IRQ_HANDLED;
}
@@ -128,6 +130,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->csd_job = NULL;
status = IRQ_HANDLED;
}
@@ -165,6 +168,7 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->tfu_job = NULL;
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a0e433fbcba6..9b5b8c1f063b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -228,7 +228,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
- buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -443,7 +442,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
- ttm_bo_unreserve(&vmw_bo->tbo);
+ if (!params->keep_resv)
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 43b5439ec9f7..11e330c7c7f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -56,8 +56,9 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
- size_t size;
bool pin;
+ bool keep_resv;
+ size_t size;
struct dma_resv *resv;
struct sg_table *sg;
};
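[Editor's note: the new keep_resv flag lets callers that need the buffer reserved right after creation skip the racy create-then-re-reserve dance (and the BUG_ON it carried). A hedged usage sketch:]

struct vmw_bo_params params = {
	.domain      = VMW_BO_DOMAIN_SYS,
	.busy_domain = VMW_BO_DOMAIN_SYS,
	.bo_type     = ttm_bo_type_kernel,
	.size        = PAGE_SIZE,
	.pin         = true,
	.keep_resv   = true,	/* BO is returned still reserved */
};

ret = vmw_bo_create(dev_priv, &params, &vbo);
if (ret)
	return ret;
/* ... work on the still-reserved BO (kmap, populate, ...) ... */
ttm_bo_unreserve(&vbo->tbo);	/* caller drops the reservation */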
@@ -83,7 +84,6 @@ struct vmw_bo {
struct ttm_placement placement;
struct ttm_place places[5];
- struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2c46897876dd..b19a062592b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -403,7 +403,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
/*
@@ -415,10 +416,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
- vmw_bo_pin_reserved(vbo, true);
-
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index b9857f37ca1a..ed5015ced392 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
+ .keep_resv = true,
.resv = attach->dmabuf->resv,
.sg = table,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8db38927729b..800a79e035ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
struct vmw_bo *old_bo = NULL;
struct vmw_bo *new_bo = NULL;
+ struct ww_acquire_ctx ctx;
s32 hotspot_x, hotspot_y;
int ret;
@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
if (!vmw_user_object_is_null(&old_vps->uo)) {
old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret != 0)
return;
}
@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (!vmw_user_object_is_null(&vps->uo)) {
new_bo = vmw_user_object_buffer(&vps->uo);
if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
- if (ret != 0)
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ if (old_bo) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ }
return;
+ }
} else {
new_bo = NULL;
}
@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
if (new_bo)
ttm_bo_unreserve(&new_bo->tbo);
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
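[Editor's note: passing one ww_acquire_ctx to both ttm_bo_reserve() calls makes the two reservations part of a single wound/wait acquire class, so concurrent updates can back off instead of deadlocking. The shape of the pattern, reduced to a sketch that omits -EDEADLK backoff handling:]

struct ww_acquire_ctx ctx;
int ret;

ww_acquire_init(&ctx, &reservation_ww_class);

ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret)
	goto out_fini;
ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
if (ret)
	goto out_unreserve_old;

/* ... update state guarded by both reservations ... */

ttm_bo_unreserve(&new_bo->tbo);
out_unreserve_old:
	ttm_bo_unreserve(&old_bo->tbo);
out_fini:
	ww_acquire_fini(&ctx);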
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a01ca3226d0a..7fb1c88bcc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
if (!vmw_shader_id_ok(user_key, shader_type))
@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
- if (unlikely(ret != 0))
- goto no_reserve;
-
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 621d98b376bb..5553892d7c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3e0ae40ebbd2..812046f5d741 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -257,10 +257,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 6f9b7a266b41..2698e433c6fa 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -162,8 +162,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index ae6b337cdc54..f61a8ef38094 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -724,7 +724,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
new_mem->mem_type == XE_PL_SYSTEM) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP,
- true,
+ false,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
ret = timeout;
@@ -848,8 +848,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
- ttm_bo->ttm)
+ ttm_bo->ttm) {
+ long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ ret = timeout;
+
xe_tt_unmap_sg(ttm_bo->ttm);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index f8947e7e917e..21a50d539426 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -109,7 +109,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- drm_puts(&p, "\n**** Contexts ****\n");
+ /*
+ * Don't add a new section header here because the mesa debug decoder
+ * tool expects the context information to be in the 'GuC CT' section.
+ */
+ /* drm_puts(&p, "\n**** Contexts ****\n"); */
xe_guc_exec_queue_snapshot_print(ss->ge, &p);
drm_puts(&p, "\n**** Job ****\n");
@@ -363,6 +367,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;
+ /*
+ * Splitting blobs across multiple lines is not compatible with the mesa
+ * debug decoder tool. Note that even dropping the explicit '\n' below
+ * doesn't help because the GuC log is so big some underlying implementation
+ * still splits the lines at 512K characters. So just bail completely for
+ * the moment.
+ */
+ return;
+
#define DMESG_MAX_LINE_LEN 800
#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index fd0f3b3c9101..268cd3123be9 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -8,6 +8,7 @@
#include <linux/nospec.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
@@ -762,9 +763,11 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
+ int idx;
/*
* Jobs that are run during driver load may use an exec_queue, but are
@@ -774,6 +777,10 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
if (!q->vm || !q->vm->xef)
return;
+ /* Synchronize with unbind while holding the xe file open */
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return;
+
xef = q->vm->xef;
/*
@@ -787,6 +794,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+
+ drm_dev_exit(idx);
}
/**
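[Editor's note: drm_dev_enter()/drm_dev_exit() is the standard DRM guard against hotplug/unbind races; the critical section only runs while the device is still registered. Reduced to a sketch:]

int idx;

if (!drm_dev_enter(&xe->drm, &idx))
	return;		/* device already unplugged or unbound */

/* safe to touch device state here */

drm_dev_exit(idx);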
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index d6744be01a68..94d468d01253 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -387,6 +387,10 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
+ err = xe_gt_tlb_invalidation_init_early(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -588,10 +592,6 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
- err = xe_gt_tlb_invalidation_init(gt);
- if (err)
- return err;
-
err = xe_gt_pagefault_init(gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 82b9b7f82fca..eb2946963420 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -57,6 +57,31 @@ int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);
/**
+ * xe_gt_wait_for_reset - wait for gt's async reset to finalize.
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the work to finish execution,
+ * %false if there was no scheduled reset or it was done.
+ */
+static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
+{
+ return flush_work(&gt->reset.worker);
+}
+
+/**
+ * xe_gt_reset - perform synchronous reset
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the reset to finish,
+ * %false if there was no scheduled reset.
+ */
+static inline bool xe_gt_reset(struct xe_gt *gt)
+{
+ xe_gt_reset_async(gt);
+ return xe_gt_wait_for_reset(gt);
+}
+
+/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
* first that matches the same reset domain as @class
* @gt: GT structure
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index b6adfb9f2030..50fffc9ebf62 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -150,7 +150,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
}
mutex_unlock(&xe->drm.filelist_mutex);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 3e8c351a0eab..e7792858b1e4 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -132,11 +132,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));
- flush_work(&gt->reset.worker);
-
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index fd80afeef56a..ffd3ba7f6656 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -122,10 +122,12 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
if (!xe_gt_is_media_type(gt))
gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
- for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
- if ((gt->info.engine_mask & BIT(i)))
- gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
- VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ if (xe->info.platform != XE_DG1) {
+ for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
+ if ((gt->info.engine_mask & BIT(i)))
+ gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ }
}
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 5013d674e17d..d929a78c26a9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -550,9 +550,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* Returns true if the caller should steer to the @group/@instance values
* returned. Returns false if the caller need not perform any steering
*/
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- struct xe_reg_mcr reg_mcr,
- u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index c0cd36021c24..bc06520befab 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -26,6 +26,10 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance);
+
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 192643d63d22..ca49860168f6 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -2046,7 +2046,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_any = valid_any || (valid_ggtt && is_primary);
if (IS_DGFX(xe)) {
- bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
+ bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 6146d1776bda..0a0af413770e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -106,7 +106,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
}
/**
- * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
+ * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
* @gt: graphics tile
*
* Initialize GT TLB invalidation state, purely software initialization, should
@@ -114,7 +114,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
*
* Return: 0 on success, negative error code on error.
*/
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
gt->tlb_invalidation.seqno = 1;
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index 00b1c6c01e8d..672acfcdf0d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -14,7 +14,8 @@ struct xe_gt;
struct xe_guc;
struct xe_vma;
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
+
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 4e746ae98888..dd7f9d0effa9 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -29,6 +29,7 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
+#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -701,6 +702,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
.flags = reg.masked ? GUC_REGSET_MASKED : 0,
};
+ if (reg.mcr) {
+ struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+ u8 group, instance;
+
+ bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+ &group, &instance);
+
+ if (steer) {
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ entry.flags |= GUC_REGSET_STEERING_NEEDED;
+ }
+ }
+
xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
&entry, sizeof(entry));
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1557acee3523..eeeb98f66982 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -419,7 +419,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
- const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 8dd55798ab31..678fa40e4cea 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -74,12 +74,6 @@ struct xe_oa_config {
struct rcu_head rcu;
};
-struct flex {
- struct xe_reg reg;
- u32 offset;
- u32 value;
-};
-
struct xe_oa_open_param {
struct xe_file *xef;
u32 oa_unit_id;
@@ -596,19 +590,38 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
return ret;
}
+static void xe_oa_lock_vma(struct xe_exec_queue *q)
+{
+ if (q->vm) {
+ down_read(&q->vm->lock);
+ xe_vm_lock(q->vm, false);
+ }
+}
+
+static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+{
+ if (q->vm) {
+ xe_vm_unlock(q->vm);
+ up_read(&q->vm->lock);
+ }
+}
+
static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
struct xe_bb *bb)
{
+ struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
struct xe_sched_job *job;
struct dma_fence *fence;
int err = 0;
- /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
- job = xe_bb_create_job(stream->k_exec_q, bb);
+ xe_oa_lock_vma(q);
+
+ job = xe_bb_create_job(q, bb);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto exit;
}
+ job->ggtt = true;
if (deps == XE_OA_SUBMIT_ADD_DEPS) {
for (int i = 0; i < stream->num_syncs && !err; i++)
@@ -623,10 +636,13 @@ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
+ xe_oa_unlock_vma(q);
+
return fence;
err_put_job:
xe_sched_job_put(job);
exit:
+ xe_oa_unlock_vma(q);
return ERR_PTR(err);
}
@@ -675,63 +691,19 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
dma_fence_put(stream->last_fence);
}
-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- struct xe_bb *bb, const struct flex *flex, u32 count)
-{
- u32 offset = xe_bo_ggtt_addr(lrc->bo);
-
- do {
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
- bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = flex->value;
-
- } while (flex++, --count);
-}
-
-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- const struct flex *flex, u32 count)
-{
- struct dma_fence *fence;
- struct xe_bb *bb;
- int err;
-
- bb = xe_bb_new(stream->gt, 4 * count, false);
- if (IS_ERR(bb)) {
- err = PTR_ERR(bb);
- goto exit;
- }
-
- xe_oa_store_flex(stream, lrc, bb, flex, count);
-
- fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto free_bb;
- }
- xe_bb_free(bb, fence);
- dma_fence_put(fence);
-
- return 0;
-free_bb:
- xe_bb_free(bb, NULL);
-exit:
- return err;
-}
-
-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
+static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
{
struct dma_fence *fence;
struct xe_bb *bb;
int err;
- bb = xe_bb_new(stream->gt, 3, false);
+ bb = xe_bb_new(stream->gt, 2 * count + 1, false);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
goto exit;
}
- write_cs_mi_lri(bb, reg_lri, 1);
+ write_cs_mi_lri(bb, reg_lri, count);
fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
if (IS_ERR(fence)) {
@@ -751,71 +723,55 @@ exit:
static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAR_OACONTROL,
+ oacontrol,
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
},
};
- struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
- int err;
-
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAC_OACONTROL,
+ oacontrol
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
},
};
- struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
- int err;
/* Set ccs select to enable programming of OAC_OACONTROL */
xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
__oa_ccs_select(stream));
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
-
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
@@ -2066,8 +2022,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
return -ENOENT;
- if (param.exec_q->width > 1)
- drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
+ if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
+ return -EOPNOTSUPP;
}
/*
@@ -2242,6 +2198,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
+ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
{ .start = 0xE590, .end = 0xE590 }, /* TDL_LSC_LAT_MEASURE_TDL_GFX */
{ .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 0be4f489d3e1..9f327f27c072 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
- return job->q->vm ? BIT(8) : 0;
+ if (job->q->vm && !job->ggtt)
+ return BIT(8);
+
+ return 0;
}
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index f13f333f00be..d942b20a9f29 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -56,6 +56,8 @@ struct xe_sched_job {
u32 migrate_flush_flags;
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
bool ring_ops_flush_tlb;
+ /** @ggtt: mapped in ggtt. */
+ bool ggtt;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 2f1c9d97ad21..3db1b9d09c32 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -682,7 +682,7 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
/* _PMD method is optional. */
res = read_domain_devices(resource);
- if (res != -ENODEV)
+ if (res && res != -ENODEV)
return res;
if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 6bdd21aa005a..291d91f68646 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -165,6 +165,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
{
u8 scsi_cmd[MAX_COMMAND_SIZE];
enum req_op op;
+ int err;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
@@ -192,8 +193,11 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[12] = lba_high;
scsi_cmd[14] = ata_command;
- return scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
- ATA_SECT_SIZE, HZ, 5, NULL);
+ err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+ ATA_SECT_SIZE, 10 * HZ, 5, NULL);
+ if (err > 0)
+ err = -EIO;
+ return err;
}
static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 7ca139e4b6af..6d5d4cb846da 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -125,7 +125,7 @@ static int ltc2991_get_curr(struct ltc2991_state *st, u32 reg, int channel,
/* Vx-Vy, 19.075uV/LSB */
*val = DIV_ROUND_CLOSEST(sign_extend32(reg_val, 14) * 19075,
- st->r_sense_uohm[channel]);
+ (s32)st->r_sense_uohm[channel]);
return 0;
}
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 1c2cb12071b8..5acbfd7d088d 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -207,7 +207,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
*val = sign_extend32(regval,
reg == TMP51X_SHUNT_CURRENT_RESULT ?
16 - tmp51x_get_pga_shift(data) : 15);
- *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
+ *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms);
+
break;
case TMP51X_BUS_VOLTAGE_RESULT:
case TMP51X_BUS_VOLTAGE_H_LIMIT:
@@ -223,7 +224,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
- *val = DIV_ROUND_CLOSEST(*val, MILLI);
+ *val = DIV_ROUND_CLOSEST(*val, (long)MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
case TMP51X_REMOTE_TEMP_RESULT_1:
@@ -263,7 +264,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
* The user enter current value and we convert it to
* voltage. 1lsb = 10uV
*/
- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
+ val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI);
max_val = U16_MAX >> tmp51x_get_pga_shift(data);
regval = clamp_val(val, -max_val, max_val);
break;
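[Editor's note: the casts matter because DIV_ROUND_CLOSEST() picks its rounding path from the operand types, and on 32-bit builds a long dividend mixed with a u32 divisor is converted to unsigned, corrupting negative readings. An illustrative before/after, assuming a 32-bit long and MILLI from linux/units.h:]

long v;
long raw = -5;			/* negative shunt reading */
u32 shunt_uohms = 1000;

/* wrong on 32-bit: raw is converted to unsigned long first */
v = DIV_ROUND_CLOSEST(raw * 10 * MILLI, shunt_uohms);

/* correct: every operand stays signed */
v = DIV_ROUND_CLOSEST(raw * 10 * (long)MILLI, (long)shunt_uohms);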
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a7b77d14ee86..5693a38da7b5 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -130,6 +130,8 @@
#define ID_P_PM_BLOCKED BIT(31)
#define ID_P_MASK GENMASK(31, 27)
+#define ID_SLAVE_NACK BIT(0)
+
enum rcar_i2c_type {
I2C_RCAR_GEN1,
I2C_RCAR_GEN2,
@@ -166,6 +168,7 @@ struct rcar_i2c_priv {
int irq;
struct i2c_client *host_notify_client;
+ u8 slave_flags;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -655,6 +658,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
{
u32 ssr_raw, ssr_filtered;
u8 value;
+ int ret;
ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff;
ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER);
@@ -670,7 +674,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICRXTX, value);
rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
} else {
- i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
rcar_i2c_read(priv, ICRXTX); /* dummy read */
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
@@ -683,18 +690,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ priv->slave_flags &= ~ID_SLAVE_NACK;
rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
/* master wants to write to us */
if (ssr_filtered & SDR) {
- int ret;
-
value = rcar_i2c_read(priv, ICRXTX);
ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
- /* Send NACK in case of error */
- rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
+ /* Send NACK in case of error, but it will come 1 byte late :( */
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS |
+ (priv->slave_flags & ID_SLAVE_NACK ? FNA : 0));
rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
}
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index b7c10ced5a43..8fe9ddff8e96 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
dev_name(dev), ret);
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
i2c_atr_detach_client(client->adapter, client);
break;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7c810893bfa3..75d30861ffe2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1562,6 +1562,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
+ put_device(&adap->dev);
goto out_list;
}
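[Editor's note: this follows the driver-core rule that once a device has been initialized, a failed device_add() must be balanced by put_device() so the release callback frees the object; freeing it any other way leaks or corrupts the embedded kobject state. Generic shape, as a sketch:]

device_initialize(&foo->dev);		/* refcount is now live */
err = device_add(&foo->dev);
if (err) {
	put_device(&foo->dev);		/* release() frees foo */
	return err;
}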
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 0d6fbaa48248..6de4307050dd 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -38,6 +38,7 @@ enum testunit_regs {
enum testunit_flags {
TU_FLAG_IN_PROCESS,
+ TU_FLAG_NACK,
};
struct testunit_data {
@@ -90,8 +91,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
switch (event) {
case I2C_SLAVE_WRITE_REQUESTED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (tu->flags & (BIT(TU_FLAG_IN_PROCESS) | BIT(TU_FLAG_NACK))) {
+ ret = -EBUSY;
+ break;
+ }
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
@@ -99,8 +102,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (tu->flags & (BIT(TU_FLAG_IN_PROCESS) | BIT(TU_FLAG_NACK))) {
+ ret = -EBUSY;
+ break;
+ }
if (tu->reg_idx < TU_NUM_REGS)
tu->regs[tu->reg_idx] = *val;
@@ -129,6 +134,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
* here because we still need them in the workqueue!
*/
tu->reg_idx = 0;
+
+ clear_bit(TU_FLAG_NACK, &tu->flags);
break;
case I2C_SLAVE_READ_PROCESSED:
@@ -151,6 +158,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
}
+ /* If an error occurred at some point, we NACK everything until the next STOP */
+ if (ret)
+ set_bit(TU_FLAG_NACK, &tu->flags);
+
return ret;
}
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index dce18f763a09..77a740561fd7 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
}
/*
- * Check if there are pinctrl states at all. Note: we cant' use
+ * Check if there are pinctrl states at all. Note: we can't use
* devm_pinctrl_get_select() because we need to distinguish between
* the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
*/
@@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
pm_runtime_no_callbacks(&pdev->dev);
/* switch to first parent as active master */
- i2c_demux_activate_master(priv, 0);
+ err = i2c_demux_activate_master(priv, 0);
+ if (err)
+ goto err_rollback;
err = device_create_file(&pdev->dev, &dev_attr_available_masters);
if (err)
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index 595ec4158e73..0146aed9069f 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -91,6 +91,7 @@
#define AD4695_T_WAKEUP_SW_MS 3
#define AD4695_T_REFBUF_MS 100
#define AD4695_T_REGCONFIG_NS 20
+#define AD4695_T_SCK_CNV_DELAY_NS 80
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
@@ -132,8 +133,13 @@ struct ad4695_state {
unsigned int vref_mv;
/* Common mode input pin voltage. */
unsigned int com_mv;
- /* 1 per voltage and temperature chan plus 1 xfer to trigger 1st CNV */
- struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS + 2];
+ /*
+ * 2 per voltage and temperature chan plus 1 xfer to trigger 1st
+ * CNV. Excluding the trigger xfer, every 2nd xfer only serves
+ * to control CS and add a delay between the last SCLK and next
+ * CNV rising edges.
+ */
+ struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS * 2 + 3];
struct spi_message buf_read_msg;
/* Raw conversion data received. */
u8 buf[ALIGN((AD4695_MAX_CHANNELS + 2) * AD4695_MAX_CHANNEL_SIZE,
@@ -423,7 +429,7 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
u8 temp_chan_bit = st->chip_info->num_voltage_inputs;
u32 bit, num_xfer, num_slots;
u32 temp_en = 0;
- int ret;
+ int ret, rx_buf_offset = 0;
/*
* We are using the advanced sequencer since it is the only way to read
@@ -449,11 +455,9 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
iio_for_each_active_channel(indio_dev, bit) {
xfer = &st->buf_read_xfer[num_xfer];
xfer->bits_per_word = 16;
- xfer->rx_buf = &st->buf[(num_xfer - 1) * 2];
+ xfer->rx_buf = &st->buf[rx_buf_offset];
xfer->len = 2;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ rx_buf_offset += xfer->len;
if (bit == temp_chan_bit) {
temp_en = 1;
@@ -468,21 +472,44 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
}
num_xfer++;
+
+ /*
+ * We need to add a blank xfer in data reads, to meet the timing
+ * requirement of a minimum delay between the last SCLK rising
+ * edge and the CS deassert.
+ */
+ xfer = &st->buf_read_xfer[num_xfer];
+ xfer->delay.value = AD4695_T_SCK_CNV_DELAY_NS;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ num_xfer++;
}
/*
* The advanced sequencer requires that at least 2 slots are enabled.
* Since slot 0 is always used for other purposes, we need only 1
- * enabled voltage channel to meet this requirement. If the temperature
- * channel is the only enabled channel, we need to add one more slot
- * in the sequence but not read from it.
+ * enabled voltage channel to meet this requirement. If the temperature
+ * channel is the only enabled channel, we need to add one more slot in
+ * the sequence but not read from it. This is because the temperature
+ * sensor is sampled at the end of the channel sequence in advanced
+ * sequencer mode (see datasheet page 38).
+ *
+ * From the iio_for_each_active_channel() block above, we now have an
+ * xfer with data followed by a blank xfer to allow us to meet the
+ * timing spec, so move both of those up before adding an extra to
+ * handle the temperature-only case.
*/
if (num_slots < 2) {
- /* move last xfer so we can insert one more xfer before it */
- st->buf_read_xfer[num_xfer] = *xfer;
+ /* Move last two xfers */
+ st->buf_read_xfer[num_xfer] = st->buf_read_xfer[num_xfer - 1];
+ st->buf_read_xfer[num_xfer - 1] = st->buf_read_xfer[num_xfer - 2];
num_xfer++;
- /* modify 2nd to last xfer for extra slot */
+ /* Modify inserted xfer for extra slot. */
+ xfer = &st->buf_read_xfer[num_xfer - 3];
memset(xfer, 0, sizeof(*xfer));
xfer->cs_change = 1;
xfer->delay.value = st->chip_info->t_acq_ns;
@@ -499,6 +526,12 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
return ret;
num_slots++;
+
+ /*
+ * We still want to point at the last xfer when finished, so
+ * update the pointer.
+ */
+ xfer = &st->buf_read_xfer[num_xfer - 1];
}
/*
@@ -583,8 +616,20 @@ out:
*/
static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
{
- struct spi_transfer xfer[2] = { };
- int ret, i = 0;
+ struct spi_transfer xfers[2] = {
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .bits_per_word = 16,
+ .tx_buf = &st->cnv_cmd,
+ .len = 2,
+ },
+ {
+ /* Required delay between last SCLK and CNV/CS */
+ .delay.value = AD4695_T_SCK_CNV_DELAY_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ }
+ };
+ int ret;
ret = ad4695_set_single_cycle_mode(st, address);
if (ret)
@@ -592,29 +637,22 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
/*
* Setting the first channel to the temperature channel isn't supported
- * in single-cycle mode, so we have to do an extra xfer to read the
- * temperature.
+ * in single-cycle mode, so we have to do an extra conversion to read
+ * the temperature.
*/
if (address == AD4695_CMD_TEMP_CHAN) {
- /* We aren't reading, so we can make this a short xfer. */
- st->cnv_cmd2 = AD4695_CMD_TEMP_CHAN << 3;
- xfer[0].tx_buf = &st->cnv_cmd2;
- xfer[0].len = 1;
- xfer[0].cs_change = 1;
- xfer[0].cs_change_delay.value = AD4695_T_CONVERT_NS;
- xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
-
- i = 1;
+ st->cnv_cmd = AD4695_CMD_TEMP_CHAN << 11;
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
}
/* Then read the result and exit conversion mode. */
st->cnv_cmd = AD4695_CMD_EXIT_CNV_MODE << 11;
- xfer[i].bits_per_word = 16;
- xfer[i].tx_buf = &st->cnv_cmd;
- xfer[i].rx_buf = &st->raw_data;
- xfer[i].len = 2;
+ xfers[0].rx_buf = &st->raw_data;
- return spi_sync_transfer(st->spi, xfer, i + 1);
+ return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
static int ad4695_read_raw(struct iio_dev *indio_dev,
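[Editor's note: the second, data-less transfer above is a common SPI idiom for meeting a "last SCLK edge to CS deassert" timing spec; it carries only a .delay, which stretches the chip-select window. A minimal sketch, with the 80 ns value taken from the AD4695_T_SCK_CNV_DELAY_NS definition above and cmd as a hypothetical command word:]

struct spi_transfer xfers[] = {
	{
		.tx_buf = &cmd,
		.len = 2,
		.bits_per_word = 16,
	},
	{
		/* no data: only holds CS asserted for the required time */
		.delay.value = 80,
		.delay.unit = SPI_DELAY_UNIT_NSECS,
	},
};

ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));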
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 7314fb32bdec..3d678c420cbf 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -917,6 +917,9 @@ static int ad7124_setup(struct ad7124_state *st)
* set all channels to this default value.
*/
ad7124_set_channel_odr(st, i, 10);
+
+ /* Disable all channels to prevent unintended conversions. */
+ ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0);
}
ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 8a0c931ca83a..8b03c1e5567e 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -200,6 +200,7 @@ struct ad7173_channel {
struct ad7173_state {
struct ad_sigma_delta sd;
+ struct ad_sigma_delta_info sigma_delta_info;
const struct ad7173_device_info *info;
struct ad7173_channel *channels;
struct regulator_bulk_data regulators[3];
@@ -753,7 +754,7 @@ static int ad7173_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
return ad_sd_write_reg(sd, AD7173_REG_CH(chan), 2, 0);
}
-static struct ad_sigma_delta_info ad7173_sigma_delta_info = {
+static const struct ad_sigma_delta_info ad7173_sigma_delta_info = {
.set_channel = ad7173_set_channel,
.append_status = ad7173_append_status,
.disable_all = ad7173_disable_all,
@@ -1403,7 +1404,7 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
if (ret < 0)
return dev_err_probe(dev, ret, "Interrupt 'rdy' is required\n");
- ad7173_sigma_delta_info.irq_line = ret;
+ st->sigma_delta_info.irq_line = ret;
return ad7173_fw_parse_channel_config(indio_dev);
}
@@ -1436,8 +1437,9 @@ static int ad7173_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3;
spi_setup(spi);
- ad7173_sigma_delta_info.num_slots = st->info->num_configs;
- ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7173_sigma_delta_info);
+ st->sigma_delta_info = ad7173_sigma_delta_info;
+ st->sigma_delta_info.num_slots = st->info->num_configs;
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &st->sigma_delta_info);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index d358958ab310..f30119b42ba0 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -895,7 +895,7 @@ static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
return 0;
}
-static struct iio_info ad9467_info = {
+static const struct iio_info ad9467_info = {
.read_raw = ad9467_read_raw,
.write_raw = ad9467_write_raw,
.update_scan_mode = ad9467_update_scan_mode,
@@ -903,6 +903,14 @@ static struct iio_info ad9467_info = {
.read_avail = ad9467_read_avail,
};
+/* Same as above, but without .read_avail */
+static const struct iio_info ad9467_info_no_read_avail = {
+ .read_raw = ad9467_read_raw,
+ .write_raw = ad9467_write_raw,
+ .update_scan_mode = ad9467_update_scan_mode,
+ .debugfs_reg_access = ad9467_reg_access,
+};
+
static int ad9467_scale_fill(struct ad9467_state *st)
{
const struct ad9467_chip_info *info = st->info;
@@ -1214,11 +1222,12 @@ static int ad9467_probe(struct spi_device *spi)
}
if (st->info->num_scales > 1)
- ad9467_info.read_avail = ad9467_read_avail;
+ indio_dev->info = &ad9467_info;
+ else
+ indio_dev->info = &ad9467_info_no_read_avail;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
- indio_dev->info = &ad9467_info;
ret = ad9467_iio_backend_get(st);
if (ret)
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index a3f0a2321666..5927756b749a 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -979,7 +979,7 @@ static int at91_ts_register(struct iio_dev *idev,
return ret;
err:
- input_free_device(st->ts_input);
+ input_free_device(input);
return ret;
}
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 240cfa391674..dfd47a6e1f4a 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -368,6 +368,8 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
int ret;
int i, j = 0;
+ memset(&data, 0, sizeof(data));
+
mutex_lock(&info->lock);
iio_for_each_active_channel(i_dev, i) {
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 1f9eca2fb2bf..fe11b0d8eab3 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -691,11 +691,14 @@ static int stm32_dfsdm_generic_channel_parse_of(struct stm32_dfsdm *dfsdm,
return -EINVAL;
}
- ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- " Error parsing 'label' for idx %d\n", ch->channel);
- return ret;
+ if (fwnode_property_present(node, "label")) {
+ /* label is optional */
+ ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'label' for idx %d\n", ch->channel);
+ return ret;
+ }
}
df_ch = &dfsdm->ch_list[ch->channel];
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index e9d9d4d46d38..c268e27eec12 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -500,12 +500,14 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct ads1119_state *st = iio_priv(indio_dev);
struct {
- unsigned int sample;
+ s16 sample;
s64 timestamp __aligned(8);
} scan;
unsigned int index;
int ret;
+ memset(&scan, 0, sizeof(scan));
+
if (!iio_trigger_using_own(indio_dev)) {
index = find_first_bit(indio_dev->active_scan_mask,
iio_get_masklength(indio_dev));
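
The memset here, like the ones in rockchip_saradc, bh1745, zpa2326 and tmp006 and the `= { }` initializers in ti-ads8688, kmx61 and vcnl4035 further down, closes the same hole: an on-stack scan buffer with padding or unwritten channel slots is pushed to user space through the IIO buffer, leaking stale kernel stack bytes. The shape of the fix as a standalone sketch (the layout mirrors the common samples-plus-aligned-timestamp idiom; names are illustrative):

        #include <string.h>

        struct scan_buf {
                short sample;                   /* 2 bytes of real data */
                /* 6 bytes of compiler padding sit here ... */
                long long timestamp __attribute__((aligned(8)));
        };

        static void fill_scan(struct scan_buf *scan, short value, long long ts)
        {
                /* ... so zero the whole struct before filling it in. */
                memset(scan, 0, sizeof(*scan));
                scan->sample = value;
                scan->timestamp = ts;
        }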
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 425b48d8986f..f452f57f11c9 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -183,9 +183,9 @@ static int ads124s_reset(struct iio_dev *indio_dev)
struct ads124s_private *priv = iio_priv(indio_dev);
if (priv->reset_gpio) {
- gpiod_set_value(priv->reset_gpio, 0);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
udelay(200);
- gpiod_set_value(priv->reset_gpio, 1);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
} else {
return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET);
}
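
The switch to the _cansleep accessors matters when the reset GPIO sits behind a sleeping bus such as an I2C expander: gpiod_set_value() warns for such descriptors, while the _cansleep variants are legal from any task context, which is where this reset helper runs. A hedged sketch of the rule (timings illustrative, not from a datasheet):

        #include <linux/gpio/consumer.h>
        #include <linux/delay.h>

        /* Task context only: the _cansleep accessors may block, so they
         * work even for GPIOs provided by slow (e.g. I2C) controllers. */
        static void chip_hw_reset(struct gpio_desc *reset)
        {
                gpiod_set_value_cansleep(reset, 1);     /* assert reset */
                usleep_range(200, 400);                 /* illustrative pulse width */
                gpiod_set_value_cansleep(reset, 0);     /* release reset */
        }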
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
index 36d43495f603..03f762415fa5 100644
--- a/drivers/iio/adc/ti-ads1298.c
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -613,6 +613,8 @@ static int ads1298_init(struct iio_dev *indio_dev)
}
indio_dev->name = devm_kasprintf(dev, GFP_KERNEL, "ads129%u%s",
indio_dev->num_channels, suffix);
+ if (!indio_dev->name)
+ return -ENOMEM;
/* Enable internal test signal, double amplitude, double frequency */
ret = regmap_write(priv->regmap, ADS1298_REG_CONFIG2,
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 9b1814f1965a..a31658b760a4 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -381,7 +381,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
/* Ensure naturally aligned timestamp */
- u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
+ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8) = { };
int i, j = 0;
iio_for_each_active_channel(indio_dev, i) {
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 4ca3f1aaff99..288880346707 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -48,7 +48,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
int i = 0, j;
u16 *data;
- data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ data = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (!data)
goto done;
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 0391c78c2f18..754c8a564ba4 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -730,14 +730,21 @@ static irqreturn_t fxas21002c_trigger_handler(int irq, void *p)
int ret;
mutex_lock(&data->lock);
+ ret = fxas21002c_pm_get(data);
+ if (ret < 0)
+ goto out_unlock;
+
ret = regmap_bulk_read(data->regmap, FXAS21002C_REG_OUT_X_MSB,
data->buffer, CHANNEL_SCAN_MAX * sizeof(s16));
if (ret < 0)
- goto out_unlock;
+ goto out_pm_put;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
data->timestamp);
+out_pm_put:
+ fxas21002c_pm_put(data);
+
out_unlock:
mutex_unlock(&data->lock);
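
The fxas21002c fix brackets the register access with a runtime-PM get/put pair so the trigger handler can never read the chip while it is suspended, and the new label unwinds in reverse order of acquisition. The control flow, sketched with hypothetical chip_* helpers:

        #include <linux/interrupt.h>
        #include <linux/mutex.h>

        struct chip_data { struct mutex lock; };

        int chip_pm_get(struct chip_data *d);           /* powers the device up */
        void chip_pm_put(struct chip_data *d);          /* balances chip_pm_get() */
        int chip_read_fifo(struct chip_data *d);        /* bus access, device must be on */
        void chip_push_samples(struct chip_data *d);

        static irqreturn_t chip_trigger_handler(int irq, void *p)
        {
                struct chip_data *data = p;
                int ret;

                mutex_lock(&data->lock);

                ret = chip_pm_get(data);
                if (ret < 0)
                        goto out_unlock;

                ret = chip_read_fifo(data);
                if (ret < 0)
                        goto out_pm_put;

                chip_push_samples(data);

        out_pm_put:
                chip_pm_put(data);      /* reverse order: put before unlock */
        out_unlock:
                mutex_unlock(&data->lock);
                return IRQ_HANDLED;
        }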
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index 3a07e43e4cf1..18787a43477b 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -403,6 +403,7 @@ struct inv_icm42600_sensor_state {
typedef int (*inv_icm42600_bus_setup)(struct inv_icm42600_state *);
extern const struct regmap_config inv_icm42600_regmap_config;
+extern const struct regmap_config inv_icm42600_spi_regmap_config;
extern const struct dev_pm_ops inv_icm42600_pm_ops;
const struct iio_mount_matrix *
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 561d245c1d64..ef9875d3b79d 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -87,6 +87,21 @@ const struct regmap_config inv_icm42600_regmap_config = {
};
EXPORT_SYMBOL_NS_GPL(inv_icm42600_regmap_config, "IIO_ICM42600");
+/* dedicated regmap for SPI, whose interface does not support burst write */
+const struct regmap_config inv_icm42600_spi_regmap_config = {
+ .name = "inv_icm42600",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x4FFF,
+ .ranges = inv_icm42600_regmap_ranges,
+ .num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges),
+ .volatile_table = inv_icm42600_regmap_volatile_accesses,
+ .rd_noinc_table = inv_icm42600_regmap_rd_noinc_accesses,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_write = true,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, "IIO_ICM42600");
+
struct inv_icm42600_hw {
uint8_t whoami;
const char *name;
@@ -814,6 +829,8 @@ out_unlock:
static int inv_icm42600_resume(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
+ struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
int ret;
mutex_lock(&st->lock);
@@ -834,9 +851,12 @@ static int inv_icm42600_resume(struct device *dev)
goto out_unlock;
/* restore FIFO data streaming */
- if (st->fifo.on)
+ if (st->fifo.on) {
+ inv_sensors_timestamp_reset(&gyro_st->ts);
+ inv_sensors_timestamp_reset(&accel_st->ts);
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_STREAM);
+ }
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
index c55d8e672183..2bd2c4c8e50c 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
@@ -59,7 +59,8 @@ static int inv_icm42600_probe(struct spi_device *spi)
return -EINVAL;
chip = (uintptr_t)match;
- regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config);
+ /* use SPI specific regmap */
+ regmap = devm_regmap_init_spi(spi, &inv_icm42600_spi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
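
The new SPI regmap differs from the shared one only in .use_single_write = true, which tells regmap to split any multi-register write into a series of single-register transfers; that is how hardware whose SPI interface lacks burst (auto-increment) writes is accommodated without touching the callers. A hedged illustration of the effect:

        #include <linux/kernel.h>
        #include <linux/regmap.h>

        /* Illustration only: with .use_single_write set in the config,
         * this one call is issued as three single-register transfers
         * instead of one burst write of three 8-bit values. */
        static int demo_write(struct regmap *map)
        {
                static const u8 vals[] = { 0x01, 0x02, 0x03 };

                return regmap_bulk_write(map, 0x10, vals, ARRAY_SIZE(vals));
        }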
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 324c38764656..e19c5d3137c6 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1193,7 +1193,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
struct kmx61_data *data = kmx61_get_data(indio_dev);
int bit, ret, i = 0;
u8 base;
- s16 buffer[8];
+ s16 buffer[8] = { };
if (indio_dev == data->acc_indio_dev)
base = KMX61_ACC_XOUT_L;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 136b225b6bc8..9050a59129e6 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -500,7 +500,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
return_ptr(chans);
error_free_chans:
- for (i = 0; i < nummaps; i++)
+ for (i = 0; i < mapind; i++)
iio_device_put(chans[i].indio_dev);
return ERR_PTR(ret);
}
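
The inkern fix is the classic partial-unwind bug: on failure the old loop dropped nummaps references even though only mapind had actually been taken, putting devices it never got. The safe idiom is to unwind exactly as far as the acquire loop reached, sketched here with hypothetical acquire()/release() helpers:

        struct res;
        int acquire(struct res *r);
        void release(struct res *r);

        static int acquire_all(struct res *r, int n)
        {
                int i, err;

                for (i = 0; i < n; i++) {
                        err = acquire(&r[i]);
                        if (err)
                                goto unwind;
                }
                return 0;

        unwind:
                while (--i >= 0)        /* only the i entries actually acquired */
                        release(&r[i]);
                return err;
        }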
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 56e32689bb97..63bf729df517 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -746,6 +746,8 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
int i;
int j = 0;
+ memset(&scan, 0, sizeof(scan));
+
iio_for_each_active_channel(indio_dev, i) {
ret = regmap_bulk_read(data->regmap, BH1745_RED_LSB + 2 * i,
&value, 2);
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 337a1332c2c6..67c94be02018 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -105,7 +105,7 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct vcnl4035_data *data = iio_priv(indio_dev);
/* Ensure naturally aligned timestamp */
- u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8);
+ u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8) = { };
int ret;
ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer);
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 2adea84f5b4d..9db1c94dfc18 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -586,6 +586,8 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
} sample;
int err;
+ memset(&sample, 0, sizeof(sample));
+
if (test_bit(0, indio_dev->active_scan_mask)) {
/* Get current pressure from hardware FIFO. */
err = zpa2326_dequeue_pressure(indio_dev, &sample.pressure);
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 0c844137d7aa..02b27f471baa 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -252,6 +252,8 @@ static irqreturn_t tmp006_trigger_handler(int irq, void *p)
} scan;
s32 ret;
+ memset(&scan, 0, sizeof(scan));
+
ret = i2c_smbus_read_word_data(data->client, TMP006_VOBJECT);
if (ret < 0)
goto err;
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 33cca49c8058..7a181cac3cc9 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -5,7 +5,7 @@
# Keep in alphabetical order
config IIO_GTS_KUNIT_TEST
- tristate "Test IIO formatting functions" if !KUNIT_ALL_TESTS
+ tristate "Test IIO gain-time-scale helpers" if !KUNIT_ALL_TESTS
depends on KUNIT
select IIO_GTS_HELPER
select TEST_KUNIT_DEVICE_HELPERS
diff --git a/drivers/iio/test/iio-test-rescale.c b/drivers/iio/test/iio-test-rescale.c
index cbf13337ed1f..bbc6a2e1c2c1 100644
--- a/drivers/iio/test/iio-test-rescale.c
+++ b/drivers/iio/test/iio-test-rescale.c
@@ -652,6 +652,8 @@ static void iio_rescale_test_scale(struct kunit *test)
int rel_ppm;
int ret;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buff);
+
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
@@ -681,6 +683,8 @@ static void iio_rescale_test_offset(struct kunit *test)
int values[2];
int ret;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buff_off);
+
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 64ace0b968f0..91db10515d74 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
int bound_if_index = dev_addr->bound_dev_if;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
+ struct net_device *pdev = NULL;
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
goto out;
@@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
rcu_read_lock();
ndev = rcu_dereference(sgid_attr->ndev);
+ if (ndev->ifindex != bound_if_index) {
+ pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
+ if (pdev) {
+ if (is_vlan_dev(pdev)) {
+ pdev = vlan_dev_real_dev(pdev);
+ if (ndev->ifindex == pdev->ifindex)
+ bound_if_index = pdev->ifindex;
+ }
+ if (is_vlan_dev(ndev)) {
+ pdev = vlan_dev_real_dev(ndev);
+ if (bound_if_index == pdev->ifindex)
+ bound_if_index = ndev->ifindex;
+ }
+ }
+ }
if (!net_eq(dev_net(ndev), dev_addr->net) ||
ndev->ifindex != bound_if_index) {
rdma_put_gid_attr(sgid_attr);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index ff121e59b9c0..cb987ab0177c 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -2833,8 +2833,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
enum rdma_nl_notify_event_type type)
{
struct sk_buff *skb;
+ int ret = -EMSGSIZE;
struct net *net;
- int ret = 0;
void *nlh;
net = read_pnet(&device->coredev.rdma_net);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 66b02fbf077a..5ad14c39d48c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
{
const void __user *res = iter->cur;
- if (iter->cur + len > iter->end)
+ if (len > iter->end - iter->cur)
return (void __force __user *)ERR_PTR(-ENOSPC);
iter->cur += len;
return res;
@@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
if (ret)
return ret;
- wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
+ wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
+ cmd.wr_count));
if (IS_ERR(wqes))
return PTR_ERR(wqes);
- sgls = uverbs_request_next_ptr(
- &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
+ sgls = uverbs_request_next_ptr(&iter,
+ size_mul(cmd.sge_count,
+ sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return PTR_ERR(sgls);
ret = uverbs_request_finish(&iter);
@@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);
- wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
+ wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
if (IS_ERR(wqes))
return ERR_CAST(wqes);
- sgls = uverbs_request_next_ptr(
- iter, sge_count * sizeof(struct ib_uverbs_sge));
+ sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
+ sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return ERR_CAST(sgls);
ret = uverbs_request_finish(iter);
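
Two distinct overflow classes are closed in this uverbs hunk. First, `iter->cur + len` can wrap around the top of the address space for a large user-supplied len, so the bound is rewritten as `len > iter->end - iter->cur`, where the subtraction cannot overflow given the iterator invariant cur <= end. Second, products such as wqe_size * wr_count come straight from user space, so they now go through size_mul() from <linux/overflow.h>, which saturates at SIZE_MAX instead of wrapping. Both checks, sketched:

        #include <linux/overflow.h>
        #include <linux/types.h>

        struct req_iter {
                const char __user *cur;
                const char __user *end; /* invariant: cur <= end */
        };

        static bool req_has_room(const struct req_iter *iter, size_t len)
        {
                /* "iter->cur + len > iter->end" could wrap; this cannot. */
                return len <= (size_t)(iter->end - iter->cur);
        }

        static bool req_has_array(const struct req_iter *iter,
                                  size_t elem_size, size_t count)
        {
                /* size_mul() saturates at SIZE_MAX rather than wrapping. */
                return req_has_room(iter, size_mul(elem_size, count));
        }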
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 82023394e330..e3d26bd6de05 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
- ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
+ ib_attr->hw_ver = rdev->en_dev->pdev->revision;
ib_attr->max_qp = dev_attr->max_qp;
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
ib_attr->device_cap_flags =
@@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
+ bnxt_re_debug_rem_qpinfo(rdev, qp);
+
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
- if (rc) {
+ if (rc)
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
- return rc;
- }
if (rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
@@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
- rc = bnxt_re_destroy_gsi_sqp(qp);
- if (rc)
- return rc;
- }
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
+ bnxt_re_destroy_gsi_sqp(qp);
mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
@@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
- bnxt_re_debug_rem_qpinfo(rdev, qp);
-
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
}
- if (qp_attr_mask & IB_QP_PATH_MTU) {
- qp->qplib_qp.modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
- qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
- } else if (qp_attr->qp_state == IB_QPS_RTR) {
- qp->qplib_qp.modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
- qp->qplib_qp.path_mtu =
- __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
- qp->qplib_qp.mtu =
- ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ if (qp_attr->qp_state == IB_QPS_RTR) {
+ enum ib_mtu qpmtu;
+
+ qpmtu = iboe_get_mtu(rdev->netdev->mtu);
+ if (qp_attr_mask & IB_QP_PATH_MTU) {
+ if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
+ ib_mtu_enum_to_int(qpmtu))
+ return -EINVAL;
+ qpmtu = qp_attr->path_mtu;
+ }
+
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+ qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
+ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
}
if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = qplib_qp->retry_cnt;
qp_attr->rnr_retry = qplib_qp->rnr_retry;
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+ qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
qp_attr->rq_psn = qplib_qp->rq.psn;
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
qp_attr->sq_psn = qplib_qp->sq.psn;
@@ -2824,7 +2822,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
- bnxt_ud_qp_hw_stall_workaround(qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@@ -2936,7 +2935,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
- bnxt_ud_qp_hw_stall_workaround(qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index ac59f1d73b15..fbb16a411d6a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+static inline u32 __to_ib_port_num(u16 port_id)
+{
+ return (u32)port_id + 1;
+}
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b7af0d5ff3b6..c143f273b759 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1715,11 +1715,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
- int mask = IB_QP_STATE;
- struct ib_qp_attr qp_attr;
struct bnxt_re_qp *qp;
- qp_attr.qp_state = IB_QPS_ERR;
mutex_lock(&rdev->qp_lock);
list_for_each_entry(qp, &rdev->qp_list, list) {
/* Modify the state of all QPs except QP1/Shadow QP */
@@ -1727,12 +1724,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
if (qp->qplib_qp.state !=
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
qp->qplib_qp.state !=
- CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ CMDQ_MODIFY_QP_NEW_STATE_ERR)
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
1, IB_EVENT_QP_FATAL);
- bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
- NULL);
- }
}
}
mutex_unlock(&rdev->qp_lock);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index e42abf5be6c0..5336f74297f8 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
return rc;
-
- srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
- GFP_KERNEL);
- if (!srq->swq) {
- rc = -ENOMEM;
- goto fail;
- }
srq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
spin_lock_init(&srq->lock);
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
- for (idx = 0; idx < srq->hwq.max_elements; idx++)
- srq->swq[idx].next_idx = idx + 1;
- srq->swq[srq->last_idx].next_idx = -1;
+ if (!srq->hwq.is_user) {
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+ }
srq->id = le32_to_cpu(resp.xid);
srq->dbinfo.hwq = &srq->hwq;
@@ -1000,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
u16 nsge;
- if (res->dattr)
- qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
-
+ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP,
@@ -1034,7 +1033,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
: 0;
/* Update msn tbl size */
if (qp->is_host_msn_tbl && psn_sz) {
- hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ else
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
qp->msn_tbl_sz = hwq_attr.aux_depth;
qp->msn = 0;
}
@@ -1044,13 +1048,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (rc)
return rc;
- rc = bnxt_qplib_alloc_init_swq(sq);
- if (rc)
- goto fail_sq;
-
- if (psn_sz)
- bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ if (!sq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto fail_sq;
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ }
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1076,9 +1081,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto sq_swq;
- rc = bnxt_qplib_alloc_init_swq(rq);
- if (rc)
- goto fail_rq;
+ if (!rq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto fail_rq;
+ }
req.rq_size = cpu_to_le32(rq->max_wqe);
pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1174,9 +1181,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
+ spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
return 0;
fail:
@@ -1283,7 +1292,8 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
}
}
-static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
+static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp,
struct cmdq_modify_qp *req)
{
u32 mandatory_flags = 0;
@@ -1298,6 +1308,14 @@ static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
}
+ if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+ }
+
if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
@@ -1338,7 +1356,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
is_optimized_state_transition(qp))
- bnxt_set_mandatory_attributes(qp, &req);
+ bnxt_set_mandatory_attributes(res, qp, &req);
}
bmask = qp->modify_flags;
req.modify_mask = cpu_to_le32(qp->modify_flags);
@@ -1521,6 +1539,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+ qp->port_id = le16_to_cpu(sb->port_id);
bail:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
@@ -2667,10 +2686,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
} else {
/* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
}
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
cqe->status = CQ_REQ_STATUS_OK;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ef3424c81345..0660101b5310 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
u32 size;
};
-#define BNXT_QPLIB_QP_MAX_SGL 6
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
- struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
+ struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
int num_sge;
/* Max inline data is 96 bytes */
u32 inline_len;
@@ -299,6 +298,7 @@ struct bnxt_qplib_qp {
u32 dest_qpn;
u8 smac[6];
u16 vlan_id;
+ u16 port_id;
u8 nw_type;
struct bnxt_qplib_ah ah;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5e90ea232de8..17e62f22683b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -424,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
/* Prevent posting if f/w is not in a state to process */
if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
- return bnxt_qplib_map_rc(opcode);
+ return -ENXIO;
+
if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
return -ETIMEDOUT;
@@ -493,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __send_message_basic_sanity(rcfw, msg, opcode);
if (rc)
- return rc;
+ return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
rc = __send_message(rcfw, msg, opcode);
if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 21fb148713a6..cbfc49a1a56d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -584,6 +584,11 @@ static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
}
+static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
+{
+ return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
+}
+
static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
{
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7e20ae3d2c4f..9df3e3271577 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -129,12 +129,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_qp_init_rd_atom =
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
- attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
- /*
- * 128 WQEs needs to be reserved for the HW (8916). Prevent
- * reporting the max number
- */
- attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
+ /*
+ * 128 WQEs need to be reserved for the HW (8916). Prevent
+ * reporting the max number on legacy devices
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ }
+
+ /* Adjust for max_qp_wqes for variable wqe */
+ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index a98fc9c2313e..0ee60fdc18b3 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2215,6 +2215,7 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f84521be3bea..605562122ecc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -931,6 +931,7 @@ struct hns_roce_hem_item {
size_t count; /* max ba numbers */
int start; /* start buf offset in this hem */
int end; /* end buf offset in this hem */
+ bool exist_bt;
};
/* All HEM items are linked in a tree structure */
@@ -959,6 +960,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
}
+ hem->exist_bt = exist_bt;
hem->count = count;
hem->start = start;
hem->end = end;
@@ -969,22 +971,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_item *hem, bool exist_bt)
+ struct hns_roce_hem_item *hem)
{
- if (exist_bt)
+ if (hem->exist_bt)
dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
hem->addr, hem->dma_addr);
kfree(hem);
}
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
- struct list_head *head, bool exist_bt)
+ struct list_head *head)
{
struct hns_roce_hem_item *hem, *temp_hem;
list_for_each_entry_safe(hem, temp_hem, head, list) {
list_del(&hem->list);
- hem_list_free_item(hr_dev, hem, exist_bt);
+ hem_list_free_item(hr_dev, hem);
}
}
@@ -1084,6 +1086,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
for (i = 0; i < region_cnt; i++) {
r = (struct hns_roce_buf_region *)&regions[i];
+ /* when r->hopnum == 0, the region should not occupy root_ba. */
+ if (!r->hopnum)
+ continue;
+
if (r->hopnum > 1) {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step > 0)
@@ -1177,7 +1183,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
err_exit:
for (level = 1; level < hopnum; level++)
- hem_list_free_all(hr_dev, &temp_list[level], true);
+ hem_list_free_all(hr_dev, &temp_list[level]);
return ret;
}
@@ -1218,16 +1224,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
{
struct hns_roce_hem_item *hem;
+ /* This is on the has_mtt branch; if r->hopnum
+ * is 0, there is no root_ba to reuse for the
+ * region's fake hem, so a dma_alloc request is
+ * necessary here.
+ */
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
- r->count, false);
+ r->count, !r->hopnum);
if (!hem)
return -ENOMEM;
- hem_list_assign_bt(hem, cpu_base, phy_base);
+ /* The root_ba can be reused only when r->hopnum > 0. */
+ if (r->hopnum)
+ hem_list_assign_bt(hem, cpu_base, phy_base);
list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head);
- return r->count;
+ /* If r->hopnum == 0, 0 is returned,
+ * so that the root_bt entry is not occupied.
+ */
+ return r->hopnum ? r->count : 0;
}
static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
@@ -1271,7 +1287,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
return -ENOMEM;
total = 0;
- for (i = 0; i < region_cnt && total < max_ba_num; i++) {
+ for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1337,9 +1353,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
region_cnt);
if (ret) {
for (i = 0; i < region_cnt; i++)
- hem_list_free_all(hr_dev, &head.branch[i], false);
+ hem_list_free_all(hr_dev, &head.branch[i]);
- hem_list_free_all(hr_dev, &head.root, true);
+ hem_list_free_all(hr_dev, &head.root);
}
return ret;
@@ -1402,10 +1418,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
- hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
- j != 0);
+ hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
- hem_list_free_all(hr_dev, &hem_list->root_bt, true);
+ hem_list_free_all(hr_dev, &hem_list->root_bt);
INIT_LIST_HEAD(&hem_list->btm_bt);
hem_list->root_ba = 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 697b17cca02e..0144e7210d05 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -468,7 +468,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
ret = set_ud_opcode(ud_sq_wqe, wr);
- if (WARN_ON(ret))
+ if (WARN_ON_ONCE(ret))
return ret;
ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
@@ -572,7 +572,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
- if (WARN_ON(ret))
+ if (WARN_ON_ONCE(ret))
return ret;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
@@ -670,6 +670,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ flush_cqe(hr_dev, qp);
+ return;
+ }
/* All kinds of DirectWQE have the same header field layout */
hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
@@ -5619,6 +5623,9 @@ static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
{
struct hns_roce_dip *hr_dip = hr_qp->dip;
+ if (!hr_dip)
+ return;
+
xa_lock(&hr_dev->qp_table.dip_xa);
hr_dip->qp_cnt--;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index bf30b3a65a9b..55b9283bfc6f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -814,11 +814,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
- /* if hopnum is 0, no need to map pages in this region */
- if (!r->hopnum) {
- mapped_cnt += r->count;
- continue;
- }
if (r->offset + r->count > page_cnt) {
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index bc7930d0c564..f5b59d02f4d3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2839,7 +2839,7 @@ static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
int err;
*num_plane = 0;
- if (!MLX5_CAP_GEN(mdev, ib_virt))
+ if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
return 0;
err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
@@ -3639,7 +3639,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
list) {
if (dev->sys_image_guid == mpi->sys_image_guid &&
- (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
+ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
bound = mlx5_ib_bind_slave_port(dev, mpi);
}
@@ -4785,7 +4786,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
mutex_lock(&mlx5_ib_multiport_mutex);
list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
- if (dev->sys_image_guid == mpi->sys_image_guid)
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
bound = mlx5_ib_bind_slave_port(dev, mpi);
if (bound) {
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 255677bc12b2..1ba4a0c8726a 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -40,6 +40,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
+ struct net_device *ndev;
+
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
rxe->attr.vendor_id = RXE_VENDOR_ID;
@@ -71,8 +73,15 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
+
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
- rxe->ndev->dev_addr);
+ ndev->dev_addr);
+
+ dev_put(ndev);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@@ -109,10 +118,15 @@ static void rxe_init_port_param(struct rxe_port *port)
static void rxe_init_ports(struct rxe_dev *rxe)
{
struct rxe_port *port = &rxe->port;
+ struct net_device *ndev;
rxe_init_port_param(port);
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
- rxe->ndev->dev_addr);
+ ndev->dev_addr);
+ dev_put(ndev);
spin_lock_init(&port->port_lock);
}
@@ -167,12 +181,13 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
/* called by ifc layer to create new rxe device.
* The caller should allocate memory for rxe by calling ib_alloc_device.
*/
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+ struct net_device *ndev)
{
rxe_init(rxe);
rxe_set_mtu(rxe, mtu);
- return rxe_register_device(rxe, ibdev_name);
+ return rxe_register_device(rxe, ibdev_name, ndev);
}
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index d8fb2c7af30a..fe7f97066732 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -139,7 +139,8 @@ enum resp_states {
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+ struct net_device *ndev);
void rxe_rcv(struct sk_buff *skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 86cc2e18a7fd..07ff47bae31d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -31,10 +31,19 @@
static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
+ struct net_device *ndev;
+ int ret;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return -ENODEV;
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- return dev_mc_add(rxe->ndev, ll_addr);
+ ret = dev_mc_add(ndev, ll_addr);
+ dev_put(ndev);
+
+ return ret;
}
/**
@@ -47,10 +56,19 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
+ struct net_device *ndev;
+ int ret;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return -ENODEV;
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- return dev_mc_del(rxe->ndev, ll_addr);
+ ret = dev_mc_del(ndev, ll_addr);
+ dev_put(ndev);
+
+ return ret;
}
/**
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 75d1407db52d..8cc64ceeb356 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -524,7 +524,16 @@ out:
*/
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
- return rxe->ndev->name;
+ struct net_device *ndev;
+ char *ndev_name;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return NULL;
+ ndev_name = ndev->name;
+ dev_put(ndev);
+
+ return ndev_name;
}
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
@@ -536,10 +545,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
if (!rxe)
return -ENOMEM;
- rxe->ndev = ndev;
ib_mark_name_assigned_by_user(&rxe->ib_dev);
- err = rxe_add(rxe, ndev->mtu, ibdev_name);
+ err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
if (err) {
ib_dealloc_device(&rxe->ib_dev);
return err;
@@ -587,10 +595,18 @@ void rxe_port_down(struct rxe_dev *rxe)
void rxe_set_port_state(struct rxe_dev *rxe)
{
- if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
+ struct net_device *ndev;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev))
rxe_port_up(rxe);
else
rxe_port_down(rxe);
+
+ dev_put(ndev);
}
static int rxe_notify(struct notifier_block *not_blk,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 5c18f7e342f2..8a5fc20fd186 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -41,6 +41,7 @@ static int rxe_query_port(struct ib_device *ibdev,
u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(ibdev);
+ struct net_device *ndev;
int err, ret;
if (port_num != 1) {
@@ -49,6 +50,12 @@ static int rxe_query_port(struct ib_device *ibdev,
goto err_out;
}
+ ndev = rxe_ib_device_get_netdev(ibdev);
+ if (!ndev) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
memcpy(attr, &rxe->port.attr, sizeof(*attr));
mutex_lock(&rxe->usdev_lock);
@@ -57,13 +64,14 @@ static int rxe_query_port(struct ib_device *ibdev,
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
- else if (dev_get_flags(rxe->ndev) & IFF_UP)
+ else if (dev_get_flags(ndev) & IFF_UP)
attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
mutex_unlock(&rxe->usdev_lock);
+ dev_put(ndev);
return ret;
err_out:
@@ -1425,9 +1433,16 @@ static const struct attribute_group rxe_attr_group = {
static int rxe_enable_driver(struct ib_device *ib_dev)
{
struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+ struct net_device *ndev;
+
+ ndev = rxe_ib_device_get_netdev(ib_dev);
+ if (!ndev)
+ return -ENODEV;
rxe_set_port_state(rxe);
- dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
+ dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
+
+ dev_put(ndev);
return 0;
}
@@ -1495,7 +1510,8 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+ struct net_device *ndev)
{
int err;
struct ib_device *dev = &rxe->ib_dev;
@@ -1507,13 +1523,13 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
dev->num_comp_vectors = num_possible_cpus();
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
- rxe->ndev->dev_addr);
+ ndev->dev_addr);
dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
ib_set_device_ops(dev, &rxe_dev_ops);
- err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
+ err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 3c1354f82283..6573ceec0ef5 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -370,6 +370,7 @@ struct rxe_port {
u32 qp_gsi_index;
};
+#define RXE_PORT 1
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
@@ -377,8 +378,6 @@ struct rxe_dev {
int max_inline_data;
struct mutex usdev_lock;
- struct net_device *ndev;
-
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@@ -406,6 +405,11 @@ struct rxe_dev {
struct crypto_shash *tfm;
};
+static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
+{
+ return ib_device_get_netdev(dev, RXE_PORT);
+}
+
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
atomic64_inc(&rxe->stats_counters[index]);
@@ -471,6 +475,7 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
return to_rpd(mw->ibmw.pd);
}
-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+ struct net_device *ndev);
#endif /* RXE_VERBS_H */
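
Every rxe hunk above, and the siw hunks that follow, replace a cached pointer (rxe->ndev, sdev->netdev) with the same bracket: look the net_device up through ib_device_get_netdev() at the point of use, bail out if the device is gone, and dev_put() the reference when done. That turns a pointer that could silently go stale into a refcounted, failure-checked access. The bracket in isolation, with a hypothetical consumer:

        #include <rdma/ib_verbs.h>
        #include <linux/netdevice.h>

        static int report_mtu(struct ib_device *ibdev)
        {
                struct net_device *ndev;
                int mtu;

                ndev = ib_device_get_netdev(ibdev, 1); /* takes a reference */
                if (!ndev)
                        return -ENODEV;                /* netdev already gone */

                mtu = READ_ONCE(ndev->mtu);

                dev_put(ndev);                         /* balance the get */
                return mtu;
        }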
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 86d4d6a2170e..ea5eee50dc39 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -46,6 +46,9 @@
*/
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
+/* There is always exactly one port (port 1) per siw device */
+#define SIW_PORT 1
+
struct siw_dev_cap {
int max_qp;
int max_qp_wr;
@@ -69,16 +72,12 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
- struct net_device *netdev;
struct siw_dev_cap attrs;
u32 vendor_part_id;
int numa_node;
char raw_gid[ETH_ALEN];
- /* physical port state (only one port per device) */
- enum ib_port_state state;
-
spinlock_t lock;
struct xarray qp_xa;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 86323918a570..708b13993fdf 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
+ struct net_device *ndev = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
int addr_family = id->local_addr.ss_family;
int rv = 0;
@@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
/* For wildcard addr, limit binding to current device only */
- if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
- s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
+ ndev = ib_device_get_netdev(id->device, SIW_PORT);
+ if (ndev) {
+ s->sk->sk_bound_dev_if = ndev->ifindex;
+ } else {
+ rv = -ENODEV;
+ goto error;
+ }
+ }
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in));
} else {
@@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}
/* For wildcard addr, limit binding to current device only */
- if (ipv6_addr_any(&laddr->sin6_addr))
- s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+ if (ipv6_addr_any(&laddr->sin6_addr)) {
+ ndev = ib_device_get_netdev(id->device, SIW_PORT);
+ if (ndev) {
+ s->sk->sk_bound_dev_if = ndev->ifindex;
+ } else {
+ rv = -ENODEV;
+ goto error;
+ }
+ }
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in6));
}
@@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
+ dev_put(ndev);
siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
@@ -1879,6 +1893,7 @@ error:
siw_cep_set_free_and_put(cep);
}
sock_release(s);
+ dev_put(ndev);
return rv;
}
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 17abef48abcd..14d3103aee6f 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -287,7 +287,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
return NULL;
base_dev = &sdev->base_dev;
- sdev->netdev = netdev;
if (netdev->addr_len) {
memcpy(sdev->raw_gid, netdev->dev_addr,
@@ -381,12 +380,10 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
switch (event) {
case NETDEV_UP:
- sdev->state = IB_PORT_ACTIVE;
siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_DOWN:
- sdev->state = IB_PORT_DOWN;
siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
break;
@@ -407,12 +404,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
break;
/*
- * Todo: Below netdev events are currently not handled.
+ * All other events are not handled
*/
- case NETDEV_CHANGEMTU:
- case NETDEV_CHANGE:
- break;
-
default:
break;
}
@@ -442,12 +435,6 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
sdev = siw_device_create(netdev);
if (sdev) {
dev_dbg(&netdev->dev, "siw: new device\n");
-
- if (netif_running(netdev) && netif_carrier_ok(netdev))
- sdev->state = IB_PORT_ACTIVE;
- else
- sdev->state = IB_PORT_DOWN;
-
ib_mark_name_assigned_by_user(&sdev->base_dev);
rv = siw_device_register(sdev, basedev_name);
if (rv)
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 986666c19378..7ca0297d68a4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -171,21 +171,29 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr)
{
- struct siw_device *sdev = to_siw_dev(base_dev);
+ struct net_device *ndev;
int rv;
memset(attr, 0, sizeof(*attr));
rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
&attr->active_width);
+ if (rv)
+ return rv;
+
+ ndev = ib_device_get_netdev(base_dev, SIW_PORT);
+ if (!ndev)
+ return -ENODEV;
+
attr->gid_tbl_len = 1;
attr->max_msg_sz = -1;
- attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
- attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
- attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
+ attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+ attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
+ attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
- attr->state = sdev->state;
/*
* All zero
*
@@ -199,6 +207,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
* attr->subnet_timeout = 0;
* attr->init_type_reply = 0;
*/
+ dev_put(ndev);
return rv;
}
@@ -505,21 +514,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
struct siw_qp *qp;
- struct siw_device *sdev;
+ struct net_device *ndev;
- if (base_qp && qp_attr && qp_init_attr) {
+ if (base_qp && qp_attr && qp_init_attr)
qp = to_siw_qp(base_qp);
- sdev = to_siw_dev(base_qp->device);
- } else {
+ else
return -EINVAL;
- }
+
+ ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
+ if (!ndev)
+ return -ENODEV;
+
qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
qp_attr->cap.max_send_wr = qp->attrs.sq_size;
qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
- qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+ qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
qp_attr->max_rd_atomic = qp->attrs.irq_size;
qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
@@ -534,6 +546,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
+ dev_put(ndev);
return 0;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index e83d95647852..ef4abdea3c2d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -349,6 +349,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
+ struct ib_sge list;
u32 imm;
int err;
@@ -401,7 +402,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
if (always_invalidate) {
- struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &srv_path->mrs[id->msg_id];
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index b956e4050f38..88f311c11020 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -116,6 +116,11 @@ struct icc_provider *icc_clk_register(struct device *dev,
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
+ if (!node->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
node->data = &qp->clocks[i];
icc_node_add(node, provider);
/* link to the next node, slave */
@@ -129,6 +134,11 @@ struct icc_provider *icc_clk_register(struct device *dev,
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
+ if (!node->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/* no data for slave node */
icc_node_add(node, provider);
onecell->nodes[j++] = node;
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index a8ed435f696c..ea1042d38128 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -503,6 +503,7 @@ int qnoc_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->num_nodes = num_nodes;
qp->num_intf_clks = cd_num;
for (i = 0; i < cd_num; i++)
@@ -597,7 +598,6 @@ regmap_done:
data->nodes[i] = node;
}
- data->num_nodes = num_nodes;
clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 92244cfa0464..8c3ec5734f1e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2045,7 +2045,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
- guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
+ guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
/* Unmap request? */
if (!info)
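
guard() comes from <linux/cleanup.h>: it acquires the lock at the declaration and releases it automatically on every exit from the scope, so no unlock labels are needed. The hunk above only swaps the guard class from raw_spinlock_irq to raw_spinlock, presumably because this path already runs with interrupts disabled. Basic usage, sketched:

        #include <linux/cleanup.h>
        #include <linux/spinlock.h>

        static DEFINE_RAW_SPINLOCK(demo_lock);
        static int demo_state;

        static int demo_update(int v)
        {
                /* Lock held from here to every return below. */
                guard(raw_spinlock)(&demo_lock);

                if (v < 0)
                        return -EINVAL; /* unlocks automatically */

                demo_state = v;
                return 0;               /* unlocks automatically */
        }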
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 79d8cc80693c..76dce0aac246 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1522,7 +1522,7 @@ static int gic_retrigger(struct irq_data *data)
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
- if (cmd == CPU_PM_EXIT) {
+ if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
if (gic_dist_security_disabled())
gic_enable_redist(true);
gic_cpu_sys_reg_enable();
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index bb92fd85e975..0b4312152024 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -186,7 +186,8 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ IRQCHIP_SKIP_SET_WAKE;
gc->chip_types[0].regs.ack = reg_offs->pend;
gc->chip_types[0].regs.mask = reg_offs->enable;
gc->chip_types[0].regs.type = reg_offs->ctrl;
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 1eeb0d0156ce..0ee7b6b71f5f 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -35,11 +35,10 @@ void __init irqchip_init(void)
int platform_irqchip_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *par_np = of_irq_find_parent(np);
+ struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
if (!irq_init_cb) {
- of_node_put(par_np);
return -EINVAL;
}
@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller can check for specific domains as necessary.
*/
if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
- of_node_put(par_np);
return -EPROBE_DEFER;
}
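The conversion uses the scope-based cleanup attribute from <linux/cleanup.h>; a minimal sketch (illustrative device-tree path):

	struct device_node *np __free(device_node) = of_find_node_by_path("/soc");

	if (!np)
		return -ENODEV;
	/* use np; of_node_put(np) runs automatically when np goes out of
	 * scope, so no explicit put is needed on any return path */
	return 0;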
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index b461b1bed25b..369d72f59b3c 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -215,7 +215,7 @@ static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write,
}
/* file(s) in /proc/sys/dev/mac_hid */
-static struct ctl_table mac_hid_files[] = {
+static const struct ctl_table mac_hid_files[] = {
{
.procname = "mouse_button_emulation",
.data = &mouse_emulate_buttons,
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 1e9db8e4acdf..0b1870a09e1f 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -61,6 +61,19 @@ config MD_BITMAP_FILE
various kernel APIs and can only work with files on a file system not
actually sitting on the MD device.
+config MD_LINEAR
+ tristate "Linear (append) mode"
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, then your multiple devices driver will be able to
+ use the so-called linear mode, i.e. it will combine the hard disk
+ partitions by simply appending one to the other.
+
+ To compile this as a module, choose M here: the module
+ will be called linear.
+
+ If unsure, say Y.
+
config MD_RAID0
tristate "RAID-0 (striping) mode"
depends on BLK_DEV_MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 476a214e4bdc..87bdfc9fe14c 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,12 +29,14 @@ dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
+linear-y += md-linear.o
# Note: link order is important. All raid personalities
# must come before md.o, as they each initialise
# themselves, and md.o may use the personalities when it
# is auto-initialised.
+obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index ef6abf33f926..45ca134cbf02 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
- bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+ bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_private = &io->cl;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index c1d28e365910..453efbbdc8ee 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -334,7 +334,7 @@ static void dirty_init(struct keybuf_key *w)
bio_init(bio, NULL, bio->bi_inline_vecs,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
- bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+ bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_private = w;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index ec5db1478b2f..18ae45dcbfb2 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -442,7 +442,7 @@ static int ebs_iterate_devices(struct dm_target *ti,
static struct target_type ebs_target = {
.name = "ebs",
.version = {1, 0, 1},
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .features = 0,
.module = THIS_MODULE,
.ctr = ebs_ctr,
.dtr = ebs_dtr,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 499f8cc8a39f..e23076f7ece2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -547,7 +547,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
md->tag_set->ops = &dm_mq_ops;
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+ md->tag_set->flags = BLK_MQ_F_STACKING;
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md->tag_set->driver_data = md;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index bf0f9dddd146..05cf4e3f2bbe 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2332,10 +2332,9 @@ static struct thin_c *get_first_thin(struct pool *pool)
struct thin_c *tc = NULL;
rcu_read_lock();
- if (!list_empty(&pool->active_thins)) {
- tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+ tc = list_first_or_null_rcu(&pool->active_thins, struct thin_c, list);
+ if (tc)
thin_get(tc);
- }
rcu_read_unlock();
return tc;
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 62b1a44b8dd2..0c41949db784 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -40,35 +40,23 @@ static inline u64 fec_interleave(struct dm_verity *v, u64 offset)
}
/*
- * Decode an RS block using Reed-Solomon.
- */
-static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
- u8 *data, u8 *fec, int neras)
-{
- int i;
- uint16_t par[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
-
- for (i = 0; i < v->fec->roots; i++)
- par[i] = fec[i];
-
- return decode_rs8(fio->rs, data, par, v->fec->rsn, NULL, neras,
- fio->erasures, 0, NULL);
-}
-
-/*
* Read error-correcting codes for the requested RS block. Returns a pointer
* to the data block. Caller is responsible for releasing buf.
*/
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
- unsigned int *offset, struct dm_buffer **buf,
- unsigned short ioprio)
+ unsigned int *offset, unsigned int par_buf_offset,
+ struct dm_buffer **buf, unsigned short ioprio)
{
u64 position, block, rem;
u8 *res;
+ /* Part of the parity bytes has already been read, skip to the next block */
+ if (par_buf_offset)
+ index++;
+
position = (index + rsb) * v->fec->roots;
block = div64_u64_rem(position, v->fec->io_size, &rem);
- *offset = (unsigned int)rem;
+ *offset = par_buf_offset ? 0 : (unsigned int)rem;
res = dm_bufio_read_with_ioprio(v->fec->bufio, block, buf, ioprio);
if (IS_ERR(res)) {
@@ -128,11 +116,13 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
{
int r, corrected = 0, res;
struct dm_buffer *buf;
- unsigned int n, i, offset;
+ unsigned int n, i, j, offset, par_buf_offset = 0;
+ uint16_t par_buf[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
u8 *par, *block;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
+ par = fec_read_parity(v, rsb, block_offset, &offset,
+ par_buf_offset, &buf, bio->bi_ioprio);
if (IS_ERR(par))
return PTR_ERR(par);
@@ -142,7 +132,11 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
*/
fec_for_each_buffer_rs_block(fio, n, i) {
block = fec_buffer_rs_block(v, fio, n, i);
- res = fec_decode_rs8(v, fio, block, &par[offset], neras);
+ for (j = 0; j < v->fec->roots - par_buf_offset; j++)
+ par_buf[par_buf_offset + j] = par[offset + j];
+ /* Decode an RS block using Reed-Solomon */
+ res = decode_rs8(fio->rs, block, par_buf, v->fec->rsn,
+ NULL, neras, fio->erasures, 0, NULL);
if (res < 0) {
r = res;
goto error;
@@ -155,12 +149,22 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
if (block_offset >= 1 << v->data_dev_block_bits)
goto done;
- /* read the next block when we run out of parity bytes */
- offset += v->fec->roots;
+ /* Read the next block when we run out of parity bytes */
+ offset += (v->fec->roots - par_buf_offset);
+ /* Check if parity bytes are split between blocks */
+ if (offset < v->fec->io_size && (offset + v->fec->roots) > v->fec->io_size) {
+ par_buf_offset = v->fec->io_size - offset;
+ for (j = 0; j < par_buf_offset; j++)
+ par_buf[j] = par[offset + j];
+ offset += par_buf_offset;
+ } else
+ par_buf_offset = 0;
+
if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
+ par = fec_read_parity(v, rsb, block_offset, &offset,
+ par_buf_offset, &buf, bio->bi_ioprio);
if (IS_ERR(par))
return PTR_ERR(par);
}
@@ -250,7 +254,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
bufio = v->bufio;
}
- bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio_prio(bio));
+ bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio->bi_ioprio);
if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
@@ -724,10 +728,7 @@ int verity_fec_ctr(struct dm_verity *v)
return -E2BIG;
}
- if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
- f->io_size = 1 << v->data_dev_block_bits;
- else
- f->io_size = v->fec->roots << SECTOR_SHIFT;
+ f->io_size = 1 << v->data_dev_block_bits;
f->bufio = dm_bufio_client_create(f->dev->bdev,
f->io_size,
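A toy walk-through of the split-parity condition introduced above, with hypothetical sizes:

	/* io_size = 4096 bytes per dm-bufio block, roots = 10 parity bytes
	 * per RS block; a buffer can end in the middle of a parity run */
	unsigned int io_size = 4096, roots = 10, offset = 4090;
	unsigned int par_buf_offset = 0;

	if (offset < io_size && offset + roots > io_size)
		par_buf_offset = io_size - offset;	/* 6 bytes carried over */

The remaining 4 bytes are then taken from the start of the next buffer, which is why fec_read_parity() now returns *offset = 0 whenever par_buf_offset is non-zero.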
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 47d595f6a76e..e86c1431b108 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -321,7 +321,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
} else {
data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
- &buf, bio_prio(bio));
+ &buf, bio->bi_ioprio);
}
if (IS_ERR(data))
@@ -789,7 +789,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
verity_fec_init_io(io);
- verity_submit_prefetch(v, io, bio_prio(bio));
+ verity_submit_prefetch(v, io, bio->bi_ioprio);
submit_bio_noacct(bio);
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index b2a00f213c2c..4b80165afd23 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -49,6 +49,7 @@ static int md_setup_ents __initdata;
* instead of just one. -- KTK
* 18May2000: Added support for persistent-superblock arrays:
* md=n,0,factor,fault,device-list uses RAID0 for device n
+ * md=n,-1,factor,fault,device-list uses LINEAR for device n
* md=n,device-list reads a RAID superblock from the devices
* elements in device-list are read by name_to_kdev_t so can be
* a hex number or something like /dev/hda1 /dev/sdb
@@ -87,7 +88,7 @@ static int __init md_setup(char *str)
md_setup_ents++;
switch (get_option(&str, &level)) { /* RAID level */
case 2: /* could be 0 or -1.. */
- if (level == 0) {
+ if (level == 0 || level == LEVEL_LINEAR) {
if (get_option(&str, &factor) != 2 || /* Chunk Size */
get_option(&str, &fault) != 2) {
printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
@@ -95,7 +96,10 @@ static int __init md_setup(char *str)
}
md_setup_args[ent].level = level;
md_setup_args[ent].chunk = 1 << (factor+12);
- pername = "raid0";
+ if (level == LEVEL_LINEAR)
+ pername = "linear";
+ else
+ pername = "raid0";
break;
}
fallthrough;
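For illustration (hypothetical device names): booting with md=0,-1,4,0,/dev/sda1,/dev/sdb1 requests a linear md0, since level -1 selects the "linear" personality, and factor 4 gives a rounding chunk of 1 << (4 + 12) = 64 KiB per the chunk computation in the hunk above.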
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index c3a42dd66ce5..ec4ecd96e6b1 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -682,7 +682,7 @@ static void bitmap_update_sb(void *data)
return;
if (!bitmap->storage.sb_page) /* no superblock */
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared)
/* rocking back to read-only */
@@ -702,7 +702,7 @@ static void bitmap_update_sb(void *data)
sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
bitmap_info.space);
- kunmap_atomic(sb);
+ kunmap_local(sb);
if (bitmap->storage.file)
write_file_page(bitmap, bitmap->storage.sb_page, 1);
@@ -717,7 +717,7 @@ static void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %u\n", le32_to_cpu(sb->version));
@@ -736,7 +736,7 @@ static void bitmap_print_sb(struct bitmap *bitmap)
pr_debug(" sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2);
pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
- kunmap_atomic(sb);
+ kunmap_local(sb);
}
/*
@@ -760,7 +760,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
return -ENOMEM;
bitmap->storage.sb_index = 0;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->magic = cpu_to_le32(BITMAP_MAGIC);
sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -768,7 +768,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
chunksize = bitmap->mddev->bitmap_info.chunksize;
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
- kunmap_atomic(sb);
+ kunmap_local(sb);
pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
@@ -803,7 +803,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
bitmap->mddev->bitmap_info.nodes = 0;
- kunmap_atomic(sb);
+ kunmap_local(sb);
return 0;
}
@@ -865,7 +865,7 @@ re_read:
return err;
err = -EINVAL;
- sb = kmap_atomic(sb_page);
+ sb = kmap_local_page(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -932,7 +932,7 @@ re_read:
err = 0;
out:
- kunmap_atomic(sb);
+ kunmap_local(sb);
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
/* Assigning chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
@@ -1161,12 +1161,12 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
bit = file_page_offset(&bitmap->storage, chunk);
/* set the bit */
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set_bit(bit, kaddr);
else
set_bit_le(bit, kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
pr_debug("set file bit %lu page %lu\n", bit, index);
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY);
@@ -1190,12 +1190,12 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
clear_bit(bit, paddr);
else
clear_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
@@ -1214,12 +1214,12 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return -EINVAL;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set = test_bit(bit, paddr);
else
set = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
return set;
}
@@ -1388,9 +1388,9 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
* If the bitmap is out of date, dirty the whole page
* and write it out
*/
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
memset(paddr + offset, 0xff, PAGE_SIZE - offset);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
filemap_write_page(bitmap, i, true);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) {
@@ -1406,12 +1406,12 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
void *paddr;
bool was_set;
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
was_set = test_bit(bit, paddr);
else
was_set = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (was_set) {
/* if the disk bit is set, set the memory bit */
@@ -1546,10 +1546,10 @@ static void bitmap_daemon_work(struct mddev *mddev)
bitmap_super_t *sb;
bitmap->need_sync = 0;
if (bitmap->storage.filemap) {
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events_cleared =
cpu_to_le64(bitmap->events_cleared);
- kunmap_atomic(sb);
+ kunmap_local(sb);
set_page_attr(bitmap, 0,
BITMAP_PAGE_NEEDWRITE);
}
@@ -1671,24 +1671,13 @@ __acquires(bitmap->lock)
}
static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool behind)
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return 0;
- if (behind) {
- int bw;
- atomic_inc(&bitmap->behind_writes);
- bw = atomic_read(&bitmap->behind_writes);
- if (bw > bitmap->behind_writes_used)
- bitmap->behind_writes_used = bw;
-
- pr_debug("inc write-behind count %d/%lu\n",
- bw, bitmap->mddev->bitmap_info.max_write_behind);
- }
-
while (sectors) {
sector_t blocks;
bitmap_counter_t *bmc;
@@ -1737,21 +1726,13 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
}
static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool success, bool behind)
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return;
- if (behind) {
- if (atomic_dec_and_test(&bitmap->behind_writes))
- wake_up(&bitmap->behind_wait);
- pr_debug("dec write-behind count %d/%lu\n",
- atomic_read(&bitmap->behind_writes),
- bitmap->mddev->bitmap_info.max_write_behind);
- }
-
while (sectors) {
sector_t blocks;
unsigned long flags;
@@ -1764,15 +1745,16 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
return;
}
- if (success && !bitmap->mddev->degraded &&
- bitmap->events_cleared < bitmap->mddev->events) {
- bitmap->events_cleared = bitmap->mddev->events;
- bitmap->need_sync = 1;
- sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
- }
-
- if (!success && !NEEDED(*bmc))
+ if (!bitmap->mddev->degraded) {
+ if (bitmap->events_cleared < bitmap->mddev->events) {
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->need_sync = 1;
+ sysfs_notify_dirent_safe(
+ bitmap->sysfs_can_clear);
+ }
+ } else if (!NEEDED(*bmc)) {
*bmc |= NEEDED_MASK;
+ }
if (COUNTER(*bmc) == COUNTER_MAX)
wake_up(&bitmap->overflow_wait);
@@ -2062,6 +2044,37 @@ static void md_bitmap_free(void *data)
kfree(bitmap);
}
+static void bitmap_start_behind_write(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+ int bw;
+
+ if (!bitmap)
+ return;
+
+ atomic_inc(&bitmap->behind_writes);
+ bw = atomic_read(&bitmap->behind_writes);
+ if (bw > bitmap->behind_writes_used)
+ bitmap->behind_writes_used = bw;
+
+ pr_debug("inc write-behind count %d/%lu\n",
+ bw, bitmap->mddev->bitmap_info.max_write_behind);
+}
+
+static void bitmap_end_behind_write(struct mddev *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+
+ if (!bitmap)
+ return;
+
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
+ pr_debug("dec write-behind count %d/%lu\n",
+ atomic_read(&bitmap->behind_writes),
+ bitmap->mddev->bitmap_info.max_write_behind);
+}
+
static void bitmap_wait_behind_writes(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -2981,6 +2994,9 @@ static struct bitmap_operations bitmap_ops = {
.dirty_bits = bitmap_dirty_bits,
.unplug = bitmap_unplug,
.daemon_work = bitmap_daemon_work,
+
+ .start_behind_write = bitmap_start_behind_write,
+ .end_behind_write = bitmap_end_behind_write,
.wait_behind_writes = bitmap_wait_behind_writes,
.startwrite = bitmap_startwrite,
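All the conversions above follow one pattern; a minimal sketch of the kmap_local_page() idiom (illustrative helper):

	static void zero_one_page(struct page *page)
	{
		void *kaddr = kmap_local_page(page);

		memset(kaddr, 0, PAGE_SIZE);
		kunmap_local(kaddr);
	}

Unlike kmap_atomic(), the local mapping does not disable preemption or pagefaults; the only requirement is that nested mappings are undone in reverse order within the same task.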
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 662e6fc141a7..31c93019c76b 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -84,12 +84,15 @@ struct bitmap_operations {
unsigned long e);
void (*unplug)(struct mddev *mddev, bool sync);
void (*daemon_work)(struct mddev *mddev);
+
+ void (*start_behind_write)(struct mddev *mddev);
+ void (*end_behind_write)(struct mddev *mddev);
void (*wait_behind_writes)(struct mddev *mddev);
int (*startwrite)(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool behind);
+ unsigned long sectors);
void (*endwrite)(struct mddev *mddev, sector_t offset,
- unsigned long sectors, bool success, bool behind);
+ unsigned long sectors);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
new file mode 100644
index 000000000000..a382929ce7ba
--- /dev/null
+++ b/drivers/md/md-linear.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linear.c : Multiple Devices driver for Linux
+ * Copyright (C) 1994-96 Marc ZYNGIER
+ * <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr>
+ */
+
+#include <linux/blkdev.h>
+#include <linux/raid/md_u.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <trace/events/block.h>
+#include "md.h"
+
+struct dev_info {
+ struct md_rdev *rdev;
+ sector_t end_sector;
+};
+
+struct linear_conf {
+ struct rcu_head rcu;
+ sector_t array_sectors;
+ /* a copy of mddev->raid_disks */
+ int raid_disks;
+ struct dev_info disks[] __counted_by(raid_disks);
+};
+
+/*
+ * find which device holds a particular offset
+ */
+static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
+{
+ int lo, mid, hi;
+ struct linear_conf *conf;
+
+ lo = 0;
+ hi = mddev->raid_disks - 1;
+ conf = mddev->private;
+
+ /*
+ * Binary Search
+ */
+
+ while (hi > lo) {
+
+ mid = (hi + lo) / 2;
+ if (sector < conf->disks[mid].end_sector)
+ hi = mid;
+ else
+ lo = mid + 1;
+ }
+
+ return conf->disks + lo;
+}
+
+static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
+{
+ struct linear_conf *conf;
+ sector_t array_sectors;
+
+ conf = mddev->private;
+ WARN_ONCE(sectors || raid_disks,
+ "%s does not support generic reshape\n", __func__);
+ array_sectors = conf->array_sectors;
+
+ return array_sectors;
+}
+
+static int linear_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+ int err;
+
+ md_init_stacking_limits(&lim);
+ lim.max_hw_sectors = mddev->chunk_sectors;
+ lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.io_min = mddev->chunk_sectors << 9;
+ err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+ if (err) {
+ queue_limits_cancel_update(mddev->gendisk->queue);
+ return err;
+ }
+
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
+static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
+{
+ struct linear_conf *conf;
+ struct md_rdev *rdev;
+ int ret = -EINVAL;
+ int cnt;
+ int i;
+
+ conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * conf->raid_disks is a copy of mddev->raid_disks. The reason to
+ * keep a copy of mddev->raid_disks in struct linear_conf is that
+ * mddev->raid_disks may not be consistent with the number of
+ * pointers in conf->disks[] when it is updated in linear_add() and
+ * used to iterate the old conf->disks[] array in linear_congested().
+ * Here conf->raid_disks is always consistent with the number of
+ * pointers in the conf->disks[] array, and mddev->private is updated
+ * with rcu_assign_pointer() in linear_add(), so such a race can be
+ * avoided.
+ */
+ conf->raid_disks = raid_disks;
+
+ cnt = 0;
+ conf->array_sectors = 0;
+
+ rdev_for_each(rdev, mddev) {
+ int j = rdev->raid_disk;
+ struct dev_info *disk = conf->disks + j;
+ sector_t sectors;
+
+ if (j < 0 || j >= raid_disks || disk->rdev) {
+ pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
+ goto out;
+ }
+
+ disk->rdev = rdev;
+ if (mddev->chunk_sectors) {
+ sectors = rdev->sectors;
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev->sectors = sectors * mddev->chunk_sectors;
+ }
+
+ conf->array_sectors += rdev->sectors;
+ cnt++;
+ }
+ if (cnt != raid_disks) {
+ pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
+ goto out;
+ }
+
+ /*
+ * Here we calculate the device offsets.
+ */
+ conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
+
+ for (i = 1; i < raid_disks; i++)
+ conf->disks[i].end_sector =
+ conf->disks[i-1].end_sector +
+ conf->disks[i].rdev->sectors;
+
+ if (!mddev_is_dm(mddev)) {
+ ret = linear_set_limits(mddev);
+ if (ret)
+ goto out;
+ }
+
+ return conf;
+
+out:
+ kfree(conf);
+ return ERR_PTR(ret);
+}
+
+static int linear_run(struct mddev *mddev)
+{
+ struct linear_conf *conf;
+ int ret;
+
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+
+ conf = linear_conf(mddev, mddev->raid_disks);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+
+ mddev->private = conf;
+ md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
+
+ ret = md_integrity_register(mddev);
+ if (ret) {
+ kfree(conf);
+ mddev->private = NULL;
+ }
+ return ret;
+}
+
+static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
+{
+ /* Adding a drive to a linear array allows the array to grow.
+ * It is permitted if the new drive has a matching superblock
+ * already on it, with raid_disk equal to raid_disks.
+ * It is achieved by creating a new linear_private_data structure
+ * and swapping it in place of the current one.
+ * The current one is never freed until the array is stopped.
+ * This avoids races.
+ */
+ struct linear_conf *newconf, *oldconf;
+
+ if (rdev->saved_raid_disk != mddev->raid_disks)
+ return -EINVAL;
+
+ rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
+
+ newconf = linear_conf(mddev, mddev->raid_disks + 1);
+ if (IS_ERR(newconf))
+ return PTR_ERR(newconf);
+
+ /* newconf->raid_disks already keeps a copy of the increased
+ * value of mddev->raid_disks; WARN_ONCE() is just used to make
+ * sure of this. It is possible that oldconf is still referenced
+ * in linear_congested(), therefore kfree_rcu() is used to defer
+ * freeing oldconf until no one uses it anymore.
+ */
+ oldconf = rcu_dereference_protected(mddev->private,
+ lockdep_is_held(&mddev->reconfig_mutex));
+ mddev->raid_disks++;
+ WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
+ "copied raid_disks doesn't match mddev->raid_disks");
+ rcu_assign_pointer(mddev->private, newconf);
+ md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
+ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
+ kfree_rcu(oldconf, rcu);
+ return 0;
+}
+
+static void linear_free(struct mddev *mddev, void *priv)
+{
+ struct linear_conf *conf = priv;
+
+ kfree(conf);
+}
+
+static bool linear_make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct dev_info *tmp_dev;
+ sector_t start_sector, end_sector, data_offset;
+ sector_t bio_sector = bio->bi_iter.bi_sector;
+
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+ && md_flush_request(mddev, bio))
+ return true;
+
+ tmp_dev = which_dev(mddev, bio_sector);
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ end_sector = tmp_dev->end_sector;
+ data_offset = tmp_dev->rdev->data_offset;
+
+ if (unlikely(bio_sector >= end_sector ||
+ bio_sector < start_sector))
+ goto out_of_bounds;
+
+ if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
+ md_error(mddev, tmp_dev->rdev);
+ bio_io_error(bio);
+ return true;
+ }
+
+ if (unlikely(bio_end_sector(bio) > end_sector)) {
+ /* This bio crosses a device boundary, so we have to split it */
+ struct bio *split = bio_split(bio, end_sector - bio_sector,
+ GFP_NOIO, &mddev->bio_set);
+
+ if (IS_ERR(split)) {
+ bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+ bio_endio(bio);
+ return true;
+ }
+
+ bio_chain(split, bio);
+ submit_bio_noacct(bio);
+ bio = split;
+ }
+
+ md_account_bio(mddev, &bio);
+ bio_set_dev(bio, tmp_dev->rdev->bdev);
+ bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
+ start_sector + data_offset;
+
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ !bdev_max_discard_sectors(bio->bi_bdev))) {
+ /* Just ignore it */
+ bio_endio(bio);
+ } else {
+ if (mddev->gendisk)
+ trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
+ bio_sector);
+ mddev_check_write_zeroes(mddev, bio);
+ submit_bio_noacct(bio);
+ }
+ return true;
+
+out_of_bounds:
+ pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_iter.bi_sector,
+ tmp_dev->rdev->bdev,
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ bio_io_error(bio);
+ return true;
+}
+
+static void linear_status(struct seq_file *seq, struct mddev *mddev)
+{
+ seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
+}
+
+static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
+{
+ if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
+ char *md_name = mdname(mddev);
+
+ pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
+ md_name, rdev->bdev);
+ }
+}
+
+static void linear_quiesce(struct mddev *mddev, int state)
+{
+}
+
+static struct md_personality linear_personality = {
+ .name = "linear",
+ .level = LEVEL_LINEAR,
+ .owner = THIS_MODULE,
+ .make_request = linear_make_request,
+ .run = linear_run,
+ .free = linear_free,
+ .status = linear_status,
+ .hot_add_disk = linear_add,
+ .size = linear_size,
+ .quiesce = linear_quiesce,
+ .error_handler = linear_error,
+};
+
+static int __init linear_init(void)
+{
+ return register_md_personality(&linear_personality);
+}
+
+static void linear_exit(void)
+{
+ unregister_md_personality(&linear_personality);
+}
+
+module_init(linear_init);
+module_exit(linear_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
+MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
+MODULE_ALIAS("md-linear");
+MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aebe12b0ee27..866015b681af 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8124,7 +8124,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
return;
mddev->pers->error_handler(mddev, rdev);
- if (mddev->pers->level == 0)
+ if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
return;
if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
@@ -8745,12 +8745,32 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+static void md_bitmap_start(struct mddev *mddev,
+ struct md_io_clone *md_io_clone)
+{
+ if (mddev->pers->bitmap_sector)
+ mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
+ &md_io_clone->sectors);
+
+ mddev->bitmap_ops->startwrite(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
+}
+
+static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
+{
+ mddev->bitmap_ops->endwrite(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
+}
+
static void md_end_clone_io(struct bio *bio)
{
struct md_io_clone *md_io_clone = bio->bi_private;
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
+ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ md_bitmap_end(mddev, md_io_clone);
+
if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
@@ -8775,6 +8795,12 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
if (blk_queue_io_stat(bdev->bd_disk->queue))
md_io_clone->start_time = bio_start_io_acct(*bio);
+ if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
+ md_io_clone->offset = (*bio)->bi_iter.bi_sector;
+ md_io_clone->sectors = bio_sectors(*bio);
+ md_bitmap_start(mddev, md_io_clone);
+ }
+
clone->bi_end_io = md_end_clone_io;
clone->bi_private = md_io_clone;
*bio = clone;
@@ -8793,6 +8819,9 @@ void md_free_cloned_bio(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
+ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ md_bitmap_end(mddev, md_io_clone);
+
if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4ba93af36126..def808064ad8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -746,6 +746,9 @@ struct md_personality
void *(*takeover) (struct mddev *mddev);
/* Changes the consistency policy of an active array. */
int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
+ /* convert io ranges from array to bitmap */
+ void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
+ unsigned long *sectors);
};
struct md_sysfs_entry {
@@ -828,6 +831,8 @@ struct md_io_clone {
struct mddev *mddev;
struct bio *orig_bio;
unsigned long start_time;
+ sector_t offset;
+ unsigned long sectors;
struct bio bio_clone;
};
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 157c9bd2fed7..8f8792e55806 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -917,23 +917,27 @@ static int load_ablock(struct dm_array_cursor *c)
if (c->block)
unlock_ablock(c->info, c->block);
- c->block = NULL;
- c->ab = NULL;
c->index = 0;
r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le);
if (r) {
DMERR("dm_btree_cursor_get_value failed");
- dm_btree_cursor_end(&c->cursor);
+ goto out;
} else {
r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
if (r) {
DMERR("get_ablock failed");
- dm_btree_cursor_end(&c->cursor);
+ goto out;
}
}
+ return 0;
+
+out:
+ dm_btree_cursor_end(&c->cursor);
+ c->block = NULL;
+ c->ab = NULL;
return r;
}
@@ -956,10 +960,10 @@ EXPORT_SYMBOL_GPL(dm_array_cursor_begin);
void dm_array_cursor_end(struct dm_array_cursor *c)
{
- if (c->block) {
+ if (c->block)
unlock_ablock(c->info, c->block);
- dm_btree_cursor_end(&c->cursor);
- }
+
+ dm_btree_cursor_end(&c->cursor);
}
EXPORT_SYMBOL_GPL(dm_array_cursor_end);
@@ -999,6 +1003,7 @@ int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count)
}
count -= remaining;
+ c->index += (remaining - 1);
r = dm_array_cursor_next(c);
} while (!r);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 7049ec7fb8eb..8fc9339b00c7 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -384,7 +384,7 @@ static int raid0_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 519c56f0ee3d..9d57a88dbd26 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -420,10 +420,8 @@ static void close_write(struct r1bio *r1_bio)
r1_bio->behind_master_bio = NULL;
}
- /* clear the bitmap if all writes complete successfully */
- mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->end_behind_write(mddev);
md_write_end(mddev);
}
@@ -480,8 +478,6 @@ static void raid1_end_write_request(struct bio *bio)
if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
else {
- /* Fail the request */
- set_bit(R1BIO_Degraded, &r1_bio->state);
/* Finished with this branch */
r1_bio->bios[mirror] = NULL;
to_put = bio;
@@ -1535,11 +1531,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
write_behind = true;
r1_bio->bios[i] = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags)) {
- if (i < conf->raid_disks)
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
- }
atomic_inc(&rdev->nr_pending);
if (test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -1558,16 +1551,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
*/
max_sectors = bad_sectors;
rdev_dec_pending(rdev, mddev);
- /* We don't set R1BIO_Degraded as that
- * only applies if the disk is
- * missing, so it might be re-added,
- * and we want to know to recover this
- * chunk.
- * In this case the device is here,
- * and the fact that this chunk is not
- * in-sync is recorded in the bad
- * block log
- */
continue;
}
if (is_bad) {
@@ -1645,9 +1628,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
stats.behind_writes < max_write_behind)
alloc_behind_master_bio(r1_bio, bio);
- mddev->bitmap_ops->startwrite(
- mddev, r1_bio->sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
first_clone = 0;
}
@@ -2614,12 +2596,10 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
* errors.
*/
fail = true;
- if (!narrow_write_error(r1_bio, m)) {
+ if (!narrow_write_error(r1_bio, m))
md_error(conf->mddev,
conf->mirrors[m].rdev);
/* an I/O failed, we can't clear the bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
- }
rdev_dec_pending(conf->mirrors[m].rdev,
conf->mddev);
}
@@ -2710,8 +2690,6 @@ static void raid1d(struct md_thread *thread)
list_del(&r1_bio->retry_list);
idx = sector_to_idx(r1_bio->sector);
atomic_dec(&conf->nr_queued[idx]);
- if (mddev->degraded)
- set_bit(R1BIO_Degraded, &r1_bio->state);
if (test_bit(R1BIO_WriteError, &r1_bio->state))
close_write(r1_bio);
raid_end_bio_io(r1_bio);
@@ -3239,7 +3217,7 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5300cbaa58a4..33f318fcc268 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -188,7 +188,6 @@ struct r1bio {
enum r1bio_state {
R1BIO_Uptodate,
R1BIO_IsSync,
- R1BIO_Degraded,
R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7d7a8a2524dc..efe93b979167 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -428,10 +428,6 @@ static void close_write(struct r10bio *r10_bio)
{
struct mddev *mddev = r10_bio->mddev;
- /* clear the bitmap if all writes complete successfully */
- mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- false);
md_write_end(mddev);
}
@@ -501,7 +497,6 @@ static void raid10_end_write_request(struct bio *bio)
set_bit(R10BIO_WriteError, &r10_bio->state);
else {
/* Fail the request */
- set_bit(R10BIO_Degraded, &r10_bio->state);
r10_bio->devs[slot].bio = NULL;
to_put = bio;
dec_rdev = 1;
@@ -1438,10 +1433,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
- if (!rdev && !rrdev) {
- set_bit(R10BIO_Degraded, &r10_bio->state);
+ if (!rdev && !rrdev)
continue;
- }
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
@@ -1458,14 +1451,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* to other devices yet
*/
max_sectors = bad_sectors;
- /* We don't set R10BIO_Degraded as that
- * only applies if the disk is missing,
- * so it might be re-added, and we want to
- * know to recover this chunk.
- * In this case the device is here, and the
- * fact that this chunk is not in-sync is
- * recorded in the bad block log.
- */
continue;
}
if (is_bad) {
@@ -1519,8 +1504,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
md_account_bio(mddev, &bio);
r10_bio->master_bio = bio;
atomic_set(&r10_bio->remaining, 1);
- mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors,
- false);
for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio)
@@ -2966,11 +2949,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL && bio->bi_status) {
fail = true;
- if (!narrow_write_error(r10_bio, m)) {
+ if (!narrow_write_error(r10_bio, m))
md_error(conf->mddev, rdev);
- set_bit(R10BIO_Degraded,
- &r10_bio->state);
- }
rdev_dec_pending(rdev, conf->mddev);
}
bio = r10_bio->devs[m].repl_bio;
@@ -3029,8 +3009,6 @@ static void raid10d(struct md_thread *thread)
r10_bio = list_first_entry(&tmp, struct r10bio,
retry_list);
list_del(&r10_bio->retry_list);
- if (mddev->degraded)
- set_bit(R10BIO_Degraded, &r10_bio->state);
if (test_bit(R10BIO_WriteError,
&r10_bio->state))
@@ -4040,7 +4018,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
- lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 2e75e88d0802..3f16ad6904a9 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -161,7 +161,6 @@ enum r10bio_state {
R10BIO_IsSync,
R10BIO_IsRecover,
R10BIO_IsReshape,
- R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
*/
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index b4f7b79fd187..e530271cb86b 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -313,10 +313,6 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- false);
}
}
}
@@ -1023,10 +1019,10 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
@@ -1979,9 +1975,9 @@ r5l_recovery_verify_data_checksum(struct r5l_log *log,
u32 checksum;
r5l_recovery_read_page(log, ctx, page, log_offset);
- addr = kmap_atomic(page);
+ addr = kmap_local_page(page);
checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}
@@ -2381,11 +2377,11 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
payload->size = cpu_to_le32(BLOCK_SECTORS);
payload->location = cpu_to_le64(
raid5_compute_blocknr(sh, i, 0));
- addr = kmap_atomic(dev->page);
+ addr = kmap_local_page(dev->page);
payload->checksum[0] = cpu_to_le32(
crc32c_le(log->uuid_checksum, addr,
PAGE_SIZE));
- kunmap_atomic(addr);
+ kunmap_local(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
dev->page, REQ_OP_WRITE, false);
write_pos = r5l_ring_add(log, write_pos,
@@ -2888,10 +2884,10 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
pages++;
}
WARN_ON(pages == 0);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f09e7677ee9f..5c79429acc64 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -906,8 +906,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return false;
return test_bit(STRIPE_BATCH_READY, &sh->state) &&
- !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
- is_full_stripe_write(sh);
+ is_full_stripe_write(sh);
}
/* we only do back search */
@@ -1345,8 +1344,6 @@ again:
submit_bio_noacct(rbi);
}
if (!rdev && !rrdev) {
- if (op_is_write(op))
- set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %d on disc %d for sector %llu\n",
bi->bi_opf, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
@@ -2884,7 +2881,6 @@ static void raid5_end_write_request(struct bio *bi)
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (bi->bi_status) {
- set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
@@ -3548,29 +3544,9 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
(*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
sh->dev[dd_idx].sector);
- if (conf->mddev->bitmap && firstwrite) {
- /* Cannot hold spinlock over bitmap_startwrite,
- * but must ensure this isn't added to a batch until
- * we have added to the bitmap and set bm_seq.
- * So set STRIPE_BITMAP_PENDING to prevent
- * batching.
- * If multiple __add_stripe_bio() calls race here they
- * much all set STRIPE_BITMAP_PENDING. So only the first one
- * to complete "bitmap_startwrite" gets to set
- * STRIPE_BIT_DELAY. This is important as once a stripe
- * is added to a batch, STRIPE_BIT_DELAY cannot be changed
- * any more.
- */
- set_bit(STRIPE_BITMAP_PENDING, &sh->state);
- spin_unlock_irq(&sh->stripe_lock);
- conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector,
- RAID5_STRIPE_SECTORS(conf), false);
- spin_lock_irq(&sh->stripe_lock);
- clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
- if (!sh->batch_head) {
- sh->bm_seq = conf->seq_flush+1;
- set_bit(STRIPE_BIT_DELAY, &sh->state);
- }
+ if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
+ sh->bm_seq = conf->seq_flush+1;
+ set_bit(STRIPE_BIT_DELAY, &sh->state);
}
}
@@ -3621,7 +3597,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
BUG_ON(sh->batch_head);
for (i = disks; i--; ) {
struct bio *bi;
- int bitmap_end = 0;
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
struct md_rdev *rdev = conf->disks[i].rdev;
@@ -3646,8 +3621,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].towrite = NULL;
sh->overwrite_disks = 0;
spin_unlock_irq(&sh->stripe_lock);
- if (bi)
- bitmap_end = 1;
log_stripe_write_finished(sh);
@@ -3662,11 +3635,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bio_io_error(bi);
bi = nextbi;
}
- if (bitmap_end)
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- false, false);
- bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
@@ -3675,7 +3643,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].page = sh->dev[i].orig_page;
}
- if (bi) bitmap_end = 1;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
@@ -3709,10 +3676,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi;
}
}
- if (bitmap_end)
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- false, false);
/* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/
@@ -4061,10 +4024,7 @@ returnbi:
bio_endio(wbi);
wbi = wbi2;
}
- conf->mddev->bitmap_ops->endwrite(conf->mddev,
- sh->sector, RAID5_STRIPE_SECTORS(conf),
- !test_bit(STRIPE_DEGRADED, &sh->state),
- false);
+
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head,
@@ -4341,7 +4301,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
s->locked++;
set_bit(R5_Wantwrite, &dev->flags);
- clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
case check_state_run:
@@ -4498,7 +4457,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
clear_bit(R5_Wantwrite, &dev->flags);
s->locked--;
}
- clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
break;
@@ -4891,8 +4849,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_COMPUTE_RUN) |
(1 << STRIPE_DISCARD) |
(1 << STRIPE_BATCH_READY) |
- (1 << STRIPE_BATCH_ERR) |
- (1 << STRIPE_BITMAP_PENDING)),
+ (1 << STRIPE_BATCH_ERR)),
"stripe state: %lx\n", sh->state);
WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
(1 << STRIPE_REPLACED)),
@@ -4900,7 +4857,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED) |
(1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -5784,10 +5740,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
- for (d = 0; d < conf->raid_disks - conf->max_degraded;
- d++)
- mddev->bitmap_ops->startwrite(mddev, sh->sector,
- RAID5_STRIPE_SECTORS(conf), false);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
@@ -5928,6 +5880,54 @@ static enum reshape_loc get_reshape_loc(struct mddev *mddev,
return LOC_BEHIND_RESHAPE;
}
+static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
+ unsigned long *sectors)
+{
+ struct r5conf *conf = mddev->private;
+ sector_t start = *offset;
+ sector_t end = start + *sectors;
+ sector_t prev_start = start;
+ sector_t prev_end = end;
+ int sectors_per_chunk;
+ enum reshape_loc loc;
+ int dd_idx;
+
+ sectors_per_chunk = conf->chunk_sectors *
+ (conf->raid_disks - conf->max_degraded);
+ start = round_down(start, sectors_per_chunk);
+ end = round_up(end, sectors_per_chunk);
+
+ start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
+ end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
+
+ /*
+ * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
+ * progress, hence it's the same as LOC_BEHIND_RESHAPE.
+ */
+ loc = get_reshape_loc(mddev, conf, prev_start);
+ if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
+ *offset = start;
+ *sectors = end - start;
+ return;
+ }
+
+ sectors_per_chunk = conf->prev_chunk_sectors *
+ (conf->previous_raid_disks - conf->max_degraded);
+ prev_start = round_down(prev_start, sectors_per_chunk);
+ prev_end = round_down(prev_end, sectors_per_chunk);
+
+ prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
+ prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
+
+ /*
+ * For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
+ * is handled in make_stripe_request(); we can't know that here, hence
+ * we set bits for both ranges.
+ */
+ *offset = min(start, prev_start);
+ *sectors = max(end, prev_end) - *offset;
+}
+
static enum stripe_result make_stripe_request(struct mddev *mddev,
struct r5conf *conf, struct stripe_request_ctx *ctx,
sector_t logical_sector, struct bio *bi)
@@ -8976,6 +8976,7 @@ static struct md_personality raid6_personality =
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid5_personality =
{
@@ -9001,6 +9002,7 @@ static struct md_personality raid5_personality =
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static struct md_personality raid4_personality =
@@ -9027,6 +9029,7 @@ static struct md_personality raid4_personality =
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
.prepare_suspend = raid5_prepare_suspend,
+ .bitmap_sector = raid5_bitmap_sector,
};
static int __init raid5_init(void)
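A toy calculation for the rounding done in raid5_bitmap_sector() above (hypothetical geometry):

	/* chunk_sectors = 128, raid_disks = 4, max_degraded = 1, so one
	 * full stripe holds 128 * (4 - 1) = 384 data sectors. A write
	 * covering [500, 700) is widened to the enclosing stripe-aligned
	 * range [384, 768) before the bitmap bits are set, so a
	 * partial-stripe write dirties whole stripes and the parity
	 * update is covered as well. */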
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d174e586698f..eafc6e9ed6ee 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -358,7 +358,6 @@ enum {
STRIPE_REPLACED,
STRIPE_PREREAD_ACTIVE,
STRIPE_DELAYED,
- STRIPE_DEGRADED,
STRIPE_BIT_DELAY,
STRIPE_EXPANDING,
STRIPE_EXPAND_SOURCE,
@@ -372,9 +371,6 @@ enum {
STRIPE_ON_RELEASE_LIST,
STRIPE_BATCH_READY,
STRIPE_BATCH_ERR,
- STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
- * to batch yet.
- */
STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
* this bit is used in two scenarios:
*
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 20a2466bec23..5b617c1f6789 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2094,8 +2094,7 @@ static int msb_init_disk(struct memstick_dev *card)
if (msb->disk_id < 0)
return msb->disk_id;
- rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 0);
if (rc)
goto out_release_id;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 13b317c56069..634d343b6bdb 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1139,8 +1139,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
if (disk_id < 0)
return disk_id;
- rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &mspro_mq_ops, 2, 0);
if (rc)
goto out_release_id;
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 5efc4151bf58..15307f5e4307 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -9,11 +9,13 @@ config CXL_BASE
select PPC_64S_HASH_MMU
config CXL
- tristate "Support for IBM Coherent Accelerators (CXL)"
+ tristate "Support for IBM Coherent Accelerators (CXL) (DEPRECATED)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- default m
help
+ The cxl driver is deprecated and will be removed in a future
+ kernel release.
+
Select this option to enable driver support for IBM Coherent
Accelerators (CXL). CXL is otherwise known as Coherent Accelerator
Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index cf6bd8a43056..e26ee85279fa 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -295,6 +295,8 @@ int cxl_of_probe(struct platform_device *pdev)
int ret;
int slice = 0, slice_ok = 0;
+ dev_err_once(&pdev->dev, "DEPRECATION: cxl is deprecated and will be removed in a future kernel release\n");
+
pr_devel("in %s\n", __func__);
np = pdev->dev.of_node;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3d52f9b92d0d..92bf7c5c7b35 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1726,6 +1726,8 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;
+ dev_err_once(&dev->dev, "DEPRECATED: cxl is deprecated and will be removed in a future kernel release\n");
+
if (cxl_pci_is_vphb_device(dev)) {
dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
return -ENODEV;
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index e616e3ec2b42..3c1359d8d4e6 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -148,7 +148,7 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
break;
default:
- ret = -EOPNOTSUPP;
+ ret = -ENOTSUPP;
break;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -277,7 +277,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
spin_unlock_irqrestore(&priv->lock, flags);
irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
- generic_handle_irq(irq);
+ handle_nested_irq(irq);
}
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4d6844261912..ab662f502fe7 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -441,7 +441,7 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
else
mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq->tag_set.numa_node = NUMA_NO_NODE;
- mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ mq->tag_set.flags = BLK_MQ_F_BLOCKING;
mq->tag_set.nr_hw_queues = 1;
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e00208535bd1..319f0ebbe652 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
union cqhci_crypto_cap_entry cap;
+ if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
+ return qcom_ice_evict_key(msm_host->ice, slot);
+
/* Only AES-256-XTS has been tested so far. */
cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
return -EINVAL;
- if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
- return qcom_ice_program_key(msm_host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- cfg->crypto_key,
- cfg->data_unit_size, slot);
- else
- return qcom_ice_evict_key(msm_host->ice, slot);
+ return qcom_ice_program_key(msm_host->ice,
+ QCOM_ICE_CRYPTO_ALG_AES_XTS,
+ QCOM_ICE_CRYPTO_KEY_SIZE_256,
+ cfg->crypto_key,
+ cfg->data_unit_size, slot);
}
#else /* CONFIG_MMC_CRYPTO */
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 47ead84407cd..ee7e1d908986 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -329,7 +329,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto out_list_del;
ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 66949d9f0cc5..b6f374ded390 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->dummy.nbytes)
- op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto);
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->data.nbytes)
op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 60d0155be869..2836905f0152 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ dev->tag_set.flags = BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index d16817e0476f..29fe79ea74cd 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -983,26 +983,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
- u8 value;
- u8 data;
+ u8 data, mult, value;
+ u32 max_val;
int ret;
- value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+#define MAX_TIMER_VAL ((1 << 8) - 1)
- ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
- if (ret < 0)
- return ret;
+ /* The aging timer comprises a 3-bit multiplier and an 8-bit second
+ * value. Neither of them can be zero. The maximum timer is then
+ * 7 * 255 = 1785 seconds.
+ */
+ if (!secs)
+ secs = 1;
- data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;
- value &= ~SW_AGE_CNT_M;
- value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ /* Check whether there is a need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to reuse the multiplier already in the register; the
+ * hardware default, for instance, uses multiplier 4 with a
+ * 75-second period to reach 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ if (ret < 0)
+ return ret;
+ }
- return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ value = DIV_ROUND_UP(secs, data);
+ return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
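
The multiplier handling above is easiest to follow with concrete numbers. Below is a standalone sketch of the same arithmetic (plain C, not driver code; DIV_ROUND_UP mirrors the kernel macro):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_TIMER_VAL ((1 << 8) - 1)

/* Given the requested seconds and the multiplier currently in the
 * register, compute the new multiplier and 8-bit period exactly as
 * the kernel code does. */
static void demo_split(unsigned int secs, unsigned int cur_mult)
{
	unsigned int max_val = MAX_TIMER_VAL, data, value;

	if (cur_mult > 0) {
		max_val = DIV_ROUND_UP(secs, cur_mult);
		if (max_val > MAX_TIMER_VAL || max_val * cur_mult != secs)
			max_val = MAX_TIMER_VAL;
	}
	data = DIV_ROUND_UP(secs, max_val);   /* new multiplier */
	value = DIV_ROUND_UP(secs, data);     /* 8-bit period   */
	printf("secs=%u -> mult=%u period=%u (ages at %u s)\n",
	       secs, data, value, data * value);
}

int main(void)
{
	demo_split(300, 4);   /* keeps mult 4, period 75 */
	demo_split(1000, 1);  /* falls back: mult 4, period 250 */
	return 0;
}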
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 04235c22bf40..ff579920078e 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -165,8 +165,6 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M GENMASK(5, 3)
-#define SW_AGE_CNT_S 3
-#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index b7652efd632e..b1ae3b9de3d1 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
- * Copyright (C) 2019-2022 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -461,10 +461,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
- u32 secs = msecs / 1000;
- u32 value;
+ u8 data, mult, value8;
+ bool in_msec = false;
+ u32 max_val, value;
+ u32 secs = msecs;
int ret;
+#define MAX_TIMER_VAL ((1 << 20) - 1)
+
+ /* The aging timer comprises a 3-bit multiplier and a 20-bit second
+ * value. Either of them cannot be zero. The maximum timer is then
+ * 7 * 1048575 = 7340025 seconds. As this value is too large for
+ * practical use it can be interpreted as microseconds, making the
+ * maximum timer 7340 seconds with finer control. This allows for
+ * maximum 122 minutes compared to 29 minutes in KSZ9477 switch.
+ */
+ if (msecs % 1000)
+ in_msec = true;
+ else
+ secs /= 1000;
+ if (!secs)
+ secs = 1;
+
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
+
+ /* Configure how the timer value is interpreted. */
+ ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
+ in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
+ if (ret < 0)
+ return ret;
+
+ /* Check whether there is need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value8);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to use the same multiplier already in the register as
+ * the hardware default uses multiplier 4 and 75 seconds for
+ * 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value8 &= ~SW_AGE_CNT_M;
+ value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
+ if (ret < 0)
+ return ret;
+ }
+
+ secs = DIV_ROUND_UP(secs, data);
+
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
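
The unit selection at the top of the function follows the same idea: a request that is an exact multiple of 1000 ms is programmed in whole seconds, anything else flips the counter into the finer-grained mode so the remainder is not silently dropped. A small sketch of just that decision (names illustrative):

static unsigned int demo_pick_unit(unsigned int msecs, bool *fine_mode)
{
	*fine_mode = (msecs % 1000) != 0;
	if (*fine_mode)
		return msecs;    /* programmed in the finer unit */

	return msecs / 1000;     /* programmed in whole seconds */
}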
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 4ec93e421da4..72042fd64e5b 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip LAN937X switch register definitions
- * Copyright (C) 2019-2021 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#ifndef __LAN937X_REG_H
#define __LAN937X_REG_H
@@ -56,8 +56,7 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
-#define SW_AGE_CNT_S 3
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define REG_SW_LUE_CTRL_1 0x0311
@@ -70,6 +69,10 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
+#define REG_SW_LUE_CTRL_2 0x0312
+
+#define SW_AGE_CNT_IN_MICROSEC BIT(7)
+
#define REG_SW_AGE_PERIOD__1 0x0313
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 2681889162a2..44971e71991f 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -118,7 +118,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (err && err != -EIO)
return err;
- listlen = fw_list.num_fw_slots;
+ listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
if (i < ARRAY_SIZE(fw_slotnames))
strscpy(buf, fw_slotnames[i], sizeof(buf));
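
The one-line fix above is an instance of a general rule: a count reported by firmware indexes a fixed-size response array, so it must be clamped to the array capacity before the loop. A sketch of the pattern (types, sizes and helpers illustrative):

static void demo_list_fw_slots(struct demo_dev *d)
{
	char names[4][32];                       /* fixed-size response */
	unsigned int reported, n, i;

	demo_read_slot_names(d, names);          /* assumed helper fills it */
	reported = demo_read_slot_count(d);      /* untrusted firmware value */
	n = min_t(unsigned int, reported, ARRAY_SIZE(names));

	for (i = 0; i < n; i++)                  /* never walks past names[] */
		demo_report_name(d, names[i]);
}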
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..268399dfcf22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 42672c63f108..bc4e1f3b3752 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1933,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
/* Reset UniMAC */
umac_reset(priv);
@@ -2591,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable priv clock\n");
+ goto err_deregister_netdev;
+ }
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2605,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_netdev:
+ unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2774,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
+
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
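
All three hunks in this file apply the same rule: clk_prepare_enable() can fail, so its return value must be propagated and anything acquired earlier unwound on the error path. The minimal shape, as a sketch:

static int demo_bring_up(struct net_device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret) {
		netdev_err(dev, "could not enable clock\n");
		return ret;                /* nothing acquired yet */
	}

	/* ... later failures must undo it with clk_disable_unprepare(clk) */
	return 0;
}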
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b86f980fa7ea..b6f844cac80e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2897,6 +2897,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -3164,7 +3171,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3295,7 +3302,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
poll_done:
cpr_rx = &cpr->cp_ring_arr[0];
if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
- (bp->flags & BNXT_FLAG_DIM)) {
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -4701,7 +4708,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
@@ -4722,15 +4729,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -7266,6 +7288,26 @@ err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -7366,6 +7408,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
@@ -11309,8 +11352,6 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
napi_disable(&bnapi->napi);
- if (bnapi->rx_ring)
- cancel_work_sync(&cpr->dim.work);
}
}
@@ -15572,8 +15613,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
bnxt_hwrm_vnic_update(bp, vnic,
VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}
-
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
rxr = &bp->rx_ring[idx];
+ cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
@@ -16186,7 +16229,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
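
The DIM changes in this file all follow one ordering: mark the VNIC inactive (expressed here via fw_vnic_id/mru and checked by bnxt_vnic_is_active()), wait out in-flight NAPI polls with synchronize_net(), and only then cancel_work_sync() the DIM work so it cannot be re-queued behind the cancel. A compressed sketch of that ordering (struct demo_ring is illustrative):

static void demo_stop_dim(struct demo_ring *ring)
{
	ring->active = false;          /* NAPI checks this before queuing DIM */
	synchronize_net();             /* flush polls that saw it active */
	cancel_work_sync(&ring->dim.work);  /* nothing can requeue it now */
}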
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7df7a2233307..f11ed59203d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2846,7 +2846,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b771c84cdd89..0ed26e3a28f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -208,7 +208,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
- return rc;
+ goto drop_req;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
@@ -220,6 +220,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
memcpy(fw_msg->resp, resp, resp_len);
}
+drop_req:
hwrm_req_drop(bp, req);
return rc;
}
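
The bnxt_send_msg() fix is the classic acquire/release pairing: once the request object is held, every exit path must drop it, so the early failure branches to the common label instead of returning. A sketch of the shape (helper names illustrative, not the HWRM API):

static int demo_send(struct demo_dev *d, const void *msg, size_t len)
{
	struct demo_req *req;
	int rc;

	req = demo_req_init(d);          /* assumed helper */
	if (!req)
		return -ENOMEM;

	rc = demo_req_replace(req, msg, len);
	if (rc)
		goto drop_req;           /* a bare return here leaks req */

	rc = demo_req_send(req);

drop_req:
	demo_req_drop(req);              /* single release point */
	return rc;
}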
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f88b641533fc..dc51dce209d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -422,15 +422,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bnxt_set_rx_skb_mode(bp, true);
xdp_features_set_redirect_target(dev, true);
} else {
- int rx, tx;
-
xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index bc3af0054406..604dcfd49aa4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1799,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
struct adapter *adap = container_of(t, struct adapter, tids);
struct sk_buff *skb;
- WARN_ON(tid_out_of_range(&adap->tids, tid));
+ if (tid_out_of_range(&adap->tids, tid)) {
+ dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
+ return;
+ }
if (t->tid_tab[tid - adap->tids.tid_base]) {
t->tid_tab[tid - adap->tids.tid_base] = NULL;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b55047c0237..4566848e1d7c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1591,19 +1591,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rxq->rx_skb_info[index].page = new_page;
rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
@@ -1698,6 +1701,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
#if !defined(CONFIG_M5272)
@@ -1766,12 +1770,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
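
The reordering above enforces a ring invariant: reserve the replacement page before handing the current one to the stack, so an allocation failure degrades to one dropped frame rather than a descriptor with no buffer behind it. A sketch of the invariant (types and helpers illustrative):

static bool demo_refill_then_receive(struct demo_rxq *q, int idx)
{
	struct page *old = q->slot[idx].page;
	struct page *fresh = page_pool_dev_alloc_pages(q->pool);

	if (unlikely(!fresh)) {
		/* Old page stays in the ring; drop just this frame. */
		q->stats.rx_dropped++;
		return false;
	}

	q->slot[idx].page = fresh;     /* ring re-armed first */
	demo_pass_to_stack(q, old);    /* stack consumes the old page */
	return true;
}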
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index dd92949bb214..8167cc5fb0df 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1140,6 +1140,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e171ca248f9a..533e659b15b3 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->rx) {
work_done = gve_rx_poll(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Set up re-poll based on the max
+ * of TX and RX work done.
+ */
+ if (priv->xdp_prog)
+ work_done = max_t(int, work_done,
+ gve_xsk_tx_poll(block, budget));
+
reschedule |= work_done == budget;
}
@@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
+ int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
+
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
- cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
cfg->tx = priv->tx;
}
@@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
- /* If XDP prog is not installed, return */
- if (!priv->xdp_prog)
+ /* If XDP prog is not installed or interface is down, return. */
+ if (!priv->xdp_prog || !netif_running(dev))
return 0;
rx = &priv->rx[qid];
@@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed, unmap DMA and return */
- if (!priv->xdp_prog)
- goto done;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- if (!netif_running(dev)) {
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ /* If XDP prog is not installed or interface is down, unmap DMA and
+ * return.
+ */
+ if (!priv->xdp_prog || !netif_running(dev))
goto done;
- }
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
@@ -1709,24 +1714,20 @@ done:
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
- int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+ struct napi_struct *napi;
+
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
return -EINVAL;
- if (flags & XDP_WAKEUP_TX) {
- struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
- struct napi_struct *napi =
- &priv->ntfy_blocks[tx->ntfy_id].napi;
-
- if (!napi_if_scheduled_mark_missed(napi)) {
- /* Call local_bh_enable to trigger SoftIRQ processing */
- local_bh_disable();
- napi_schedule(napi);
- local_bh_enable();
- }
-
- tx->xdp_xsk_wakeup++;
+ napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
+ if (!napi_if_scheduled_mark_missed(napi)) {
+ /* Call local_bh_enable to trigger SoftIRQ processing */
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
}
return 0;
@@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ int num_xdp_queues;
int err;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv,
rx_alloc_cfg.qcfg = &new_rx_config;
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
+ /* Add dedicated XDP TX queues if enabled. */
+ num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
+ tx_alloc_cfg.num_rings += num_xdp_queues;
+
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
@@ -1899,6 +1905,9 @@ static void gve_turndown(struct gve_priv *priv)
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
+
+ /* Make sure that all traffic is finished processing. */
+ synchronize_net();
}
static void gve_turnup(struct gve_priv *priv)
@@ -2232,14 +2241,18 @@ static void gve_service_task(struct work_struct *work)
static void gve_set_netdev_xdp_features(struct gve_priv *priv)
{
+ xdp_features_t xdp_features;
+
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
- priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
- priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
- priv->dev->xdp_features = 0;
+ xdp_features = 0;
}
+
+ xdp_set_features_flag(priv->dev, xdp_features);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
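
The xdp_features rewrite builds the mask in a local variable and publishes it once through xdp_set_features_flag(), presumably so the change is announced as a single feature-change event rather than assigned to the netdev field piecemeal. A sketch of that shape (the capability check is illustrative):

static void demo_set_xdp_features(struct net_device *dev, bool xdp_capable)
{
	xdp_features_t feats = 0;

	if (xdp_capable)
		feats = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_XSK_ZEROCOPY;

	xdp_set_features_flag(dev, feats);  /* one publish, one notification */
}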
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index e7fb7d6d283d..4350ebd9c2bd 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
return;
gve_remove_napi(priv, ntfy_idx);
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ if (tx->q_num < priv->tx_cfg.num_queues)
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ else
+ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_remove_from_block(priv, idx);
}
@@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct gve_tx_ring *tx;
int i, err = 0, qid;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
return -EINVAL;
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
+
qid = gve_xdp_tx_queue_id(priv,
smp_processor_id() % priv->num_xdp_queues);
@@ -975,33 +981,41 @@ out:
return sent;
}
+int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+ int sent = 0;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool) {
+ sent = gve_xsk_tx(priv, tx, budget);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ }
+
+ return sent;
+}
+
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
u32 nic_done;
- bool repoll;
u32 to_do;
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);
gve_clean_xdp_done(priv, tx, to_do);
- repoll = nic_done != tx->done;
-
- if (tx->xsk_pool) {
- int sent = gve_xsk_tx(priv, tx, budget);
-
- u64_stats_update_begin(&tx->statss);
- tx->xdp_xsk_sent += sent;
- u64_stats_update_end(&tx->statss);
- repoll |= (sent == budget);
- if (xsk_uses_need_wakeup(tx->xsk_pool))
- xsk_set_tx_need_wakeup(tx->xsk_pool);
- }
/* If we still have work we want to repoll */
- return repoll;
+ return nic_done != tx->done;
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 710a8f9f2248..12ba380eb701 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -916,9 +916,6 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
- /* protects concurrent contention between debugfs commands */
- struct mutex dbgfs_lock;
- char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 807eb3bbb11c..9bbece25552b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1260,69 +1260,55 @@ static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- struct hns3_dbg_data *dbg_data = filp->private_data;
+ char *buf = filp->private_data;
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static int hns3_dbg_open(struct inode *inode, struct file *filp)
+{
+ struct hns3_dbg_data *dbg_data = inode->i_private;
struct hnae3_handle *handle = dbg_data->handle;
struct hns3_nic_priv *priv = handle->priv;
- ssize_t size = 0;
- char **save_buf;
- char *read_buf;
u32 index;
+ char *buf;
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
+
ret = hns3_dbg_get_cmd_index(dbg_data, &index);
if (ret)
return ret;
- mutex_lock(&handle->dbgfs_lock);
- save_buf = &handle->dbgfs_buf[index];
-
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
- test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (*save_buf) {
- read_buf = *save_buf;
- } else {
- read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* save the buffer addr until the last read operation */
- *save_buf = read_buf;
-
- /* get data ready for the first time to read */
- ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
- read_buf, hns3_dbg_cmd[index].buf_len);
- if (ret)
- goto out;
- }
+ buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- size = simple_read_from_buffer(buffer, count, ppos, read_buf,
- strlen(read_buf));
- if (size > 0) {
- mutex_unlock(&handle->dbgfs_lock);
- return size;
+ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+ buf, hns3_dbg_cmd[index].buf_len);
+ if (ret) {
+ kvfree(buf);
+ return ret;
}
-out:
- /* free the buffer for the last read operation */
- if (*save_buf) {
- kvfree(*save_buf);
- *save_buf = NULL;
- }
+ filp->private_data = buf;
+ return 0;
+}
- mutex_unlock(&handle->dbgfs_lock);
- return ret;
+static int hns3_dbg_release(struct inode *inode, struct file *filp)
+{
+ kvfree(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
}
static const struct file_operations hns3_dbg_fops = {
.owner = THIS_MODULE,
- .open = simple_open,
+ .open = hns3_dbg_open,
.read = hns3_dbg_read,
+ .release = hns3_dbg_release,
};
static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
@@ -1379,13 +1365,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
int ret;
u32 i;
- handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
- ARRAY_SIZE(hns3_dbg_cmd),
- sizeof(*handle->dbgfs_buf),
- GFP_KERNEL);
- if (!handle->dbgfs_buf)
- return -ENOMEM;
-
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1395,8 +1374,6 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
- mutex_init(&handle->dbgfs_lock);
-
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1425,24 +1402,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
out:
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
- mutex_destroy(&handle->dbgfs_lock);
return ret;
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
- u32 i;
-
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
-
- for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (handle->dbgfs_buf[i]) {
- kvfree(handle->dbgfs_buf[i]);
- handle->dbgfs_buf[i] = NULL;
- }
-
- mutex_destroy(&handle->dbgfs_lock);
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
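
The rewrite above replaces a shared, lock-protected cache with the standard debugfs lifecycle: snapshot the data at open, serve reads from the per-file snapshot, free it at release. Because each reader owns its buffer, the mutex and the per-command buffer array can go away. The generic shape, as a sketch (buffer length and fill helper illustrative):

#define DEMO_BUF_LEN 16384   /* illustrative */

static int demo_open(struct inode *inode, struct file *filp)
{
	char *buf = kvzalloc(DEMO_BUF_LEN, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	demo_fill(inode->i_private, buf, DEMO_BUF_LEN); /* assumed helper */
	filp->private_data = buf;
	return 0;
}

static ssize_t demo_read(struct file *filp, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	char *buf = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, buf, strlen(buf));
}

static int demo_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	return 0;
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = demo_read,
	.release = demo_release,
};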
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 43377a7b2426..a7e3b22f641c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2452,7 +2452,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- netdev->features = features;
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 05942fa78b11..db7845009252 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -3574,6 +3575,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function resolves it to the HNAE3_NONE_RESET flag. As a result,
+ * this type does not need to be marked pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3594,7 +3606,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3759,7 +3771,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, hdev->misc_vector.name, hdev);
+ IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4052,7 +4064,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
- set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4086,6 +4098,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4227,7 +4241,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4470,8 +4484,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGE_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+ BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
struct hclge_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+ /* To prevent a reset being triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
+
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -11881,9 +11907,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
- /* Enable MISC vector(vector0) */
- hclge_enable_vector(&hdev->misc_vector, true);
-
ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
@@ -11896,6 +11919,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
+ /* Enable MISC vector(vector0) */
+ enable_irq(hdev->misc_vector.vector_irq);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -12301,7 +12328,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- synchronize_irq(hdev->misc_vector.vector_irq);
+ disable_irq(hdev->misc_vector.vector_irq);
/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
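
The misc-vector changes in this file combine into one pattern: request the IRQ with IRQF_NO_AUTOEN so the line stays masked until the driver is ready, enable it as the last init step, and use disable_irq() (which waits for a running handler) rather than synchronize_irq() on teardown. A sketch of the pairing (names illustrative):

static int demo_init_irq(struct demo_dev *d)
{
	int ret;

	ret = request_irq(d->irq, demo_handler, IRQF_NO_AUTOEN,
			  "demo-misc", d);
	if (ret)
		return ret;

	/* ... initialise everything the handler touches ... */

	enable_irq(d->irq);        /* handler may fire from here on */
	return 0;
}

static void demo_uninit_irq(struct demo_dev *d)
{
	disable_irq(d->irq);       /* masks and waits for the handler */
	free_irq(d->irq, d);
}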
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 5505caea88e9..bab16c2191b2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;
+ if (!ptp)
+ return false;
+
if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
index 43c1c18fa81f..8c057192aae6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
-#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4
+ struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGE_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2f6ffb88e700..163c6e59ea4c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1393,6 +1393,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+ enum hnae3_reset_type reset_type)
+{
+ /* When an incorrect reset type is executed, the get_reset_level
+ * function resolves it to the HNAE3_NONE_RESET flag. As a result,
+ * this type does not need to be marked pending.
+ */
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &hdev->reset_pending);
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1542,7 +1553,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
- set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_type);
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1662,6 +1673,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}
+ clear_bit(HNAE3_NONE_RESET, addr);
+
return rst_level;
}
@@ -1671,14 +1684,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;
- dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+ hdev->reset_level);
+
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1689,8 +1703,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+ (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+ BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+ BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
struct hclgevf_dev *hdev = ae_dev->priv;
+ if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+ /* To prevent a reset being triggered by hclge_reset_event */
+ set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+ dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+ rst_type);
+ return;
+ }
set_bit(rst_type, &hdev->default_reset_request);
}
@@ -1847,14 +1873,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
- set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- set_bit(hdev->reset_level, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, hdev->reset_level);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -1977,7 +2003,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
- set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2287,6 +2313,8 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+ /* timer needs to be initialized before misc irq */
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -2986,7 +3014,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
- timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 6db415d8b917..7d9d9dbc7560 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
-#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *tqp;
int i, j, reg_um;
u32 *reg = data;
@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ tqp = &hdev->htqp[j].q;
for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_RING_REG_OFFSET * j);
+ *reg++ = readl_relaxed(tqp->io_base -
+ HCLGEVF_TQP_REG_OFFSET +
+ ring_reg_addr_list[i]);
}
reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1489a8ceec51..46f9726d9a8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1665,6 +1665,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
@@ -2264,6 +2265,8 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
+
/* Get CGU abilities command response data structure (indirect 0x0C61) */
struct ice_aqc_get_cgu_abilities {
u8 num_inputs;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 496d86cbd13f..532024f34ce4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4096,6 +4096,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ kfree(options);
+ return lane;
+ }
+
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..fe6f88cfd948 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -193,6 +193,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index d5ad6d84007c..38e151c7ea23 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -2065,6 +2065,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
}
/**
+ * ice_dpll_phase_range_set - initialize phase adjust range helper
+ * @range: pointer to phase adjust range struct to be initialized
+ * @phase_adj: a value to be used as min(-)/max(+) boundary
+ */
+static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
+ u32 phase_adj)
+{
+ range->min = -phase_adj;
+ range->max = phase_adj;
+}
+
+/**
* ice_dpll_init_info_pins_generic - initializes generic pins info
* @pf: board private structure
* @input: if input pins initialized
@@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
for (i = 0; i < pin_num; i++) {
pins[i].idx = i;
pins[i].prop.board_label = labels[i];
- pins[i].prop.phase_range.min = phase_adj_max;
- pins[i].prop.phase_range.max = -phase_adj_max;
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = cap;
pins[i].pf = pf;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
@@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
+ u32 phase_adj_max;
u8 freq_supp_num;
bool input;
@@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
case ICE_DPLL_PIN_TYPE_INPUT:
pins = pf->dplls.inputs;
num_pins = pf->dplls.num_inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
input = true;
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
pins = pf->dplls.outputs;
num_pins = pf->dplls.num_outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
input = false;
break;
default:
@@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
- pins[i].prop.phase_range.min =
- pf->dplls.input_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.input_phase_adj_max;
} else {
- pins[i].prop.phase_range.min =
- pf->dplls.output_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
}
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
@@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
dp->dpll_idx = abilities.pps_dpll_idx;
d->num_inputs = abilities.num_inputs;
d->num_outputs = abilities.num_outputs;
- d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
- d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
alloc_size = sizeof(*d->inputs) * d->num_inputs;
d->inputs = kzalloc(alloc_size, GFP_KERNEL);
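
Two fixes meet in this file: the firmware phase-adjust fields reserve the top bit, so they are masked with ICE_AQC_GET_CGU_MAX_PHASE_ADJ (GENMASK(30, 0)) before use, and the new ice_dpll_phase_range_set() helper guarantees the ordered min = -adj / max = +adj assignment that the old open-coded version had inverted. A sketch of the combined effect (the raw field name is illustrative):

static void demo_set_phase_range(struct dpll_pin_phase_adjust_range *range,
				 __le32 raw_max_phase_adj)
{
	/* Bit 31 is reserved by firmware; keep only the magnitude. */
	u32 adj = le32_to_cpu(raw_max_phase_adj) & GENMASK(30, 0);

	range->min = -(s32)adj;   /* ordered: negative bound first */
	range->max = (s32)adj;
}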
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0ab35607e5d5..89fa3d53d317 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -6790,7 +6790,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7260,7 +7260,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..efd770dfec44 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1388,10 +1388,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1398,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -3164,10 +3156,17 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
- int err;
+ int lane_num, err;
ptp->state = ICE_PTP_INITIALIZING;
+ lane_num = ice_get_phy_lane_number(hw);
+ if (lane_num < 0) {
+ err = lane_num;
+ goto err_exit;
+ }
+
+ ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,10 +3187,6 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..c490d98fd9c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -310,7 +310,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -358,7 +358,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 585ce200c60f..a8e57cf05a9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@@ -761,9 +761,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* rx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
1620,
/* pmd_adj_divisor */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 518893f23372..02e84f5b1d45 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -901,30 +901,45 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ /* On a single-complex E825, ports on PHY 0 are reached through
+ * destination device eth56g_phy_0 and ports on PHY 1 through
+ * eth56g_phy_1.
+ */
+ if (port >= hw->ptp.ports_per_phy)
+ return eth56g_phy_1;
+ else
+ return eth56g_phy_0;
+}
+
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +950,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to read
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
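
Both helpers above now build the sideband message with a designated initializer instead of field-by-field stores, so unset members are zeroed and the destination device is chosen in one place. A minimal stand-alone sketch of that pattern, with simplified stand-in types rather than the driver's real ice_sbq_msg_input:

#include <stdint.h>
#include <stdio.h>

struct sbq_msg {
        uint8_t  dest_dev;
        uint8_t  opcode;
        uint16_t addr_low;
        uint16_t addr_high;
        uint32_t data;
};

#define lower_16_bits(x) ((uint16_t)((x) & 0xffff))
#define upper_16_bits(x) ((uint16_t)((x) >> 16))

int main(void)
{
        uint32_t addr = 0x12345678, val = 0xdeadbeef;
        /* One initializer: no field can be forgotten, the rest is zero. */
        struct sbq_msg msg = {
                .dest_dev  = 1,
                .opcode    = 0x02,      /* write */
                .addr_low  = lower_16_bits(addr),
                .addr_high = upper_16_bits(addr),
                .data      = val,
        };

        printf("dev=%u op=%u addr=0x%04x%04x data=0x%08x\n",
               (unsigned)msg.dest_dev, (unsigned)msg.opcode,
               (unsigned)msg.addr_high, (unsigned)msg.addr_low,
               (unsigned)msg.data);
        return 0;
}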
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be accessed
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +988,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base[0] +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
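
The address math above collapses the old per-PHY base table: the lane index is reduced modulo ports_per_phy, and selecting PHY 0 versus PHY 1 is left entirely to the destination device in the sideband message. A small model of the computation (base, step and offset values invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define PORTS_PER_PHY 4

static uint32_t lane_reg_addr(uint8_t lane, uint32_t base, uint32_t step,
                              uint32_t offset)
{
        lane %= PORTS_PER_PHY;          /* lanes 4..7 -> 0..3 on PHY 1 */
        return base + lane * step + offset;
}

int main(void)
{
        /* Lanes 0 and 4 yield the same address by design: which PHY
         * answers is decided by dest_dev, not by the address. */
        for (uint8_t lane = 0; lane < 8; lane++)
                printf("lane %u -> 0x%08x\n", (unsigned)lane,
                       (unsigned)lane_reg_addr(lane, 0x1000, 0x100, 0x24));
        return 0;
}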
@@ -1008,19 +1020,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +1049,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +1209,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @port: Port number
+ * @offset: PHY register offset
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @port: Port number
+ * @offset: PHY register offset
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1919,7 +1977,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1934,8 +1991,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1946,8 +2003,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1988,50 +2045,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2051,7 +2105,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
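
Note the enable-bit handling in the first hunk above: the old code or'ed the raw lane value into the register (val |= blk_port) where a per-lane mask was needed, and the rewrite uses BIT(quad_lane). Reduced to its essence (stand-alone sketch, not the driver code):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define PORTS_PER_QUAD 4

static uint32_t update_1step_cfg(uint32_t val, uint8_t port, int enable)
{
        uint8_t quad_lane = port % PORTS_PER_QUAD;

        if (enable)
                val |= BIT(quad_lane);  /* set exactly one lane's bit */
        else
                val &= ~BIT(quad_lane);
        return val;
}

int main(void)
{
        uint32_t val = 0;

        val = update_1step_cfg(val, 6, 1);      /* port 6 -> lane 2 */
        printf("cfg = 0x%x\n", (unsigned)val);  /* prints 0x4 */
        return 0;
}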
/**
@@ -2093,21 +2147,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2667,59 +2722,29 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
+ u32 phy_rev;
+ int err;
ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+ err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G)
+ ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -2738,10 +2763,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2753,12 +2777,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = rmn_0;
}
/**
@@ -5478,7 +5497,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
else if (ice_is_e810(hw))
ice_ptp_init_phy_e810(ptp);
else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
+ ice_ptp_init_phy_e825(hw);
else
ptp->phy_model = ICE_PHY_UNSUP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..4a9ef722635f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -850,7 +850,6 @@ struct ice_mbx_data {
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
@@ -881,7 +880,6 @@ struct ice_ptp_hw {
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 9fae8bdec2a7..1613b562d17c 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,6 +68,10 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
+ /* failed to read reg and got all F's */
+ if (!(~eecd))
+ return -ENXIO;
+
size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
@@ -221,6 +225,8 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
/* NVM initialization */
ret_val = igc_init_nvm_params_base(hw);
+ if (ret_val)
+ goto out;
switch (hw->mac.type) {
case igc_i225:
ret_val = igc_init_nvm_params_i225(hw);
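
The added `if (!(~eecd))` test is the standard surprise-removal check: a config or MMIO read from a device that has dropped off the bus returns all ones, and complementing the value detects exactly that case. In isolation:

#include <stdint.h>
#include <stdio.h>

static int reg_read_valid(uint32_t reg)
{
        return ~reg != 0;       /* false only when every bit reads as 1 */
}

int main(void)
{
        printf("%d %d\n", reg_read_valid(0x00c01234),
               reg_read_valid(0xffffffff));     /* prints "1 0" */
        return 0;
}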
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a06048719e84..67a6ff07c83d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2704,9 +2704,15 @@ static struct platform_device *port_platdev[3];
static void mv643xx_eth_shared_of_remove(void)
{
+ struct mv643xx_eth_platform_data *pd;
int n;
for (n = 0; n < 3; n++) {
+ if (!port_platdev[n])
+ continue;
+ pd = dev_get_platdata(&port_platdev[n]->dev);
+ if (pd)
+ of_node_put(pd->phy_node);
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
@@ -2769,8 +2775,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
- if (!ppdev)
- return -ENOMEM;
+ if (!ppdev) {
+ ret = -ENOMEM;
+ goto put_err;
+ }
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
@@ -2792,6 +2800,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
port_err:
platform_device_put(ppdev);
+put_err:
+ of_node_put(ppd.phy_node);
return ret;
}
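
Both hunks in this file plug the same leak: a phy_node reference taken during port setup was dropped on none of the failure paths, and never at removal. The shape of the fix, with stand-in refcounting instead of the real of_node_get()/of_node_put():

#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { if (n) n->refs++; }
static void node_put(struct node *n) { if (n) n->refs--; }

static int add_port(struct node *phy, int fail_alloc)
{
        int ret = 0;

        node_get(phy);                  /* reference taken early */
        if (fail_alloc) {
                ret = -12;              /* -ENOMEM */
                goto put_err;           /* must not leak the reference */
        }
        /* ... device registered; the reference is now owned by the
         * platform data and dropped at remove time ... */
        return 0;
put_err:
        node_put(phy);
        return ret;
}

int main(void)
{
        struct node phy = { 0 };

        add_port(&phy, 1);
        printf("refs after failed add: %d\n", phy.refs); /* 0: balanced */
        return 0;
}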
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3914cd9210d4..988fa28cfb5f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6bd8a18e3af3..e733b81e18a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1013,6 +1013,7 @@ static void cmd_work_handler(struct work_struct *work)
complete(&ent->done);
}
up(&cmd->vars.sem);
+ complete(&ent->slotted);
return;
}
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ca92e518be76..1baf8933a07c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -724,6 +724,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
@@ -768,9 +774,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
MLX5_IPSEC_RESCHED);
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ x->props.mode == XFRM_MODE_TUNNEL) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
@@ -797,7 +806,6 @@ err_xfrm:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -806,12 +814,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..57861d34d46f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -1718,23 +1718,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 53cfa39188cb..820debf3fbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
@@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index cc9bcc420032..6ab02f3fc291 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
struct mlx5_macsec_rule_attrs rule_attrs;
union mlx5_macsec_rule *macsec_rule;
+ if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
+ return 0;
+
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
rule_attrs.sci = sa->sci;
rule_attrs.assoc_num = sa->assoc_num;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dd16d73000c3..0ec17c276bdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -6542,8 +6542,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
- unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev, false);
+ /* When unloading the driver, the netdev is still registered if it
+ * came from legacy mode. In switchdev mode it was already
+ * unregistered before changing to the NIC profile.
+ */
+ if (priv->netdev->reg_state == NETREG_REGISTERED) {
+ unregister_netdev(priv->netdev);
+ _mlx5e_suspend(adev, false);
+ } else {
+ struct mlx5_core_dev *pos;
+ int i;
+
+ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ else
+ _mlx5e_suspend(adev, true);
+ }
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 554f9cb5b53f..fdff9fd8a89e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1509,6 +1509,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
+ /* This flag is set when devlink is used to change the eswitch mode
+ * from switchdev to legacy. Since the uplink netdev ifindex must be
+ * preserved, we detach the uplink representor profile and attach the
+ * NIC profile only; the netdev is unregistered later, when the NIC
+ * auxiliary driver is unloaded.
+ * Devlink eswitch mode changes are explicitly blocked while IPsec
+ * rules are offloaded, but other cases, such as driver unload and
+ * devlink reload, cannot be blocked. For those, the netdev must be
+ * unregistered before the profile change; otherwise the offloaded
+ * rules would never be unoffloaded before the cleanup triggered by
+ * detaching the uplink representor profile, leaking resources.
+ */
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
+ unregister_netdev(netdev);
+
mlx5e_netdev_attach_nic_profile(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 5a0047bdcb51..ed977ae75fab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
unsigned long i;
int err;
- xa_for_each(&esw->offloads.vport_reps, i, rep) {
- rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev)
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
+ rpriv = rep->rep_data[REP_ETH].priv;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
rhashtable_walk_start(&iter);
while ((flow = rhashtable_walk_next(&iter)) != NULL) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a83d41121db6..8573d36785f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -714,6 +714,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
+#define mlx5_esw_for_each_rep(esw, i, rep) \
+ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d5b42b3a19fd..06076dd9ec64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -53,9 +53,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"
-#define mlx5_esw_for_each_rep(esw, i, rep) \
- xa_for_each(&((esw)->offloads.vport_reps), i, rep)
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -3780,6 +3777,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
+ if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2eabfcc247c6..0ce999706d41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2709,6 +2709,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..39e80704b1c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -530,7 +530,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +549,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a96be98be032..b96909fbeb12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 6fa06ba2d346..f57c84e5128b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
int inlen, err, eqn;
void *cqc, *in;
__be64 *pas;
- int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
- err = mlx5_comp_eqn_get(mdev, vector, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 1bed75eca97d..740b719e7072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -382,6 +382,7 @@ err_alloc_bfreg:
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
struct mlx5_core_dev *parent = NULL;
if (!MLX5_CAP_GEN(mdev, bf)) {
@@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
*/
goto out;
- mutex_lock(&mdev->wc_state_lock);
-
- if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
- goto unlock;
-
#ifdef CONFIG_MLX5_SF
- if (mlx5_core_is_sf(mdev))
+ if (mlx5_core_is_sf(mdev)) {
parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
#endif
- if (parent) {
- mutex_lock(&parent->wc_state_lock);
+ mutex_lock(wc_state_lock);
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
mlx5_core_test_wc(parent);
mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
parent->wc_state);
mdev->wc_state = parent->wc_state;
- mutex_unlock(&parent->wc_state_lock);
+ } else {
+ mlx5_core_test_wc(mdev);
}
- mlx5_core_test_wc(mdev);
-
unlock:
- mutex_unlock(&mdev->wc_state_lock);
+ mutex_unlock(wc_state_lock);
out:
mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
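
The refactor above picks the governing mutex once, up front (the parent's for an SF, the device's own otherwise), so the state check and the probe run under exactly one lock instead of the old child-then-parent nesting. The same shape in plain pthreads:

#include <pthread.h>
#include <stdio.h>

struct dev {
        pthread_mutex_t lock;
        struct dev *parent;
        int state;
};

static void init_state(struct dev *d)
{
        /* Choose the lock before taking anything: one lock, one scope. */
        pthread_mutex_t *lock = d->parent ? &d->parent->lock : &d->lock;

        pthread_mutex_lock(lock);
        if (d->state == 0)              /* uninitialized */
                d->state = d->parent ? d->parent->state : 1;
        pthread_mutex_unlock(lock);
}

int main(void)
{
        struct dev parent = { PTHREAD_MUTEX_INITIALIZER, NULL, 2 };
        struct dev sf = { PTHREAD_MUTEX_INITIALIZER, &parent, 0 };

        init_state(&sf);
        printf("sf state = %d\n", sf.state);    /* inherits 2 */
        return 0;
}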
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 4b5fd71c897d..32d2e61f2b82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
- 0);
+ 0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 239b2258ec65..ea6214ca48e7 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -13,7 +13,6 @@ fbnic-y := fbnic_csr.o \
fbnic_ethtool.o \
fbnic_fw.o \
fbnic_hw_stats.o \
- fbnic_hwmon.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 706ae6104c8e..744eb0d95449 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -20,7 +20,6 @@ struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
struct dentry *dbg_fbd;
- struct device *hwmon;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -33,7 +32,6 @@ struct fbnic_dev {
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
- struct fbnic_fw_completion *cmpl_data;
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -142,9 +140,6 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd);
int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
-void fbnic_hwmon_register(struct fbnic_dev *fbd);
-void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-
int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
index 2118901b25e9..aeb9f333f4c7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -64,7 +64,7 @@ static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
u32 i, j;
*(data++) = start;
- *(data++) = end - 1;
+ *(data++) = end;
/* FBNIC_RPC_TCAM_ACT */
for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 7cd8841920e4..221faf8c6756 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -44,13 +44,6 @@ struct fbnic_fw_cap {
u8 link_fec;
};
-struct fbnic_fw_completion {
- struct {
- s32 millivolts;
- s32 millidegrees;
- } tsene;
-};
-
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
deleted file mode 100644
index bcd1086e3768..000000000000
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) Meta Platforms, Inc. and affiliates. */
-
-#include <linux/hwmon.h>
-
-#include "fbnic.h"
-#include "fbnic_mac.h"
-
-static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
-{
- if (type == hwmon_temp)
- return FBNIC_SENSOR_TEMP;
- if (type == hwmon_in)
- return FBNIC_SENSOR_VOLTAGE;
-
- return -EOPNOTSUPP;
-}
-
-static umode_t fbnic_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- if (type == hwmon_temp && attr == hwmon_temp_input)
- return 0444;
- if (type == hwmon_in && attr == hwmon_in_input)
- return 0444;
-
- return 0;
-}
-
-static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct fbnic_dev *fbd = dev_get_drvdata(dev);
- const struct fbnic_mac *mac = fbd->mac;
- int id;
-
- id = fbnic_hwmon_sensor_id(type);
- return id < 0 ? id : mac->get_sensor(fbd, id, val);
-}
-
-static const struct hwmon_ops fbnic_hwmon_ops = {
- .is_visible = fbnic_hwmon_is_visible,
- .read = fbnic_hwmon_read,
-};
-
-static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info fbnic_chip_info = {
- .ops = &fbnic_hwmon_ops,
- .info = fbnic_hwmon_info,
-};
-
-void fbnic_hwmon_register(struct fbnic_dev *fbd)
-{
- if (!IS_REACHABLE(CONFIG_HWMON))
- return;
-
- fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
- fbd, &fbnic_chip_info,
- NULL);
- if (IS_ERR(fbd->hwmon)) {
- dev_notice(fbd->dev,
- "Failed to register hwmon device %pe\n",
- fbd->hwmon);
- fbd->hwmon = NULL;
- }
-}
-
-void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
-{
- if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
- return;
-
- hwmon_device_unregister(fbd->hwmon);
- fbd->hwmon = NULL;
-}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 80b82ff12c4d..7b654d0a6dac 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -686,27 +686,6 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
-static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val)
-{
- struct fbnic_fw_completion fw_cmpl;
- s32 *sensor;
-
- switch (id) {
- case FBNIC_SENSOR_TEMP:
- sensor = &fw_cmpl.tsene.millidegrees;
- break;
- case FBNIC_SENSOR_VOLTAGE:
- sensor = &fw_cmpl.tsene.millivolts;
- break;
- default:
- return -EINVAL;
- }
-
- *val = *sensor;
-
- return 0;
-}
-
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
@@ -716,7 +695,6 @@ static const struct fbnic_mac fbnic_mac_asic = {
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
- .get_sensor = fbnic_mac_get_sensor_asic,
};
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 05a591653e09..476239a9d381 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -47,11 +47,6 @@ enum {
#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
-enum fbnic_sensor_id {
- FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
- FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */
-};
-
/* This structure defines the interface hooks for the MAC. The MAC hooks
* will be configured as a const struct provided with a set of function
* pointers.
@@ -88,8 +83,6 @@ struct fbnic_mac {
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
-
- int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
};
int fbnic_mac_init(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 32702dc4a066..7ccf192f13d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -296,8 +296,6 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
- fbnic_hwmon_register(fbd);
-
if (!fbd->dsn) {
dev_warn(&pdev->dev, "Reading serial number failed\n");
goto init_failure_mode;
@@ -360,7 +358,6 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_netdev_free(fbd);
}
- fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
fbnic_fw_disable_mbx(fbd);
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 2dc0c6ad54be..be95336ce089 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1656,9 +1656,9 @@ static int __init mana_driver_init(void)
static void __exit mana_driver_exit(void)
{
- debugfs_remove(mana_debugfs_root);
-
pci_unregister_driver(&mana_driver);
+
+ debugfs_remove(mana_debugfs_root);
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
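
The added size_add() guard closes an integer-overflow hole: pkt_size + data_size could wrap before the length comparison. A stand-alone version of the check using a wider intermediate type (the kernel's size_add() saturates instead):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int event_len_ok(uint32_t pkt, uint32_t data, uint32_t len,
                        uint32_t hdr)
{
        uint64_t sum = (uint64_t)pkt + data;    /* cannot wrap in 64 bits */

        if (sum > INT_MAX)
                return 0;                       /* would have overflowed */
        return len >= hdr + (uint32_t)sum;
}

int main(void)
{
        printf("%d\n", event_len_ok(0xffffffff, 16, 64, 32)); /* 0: rejected */
        printf("%d\n", event_len_ok(8, 16, 64, 32));          /* 1: fits */
        return 0;
}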
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 739707a7b40f..8a3959bb2360 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -5347,43 +5346,6 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
return false;
}
-static umode_t r8169_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
-static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct rtl8169_private *tp = dev_get_drvdata(dev);
- int val_raw;
-
- val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
- if (val_raw >= 512)
- val_raw -= 1024;
-
- *val = 1000 * val_raw / 2;
-
- return 0;
-}
-
-static const struct hwmon_ops r8169_hwmon_ops = {
- .is_visible = r8169_hwmon_is_visible,
- .read = r8169_hwmon_read,
-};
-
-static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info r8169_hwmon_chip_info = {
- .ops = &r8169_hwmon_ops,
- .info = r8169_hwmon_info,
-};
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5563,12 +5525,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* The temperature sensor is available from RTl8125B */
- if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
- /* ignore errors */
- devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
- &r8169_hwmon_chip_info,
- NULL);
rc = register_netdev(dev);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index de7f11232593..c42c0516656b 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1827,7 +1827,7 @@ static int rtase_alloc_msix(struct pci_dev *pdev, struct rtase_private *tp)
for (i = 0; i < tp->int_nums; i++) {
irq = pci_irq_vector(pdev, i);
- if (!irq) {
+ if (irq < 0) {
pci_disable_msix(pdev);
return irq;
}
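
The one-line fix reflects pci_irq_vector()'s contract: it returns a negative errno on failure and never 0, so `if (!irq)` missed real errors and would have rejected vector 0, which is valid. The corrected test in miniature (stand-in lookup, not the PCI API):

#include <errno.h>
#include <stdio.h>

static int get_vector(int idx, int nvec)
{
        return idx < nvec ? idx : -EINVAL;      /* 0 is a valid vector */
}

int main(void)
{
        int irq = get_vector(0, 4);

        if (irq < 0)                            /* correct failure test */
                return 1;
        printf("vector %d ok\n", irq);
        return 0;
}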
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac0f093f647a..bc395294a32d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2763,6 +2763,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index d90206f27161..c0603f54cec3 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
void *cb_priv);
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
- .key_len = offsetof(struct efx_tc_ct_zone, linkage),
+ .key_len = sizeof_field(struct efx_tc_ct_zone, zone),
.key_offset = 0,
.head_offset = offsetof(struct efx_tc_ct_zone, linkage),
};
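
The key_len change matters because offsetof(..., linkage) made the hash key span every member before the rhashtable hook, including padding and fields that are not part of the identity; sizeof_field(..., zone) restricts it to the zone member at offset 0. An illustrative layout (invented members, not the real struct efx_tc_ct_zone):

#include <stddef.h>
#include <stdio.h>

struct ct_zone {
        unsigned short zone;    /* the real key */
        void *extra;            /* not part of the key */
        void *linkage;          /* hash-table hook */
};

#define sizeof_field(T, m) (sizeof(((T *)0)->m))

int main(void)
{
        /* The old key covered zone + padding + extra; the new one
         * covers only zone, so equal zones always hash equal. */
        printf("old key_len = %zu, new key_len = %zu\n",
               offsetof(struct ct_zone, linkage),
               sizeof_field(struct ct_zone, zone));
        return 0;
}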
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index 3827997d2132..dc903b846b1b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/module.h>
@@ -19,6 +20,8 @@ struct tegra_mgbe {
struct reset_control *rst_mac;
struct reset_control *rst_pcs;
+ u32 iommu_sid;
+
void __iomem *hv;
void __iomem *regs;
void __iomem *xpcs;
@@ -50,7 +53,6 @@ struct tegra_mgbe {
#define MGBE_WRAP_COMMON_INTR_ENABLE 0x8704
#define MAC_SBD_INTR BIT(2)
#define MGBE_WRAP_AXI_ASID0_CTRL 0x8400
-#define MGBE_SID 0x6
static int __maybe_unused tegra_mgbe_suspend(struct device *dev)
{
@@ -84,7 +86,7 @@ static int __maybe_unused tegra_mgbe_resume(struct device *dev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
if ((value & XPCS_WRAP_UPHY_STATUS_TX_P_UP) == 0) {
@@ -241,6 +243,12 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
if (IS_ERR(mgbe->xpcs))
return PTR_ERR(mgbe->xpcs);
+ /* get controller's stream id from iommu property in device tree */
+ if (!tegra_dev_iommu_get_stream_id(mgbe->dev, &mgbe->iommu_sid)) {
+ dev_err(mgbe->dev, "failed to get iommu stream id\n");
+ return -EINVAL;
+ }
+
res.addr = mgbe->regs;
res.irq = irq;
@@ -346,7 +354,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
/* Program SID */
- writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
+ writel(mgbe->iommu_sid, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3ac32444e492..dc9884130b91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -406,22 +406,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
}
/**
- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
- * @pdev: platform_device structure
- * @plat: driver data platform structure
- *
- * Release resources claimed by stmmac_probe_config_dt().
- */
-static void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
- clk_disable_unprepare(plat->stmmac_clk);
- clk_disable_unprepare(plat->pclk);
- of_node_put(plat->phy_node);
- of_node_put(plat->mdio_node);
-}
-
-/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
* @mac: MAC address to use
@@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
rc = stmmac_mdio_setup(plat, np, &pdev->dev);
- if (rc)
- return ERR_PTR(rc);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto error_put_phy;
+ }
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -581,8 +567,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(-ENOMEM);
+ ret = ERR_PTR(-ENOMEM);
+ goto error_put_mdio;
}
plat->dma_cfg = dma_cfg;
@@ -610,8 +596,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
rc = stmmac_mtl_setup(pdev, plat);
if (rc) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(rc);
+ ret = ERR_PTR(rc);
+ goto error_put_mdio;
}
/* clock setup */
@@ -663,6 +649,10 @@ error_hw_init:
clk_disable_unprepare(plat->pclk);
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
+error_put_mdio:
+ of_node_put(plat->mdio_node);
+error_put_phy:
+ of_node_put(plat->phy_node);
return ret;
}
@@ -671,16 +661,17 @@ static void devm_stmmac_remove_config_dt(void *data)
{
struct plat_stmmacenet_data *plat = data;
- /* Platform data argument is unused */
- stmmac_remove_config_dt(NULL, plat);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->mdio_node);
+ of_node_put(plat->phy_node);
}
/**
* devm_stmmac_probe_config_dt
* @pdev: platform_device structure
* @mac: MAC address to use
- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
- * the user to call stmmac_remove_config_dt() at driver detach.
+ * Description: Devres variant of stmmac_probe_config_dt().
*/
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
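
With stmmac_remove_config_dt() gone, cleanup lives only in the devres release callback, so callers cannot forget it or run it twice. The devres idea reduced to a sketch (a stand-in for devm_add_action_or_reset(), not the real API):

#include <stdio.h>

typedef void (*cleanup_fn)(void *);

struct devres { cleanup_fn fn; void *data; };

static void release_all(struct devres *r, int n)
{
        while (n--)
                r[n].fn(r[n].data);     /* reverse order, like devres */
}

static void put_clocks(void *data)
{
        printf("releasing clocks/nodes for %s\n", (const char *)data);
}

int main(void)
{
        struct devres res[1] = { { put_clocks, "stmmac" } };

        /* ... probe succeeds, driver runs ... */
        release_all(res, 1);            /* detach triggers the callback */
        return 0;
}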
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 14e1df721f2e..5465bf872734 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -3551,7 +3551,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
- common->pf_p0_rx_ptype_rrobin = false;
+ common->pf_p0_rx_ptype_rrobin = true;
common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 52e4e350b734..5cc72a91f220 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
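
The cpsw_ale fix is subtle: the old code flipped idx2 in place and then reused the flipped value in the shift arithmetic, corrupting any field that straddles a 32-bit word; keeping the flipped word index in a separate variable preserves the math. A compact reproduction of the corrected extractor (entry words stored most-significant first, as in the ALE):

#include <stdint.h>
#include <stdio.h>

#define BITMASK(bits) ((1u << (bits)) - 1)

static uint32_t get_field(const uint32_t *e, uint32_t start, uint32_t bits)
{
        int idx = start / 32, idx2 = (start + bits - 1) / 32, index;
        uint32_t hi = 0;

        if (idx != idx2) {
                index = 2 - idx2;       /* flip word order separately */
                hi = e[index] << ((idx2 * 32) - start);
        }
        start -= idx * 32;
        idx = 2 - idx;
        return (hi | (e[idx] >> start)) & BITMASK(bits);
}

int main(void)
{
        /* A 2-bit field at bit 31 spans both low words of the entry. */
        uint32_t entry[3] = { 0, 0x1, 0x80000000 };

        printf("field = 0x%x\n", (unsigned)get_field(entry, 31, 2)); /* 0x3 */
        return 0;
}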
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 5d6d1cf78e93..768578c0d958 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(cmp), 0);
}
/* enable reset counter on CMP0 event */
@@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep)
}
icss_iep_disable(iep);
+ if (iep->pps_enabled)
+ icss_iep_pps_enable(iep, false);
+ else if (iep->perout_enabled)
+ icss_iep_perout_enable(iep, NULL, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index fdebeb2f84e0..74f0f200a89d 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);
-void prueth_emac_stop(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int slice;
-
- switch (emac->port_id) {
- case PRUETH_PORT_MII0:
- slice = ICSS_SLICE0;
- break;
- case PRUETH_PORT_MII1:
- slice = ICSS_SLICE1;
- break;
- default:
- netdev_err(emac->ndev, "invalid port\n");
- return;
- }
-
- emac->fw_running = 0;
- if (!emac->is_sr1)
- rproc_shutdown(prueth->txpru[slice]);
- rproc_shutdown(prueth->rtu[slice]);
- rproc_shutdown(prueth->pru[slice]);
-}
-EXPORT_SYMBOL_GPL(prueth_emac_stop);
-
void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
int i;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 5d2491c2943a..ddfd1c02a885 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
return 0;
}
-static void icssg_init_emac_mode(struct prueth *prueth)
+void icssg_init_emac_mode(struct prueth *prueth)
{
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
@@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth)
int i;
u8 mac[ETH_ALEN] = { 0 };
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth)
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
+EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
-static void icssg_init_fw_offload_mode(struct prueth *prueth)
+void icssg_init_fw_offload_mode(struct prueth *prueth)
{
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
int i;
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
+EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
@@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
- icssg_init_fw_offload_mode(prueth);
- else
- icssg_init_emac_mode(prueth);
-
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);
+
+int emac_fdb_flow_id_updated(struct prueth_emac *emac)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ int slice = prueth_emac_slice(emac);
+ struct mgmt_cmd fdb_cmd = { 0 };
+ int ret;
+
+ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
+ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd.param = 0;
+
+ fdb_cmd.param |= (slice << 4);
+ fdb_cmd.cmd_args[0] = 0;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
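Editor's note: the new emac_fdb_flow_id_updated() above follows the driver's firmware-management handshake: take a fresh sequence number, send the command, and only trust the response status after verifying the echoed sequence number. A minimal userspace sketch of that pattern (all types and the send_cmd() stub are invented for illustration, not the driver's API):

#include <stdio.h>

/* Hypothetical stand-ins for the driver's mgmt_cmd/mgmt_cmd_rsp pair. */
struct cmd { unsigned int seqnum; unsigned int type; };
struct rsp { unsigned int seqnum; int status; };

static unsigned int seq; /* models prueth->icssg_hwcmdseq */

/* Stub transport: echoes the sequence number and reports success. */
static int send_cmd(const struct cmd *c, struct rsp *r)
{
	r->seqnum = c->seqnum;
	r->status = 1;
	return 0;
}

static int do_mgmt_cmd(unsigned int type)
{
	struct cmd c = { .seqnum = ++seq, .type = type };
	struct rsp r = { 0 };

	if (send_cmd(&c, &r))
		return -1;
	/* A stale or mismatched response must not be trusted. */
	if (r.seqnum != c.seqnum) {
		fprintf(stderr, "seqnum mismatch: sent %u got %u\n",
			c.seqnum, r.seqnum);
		return -1;
	}
	return r.status == 1 ? 0 : -1;
}

int main(void)
{
	return do_mgmt_cmd(0x05) ? 1 : 0;
}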
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 92c2deaa3068..c884e9fa099e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -55,6 +55,7 @@ struct icssg_rxq_ctx {
#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
#define ICSSG_FW_MGMT_CMD_TYPE 0x04
#define ICSSG_FW_MGMT_PKT 0x80000000
+#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
struct icssg_r30_cmd {
u32 cmd[4];
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index c568c84a032b..d76fe6d05e10 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = {
}
};
-static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+static int prueth_start(struct rproc *rproc, const char *fw_name)
+{
+ int ret;
+
+ ret = rproc_set_firmware(rproc, fw_name);
+ if (ret)
+ return ret;
+ return rproc_boot(rproc);
+}
+
+static void prueth_shutdown(struct rproc *rproc)
+{
+ rproc_shutdown(rproc);
+}
+
+static int prueth_emac_start(struct prueth *prueth)
{
struct icssg_firmwares *firmwares;
struct device *dev = prueth->dev;
- int slice, ret;
+ int ret, slice;
if (prueth->is_switch_mode)
firmwares = icssg_switch_firmwares;
@@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
else
firmwares = icssg_emac_firmwares;
- slice = prueth_emac_slice(emac);
- if (slice < 0) {
- netdev_err(emac->ndev, "invalid port\n");
- return -EINVAL;
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
+ if (ret) {
+ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
}
- ret = icssg_config(prueth, emac, slice);
- if (ret)
- return ret;
+ return 0;
- ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
- ret = rproc_boot(prueth->pru[slice]);
- if (ret) {
- dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
- return -EINVAL;
+unwind_slices:
+ while (--slice >= 0) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
- ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
- ret = rproc_boot(prueth->rtu[slice]);
- if (ret) {
- dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
- goto halt_pru;
+ return ret;
+}
+
+static void prueth_emac_stop(struct prueth *prueth)
+{
+ int slice;
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
+}
+
+static int prueth_emac_common_start(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int ret = 0;
+ int slice;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
+
+ /* clear SMEM and MSMC settings for all slices */
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
+
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_init_fw_offload_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ emac = prueth->emac[slice];
+ if (!emac)
+ continue;
+ ret = icssg_config(prueth, emac, slice);
+ if (ret)
+ goto disable_class;
+ }
+
+ ret = prueth_emac_start(prueth);
+ if (ret)
+ goto disable_class;
- ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
- ret = rproc_boot(prueth->txpru[slice]);
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
if (ret) {
- dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
- goto halt_rtu;
+ dev_err(prueth->dev, "Failed to initialize IEP module\n");
+ goto stop_pruss;
}
- emac->fw_running = 1;
return 0;
-halt_rtu:
- rproc_shutdown(prueth->rtu[slice]);
+stop_pruss:
+ prueth_emac_stop(prueth);
-halt_pru:
- rproc_shutdown(prueth->pru[slice]);
+disable_class:
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
return ret;
}
+static int prueth_emac_common_stop(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
+
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
+
+ prueth_emac_stop(prueth);
+
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ icss_iep_exit(emac->iep);
+
+ return 0;
+}
+
/* called back by the PHY layer if there is a change in the link state of the hw port */
static void emac_adjust_link(struct net_device *ndev)
{
@@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
u32 cycletime;
int timeout;
- if (!emac->fw_running)
- return;
-
sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
@@ -543,23 +632,17 @@ static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
int ret, i, num_data_chn = emac->tx_ch_num;
+ struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
struct device *dev = prueth->dev;
int max_rx_flows;
int rx_flow;
- /* clear SMEM and MSMC settings for all slices */
- if (!prueth->emacs_initialized) {
- memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
- memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
- }
-
/* set h/w MAC as user might have re-configured */
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_class_default(prueth->miig_rt, slice, 0, false);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
/* Notify the stack of the actual queue counts. */
@@ -597,18 +680,23 @@ static int emac_ndo_open(struct net_device *ndev)
goto cleanup_napi;
}
- /* reset and start PRU firmware */
- ret = prueth_emac_start(prueth, emac);
- if (ret)
- goto free_rx_irq;
+ if (!prueth->emacs_initialized) {
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
+ }
- icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
+ ret = emac_fdb_flow_id_updated(emac);
- if (!prueth->emacs_initialized) {
- ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
- emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+ netdev_err(ndev, "Failed to update Rx Flow ID: %d\n", ret);
+ goto stop;
}
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
IRQF_ONESHOT, dev_name(dev), emac);
if (ret)
@@ -653,7 +741,8 @@ reset_rx_chn:
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
- prueth_emac_stop(emac);
+ if (!prueth->emacs_initialized)
+ prueth_emac_common_stop(prueth);
free_rx_irq:
free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
@@ -689,8 +778,6 @@ static int emac_ndo_stop(struct net_device *ndev)
if (ndev->phydev)
phy_stop(ndev->phydev);
- icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
-
if (emac->prueth->is_hsr_offload_mode)
__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
else
@@ -728,11 +815,9 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- if (prueth->emacs_initialized == 1)
- icss_iep_exit(emac->iep);
-
/* stop PRUs */
- prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ prueth_emac_common_stop(prueth);
free_irq(emac->tx_ts_irq, emac);
@@ -1053,10 +1138,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
}
}
-static void prueth_emac_restart(struct prueth *prueth)
+static int prueth_emac_restart(struct prueth *prueth)
{
struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
+ int ret;
/* Detach the net_device for both PRUeth ports */
if (netif_running(emac0->ndev))
@@ -1065,36 +1151,46 @@ static void prueth_emac_restart(struct prueth *prueth)
netif_device_detach(emac1->ndev);
/* Disable both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
/* Stop both pru cores for both PRUeth ports */
- prueth_emac_stop(emac0);
- prueth->emacs_initialized--;
- prueth_emac_stop(emac1);
- prueth->emacs_initialized--;
+ ret = prueth_emac_common_stop(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to stop the firmwares");
+ return ret;
+ }
/* Start both pru cores for both PRUeth ports */
- prueth_emac_start(prueth, emac0);
- prueth->emacs_initialized++;
- prueth_emac_start(prueth, emac1);
- prueth->emacs_initialized++;
+ ret = prueth_emac_common_start(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to start the firmwares");
+ return ret;
+ }
/* Enable forwarding for both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
/* Attach net_device for both PRUeth ports */
netif_device_attach(emac0->ndev);
netif_device_attach(emac1->ndev);
+
+ return ret;
}
static void icssg_change_mode(struct prueth *prueth)
{
struct prueth_emac *emac;
- int mac;
+ int mac, ret;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
emac = prueth->emac[mac];
@@ -1173,13 +1269,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int ret;
prueth->br_members &= ~BIT(emac->port_id);
if (prueth->is_switch_mode) {
prueth->is_switch_mode = false;
emac->port_vlan = 0;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
}
prueth_offload_fwd_mark_update(prueth);
@@ -1228,6 +1329,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
struct prueth *prueth = emac->prueth;
struct prueth_emac *emac0;
struct prueth_emac *emac1;
+ int ret;
emac0 = prueth->emac[PRUETH_MAC0];
emac1 = prueth->emac[PRUETH_MAC1];
@@ -1238,7 +1340,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
emac0->port_vlan = 0;
emac1->port_vlan = 0;
prueth->hsr_dev = NULL;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
netdev_dbg(ndev, "Disabling HSR Offload mode\n");
}
}
@@ -1413,13 +1519,10 @@ static int prueth_probe(struct platform_device *pdev)
prueth->pa_stats = NULL;
}
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
goto put_cores;
- }
-
- if (eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
if (ret)
goto put_cores;
@@ -1618,14 +1721,12 @@ put_pruss:
pruss_put(prueth->pruss);
put_cores:
- if (eth1_node) {
- prueth_put_cores(prueth, ICSS_SLICE1);
- of_node_put(eth1_node);
- }
-
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
prueth_put_cores(prueth, ICSS_SLICE0);
of_node_put(eth0_node);
+
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
}
return ret;
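The rewritten prueth_emac_start() above boots three cores per slice and, on any failure, unwinds the cores already started in reverse order through the unwind_slices label. A self-contained sketch of that boot-with-unwind idiom, with stubs standing in for rproc_boot()/rproc_shutdown():

#include <stdio.h>

#define NUM_SLICES 2
#define CORES_PER_SLICE 3

/* Stubs standing in for rproc_boot()/rproc_shutdown(). */
static int boot(int slice, int core)
{
	printf("boot slice %d core %d\n", slice, core);
	return 0; /* flip to nonzero to exercise the unwind path */
}

static void shutdown_core(int slice, int core)
{
	printf("shutdown slice %d core %d\n", slice, core);
}

static int start_all(void)
{
	int slice, core, ret = 0;

	for (slice = 0; slice < NUM_SLICES; slice++) {
		for (core = 0; core < CORES_PER_SLICE; core++) {
			ret = boot(slice, core);
			if (ret) {
				/* Undo the cores of this partial slice... */
				while (--core >= 0)
					shutdown_core(slice, core);
				goto unwind_slices;
			}
		}
	}
	return 0;

unwind_slices:
	/* ...then every fully started slice, in reverse order. */
	while (--slice >= 0)
		for (core = CORES_PER_SLICE - 1; core >= 0; core--)
			shutdown_core(slice, core);
	return ret;
}

int main(void)
{
	return start_all();
}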
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f5c1d473e9f9..5473315ea204 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -140,7 +140,6 @@ struct prueth_rx_chn {
/* data for each emac port */
struct prueth_emac {
bool is_sr1;
- bool fw_running;
struct prueth *prueth;
struct net_device *ndev;
u8 mac_addr[6];
@@ -361,6 +360,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
+void icssg_init_emac_mode(struct prueth *prueth);
+void icssg_init_fw_offload_mode(struct prueth *prueth);
/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
@@ -377,6 +378,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
u8 untag_mask, bool add);
u16 icssg_get_pvid(struct prueth_emac *emac);
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
+int emac_fdb_flow_id_updated(struct prueth_emac *emac);
#define prueth_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct prueth_tx_chn, napi_tx)
@@ -407,7 +409,6 @@ void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
irqreturn_t prueth_rx_irq(int irq, void *dev_id);
-void prueth_emac_stop(struct prueth_emac *emac);
void prueth_cleanup_tx_ts(struct prueth_emac *emac);
int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
int prueth_prepare_rx_chan(struct prueth_emac *emac,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 5024f0647a0d..3dc86397c367 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -440,7 +440,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
goto halt_pru;
}
- emac->fw_running = 1;
return 0;
halt_pru:
@@ -449,6 +448,29 @@ halt_pru:
return ret;
}
+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1bf9c38e4125..deaf670c160e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -334,27 +334,25 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
+ buf[0] = rd32(wx, WX_MNG_MBOX);
+ if ((buf[0] & 0xff0000) >> 16 == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
+ status = -EINVAL;
+ goto rel_out;
+ }
+
/* Check command completion */
if (status) {
- wx_dbg(wx, "Command has failed with no status valid.\n");
-
- buf[0] = rd32(wx, WX_MNG_MBOX);
- if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
- status = -EINVAL;
- goto rel_out;
- }
- if ((buf[0] & 0xff0000) >> 16 == 0x80) {
- wx_dbg(wx, "It's unknown cmd.\n");
- status = -EINVAL;
- goto rel_out;
- }
-
+ wx_err(wx, "Command has failed with no status valid.\n");
wx_dbg(wx, "write value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buffer[i]);
wx_dbg(wx, "read value:\n");
for (i = 0; i < dword_len; i++)
wx_dbg(wx, "%x ", buf[i]);
+ wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);
+
+ goto rel_out;
}
if (!return_data)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..ae743991117c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2056,6 +2056,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EBUSY;
}
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->rx_coalesce_usecs)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89a996ad8cd0..fbabada7d3ba 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1524,8 +1524,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(dev_net(dev), gtp_net_id);
- list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ gn = net_generic(src_net, gtp_net_id);
+ list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
@@ -1551,7 +1551,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
- list_del_rcu(&gtp->list);
+ list_del(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -2271,16 +2271,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
struct pdp_ctx *pctx;
- struct gtp_net *gn;
-
- gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ for_each_netdev_rcu(net, dev) {
+ if (dev->rtnl_link_ops != &gtp_link_ops)
+ continue;
+
+ gtp = netdev_priv(dev);
+
if (last_gtp && last_gtp != gtp)
continue;
else
@@ -2475,9 +2478,14 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct gtp_dev *gtp, *gtp_next;
+ struct net_device *dev;
+
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &gtp_link_ops)
+ gtp_dellink(dev, dev_to_kill);
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
}
}
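The gtp_genl_dump_pdp() change walks the netns device list and identifies GTP devices by their rtnl_link_ops pointer, instead of relying on the driver-private RCU list whose lifetime handling was the source of the bug. A userspace model of that filter-by-ops iteration (list and ops types are invented for illustration):

#include <stdio.h>

struct ops { const char *kind; };
struct dev { const char *name; const struct ops *ops; struct dev *next; };

static const struct ops gtp_ops = { "gtp" };
static const struct ops other_ops = { "veth" };

/* Visit only devices owned by the given ops, as for_each_netdev_rcu()
 * plus an rtnl_link_ops comparison does in the kernel. */
static void dump_kind(struct dev *head, const struct ops *want)
{
	for (struct dev *d = head; d; d = d->next) {
		if (d->ops != want)
			continue;
		printf("dumping %s\n", d->name);
	}
}

int main(void)
{
	struct dev c = { "gtp1", &gtp_ops, NULL };
	struct dev b = { "veth0", &other_ops, &c };
	struct dev a = { "gtp0", &gtp_ops, &b };

	dump_kind(&a, &gtp_ops);
	return 0;
}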
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index e685a7f946f0..753215ebc67c 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -3072,7 +3072,11 @@ static int ca8210_probe(struct spi_device *spi_device)
spi_set_drvdata(priv->spi, priv);
if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) {
cascoda_api_upstream = ca8210_test_int_driver_write;
- ca8210_test_interface_init(priv);
+ ret = ca8210_test_interface_init(priv);
+ if (ret) {
+ dev_crit(&spi_device->dev, "ca8210_test_interface_init failed\n");
+ goto error;
+ }
} else {
cascoda_api_upstream = NULL;
}
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index 9adad59b8676..d247fe483c58 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -125,6 +125,8 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
xfer.data.in = skb_put(skb, mi->mrl);
+ /* Hold the lock so frames reach netif_rx() in the same order they are read from the I3C bus. */
+ mutex_lock(&mi->lock);
rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1);
if (rc < 0)
goto err;
@@ -166,8 +168,10 @@ static int mctp_i3c_read(struct mctp_i3c_device *mi)
stats->rx_dropped++;
}
+ mutex_unlock(&mi->lock);
return 0;
err:
+ mutex_unlock(&mi->lock);
kfree_skb(skb);
return rc;
}
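The mctp-i3c fix serializes the bus read and the hand-off to the stack under one mutex, so concurrent readers cannot interleave the two steps and reorder frames. A runnable pthread model of the same locking discipline (compile with -pthread; the stand-in steps are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int next_seq;

/* Model of mctp_i3c_read(): the bus transfer and the delivery to the
 * stack happen under one lock, so frames cannot be reordered between
 * the two steps when several readers run concurrently. */
static void *reader(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		pthread_mutex_lock(&lock);
		int seq = next_seq++;        /* "bus read" */
		printf("deliver %d\n", seq); /* "netif_rx()" */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, reader, NULL);
	pthread_create(&b, NULL, reader, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}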
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 7246a910728d..3059435af596 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -684,7 +684,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
if (ret < 0)
return ret;
- mask = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ val = 0;
+ mask = DW_VR_MII_DIG_CTRL1_2G5_EN | DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+
if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
val = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
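The xpcs fix widens the read-modify-write mask so DW_VR_MII_DIG_CTRL1_2G5_EN is cleared whenever it is not explicitly requested; with the old, narrower mask the bit could never be cleared once set. A small sketch of the mask/val modify helper this relies on (bit positions here are illustrative, not the real register layout):

#include <stdint.h>
#include <stdio.h>

/* Generic modify: every bit in 'mask' is forced to its value in 'val';
 * bits outside the mask are preserved. If a bit is only ever OR-ed in
 * (mask too narrow), it can never be cleared again -- which is what
 * widening the mask in the hunk above fixes for the 2G5_EN bit. */
static uint16_t modify(uint16_t reg, uint16_t mask, uint16_t val)
{
	return (reg & ~mask) | (val & mask);
}

#define BIT_2G5_EN      (1u << 2) /* illustrative positions only */
#define BIT_MAC_AUTO_SW (1u << 9)

int main(void)
{
	uint16_t reg = BIT_2G5_EN; /* stale 2.5G setting left behind */

	/* mask covers both bits, val enables only MAC_AUTO_SW */
	reg = modify(reg, BIT_2G5_EN | BIT_MAC_AUTO_SW, BIT_MAC_AUTO_SW);
	printf("reg = 0x%04x\n", reg); /* 2G5_EN now cleared */
	return 0;
}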
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 69434fd13f96..68d0d9e92a22 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -206,8 +206,8 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(dev_net(dev), pfcp_net_id);
- list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+ pn = net_generic(net, pfcp_net_id);
+ list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
@@ -224,7 +224,7 @@ static void pfcp_dellink(struct net_device *dev, struct list_head *head)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
- list_del_rcu(&pfcp->list);
+ list_del(&pfcp->list);
unregister_netdevice_queue(dev, head);
}
@@ -247,11 +247,16 @@ static int __net_init pfcp_net_init(struct net *net)
static void __net_exit pfcp_net_exit(struct net *net)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
- struct pfcp_dev *pfcp;
+ struct pfcp_dev *pfcp, *pfcp_next;
+ struct net_device *dev;
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &pfcp_link_ops)
+ pfcp_dellink(dev, &list);
+
+ list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
pfcp_dellink(pfcp->dev, &list);
unregister_netdevice_many(&list);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3ef508840674..eeb33eb181ac 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -432,10 +432,12 @@ struct kszphy_ptp_priv {
struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
+ struct clk *clk;
int led_mode;
u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
+ bool clk_enable;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
@@ -2050,6 +2052,46 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
+static void kszphy_enable_clk(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ if (!priv->clk_enable && priv->clk) {
+ clk_prepare_enable(priv->clk);
+ priv->clk_enable = true;
+ }
+}
+
+static void kszphy_disable_clk(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ if (priv->clk_enable && priv->clk) {
+ clk_disable_unprepare(priv->clk);
+ priv->clk_enable = false;
+ }
+}
+
+static int kszphy_generic_resume(struct phy_device *phydev)
+{
+ kszphy_enable_clk(phydev);
+
+ return genphy_resume(phydev);
+}
+
+static int kszphy_generic_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_suspend(phydev);
+ if (ret)
+ return ret;
+
+ kszphy_disable_clk(phydev);
+
+ return 0;
+}
+
static int kszphy_suspend(struct phy_device *phydev)
{
/* Disable PHY Interrupts */
@@ -2059,7 +2101,7 @@ static int kszphy_suspend(struct phy_device *phydev)
phydev->drv->config_intr(phydev);
}
- return genphy_suspend(phydev);
+ return kszphy_generic_suspend(phydev);
}
static void kszphy_parse_led_mode(struct phy_device *phydev)
@@ -2090,7 +2132,9 @@ static int kszphy_resume(struct phy_device *phydev)
{
int ret;
- genphy_resume(phydev);
+ ret = kszphy_generic_resume(phydev);
+ if (ret)
+ return ret;
/* After switching from power-down to normal mode, an internal global
* reset is automatically generated. Wait a minimum of 1 ms before
@@ -2112,6 +2156,24 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
+/* Because of errata DS80000700A, a receiver error can follow a software
+ * power down. The suspend and resume callbacks therefore only disable and
+ * enable the external RMII reference clock.
+ */
+static int ksz8041_resume(struct phy_device *phydev)
+{
+ kszphy_enable_clk(phydev);
+
+ return 0;
+}
+
+static int ksz8041_suspend(struct phy_device *phydev)
+{
+ kszphy_disable_clk(phydev);
+
+ return 0;
+}
+
static int ksz9477_resume(struct phy_device *phydev)
{
int ret;
@@ -2159,7 +2221,10 @@ static int ksz8061_resume(struct phy_device *phydev)
if (!(ret & BMCR_PDOWN))
return 0;
- genphy_resume(phydev);
+ ret = kszphy_generic_resume(phydev);
+ if (ret)
+ return ret;
+
usleep_range(1000, 2000);
/* Re-program the value after chip is reset. */
@@ -2177,6 +2242,11 @@ static int ksz8061_resume(struct phy_device *phydev)
return 0;
}
+static int ksz8061_suspend(struct phy_device *phydev)
+{
+ return kszphy_suspend(phydev);
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -2217,10 +2287,14 @@ static int kszphy_probe(struct phy_device *phydev)
} else if (!clk) {
/* unnamed clock from the generic ethernet-phy binding */
clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
}
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_disable_unprepare(clk);
+ priv->clk = clk;
+
if (ksz8041_fiber_mode(phydev))
phydev->port = PORT_FIBRE;
@@ -5290,6 +5364,21 @@ static int lan8841_probe(struct phy_device *phydev)
return 0;
}
+static int lan8804_resume(struct phy_device *phydev)
+{
+ return kszphy_resume(phydev);
+}
+
+static int lan8804_suspend(struct phy_device *phydev)
+{
+ return kszphy_generic_suspend(phydev);
+}
+
+static int lan8841_resume(struct phy_device *phydev)
+{
+ return kszphy_generic_resume(phydev);
+}
+
static int lan8841_suspend(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
@@ -5298,7 +5387,7 @@ static int lan8841_suspend(struct phy_device *phydev)
if (ptp_priv->ptp_clock)
ptp_cancel_worker_sync(ptp_priv->ptp_clock);
- return genphy_suspend(phydev);
+ return kszphy_generic_suspend(phydev);
}
static struct phy_driver ksphy_driver[] = {
@@ -5358,9 +5447,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- /* No suspend/resume callbacks because of errata DS80000700A,
- * receiver error following software power down.
- */
+ .suspend = ksz8041_suspend,
+ .resume = ksz8041_resume,
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -5436,7 +5524,7 @@ static struct phy_driver ksphy_driver[] = {
.soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = kszphy_suspend,
+ .suspend = ksz8061_suspend,
.resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
@@ -5507,8 +5595,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = kszphy_resume,
+ .suspend = lan8804_suspend,
+ .resume = lan8804_resume,
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
@@ -5526,7 +5614,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = lan8841_suspend,
- .resume = genphy_resume,
+ .resume = lan8841_resume,
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
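The micrel changes hinge on the kszphy_enable_clk()/kszphy_disable_clk() pair: a bool keeps the clock prepare count balanced across repeated suspend/resume calls, and the NULL check keeps clock-less configurations working. A standalone model of that idempotent guard (the clk type and helpers are stand-ins, not the kernel clk API):

#include <stdbool.h>
#include <stdio.h>

struct clk { const char *name; int prepare_count; };

static void clk_on(struct clk *c)  { c->prepare_count++; }
static void clk_off(struct clk *c) { c->prepare_count--; }

struct phy_priv {
	struct clk *clk; /* may be NULL: the clock is optional */
	bool clk_enable; /* guards against double enable/disable */
};

static void enable_clk(struct phy_priv *p)
{
	if (!p->clk_enable && p->clk) {
		clk_on(p->clk);
		p->clk_enable = true;
	}
}

static void disable_clk(struct phy_priv *p)
{
	if (p->clk_enable && p->clk) {
		clk_off(p->clk);
		p->clk_enable = false;
	}
}

int main(void)
{
	struct clk c = { "rmii-ref", 0 };
	struct phy_priv p = { &c, false };

	enable_clk(&p);
	enable_clk(&p);  /* no-op: already enabled */
	disable_clk(&p);
	disable_clk(&p); /* no-op: already disabled */
	printf("prepare_count = %d\n", c.prepare_count); /* stays 0 */
	return 0;
}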
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 5c4e88be46ee..8797ca1a8a21 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -64,15 +64,11 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
- if (ret < 0)
- return ret;
-
chan = priv->port[id].chan[0];
if (chan < 4)
- val = (u16)(ret | BIT(chan));
+ val = BIT(chan);
else
- val = (u16)(ret | BIT(chan + 4));
+ val = BIT(chan + 4);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
@@ -100,15 +96,11 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
- if (ret < 0)
- return ret;
-
chan = priv->port[id].chan[0];
if (chan < 4)
- val = (u16)(ret | BIT(chan + 4));
+ val = BIT(chan + 4);
else
- val = (u16)(ret | BIT(chan + 8));
+ val = BIT(chan + 8);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
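The tps23881 change stops read-modify-writing the power enable/disable registers and writes only the bit for the port being changed; presumably the enable/disable fields act as write-one-to-trigger push buttons, so OR-ing in bits read back from PW_STATUS could trigger unrelated ports. A toy model of why RMW is wrong for such registers (the register semantics here are an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Model of a "push-button" register: every 1 written triggers an
 * action on that channel; reads return unrelated status bits. For
 * such registers, read-modify-write is wrong -- OR-ing in the status
 * triggers actions on channels we never meant to touch. */
static void pw_write(uint16_t val)
{
	for (int ch = 0; ch < 16; ch++)
		if (val & BIT(ch))
			printf("triggered channel %d\n", ch);
}

int main(void)
{
	uint16_t status = BIT(0) | BIT(2); /* what a read might return */

	/* wrong: RMW triggers channels 0 and 2 as a side effect */
	pw_write(status | BIT(5));
	printf("---\n");
	/* right: write only the intended bit, as the fix above does */
	pw_write(BIT(5));
	return 0;
}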
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index cd1fe8490ae5..1c43f283ac4a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -161,6 +161,7 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
+const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 34c91deca57b..17721bb47e25 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -545,6 +545,7 @@ extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
extern const char iwl_fm_name[];
+extern const char iwl_wh_name[];
extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index f85c01e04ebf..7d973546c9fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2954,6 +2954,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
int idx)
{
int i;
+ int n_channels = 0;
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
@@ -2962,7 +2963,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
- match->channels[match->n_channels++] =
+ match->channels[n_channels++] =
mvm->nd_channels[i]->center_freq;
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
@@ -2970,9 +2971,11 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
- match->channels[match->n_channels++] =
+ match->channels[n_channels++] =
mvm->nd_channels[i]->center_freq;
}
+ /* We may have ended up with fewer channels than we allocated. */
+ match->n_channels = n_channels;
}
/**
@@ -3053,6 +3056,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
GFP_KERNEL);
if (!net_detect || !n_matches)
goto out_report_nd;
+ net_detect->n_matches = n_matches;
+ n_matches = 0;
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct cfg80211_wowlan_nd_match *match;
@@ -3066,8 +3071,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
GFP_KERNEL);
if (!match)
goto out_report_nd;
+ match->n_channels = n_channels;
- net_detect->matches[net_detect->n_matches++] = match;
+ net_detect->matches[n_matches++] = match;
/* We inverted the order of the SSIDs in the scan
* request, so invert the index here.
@@ -3082,6 +3088,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
}
+ /* We may have fewer matches than we allocated. */
+ net_detect->n_matches = n_matches;
out_report_nd:
wakeup.net_detect = net_detect;
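The iwlwifi fix counts into a local variable and publishes the final value once, so n_channels/n_matches can never be observed running past the arrays actually filled. A compact sketch of that local-counter-then-publish pattern:

#include <stdio.h>

#define MAX_CHANNELS 8

struct match {
	int n_channels;
	int channels[MAX_CHANNELS];
};

/* Fill using a local counter and publish it once, so a reader of
 * 'm->n_channels' can never observe a value that ran ahead of (or
 * past the end of) the array actually filled in. */
static void set_freqs(struct match *m, const int *freqs, int nfreqs)
{
	int n = 0;

	for (int i = 0; i < nfreqs && n < MAX_CHANNELS; i++)
		if (freqs[i] > 0) /* "matching channel" test */
			m->channels[n++] = freqs[i];

	m->n_channels = n; /* may be fewer than allocated */
}

int main(void)
{
	int freqs[] = { 2412, 0, 5180, 0, 5500 };
	struct match m = { 0 };

	set_freqs(&m, freqs, 5);
	printf("n_channels = %d\n", m.n_channels); /* prints 3 */
	return 0;
}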
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 805fb249a0c6..8fb2aa282242 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1106,19 +1106,54 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* Bz */
-/* FIXME: need to change the naming according to the actual CRF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax201_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax211_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_wh_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax201_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax211_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_wh_name),
+
/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index 862964a8cc87..52386dfb5f4a 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -442,8 +442,8 @@ static void cw1200_spi_disconnect(struct spi_device *func)
cw1200_core_release(self->core);
self->core = NULL;
}
+ cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
- cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
index 63eb08c43c05..6764c13530b9 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
break;
msleep(20);
- } while (retries-- > 0);
+ } while (--retries > 0);
if (!retries) {
dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
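The iosm one-liner swaps a post-decrement for a pre-decrement: with retries-- > 0 the counter exits the loop at -1, so the subsequent if (!retries) exhaustion check can never fire. A two-case demonstration:

#include <stdio.h>

int main(void)
{
	int retries;

	/* Post-decrement: the test uses the old value, the body runs one
	 * extra time, and retries exits the loop at -1 -- so a later
	 * "if (!retries)" exhaustion check can never fire. */
	retries = 3;
	do { } while (retries-- > 0);
	printf("post-decrement leaves retries = %d\n", retries); /* -1 */

	/* Pre-decrement, as in the fix: retries is exactly 0 when the
	 * loop exhausts, so "!retries" reliably detects the timeout. */
	retries = 3;
	do { } while (--retries > 0);
	printf("pre-decrement leaves retries = %d\n", retries); /* 0 */
	return 0;
}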
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 3931c7a13f5a..cbdbb91e8381 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -104,14 +104,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
fsm_state_notify(ctl->md, state);
}
+static void fsm_release_command(struct kref *ref)
+{
+ struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);
+
+ kfree(cmd);
+}
+
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
- *cmd->ret = result;
- complete_all(cmd->done);
+ cmd->result = result;
+ complete_all(&cmd->done);
}
- kfree(cmd);
+ kref_put(&cmd->refcnt, fsm_release_command);
}
static void fsm_del_kf_event(struct t7xx_fsm_event *event)
@@ -475,7 +482,6 @@ static int fsm_main_thread(void *data)
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
- DECLARE_COMPLETION_ONSTACK(done);
struct t7xx_fsm_command *cmd;
unsigned long flags;
int ret;
@@ -487,11 +493,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
INIT_LIST_HEAD(&cmd->entry);
cmd->cmd_id = cmd_id;
cmd->flag = flag;
+ kref_init(&cmd->refcnt);
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
- cmd->done = &done;
- cmd->ret = &ret;
+ init_completion(&cmd->done);
+ kref_get(&cmd->refcnt);
}
+ kref_get(&cmd->refcnt);
spin_lock_irqsave(&ctl->command_lock, flags);
list_add_tail(&cmd->entry, &ctl->command_queue);
spin_unlock_irqrestore(&ctl->command_lock, flags);
@@ -501,11 +509,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
unsigned long wait_ret;
- wait_ret = wait_for_completion_timeout(&done,
+ wait_ret = wait_for_completion_timeout(&cmd->done,
msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
- if (!wait_ret)
- return -ETIMEDOUT;
+ ret = wait_ret ? cmd->result : -ETIMEDOUT;
+ kref_put(&cmd->refcnt, fsm_release_command);
return ret;
}
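The t7xx rework embeds the completion in the command and reference-counts the command, so whichever of the worker or the (possibly timed-out) submitter finishes last performs the free; the old on-stack completion could be signalled after the waiter had already returned. A userspace model of the shared-lifetime idea using C11 atomics (a sketch of the kref pattern, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	atomic_int refcnt;
	int result;
};

static void cmd_put(struct cmd *c)
{
	/* Last reference frees, like kref_put(&cmd->refcnt, ...). */
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		printf("freeing cmd\n");
		free(c);
	}
}

int main(void)
{
	struct cmd *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	/* One reference for the queue, one for the waiter: whichever of
	 * the worker (fsm_finish_command) or the submitter (including
	 * its timeout path) drops last performs the actual free, so
	 * neither side can touch freed memory. */
	atomic_init(&c->refcnt, 2);

	c->result = 0; /* worker completes the command... */
	cmd_put(c);    /* ...and drops the queue's reference */

	cmd_put(c);    /* waiter reads the result (or times out) and drops */
	return 0;
}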
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
index 7b0a9baf488c..6e0601bb752e 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -110,8 +110,9 @@ struct t7xx_fsm_command {
struct list_head entry;
enum t7xx_fsm_cmd_state cmd_id;
unsigned int flag;
- struct completion *done;
- int *ret;
+ struct completion done;
+ int result;
+ struct kref refcnt;
};
struct t7xx_fsm_notifier {
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 4319ab50c10d..1de11b722f04 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1251,7 +1251,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
anv->admin_tagset.numa_node = NUMA_NO_NODE;
anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
anv->admin_tagset.driver_data = &anv->adminq;
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
@@ -1275,7 +1274,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
anv->tagset.timeout = NVME_IO_TIMEOUT;
anv->tagset.numa_node = NUMA_NO_NODE;
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
- anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
anv->tagset.driver_data = &anv->ioq;
ret = blk_mq_alloc_tag_set(&anv->tagset);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a970168a3014..76b615d4d5b9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -885,6 +885,12 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_OK;
}
+static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd)
+{
+ cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
+ cmnd->rw.lbatm = cpu_to_le16(0xffff);
+}
+
static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
struct request *req)
{
@@ -1017,18 +1023,17 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
control |= NVME_RW_PRINFO_PRACT;
}
- switch (ns->head->pi_type) {
- case NVME_NS_DPS_PI_TYPE3:
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
control |= NVME_RW_PRINFO_PRCHK_GUARD;
- break;
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- control |= NVME_RW_PRINFO_PRCHK_GUARD |
- NVME_RW_PRINFO_PRCHK_REF;
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_REF;
if (op == nvme_cmd_zone_append)
control |= NVME_RW_APPEND_PIREMAP;
nvme_set_ref_tag(ns, cmnd, req);
- break;
+ }
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
+ control |= NVME_RW_PRINFO_PRCHK_APP;
+ nvme_set_app_tag(req, cmnd);
}
}
@@ -2002,6 +2007,7 @@ static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
lim->atomic_write_hw_boundary = boundary;
lim->atomic_write_hw_unit_min = bs;
lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
@@ -2128,9 +2134,10 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct queue_limits lim;
int ret;
- blk_mq_freeze_queue(ns->disk->queue);
lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
+
+ blk_mq_freeze_queue(ns->disk->queue);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -2177,12 +2184,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
+ lim = queue_limits_start_update(ns->disk->queue);
+
blk_mq_freeze_queue(ns->disk->queue);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
-
- lim = queue_limits_start_update(ns->disk->queue);
nvme_set_ctrl_limits(ns->ctrl, &lim);
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
@@ -2285,6 +2292,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
struct queue_limits *ns_lim = &ns->disk->queue->limits;
struct queue_limits lim;
+ lim = queue_limits_start_update(ns->head->disk->queue);
blk_mq_freeze_queue(ns->head->disk->queue);
/*
* queue_limits mixes values that are the hardware limitations
@@ -2301,7 +2309,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
* the splitting limits in to make sure we still obey possibly
* lower limitations of other controllers.
*/
- lim = queue_limits_start_update(ns->head->disk->queue);
lim.logical_block_size = ns_lim->logical_block_size;
lim.physical_block_size = ns_lim->physical_block_size;
lim.io_min = ns_lim->io_min;
@@ -3092,7 +3099,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
struct nvme_effects_log **log)
{
- struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
+ struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
int ret;
if (cel)
@@ -3109,7 +3116,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
return ret;
}
- xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(cel);
+ return xa_err(old);
+ }
out:
*log = cel;
return 0;
@@ -3171,6 +3182,25 @@ free_data:
return ret;
}
+static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
+ u8 csi, struct nvme_effects_log **log)
+{
+ struct nvme_effects_log *effects, *old;
+
+ effects = kzalloc(sizeof(*effects), GFP_KERNEL);
+ if (!effects)
+ return -ENOMEM;
+
+ old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ kfree(effects);
+ return xa_err(old);
+ }
+
+ *log = effects;
+ return 0;
+}
+
static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{
struct nvme_effects_log *log = ctrl->effects;
@@ -3217,10 +3247,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
if (!ctrl->effects) {
- ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
- if (!ctrl->effects)
- return -ENOMEM;
- xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
+ ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
+ if (ret < 0)
+ return ret;
}
nvme_init_known_nvm_effects(ctrl);
@@ -4564,7 +4593,6 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect and keep alive */
set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
@@ -4639,7 +4667,6 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
/* Reserved for fabric connect */
set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b81af7919e94..094be164ffdc 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,7 +16,6 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
-#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 611b02c8a8b3..7be92d07430e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -173,6 +173,11 @@ enum nvme_quirks {
* MSI (but not MSI-X) interrupts are broken and never fire.
*/
NVME_QUIRK_BROKEN_MSI = (1 << 21),
+
+ /*
+ * Align dma pool segment size to 512 bytes
+ */
+ NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
};
/*
@@ -1182,43 +1187,4 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const char *nvme_get_error_status_str(u16 status);
-const char *nvme_get_opcode_str(u8 opcode);
-const char *nvme_get_admin_opcode_str(u8 opcode);
-const char *nvme_get_fabrics_opcode_str(u8 opcode);
-#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const char *nvme_get_error_status_str(u16 status)
-{
- return "I/O Error";
-}
-static inline const char *nvme_get_opcode_str(u8 opcode)
-{
- return "I/O Cmd";
-}
-static inline const char *nvme_get_admin_opcode_str(u8 opcode)
-{
- return "Admin Cmd";
-}
-
-static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
-{
- return "Fabrics Cmd";
-}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-
-static inline const char *nvme_opcode_str(int qid, u8 opcode)
-{
- return qid ? nvme_get_opcode_str(opcode) :
- nvme_get_admin_opcode_str(opcode);
-}
-
-static inline const char *nvme_fabrics_opcode_str(
- int qid, const struct nvme_command *cmd)
-{
- if (nvme_is_fabrics(cmd))
- return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
-
- return nvme_opcode_str(qid, cmd->common.opcode);
-}
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1a5ba80f1811..278bed4e35bb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -8,7 +8,6 @@
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
@@ -373,7 +372,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
/*
* Ensure that the doorbell is updated before reading the event
* index from memory. The controller needs to provide similar
- * ordering to ensure the envent index is updated before reading
+ * ordering to ensure the event index is updated before reading
* the doorbell.
*/
mb();
@@ -463,7 +462,7 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL && offset)
- blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+ blk_mq_map_hw_queues(map, dev->dev, offset);
else
blk_mq_map_queues(map);
qoff += map->nr_queues;
@@ -1148,13 +1147,13 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
}
}
-static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
- struct io_comp_batch *iob)
+static inline bool nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
{
- int found = 0;
+ bool found = false;
while (nvme_cqe_pending(nvmeq)) {
- found++;
+ found = true;
/*
* load-load control dependency between phase and the rest of
* the cqe requires a full read memory barrier
@@ -2086,8 +2085,8 @@ static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size)
sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma,
GFP_KERNEL);
if (!dev->host_mem_descs) {
- dma_free_noncontiguous(dev->dev, dev->host_mem_size,
- dev->hmb_sgt, DMA_BIDIRECTIONAL);
+ dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt,
+ DMA_BIDIRECTIONAL);
dev->hmb_sgt = NULL;
return -ENOMEM;
}
@@ -2834,15 +2833,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
+ size_t small_align = 256;
+
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
NVME_CTRL_PAGE_SIZE,
NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
+ if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
+ small_align = 512;
+
/* Optimisation for I/Os between 4k and 128k */
dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
- 256, 256, 0);
+ 256, small_align, 0);
if (!dev->prp_small_pool) {
dma_pool_destroy(dev->prp_page_pool);
return -ENOMEM;
@@ -3607,7 +3611,7 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
- .driver_data = NVME_QUIRK_QDEPTH_ONE },
+ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_BOGUS_NID, },
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 28c76a3e1bd2..841238f38fdd 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
#endif
+static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
@@ -127,6 +129,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
+ NVME_TCP_Q_IO_CPU_SET = 3,
};
enum nvme_tcp_recv_state {
@@ -1562,23 +1565,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
+/**
+ * Track the number of queues assigned to each CPU using a global per-CPU
+ * counter and select the least used CPU from the mq_map. The goal is to
+ * spread different controllers' I/O threads across different CPU cores.
+ *
+ * Note that the accounting is not 100% perfect, but it does not need to be:
+ * we simply make a best effort to select the best candidate CPU core
+ * available at any given point.
+ */
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- int n = 0;
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int qid = nvme_tcp_queue_id(queue) - 1;
+ unsigned int *mq_map = NULL;
+ int cpu, min_queues = INT_MAX, io_cpu;
+
+ if (wq_unbound)
+ goto out;
if (nvme_tcp_default_queue(queue))
- n = qid - 1;
+ mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
else if (nvme_tcp_read_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ mq_map = set->map[HCTX_TYPE_READ].mq_map;
else if (nvme_tcp_poll_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
- ctrl->io_queues[HCTX_TYPE_READ] - 1;
- if (wq_unbound)
- queue->io_cpu = WORK_CPU_UNBOUND;
- else
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ mq_map = set->map[HCTX_TYPE_POLL].mq_map;
+
+ if (WARN_ON(!mq_map))
+ goto out;
+
+ /* Search for the least used cpu from the mq_map */
+ io_cpu = WORK_CPU_UNBOUND;
+ for_each_online_cpu(cpu) {
+ int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
+
+ if (mq_map[cpu] != qid)
+ continue;
+ if (num_queues < min_queues) {
+ io_cpu = cpu;
+ min_queues = num_queues;
+ }
+ }
+ if (io_cpu != WORK_CPU_UNBOUND) {
+ queue->io_cpu = io_cpu;
+ atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
+ set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
+ }
+out:
+ dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
+ qid, queue->io_cpu);
}
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -1722,7 +1758,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->sock->sk->sk_allocation = GFP_ATOMIC;
queue->sock->sk->sk_use_task_frag = false;
- nvme_tcp_set_queue_io_cpu(queue);
+ queue->io_cpu = WORK_CPU_UNBOUND;
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
@@ -1844,6 +1880,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
+ if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
+ atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
@@ -1878,9 +1917,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
nvme_tcp_init_recv_ctx(queue);
nvme_tcp_setup_sock_ops(queue);
- if (idx)
+ if (idx) {
+ nvme_tcp_set_queue_io_cpu(queue);
ret = nvmf_connect_io_queue(nctrl, idx);
- else
+ } else
ret = nvmf_connect_admin_queue(nctrl);
if (!ret) {
@@ -2024,14 +2064,6 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
return __nvme_tcp_alloc_io_queues(ctrl);
}
-static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
-{
- nvme_tcp_stop_io_queues(ctrl);
- if (remove)
- nvme_remove_io_tag_set(ctrl);
- nvme_tcp_free_io_queues(ctrl);
-}
-
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
int ret, nr_queues;
@@ -2176,9 +2208,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
- if (remove)
+ if (remove) {
nvme_unquiesce_io_queues(ctrl);
- nvme_tcp_destroy_io_queues(ctrl, remove);
+ nvme_remove_io_tag_set(ctrl);
+ }
+ nvme_tcp_free_io_queues(ctrl);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
@@ -2267,7 +2301,9 @@ destroy_io:
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
- nvme_tcp_destroy_io_queues(ctrl, new);
+ if (new)
+ nvme_remove_io_tag_set(ctrl);
+ nvme_tcp_free_io_queues(ctrl);
}
destroy_admin:
nvme_stop_keep_alive(ctrl);
@@ -2849,6 +2885,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+ int cpu;
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
@@ -2866,6 +2903,9 @@ static int __init nvme_tcp_init_module(void)
if (!nvme_tcp_wq)
return -ENOMEM;
+ for_each_possible_cpu(cpu)
+ atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
+
nvmf_register_transport(&nvme_tcp_transport);
return 0;
}
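nvme_tcp_set_queue_io_cpu() now picks, among the CPUs mapped to a queue id, the one currently serving the fewest queues, then bumps that CPU's counter. A standalone sketch of the selection loop:

#include <limits.h>
#include <stdio.h>

#define NCPUS 4

/* Among the CPUs whose mq_map entry matches this queue id, pick the
 * one serving the fewest queues and account for the new assignment.
 * This spreads the I/O threads of many controllers across cores
 * instead of piling them onto the same ones. */
static int pick_io_cpu(const int *mq_map, int *queues_per_cpu, int qid)
{
	int io_cpu = -1, min_queues = INT_MAX;

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (mq_map[cpu] != qid)
			continue;
		if (queues_per_cpu[cpu] < min_queues) {
			io_cpu = cpu;
			min_queues = queues_per_cpu[cpu];
		}
	}
	if (io_cpu >= 0)
		queues_per_cpu[io_cpu]++;
	return io_cpu;
}

int main(void)
{
	int mq_map[NCPUS] = { 0, 0, 1, 1 };       /* cpu -> queue id */
	int queues_per_cpu[NCPUS] = { 2, 0, 1, 1 };

	printf("queue 0 -> cpu %d\n", pick_io_cpu(mq_map, queues_per_cpu, 0));
	printf("queue 1 -> cpu %d\n", pick_io_cpu(mq_map, queues_per_cpu, 1));
	return 0;
}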
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 46be031f91b4..fb7446d6d682 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -115,3 +115,14 @@ config NVME_TARGET_AUTH
target side.
If unsure, say N.
+
+config NVME_TARGET_PCI_EPF
+ tristate "NVMe PCI Endpoint Function target support"
+ depends on NVME_TARGET && PCI_ENDPOINT
+ depends on NVME_CORE=y || NVME_CORE=NVME_TARGET
+ help
+	  This enables support for the NVMe PCI Endpoint Function target
+	  driver, which allows creating an NVMe PCI controller using an
+	  endpoint-mode-capable PCI controller.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index f2b025bbe10c..ed8522911d1f 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
+obj-$(CONFIG_NVME_TARGET_PCI_EPF) += nvmet-pci-epf.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o pr.o
@@ -20,4 +21,5 @@ nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
nvme-fcloop-y += fcloop.o
nvmet-tcp-y += tcp.o
+nvmet-pci-epf-y += pci-epf.o
nvmet-$(CONFIG_TRACING) += trace.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2962794ce881..e670dc185a96 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -12,6 +12,142 @@
#include <linux/unaligned.h>
#include "nvmet.h"
+static void nvmet_execute_delete_sq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!sqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_sqid(ctrl, sqid, false);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ status = ctrl->ops->delete_sq(ctrl, sqid);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_create_sq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_command *cmd = req->cmd;
+ u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
+ u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
+ u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
+ u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
+ u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!sqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_sqid(ctrl, sqid, true);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ /*
+ * Note: The NVMe specification allows multiple SQs to use the same CQ.
+ * However, the target code does not really support that. So for now,
+ * prevent this and fail the command if sqid and cqid are different.
+ */
+ if (!cqid || cqid != sqid) {
+ pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid);
+ status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
+ status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_delete_cq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!cqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_cqid(ctrl, cqid);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ status = ctrl->ops->delete_cq(ctrl, cqid);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_create_cq(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_command *cmd = req->cmd;
+ u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
+ u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
+ u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
+ u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
+ u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
+ u16 status;
+
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ status = nvmet_report_invalid_opcode(req);
+ goto complete;
+ }
+
+ if (!cqid) {
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = nvmet_check_cqid(ctrl, cqid);
+ if (status != NVME_SC_SUCCESS)
+ goto complete;
+
+ if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
+ status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
+ prp1, irq_vector);
+
+complete:
+ nvmet_req_complete(req, status);
+}
+
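For reference, the checks in nvmet_execute_create_sq() mirror the fields a host fills in a Create I/O Submission Queue command. A hedged sketch of the host-side encoding (values are illustrative; note that this target additionally requires cqid to equal sqid):

#include <linux/nvme.h>
#include <linux/string.h>

/* Illustrative: encode a Create I/O SQ command for qid 1, depth 128. */
static void example_build_create_sq(struct nvme_command *cmd, u64 sq_dma_addr)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->create_sq.opcode = nvme_admin_create_sq;
	cmd->create_sq.prp1 = cpu_to_le64(sq_dma_addr);
	cmd->create_sq.sqid = cpu_to_le16(1);
	cmd->create_sq.qsize = cpu_to_le16(128 - 1);	/* 0's based */
	cmd->create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
	cmd->create_sq.cqid = cpu_to_le16(1);		/* must equal sqid here */
}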
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
@@ -139,7 +275,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
unsigned long idx;
ctrl = req->sq->ctrl;
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
@@ -230,8 +366,18 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
+ struct nvme_effects_log *log)
{
+ /* For a PCI target controller, advertise support for the SQ/CQ create and delete admin commands. */
+ if (nvmet_is_pci_ctrl(ctrl)) {
+ log->acs[nvme_admin_delete_sq] =
+ log->acs[nvme_admin_create_sq] =
+ log->acs[nvme_admin_delete_cq] =
+ log->acs[nvme_admin_create_cq] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ }
+
log->acs[nvme_admin_get_log_page] =
log->acs[nvme_admin_identify] =
log->acs[nvme_admin_abort_cmd] =
@@ -240,7 +386,10 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
log->acs[nvme_admin_async_event] =
log->acs[nvme_admin_keep_alive] =
cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+}
+static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
+{
log->iocs[nvme_cmd_read] =
log->iocs[nvme_cmd_flush] =
log->iocs[nvme_cmd_dsm] =
@@ -265,6 +414,7 @@ static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_effects_log *log;
u16 status = NVME_SC_SUCCESS;
@@ -276,6 +426,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
switch (req->cmd->get_log_page.csi) {
case NVME_CSI_NVM:
+ nvmet_get_cmd_effects_admin(ctrl, log);
nvmet_get_cmd_effects_nvm(log);
break;
case NVME_CSI_ZNS:
@@ -283,6 +434,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
status = NVME_SC_INVALID_IO_CMD_SET;
goto free;
}
+ nvmet_get_cmd_effects_admin(ctrl, log);
nvmet_get_cmd_effects_nvm(log);
nvmet_get_cmd_effects_zns(log);
break;
@@ -331,9 +483,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
u32 count = 0;
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
- xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->anagrpid == grpid)
desc->nsids[count++] = cpu_to_le32(ns->nsid);
+ }
}
desc->grpid = cpu_to_le32(grpid);
@@ -507,7 +660,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys *subsys = ctrl->subsys;
struct nvme_id_ctrl *id;
- u32 cmd_capsule_size;
+ u32 cmd_capsule_size, ctratt;
u16 status = 0;
if (!subsys->subsys_discovered) {
@@ -522,9 +675,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
goto out;
}
- /* XXX: figure out how to assign real vendors IDs. */
- id->vid = 0;
- id->ssvid = 0;
+ id->vid = cpu_to_le16(subsys->vendor_id);
+ id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);
memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
@@ -556,8 +708,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* XXX: figure out what to do about RTD3R/RTD3 */
id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
- id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
- NVME_CTRL_ATTR_TBKAS);
+ ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
+ if (nvmet_is_pci_ctrl(ctrl))
+ ctratt |= NVME_CTRL_ATTR_RHII;
+ id->ctratt = cpu_to_le32(ctratt);
id->oacs = 0;
@@ -772,7 +926,7 @@ static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
goto out;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_endgid)
continue;
@@ -815,7 +969,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
goto out;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
if (match_css && req->ns->csi != req->cmd->identify.csi)
@@ -1104,6 +1258,92 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
return 0;
}
+static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ if (!nvmet_is_pci_ctrl(ctrl))
+ return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
+
+ /*
+ * The NVMe Base Specification v2.1 recommends supporting a 128-bit host
+ * ID (section 5.1.25.1.28.1). However, that same section also says
+ * that "The controller may support a 64-bit Host Identifier and/or an
+ * extended 128-bit Host Identifier". So keep this simple and support
+ * only 128-bit host IDs, to avoid having to check that all controllers
+ * associated with the same subsystem use the same host ID size.
+ */
+ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
+ sizeof(req->sq->ctrl->hostid));
+}
+
+static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_irq_coalesce irqc = {
+ .time = (cdw11 >> 8) & 0xff,
+ .thr = cdw11 & 0xff,
+ };
+
+ /*
+ * This feature is not supported for fabrics controllers but is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+}
+
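The Interrupt Coalescing feature packs both parameters into cdw11: the aggregation time in bits 15:8 and the aggregation threshold in bits 7:0, both 0's based. A small round-trip sketch matching the unpacking above and the result composition in the Get Features path later in this patch (the example_* names are illustrative):

#include <linux/types.h>

struct example_irq_coalesce {
	u8 thr;		/* aggregation threshold (CQEs), 0's based */
	u8 time;	/* aggregation time (100us units), 0's based */
};

static u32 example_irqc_to_cdw11(struct example_irq_coalesce c)
{
	return ((u32)c.time << 8) | c.thr;
}

static struct example_irq_coalesce example_cdw11_to_irqc(u32 cdw11)
{
	return (struct example_irq_coalesce){
		.thr = cdw11 & 0xff,
		.time = (cdw11 >> 8) & 0xff,
	};
}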
+static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_irq_config irqcfg = {
+ .iv = cdw11 & 0xffff,
+ .cd = (cdw11 >> 16) & 0x1,
+ };
+
+ /*
+ * This feature is not supported for fabrics controllers but is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
+}
+
+static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
+ struct nvmet_feat_arbitration arb = {
+ .hpw = (cdw11 >> 24) & 0xff,
+ .mpw = (cdw11 >> 16) & 0xff,
+ .lpw = (cdw11 >> 8) & 0xff,
+ .ab = cdw11 & 0x3,
+ };
+
+ if (!ctrl->ops->set_feature) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
+}
+
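Likewise, the Arbitration feature uses cdw11 bits 31:24 for the high-priority weight, 23:16 for medium, 15:8 for low, and the low-order bits for the arbitration burst (masked with 0x3 here, matching the handler above). A sketch of the inverse packing, as done by the Get Features handler further down:

#include <linux/types.h>

static u32 example_arb_to_cdw11(u8 hpw, u8 mpw, u8 lpw, u8 ab)
{
	return ((u32)hpw << 24) | ((u32)mpw << 16) |
	       ((u32)lpw << 8) | (ab & 0x3);
}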
void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
@@ -1117,6 +1357,9 @@ void nvmet_execute_set_features(struct nvmet_req *req)
return;
switch (cdw10 & 0xff) {
+ case NVME_FEAT_ARBITRATION:
+ status = nvmet_set_feat_arbitration(req);
+ break;
case NVME_FEAT_NUM_QUEUES:
ncqr = (cdw11 >> 16) & 0xffff;
nsqr = cdw11 & 0xffff;
@@ -1127,6 +1370,12 @@ void nvmet_execute_set_features(struct nvmet_req *req)
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
+ case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_set_feat_irq_coalesce(req);
+ break;
+ case NVME_FEAT_IRQ_CONFIG:
+ status = nvmet_set_feat_irq_config(req);
+ break;
case NVME_FEAT_KATO:
status = nvmet_set_feat_kato(req);
break;
@@ -1134,7 +1383,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
break;
case NVME_FEAT_HOST_ID:
- status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
+ status = nvmet_set_feat_host_id(req);
break;
case NVME_FEAT_WRITE_PROTECT:
status = nvmet_set_feat_write_protect(req);
@@ -1171,6 +1420,79 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
return 0;
}
+static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_feat_irq_coalesce irqc = { };
+ u16 status;
+
+ /*
+ * This feature is not supported for fabrics controllers but is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
+ struct nvmet_feat_irq_config irqcfg = { .iv = iv };
+ u16 status;
+
+ /*
+ * This feature is not supported for fabrics controllers but is
+ * mandatory for PCI controllers.
+ */
+ if (!nvmet_is_pci_ctrl(ctrl)) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_feat_arbitration arb = { };
+ u16 status;
+
+ if (!ctrl->ops->get_feature) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+
+ status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_set_result(req,
+ ((u32)arb.hpw << 24) |
+ ((u32)arb.mpw << 16) |
+ ((u32)arb.lpw << 8) |
+ (arb.ab & 0x3));
+
+ return NVME_SC_SUCCESS;
+}
+
void nvmet_get_feat_kato(struct nvmet_req *req)
{
nvmet_set_result(req, req->sq->ctrl->kato * 1000);
@@ -1197,21 +1519,24 @@ void nvmet_execute_get_features(struct nvmet_req *req)
* need to come up with some fake values for these.
*/
#if 0
- case NVME_FEAT_ARBITRATION:
- break;
case NVME_FEAT_POWER_MGMT:
break;
case NVME_FEAT_TEMP_THRESH:
break;
case NVME_FEAT_ERR_RECOVERY:
break;
+ case NVME_FEAT_WRITE_ATOMIC:
+ break;
+#endif
+ case NVME_FEAT_ARBITRATION:
+ status = nvmet_get_feat_arbitration(req);
+ break;
case NVME_FEAT_IRQ_COALESCE:
+ status = nvmet_get_feat_irq_coalesce(req);
break;
case NVME_FEAT_IRQ_CONFIG:
+ status = nvmet_get_feat_irq_config(req);
break;
- case NVME_FEAT_WRITE_ATOMIC:
- break;
-#endif
case NVME_FEAT_ASYNC_EVENT:
nvmet_get_feat_async_event(req);
break;
@@ -1292,6 +1617,27 @@ out:
nvmet_req_complete(req, status);
}
+u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_fabrics_admin_cmd_data_len(req);
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
+ return nvmet_discovery_cmd_data_len(req);
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ return nvmet_get_log_page_len(cmd);
+ case nvme_admin_identify:
+ return NVME_IDENTIFY_DATA_SIZE;
+ case nvme_admin_get_features:
+ return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -1306,13 +1652,30 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
+ /* For PCI controllers, admin commands shall not use SGL. */
+ if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
+ cmd->common.flags & NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+
if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) {
+ case nvme_admin_delete_sq:
+ req->execute = nvmet_execute_delete_sq;
+ return 0;
+ case nvme_admin_create_sq:
+ req->execute = nvmet_execute_create_sq;
+ return 0;
case nvme_admin_get_log_page:
req->execute = nvmet_execute_get_log_page;
return 0;
+ case nvme_admin_delete_cq:
+ req->execute = nvmet_execute_delete_cq;
+ return 0;
+ case nvme_admin_create_cq:
+ req->execute = nvmet_execute_create_cq;
+ return 0;
case nvme_admin_identify:
req->execute = nvmet_execute_identify;
return 0;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index eeee9e9b854c..e44ef69dffc2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -37,6 +37,7 @@ static struct nvmet_type_name_map nvmet_transport[] = {
{ NVMF_TRTYPE_RDMA, "rdma" },
{ NVMF_TRTYPE_FC, "fc" },
{ NVMF_TRTYPE_TCP, "tcp" },
+ { NVMF_TRTYPE_PCI, "pci" },
{ NVMF_TRTYPE_LOOP, "loop" },
};
@@ -46,6 +47,7 @@ static const struct nvmet_type_name_map nvmet_addr_family[] = {
{ NVMF_ADDR_FAMILY_IP6, "ipv6" },
{ NVMF_ADDR_FAMILY_IB, "ib" },
{ NVMF_ADDR_FAMILY_FC, "fc" },
+ { NVMF_ADDR_FAMILY_PCI, "pci" },
{ NVMF_ADDR_FAMILY_LOOP, "loop" },
};
@@ -810,18 +812,6 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
NULL,
};
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
-{
- struct config_item *ns_item;
- char name[12];
-
- snprintf(name, sizeof(name), "%u", nsid);
- mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
- ns_item = config_group_find_item(&subsys->namespaces_group, name);
- mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
- return ns_item != NULL;
-}
-
static void nvmet_ns_release(struct config_item *item)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -1412,6 +1402,49 @@ out_unlock:
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+static ssize_t nvmet_subsys_attr_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0x%x\n", to_subsys(item)->vendor_id);
+}
+
+static ssize_t nvmet_subsys_attr_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ u16 vid;
+
+ if (kstrtou16(page, 0, &vid))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->vendor_id = vid;
+ up_write(&nvmet_config_sem);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_vendor_id);
+
+static ssize_t nvmet_subsys_attr_subsys_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0x%x\n",
+ to_subsys(item)->subsys_vendor_id);
+}
+
+static ssize_t nvmet_subsys_attr_subsys_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ u16 ssvid;
+
+ if (kstrtou16(page, 0, &ssvid))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->subsys_vendor_id = ssvid;
+ up_write(&nvmet_config_sem);
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_subsys_vendor_id);
+
static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
char *page)
{
@@ -1640,6 +1673,8 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_serial,
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
+ &nvmet_subsys_attr_attr_vendor_id,
+ &nvmet_subsys_attr_attr_subsys_vendor_id,
&nvmet_subsys_attr_attr_model,
&nvmet_subsys_attr_attr_qid_max,
&nvmet_subsys_attr_attr_ieee_oui,
@@ -1794,6 +1829,7 @@ static struct config_group *nvmet_referral_make(
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&port->entry);
+ port->disc_addr.trtype = NVMF_TRTYPE_MAX;
config_group_init_type_name(&port->group, name, &nvmet_referral_type);
return &port->group;
@@ -2019,6 +2055,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->max_queue_size = -1; /* < 0 == let the transport choose */
+ port->disc_addr.trtype = NVMF_TRTYPE_MAX;
port->disc_addr.portid = cpu_to_le16(portid);
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
@@ -2254,12 +2291,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
const char *page, size_t count)
{
struct list_head *entry;
+ char *old_nqn, *new_nqn;
size_t len;
len = strcspn(page, "\n");
if (!len || len > NVMF_NQN_FIELD_LEN - 1)
return -EINVAL;
+ new_nqn = kstrndup(page, len, GFP_KERNEL);
+ if (!new_nqn)
+ return -ENOMEM;
+
down_write(&nvmet_config_sem);
list_for_each(entry, &nvmet_subsystems_group.cg_children) {
struct config_item *item =
@@ -2268,13 +2310,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
if (!strncmp(config_item_name(item), page, len)) {
pr_err("duplicate NQN %s\n", config_item_name(item));
up_write(&nvmet_config_sem);
+ kfree(new_nqn);
return -EINVAL;
}
}
- memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
- memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+ old_nqn = nvmet_disc_subsys->subsysnqn;
+ nvmet_disc_subsys->subsysnqn = new_nqn;
up_write(&nvmet_config_sem);
+ kfree(old_nqn);
return len;
}
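The rework above is an instance of a common kernel pattern: allocate the replacement string outside the lock, swap the pointer while holding it, and free the old copy only after dropping it, so no reader ever sees a partially written buffer and no allocation sleeps under the semaphore. A distilled, self-contained sketch (all names here are generic, not from this patch):

#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>

static DECLARE_RWSEM(example_sem);
static char *example_str;

static int example_replace(const char *src, size_t len)
{
	char *new_str, *old_str;

	new_str = kstrndup(src, len, GFP_KERNEL);
	if (!new_str)
		return -ENOMEM;

	down_write(&example_sem);
	old_str = example_str;
	example_str = new_str;
	up_write(&example_sem);

	kfree(old_str);	/* safe: no reader can still hold it */
	return 0;
}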
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1f4e9989663b..cdc4a09a6e8a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -127,7 +127,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
unsigned long idx;
u32 nsid = 0;
- xa_for_each(&subsys->namespaces, idx, cur)
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
nsid = cur->nsid;
return nsid;
@@ -441,11 +441,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
req->ns = xa_load(&subsys->namespaces, nsid);
- if (unlikely(!req->ns)) {
+ if (unlikely(!req->ns || !req->ns->enabled)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
- if (nvmet_subsys_nsid_exists(subsys, nsid))
- return NVME_SC_INTERNAL_PATH_ERROR;
- return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+ if (!req->ns) /* ns doesn't exist! */
+ return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+
+ /* ns exists but it's disabled */
+ req->ns = NULL;
+ return NVME_SC_INTERNAL_PATH_ERROR;
}
percpu_ref_get(&req->ns->ref);
@@ -583,8 +586,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_unlock;
ret = -EMFILE;
- if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
@@ -599,38 +600,19 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
- ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
- 0, GFP_KERNEL);
- if (ret)
- goto out_dev_put;
-
- if (ns->nsid > subsys->max_nsid)
- subsys->max_nsid = ns->nsid;
-
- ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
- if (ret)
- goto out_restore_subsys_maxnsid;
-
if (ns->pr.enable) {
ret = nvmet_pr_init_ns(ns);
if (ret)
- goto out_remove_from_subsys;
+ goto out_dev_put;
}
- subsys->nr_namespaces++;
-
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
+ xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
-
-out_remove_from_subsys:
- xa_erase(&subsys->namespaces, ns->nsid);
-out_restore_subsys_maxnsid:
- subsys->max_nsid = nvmet_max_nsid(subsys);
- percpu_ref_exit(&ns->ref);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -649,15 +631,37 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
goto out_unlock;
ns->enabled = false;
- xa_erase(&ns->subsys->namespaces, ns->nsid);
- if (ns->nsid == subsys->max_nsid)
- subsys->max_nsid = nvmet_max_nsid(subsys);
+ xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
mutex_unlock(&subsys->lock);
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
+
+ mutex_lock(&subsys->lock);
+ nvmet_ns_changed(subsys, ns->nsid);
+ nvmet_ns_dev_disable(ns);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+
+ nvmet_ns_disable(ns);
+
+ mutex_lock(&subsys->lock);
+
+ xa_erase(&subsys->namespaces, ns->nsid);
+ if (ns->nsid == subsys->max_nsid)
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ mutex_unlock(&subsys->lock);
+
/*
* Now that we removed the namespaces from the lookup list, we
* can kill the per_cpu ref and wait for any remaining references
@@ -671,21 +675,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
wait_for_completion(&ns->disable_done);
percpu_ref_exit(&ns->ref);
- if (ns->pr.enable)
- nvmet_pr_exit_ns(ns);
-
mutex_lock(&subsys->lock);
-
subsys->nr_namespaces--;
- nvmet_ns_changed(subsys, ns->nsid);
- nvmet_ns_dev_disable(ns);
-out_unlock:
mutex_unlock(&subsys->lock);
-}
-
-void nvmet_ns_free(struct nvmet_ns *ns)
-{
- nvmet_ns_disable(ns);
down_write(&nvmet_ana_sem);
nvmet_ana_group_enabled[ns->anagrpid]--;
@@ -699,15 +691,33 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ns *ns;
+ mutex_lock(&subsys->lock);
+
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
if (!ns)
- return NULL;
+ goto out_unlock;
init_completion(&ns->disable_done);
ns->nsid = nsid;
ns->subsys = subsys;
+ if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+ goto out_free;
+
+ if (ns->nsid > subsys->max_nsid)
+ subsys->max_nsid = nsid;
+
+ if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL))
+ goto out_exit;
+
+ subsys->nr_namespaces++;
+
+ mutex_unlock(&subsys->lock);
+
down_write(&nvmet_ana_sem);
ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
nvmet_ana_group_enabled[ns->anagrpid]++;
@@ -718,6 +728,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->csi = NVME_CSI_NVM;
return ns;
+out_exit:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
+out_free:
+ kfree(ns);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return NULL;
}
static void nvmet_update_sq_head(struct nvmet_req *req)
@@ -818,6 +836,89 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
complete(&sq->confirm_done);
}
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid)
+{
+ if (!ctrl->sqs)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ if (cqid > ctrl->subsys->max_qid)
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ /*
+ * Note: For PCI controllers, the NVMe specification allows multiple
+ * SQs to share a single CQ. However, we do not support this yet, so
+ * check that there is no SQ defined for a CQ. If one exists, then the
+ * CQ ID is invalid for creation as well as when the CQ is being
+ * deleted (as that would mean that the SQ was not deleted before the
+ * CQ).
+ */
+ if (ctrl->sqs[cqid])
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
+ u16 qid, u16 size)
+{
+ u16 status;
+
+ status = nvmet_check_cqid(ctrl, qid);
+ if (status != NVME_SC_SUCCESS)
+ return status;
+
+ nvmet_cq_setup(ctrl, cq, qid, size);
+
+ return NVME_SC_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_create);
+
+u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid,
+ bool create)
+{
+ if (!ctrl->sqs)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ if (sqid > ctrl->subsys->max_qid)
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ if ((create && ctrl->sqs[sqid]) ||
+ (!create && !ctrl->sqs[sqid]))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+ u16 sqid, u16 size)
+{
+ u16 status;
+ int ret;
+
+ if (!kref_get_unless_zero(&ctrl->ref))
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+ status = nvmet_check_sqid(ctrl, sqid, true);
+ if (status != NVME_SC_SUCCESS)
+ goto ctrl_put;
+
+ ret = nvmet_sq_init(sq);
+ if (ret) {
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto ctrl_put;
+ }
+
+ nvmet_sq_setup(ctrl, sq, sqid, size);
+ sq->ctrl = ctrl;
+
+ return NVME_SC_SUCCESS;
+
+ctrl_put:
+ nvmet_ctrl_put(ctrl);
+ return status;
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_create);
+
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
struct nvmet_ctrl *ctrl = sq->ctrl;
@@ -911,6 +1012,33 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
return 0;
}
+static u32 nvmet_io_cmd_transfer_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+ u32 metadata_len = 0;
+
+ if (nvme_is_fabrics(cmd))
+ return nvmet_fabrics_io_cmd_data_len(req);
+
+ if (!req->ns)
+ return 0;
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_zone_append:
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+ metadata_len = nvmet_rw_metadata_len(req);
+ return nvmet_rw_data_len(req) + metadata_len;
+ case nvme_cmd_dsm:
+ return nvmet_dsm_len(req);
+ case nvme_cmd_zone_mgmt_recv:
+ return (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+ default:
+ return 0;
+ }
+}
+
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -1012,12 +1140,15 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
/*
* For fabrics, PSDT field shall describe metadata pointer (MPTR) that
* contains an address of a single contiguous physical buffer that is
- * byte aligned.
+ * byte aligned. For PCI controllers, this is optional, so it is not enforced.
*/
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
- req->error_loc = offsetof(struct nvme_common_command, flags);
- status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
- goto fail;
+ if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) {
+ req->error_loc =
+ offsetof(struct nvme_common_command, flags);
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto fail;
+ }
}
if (unlikely(!req->sq->ctrl))
@@ -1059,11 +1190,27 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+size_t nvmet_req_transfer_len(struct nvmet_req *req)
+{
+ if (likely(req->sq->qid != 0))
+ return nvmet_io_cmd_transfer_len(req);
+ if (unlikely(!req->sq->ctrl))
+ return nvmet_connect_cmd_data_len(req);
+ return nvmet_admin_cmd_data_len(req);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_transfer_len);
+
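nvmet_req_transfer_len() lets a transport size a command's data phase before executing it, which the PCI endpoint driver added below relies on to allocate its buffers up front. A hedged sketch of the intended call pattern (example_prepare_data() is hypothetical):

/* Illustrative: size the data buffer once parsing has succeeded. */
static int example_prepare_data(struct nvmet_req *req)
{
	size_t len = nvmet_req_transfer_len(req);

	if (!len)
		return 0;	/* command has no data phase */

	req->transfer_len = len;
	/* A real transport would allocate its data buffers here. */
	return 0;
}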
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
if (unlikely(len != req->transfer_len)) {
+ u16 status;
+
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
+ if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
+ status = NVME_SC_SGL_INVALID_DATA;
+ else
+ status = NVME_SC_INVALID_FIELD;
+ nvmet_req_complete(req, status | NVME_STATUS_DNR);
return false;
}
@@ -1074,8 +1221,14 @@ EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
if (unlikely(data_len > req->transfer_len)) {
+ u16 status;
+
req->error_loc = offsetof(struct nvme_common_command, dptr);
- nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
+ if (req->cmd->common.flags & NVME_CMD_SGL_ALL)
+ status = NVME_SC_SGL_INVALID_DATA;
+ else
+ status = NVME_SC_INVALID_FIELD;
+ nvmet_req_complete(req, status | NVME_STATUS_DNR);
return false;
}
@@ -1166,41 +1319,6 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
-static inline bool nvmet_cc_en(u32 cc)
-{
- return (cc >> NVME_CC_EN_SHIFT) & 0x1;
-}
-
-static inline u8 nvmet_cc_css(u32 cc)
-{
- return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
-}
-
-static inline u8 nvmet_cc_mps(u32 cc)
-{
- return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
-}
-
-static inline u8 nvmet_cc_ams(u32 cc)
-{
- return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
-}
-
-static inline u8 nvmet_cc_shn(u32 cc)
-{
- return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
-}
-
-static inline u8 nvmet_cc_iosqes(u32 cc)
-{
- return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
-}
-
-static inline u8 nvmet_cc_iocqes(u32 cc)
-{
- return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
-}
-
static inline bool nvmet_css_supported(u8 cc_css)
{
switch (cc_css << NVME_CC_CSS_SHIFT) {
@@ -1277,6 +1395,7 @@ void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
mutex_unlock(&ctrl->lock);
}
+EXPORT_SYMBOL_GPL(nvmet_update_cc);
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
@@ -1384,17 +1503,17 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
* Note: ctrl->subsys->lock should be held when calling this function
*/
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
- struct nvmet_req *req)
+ struct device *p2p_client)
{
struct nvmet_ns *ns;
unsigned long idx;
- if (!req->p2p_client)
+ if (!p2p_client)
return;
- ctrl->p2p_client = get_device(req->p2p_client);
+ ctrl->p2p_client = get_device(p2p_client);
- xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
@@ -1421,45 +1540,44 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
ctrl->ops->delete_ctrl(ctrl);
}
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
- uuid_t *hostid)
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
{
struct nvmet_subsys *subsys;
struct nvmet_ctrl *ctrl;
+ u32 kato = args->kato;
+ u8 dhchap_status;
int ret;
- u16 status;
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
- subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+ subsys = nvmet_find_get_subsys(args->port, args->subsysnqn);
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
- subsysnqn);
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
- req->error_loc = offsetof(struct nvme_common_command, dptr);
- goto out;
+ args->subsysnqn);
+ args->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ args->error_loc = offsetof(struct nvme_common_command, dptr);
+ return NULL;
}
down_read(&nvmet_config_sem);
- if (!nvmet_host_allowed(subsys, hostnqn)) {
+ if (!nvmet_host_allowed(subsys, args->hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
- hostnqn, subsysnqn);
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+ args->hostnqn, args->subsysnqn);
+ args->result = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
- status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
- req->error_loc = offsetof(struct nvme_common_command, dptr);
+ args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+ args->error_loc = offsetof(struct nvme_common_command, dptr);
goto out_put_subsystem;
}
up_read(&nvmet_config_sem);
- status = NVME_SC_INTERNAL;
+ args->status = NVME_SC_INTERNAL;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- ctrl->port = req->port;
- ctrl->ops = req->ops;
+ ctrl->port = args->port;
+ ctrl->ops = args->ops;
#ifdef CONFIG_NVME_TARGET_PASSTHRU
/* By default, set loop targets to clear IDS */
@@ -1473,8 +1591,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
- memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
- memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE);
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
@@ -1497,12 +1615,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
- status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
+ args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
goto out_free_sqs;
}
ctrl->cntlid = ret;
- uuid_copy(&ctrl->hostid, hostid);
+ if (args->hostid)
+ uuid_copy(&ctrl->hostid, args->hostid);
/*
* Discovery controllers may use some arbitrary high value
@@ -1524,12 +1642,35 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (ret)
goto init_pr_fail;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
- nvmet_setup_p2p_ns_map(ctrl, req);
+ nvmet_setup_p2p_ns_map(ctrl, args->p2p_client);
nvmet_debugfs_ctrl_setup(ctrl);
mutex_unlock(&subsys->lock);
- *ctrlp = ctrl;
- return 0;
+
+ dhchap_status = nvmet_setup_auth(ctrl);
+ if (dhchap_status) {
+ pr_err("Failed to setup authentication, dhchap status %u\n",
+ dhchap_status);
+ nvmet_ctrl_put(ctrl);
+ if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
+ args->status =
+ NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
+ else
+ args->status = NVME_SC_INTERNAL;
+ return NULL;
+ }
+
+ args->status = NVME_SC_SUCCESS;
+
+ pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+
+ return ctrl;
init_pr_fail:
mutex_unlock(&subsys->lock);
@@ -1543,9 +1684,9 @@ out_free_ctrl:
kfree(ctrl);
out_put_subsystem:
nvmet_subsys_put(subsys);
-out:
- return status;
+ return NULL;
}
+EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl);
static void nvmet_ctrl_free(struct kref *ref)
{
@@ -1581,6 +1722,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
kref_put(&ctrl->ref, nvmet_ctrl_free);
}
+EXPORT_SYMBOL_GPL(nvmet_ctrl_put);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 28843df5fa7c..df7207640506 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -224,6 +224,9 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
}
list_for_each_entry(r, &req->port->referrals, entry) {
+ if (r->disc_addr.trtype == NVMF_TRTYPE_PCI)
+ continue;
+
nvmet_format_discovery_entry(hdr, r,
NVME_DISC_SUBSYS_NAME,
r->disc_addr.traddr,
@@ -352,6 +355,20 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
nvmet_req_complete(req, stat);
}
+u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ return nvmet_get_log_page_len(req->cmd);
+ case nvme_admin_identify:
+ return NVME_IDENTIFY_DATA_SIZE;
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 3f2857c17d95..2022757f08dc 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -179,6 +179,11 @@ static u8 nvmet_auth_failure2(void *d)
return data->rescode_exp;
}
+u32 nvmet_auth_send_data_len(struct nvmet_req *req)
+{
+ return le32_to_cpu(req->cmd->auth_send.tl);
+}
+
void nvmet_execute_auth_send(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -206,7 +211,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
offsetof(struct nvmf_auth_send_command, spsp1);
goto done;
}
- tl = le32_to_cpu(req->cmd->auth_send.tl);
+ tl = nvmet_auth_send_data_len(req);
if (!tl) {
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
@@ -429,6 +434,11 @@ static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
data->rescode_exp = req->sq->dhchap_status;
}
+u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
+{
+ return le32_to_cpu(req->cmd->auth_receive.al);
+}
+
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -454,7 +464,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
offsetof(struct nvmf_auth_receive_command, spsp1);
goto done;
}
- al = le32_to_cpu(req->cmd->auth_receive.al);
+ al = nvmet_auth_receive_data_len(req);
if (!al) {
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
req->error_loc =
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index c49904ebb6c2..a7ff05b3be29 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -85,6 +85,22 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
+u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ return nvmet_auth_send_data_len(req);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_auth_receive_data_len(req);
+#endif
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -114,6 +130,22 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
return 0;
}
+u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ return nvmet_auth_send_data_len(req);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_auth_receive_data_len(req);
+#endif
+ default:
+ return 0;
+ }
+}
+
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -213,73 +245,67 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status;
- u8 dhchap_status;
+ struct nvmet_alloc_ctrl_args args = {
+ .port = req->port,
+ .ops = req->ops,
+ .p2p_client = req->p2p_client,
+ .kato = le32_to_cpu(c->kato),
+ };
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
- status = NVME_SC_INTERNAL;
+ args.status = NVME_SC_INTERNAL;
goto complete;
}
- status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
- if (status)
+ args.status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
+ if (args.status)
goto out;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
- req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
- status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
+ args.error_loc = offsetof(struct nvmf_connect_command, recfmt);
+ args.status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
goto out;
}
if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
- req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ args.status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
+ args.result = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
- status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
- le32_to_cpu(c->kato), &ctrl, &d->hostid);
- if (status)
- goto out;
- dhchap_status = nvmet_setup_auth(ctrl);
- if (dhchap_status) {
- pr_err("Failed to setup authentication, dhchap status %u\n",
- dhchap_status);
- nvmet_ctrl_put(ctrl);
- if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
- status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
- else
- status = NVME_SC_INTERNAL;
+ args.subsysnqn = d->subsysnqn;
+ args.hostnqn = d->hostnqn;
+ args.hostid = &d->hostid;
+
+ ctrl = nvmet_alloc_ctrl(&args);
+ if (!ctrl)
goto out;
- }
- status = nvmet_install_queue(ctrl, req);
- if (status) {
+ args.status = nvmet_install_queue(ctrl, req);
+ if (args.status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
- nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
- ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
- ctrl->pi_support ? " T10-PI is enabled" : "",
- nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
- req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+ args.result = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
- nvmet_req_complete(req, status);
+ req->error_loc = args.error_loc;
+ req->cqe->result.u32 = args.result;
+ nvmet_req_complete(req, args.status);
}
static void nvmet_execute_io_connect(struct nvmet_req *req)
@@ -343,6 +369,17 @@ out_ctrl_put:
goto out;
}
+u32 nvmet_connect_cmd_data_len(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (!nvme_is_fabrics(cmd) ||
+ cmd->fabrics.fctype != nvme_fabrics_type_connect)
+ return 0;
+
+ return sizeof(struct nvmf_connect_data);
+}
+
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 0bda83d0fc3e..c1f574fe3280 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
- id->npwg = lpp0b;
+ id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
@@ -272,6 +272,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
iter_flags = SG_MITER_FROM_SG;
}
+ if (req->cmd->rw.control & NVME_RW_LR)
+ opf |= REQ_FAILFAST_DEV;
+
if (is_pci_p2pdma_page(sg_page(req->sg)))
opf |= REQ_NOMERGE;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 58328b35dc96..b540216c0c9a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -24,6 +24,7 @@
#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0)
+#define NVMET_NS_ENABLED XA_MARK_1
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
@@ -33,6 +34,12 @@
#define NVMET_FR_MAX_SIZE 8
#define NVMET_PR_LOG_QUEUE_SIZE 64
+#define nvmet_for_each_ns(xa, index, entry) \
+ xa_for_each(xa, index, entry)
+
+#define nvmet_for_each_enabled_ns(xa, index, entry) \
+ xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
+
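These wrappers rely on XArray search marks: a namespace now stays in the array for its whole lifetime, and NVMET_NS_ENABLED (XA_MARK_1) merely tags it as enabled, so plain lookups keep working while enabled-only walks skip disabled entries. A freestanding sketch of the mechanism (the example names are not from this patch):

#include <linux/xarray.h>

static DEFINE_XARRAY(example_ns_xa);

static void example_set_enabled(unsigned long id, bool enable)
{
	/* An entry must already be stored at @id for marks to apply. */
	if (enable)
		xa_set_mark(&example_ns_xa, id, XA_MARK_1);
	else
		xa_clear_mark(&example_ns_xa, id, XA_MARK_1);
}

static unsigned long example_count_enabled(void)
{
	unsigned long index, count = 0;
	void *entry;

	xa_for_each_marked(&example_ns_xa, index, entry, XA_MARK_1)
		count++;
	return count;
}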
/*
* Supported optional AENs:
*/
@@ -238,6 +245,8 @@ struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
+ void *drvdata;
+
bool reset_tbkas;
struct mutex lock;
@@ -324,6 +333,8 @@ struct nvmet_subsys {
struct config_group namespaces_group;
struct config_group allowed_hosts_group;
+ u16 vendor_id;
+ u16 subsys_vendor_id;
char *model_number;
u32 ieee_oui;
char *firmware_rev;
@@ -404,6 +415,18 @@ struct nvmet_fabrics_ops {
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
+
+ /* Operations mandatory for PCI target controllers */
+ u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags,
+ u16 qsize, u64 prp1);
+ u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
+ u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
+ u16 qsize, u64 prp1, u16 irq_vector);
+ u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid);
+ u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+ void *feat_data);
+ u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+ void *feat_data);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -513,18 +536,24 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
+u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
+u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
+size_t nvmet_req_transfer_len(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
@@ -535,19 +564,37 @@ void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
+u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+ u16 size);
+u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
u16 size);
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+ u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
- struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
- uuid_t *hostid);
+
+struct nvmet_alloc_ctrl_args {
+ struct nvmet_port *port;
+ char *subsysnqn;
+ char *hostnqn;
+ uuid_t *hostid;
+ const struct nvmet_fabrics_ops *ops;
+ struct device *p2p_client;
+ u32 kato;
+ __le32 result;
+ u16 error_loc;
+ u16 status;
+};
+
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
const char *hostnqn, u16 cntlid,
struct nvmet_req *req);
@@ -689,6 +736,11 @@ static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
return subsys->type != NVME_NQN_NVME;
}
+static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -730,6 +782,41 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
+static inline bool nvmet_cc_en(u32 cc)
+{
+ return (cc >> NVME_CC_EN_SHIFT) & 0x1;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+ return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+ return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+ return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+ return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+ return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+ return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
+}
+
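With these helpers now visible to all transports, decoding a controller configuration value is a matter of shifts and masks. A worked example for CC = 0x00460001, which follows directly from the definitions above (example_decode_cc() is illustrative):

static void example_decode_cc(void)
{
	u32 cc = 0x00460001;

	WARN_ON(!nvmet_cc_en(cc));		/* EN = 1 */
	WARN_ON(nvmet_cc_css(cc) != 0);		/* NVM command set */
	WARN_ON(nvmet_cc_mps(cc) != 0);		/* 4KB pages: 2^(12 + 0) */
	WARN_ON(nvmet_cc_iosqes(cc) != 6);	/* 64B SQ entries: 2^6 */
	WARN_ON(nvmet_cc_iocqes(cc) != 4);	/* 16B CQ entries: 2^4 */
}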
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
@@ -766,7 +853,9 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
}
#ifdef CONFIG_NVME_TARGET_AUTH
+u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
+u32 nvmet_auth_receive_data_len(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
@@ -824,4 +913,26 @@ static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
percpu_ref_put(&pc_ref->ref);
}
+
+/*
+ * Data for the get_feature() and set_feature() operations of PCI target
+ * controllers.
+ */
+struct nvmet_feat_irq_coalesce {
+ u8 thr;
+ u8 time;
+};
+
+struct nvmet_feat_irq_config {
+ u16 iv;
+ bool cd;
+};
+
+struct nvmet_feat_arbitration {
+ u8 hpw;
+ u8 mpw;
+ u8 lpw;
+ u8 ab;
+};
+
#endif /* _NVMET_H */
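A transport implementing the new get_feature()/set_feature() operations dispatches on the feature identifier and fills (or consumes) one of the structures above. A minimal sketch of the get side, with hypothetical example_* names and hard-coded state standing in for real per-controller data:

static u16 example_get_feature(const struct nvmet_ctrl *tctrl, u8 feat,
			       void *feat_data)
{
	switch (feat) {
	case NVME_FEAT_IRQ_COALESCE: {
		struct nvmet_feat_irq_coalesce *irqc = feat_data;

		/* A real transport would read its coalescing state here. */
		irqc->thr = 8 - 1;	/* 0's based threshold */
		irqc->time = 0;
		return NVME_SC_SUCCESS;
	}
	default:
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}
}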
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 30b21936b0c6..26e2907ce8bb 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -261,6 +261,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
struct scatterlist *sg;
struct bio *bio;
+ int ret = -EINVAL;
int i;
if (req->sg_cnt > BIO_MAX_VECS)
@@ -277,16 +278,19 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
}
for_each_sg(req->sg, sg, req->sg_cnt, i) {
- if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
- sg->offset) < sg->length) {
- nvmet_req_bio_put(req, bio);
- return -EINVAL;
- }
+ if (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) <
+ sg->length)
+ goto out_bio_put;
}
- blk_rq_bio_prep(rq, bio, req->sg_cnt);
-
+ ret = blk_rq_append_bio(rq, bio);
+ if (ret)
+ goto out_bio_put;
return 0;
+
+out_bio_put:
+ nvmet_req_bio_put(req, bio);
+ return ret;
}
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
new file mode 100644
index 000000000000..ac30b42cc622
--- /dev/null
+++ b/drivers/nvme/target/pci-epf.c
@@ -0,0 +1,2591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe PCI Endpoint Function target driver.
+ *
+ * Copyright (c) 2024, Western Digital Corporation or its affiliates.
+ * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com>
+ * REDS Institute, HEIG-VD, HES-SO, Switzerland
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvme.h>
+#include <linux/pci_ids.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci_regs.h>
+#include <linux/slab.h>
+
+#include "nvmet.h"
+
+static LIST_HEAD(nvmet_pci_epf_ports);
+static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
+
+/*
+ * Default and maximum allowed data transfer size. For the default,
+ * allow up to 128 page-sized segments. For the maximum allowed,
+ * use 4 times the default (which is completely arbitrary).
+ */
+#define NVMET_PCI_EPF_MAX_SEGS 128
+#define NVMET_PCI_EPF_MDTS_KB \
+ (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
+#define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4)
+
+/*
+ * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an
+ * interrupt to the host. This default of 8 is completely arbitrary and can
+ * be changed by the host with a Set Features command.
+ */
+#define NVMET_PCI_EPF_IV_THRESHOLD 8
+
+/*
+ * BAR CC register and SQ polling intervals.
+ */
+#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(5)
+#define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
+#define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
+
+/*
+ * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
+ */
+#define NVMET_PCI_EPF_SQ_AB 8
+
+/*
+ * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
+ * is full, in which case we retry the CQ processing after this interval.
+ */
+#define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1)
+
+enum nvmet_pci_epf_queue_flags {
+ NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */
+ NVMET_PCI_EPF_Q_LIVE, /* The queue is live */
+ NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */
+};
+
+/*
+ * IRQ vector descriptor.
+ */
+struct nvmet_pci_epf_irq_vector {
+ unsigned int vector;
+ unsigned int ref;
+ bool cd;
+ int nr_irqs;
+};
+
+struct nvmet_pci_epf_queue {
+ union {
+ struct nvmet_sq nvme_sq;
+ struct nvmet_cq nvme_cq;
+ };
+ struct nvmet_pci_epf_ctrl *ctrl;
+ unsigned long flags;
+
+ u64 pci_addr;
+ size_t pci_size;
+ struct pci_epc_map pci_map;
+
+ u16 qid;
+ u16 depth;
+ u16 vector;
+ u16 head;
+ u16 tail;
+ u16 phase;
+ u32 db;
+
+ size_t qes;
+
+ struct nvmet_pci_epf_irq_vector *iv;
+ struct workqueue_struct *iod_wq;
+ struct delayed_work work;
+ spinlock_t lock;
+ struct list_head list;
+};
+
+/*
+ * PCI Root Complex (RC) address data segment for mapping an admin or
+ * I/O command buffer @buf of @length bytes to the PCI address @pci_addr.
+ */
+struct nvmet_pci_epf_segment {
+ void *buf;
+ u64 pci_addr;
+ u32 length;
+};
+
+/*
+ * Command descriptors.
+ */
+struct nvmet_pci_epf_iod {
+ struct list_head link;
+
+ struct nvmet_req req;
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ unsigned int status;
+
+ struct nvmet_pci_epf_ctrl *ctrl;
+
+ struct nvmet_pci_epf_queue *sq;
+ struct nvmet_pci_epf_queue *cq;
+
+ /* Data transfer size and direction for the command. */
+ size_t data_len;
+ enum dma_data_direction dma_dir;
+
+ /*
+ * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we
+ * use only @data_seg. Otherwise, the array of segments @data_segs is
+ * allocated to manage multiple PCI address data segments. @data_sgl and
+ * @data_sgt are used to setup the command request for execution by the
+ * target core.
+ */
+ unsigned int nr_data_segs;
+ struct nvmet_pci_epf_segment data_seg;
+ struct nvmet_pci_epf_segment *data_segs;
+ struct scatterlist data_sgl;
+ struct sg_table data_sgt;
+
+ struct work_struct work;
+ struct completion done;
+};
+
+/*
+ * PCI target controller private data.
+ */
+struct nvmet_pci_epf_ctrl {
+ struct nvmet_pci_epf *nvme_epf;
+ struct nvmet_port *port;
+ struct nvmet_ctrl *tctrl;
+ struct device *dev;
+
+ unsigned int nr_queues;
+ struct nvmet_pci_epf_queue *sq;
+ struct nvmet_pci_epf_queue *cq;
+ unsigned int sq_ab;
+
+ mempool_t iod_pool;
+ void *bar;
+ u64 cap;
+ u32 cc;
+ u32 csts;
+
+ size_t io_sqes;
+ size_t io_cqes;
+
+ size_t mps_shift;
+ size_t mps;
+ size_t mps_mask;
+
+ unsigned int mdts;
+
+ struct delayed_work poll_cc;
+ struct delayed_work poll_sqs;
+
+ struct mutex irq_lock;
+ struct nvmet_pci_epf_irq_vector *irq_vectors;
+ unsigned int irq_vector_threshold;
+
+ bool link_up;
+ bool enabled;
+};
+
+/*
+ * PCI EPF driver private data.
+ */
+struct nvmet_pci_epf {
+ struct pci_epf *epf;
+
+ const struct pci_epc_features *epc_features;
+
+ void *reg_bar;
+ size_t msix_table_offset;
+
+ unsigned int irq_type;
+ unsigned int nr_vectors;
+
+ struct nvmet_pci_epf_ctrl ctrl;
+
+ bool dma_enabled;
+ struct dma_chan *dma_tx_chan;
+ struct mutex dma_tx_lock;
+ struct dma_chan *dma_rx_chan;
+ struct mutex dma_rx_lock;
+
+ struct mutex mmio_lock;
+
+ /* PCI endpoint function configfs attributes. */
+ struct config_group group;
+ __le16 portid;
+ char subsysnqn[NVMF_NQN_SIZE];
+ unsigned int mdts_kb;
+};
+
+static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off)
+{
+ __le32 *bar_reg = ctrl->bar + off;
+
+ return le32_to_cpu(READ_ONCE(*bar_reg));
+}
+
+static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off, u32 val)
+{
+ __le32 *bar_reg = ctrl->bar + off;
+
+ WRITE_ONCE(*bar_reg, cpu_to_le32(val));
+}
+
+static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off)
+{
+ return (u64)nvmet_pci_epf_bar_read32(ctrl, off) |
+ ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32);
+}
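+/*
+ * Note: 64-bit BAR registers are accessed as two 32-bit halves, which is not
+ * atomic. This is fine for ASQ/ACQ since the NVMe specification requires the
+ * host to program the admin queue addresses before setting CC.EN, so these
+ * registers never change while the controller reads them.
+ */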
+
+static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl,
+ u32 off, u64 val)
+{
+ nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF);
+ nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF);
+}
+
+static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf,
+ u64 pci_addr, size_t size, struct pci_epc_map *map)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
+ pci_addr, size, map);
+}
+
+static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf,
+ struct pci_epc_map *map)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
+}
+
+struct nvmet_pci_epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg)
+{
+ struct nvmet_pci_epf_dma_filter *filter = arg;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev &&
+ (filter->dma_mask & caps.directions);
+}
+
+static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ struct device *dev = &epf->dev;
+ struct nvmet_pci_epf_dma_filter filter;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ mutex_init(&nvme_epf->dma_rx_lock);
+ mutex_init(&nvme_epf->dma_tx_lock);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ filter.dev = epf->epc->dev.parent;
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+
+ chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
+ if (!chan)
+ goto out_dma_no_rx;
+
+ nvme_epf->dma_rx_chan = chan;
+
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
+ if (!chan)
+ goto out_dma_no_tx;
+
+ nvme_epf->dma_tx_chan = chan;
+
+ nvme_epf->dma_enabled = true;
+
+	dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
+		dma_chan_name(nvme_epf->dma_rx_chan),
+		dma_get_max_seg_size(dmaengine_get_dma_device(
+					nvme_epf->dma_rx_chan)));
+
+	dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
+		dma_chan_name(nvme_epf->dma_tx_chan),
+		dma_get_max_seg_size(dmaengine_get_dma_device(
+					nvme_epf->dma_tx_chan)));
+
+ return;
+
+out_dma_no_tx:
+ dma_release_channel(nvme_epf->dma_rx_chan);
+ nvme_epf->dma_rx_chan = NULL;
+
+out_dma_no_rx:
+ mutex_destroy(&nvme_epf->dma_rx_lock);
+ mutex_destroy(&nvme_epf->dma_tx_lock);
+ nvme_epf->dma_enabled = false;
+
+ dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n");
+}
+
+static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf)
+{
+ if (!nvme_epf->dma_enabled)
+ return;
+
+ dma_release_channel(nvme_epf->dma_tx_chan);
+ nvme_epf->dma_tx_chan = NULL;
+ dma_release_channel(nvme_epf->dma_rx_chan);
+ nvme_epf->dma_rx_chan = NULL;
+ mutex_destroy(&nvme_epf->dma_rx_lock);
+ mutex_destroy(&nvme_epf->dma_tx_lock);
+ nvme_epf->dma_enabled = false;
+}
+
+static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config sconf = {};
+ struct device *dev = &epf->dev;
+ struct device *dma_dev;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+ dma_addr_t dma_addr;
+ struct mutex *lock;
+ int ret;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ lock = &nvme_epf->dma_rx_lock;
+ chan = nvme_epf->dma_rx_chan;
+ sconf.direction = DMA_DEV_TO_MEM;
+ sconf.src_addr = seg->pci_addr;
+ break;
+ case DMA_TO_DEVICE:
+ lock = &nvme_epf->dma_tx_lock;
+ chan = nvme_epf->dma_tx_chan;
+ sconf.direction = DMA_MEM_TO_DEV;
+ sconf.dst_addr = seg->pci_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(lock);
+
+ dma_dev = dmaengine_get_dma_device(chan);
+ dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir);
+ ret = dma_mapping_error(dma_dev, dma_addr);
+ if (ret)
+ goto unlock;
+
+ ret = dmaengine_slave_config(chan, &sconf);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto unmap;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length,
+ sconf.direction, DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto unmap;
+ }
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret);
+ goto unmap;
+ }
+
+ if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
+ dev_err(dev, "DMA transfer failed\n");
+ ret = -EIO;
+ }
+
+ dmaengine_terminate_sync(chan);
+
+unmap:
+ dma_unmap_single(dma_dev, dma_addr, seg->length, dir);
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ u64 pci_addr = seg->pci_addr;
+ u32 length = seg->length;
+ void *buf = seg->buf;
+ struct pci_epc_map map;
+ int ret = -EINVAL;
+
+ /*
+ * Note: MMIO transfers do not need serialization but this is a
+ * simple way to avoid using too many mapping windows.
+ */
+ mutex_lock(&nvme_epf->mmio_lock);
+
+ while (length) {
+ ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
+ if (ret)
+ break;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ memcpy_fromio(buf, map.virt_addr, map.pci_size);
+ break;
+ case DMA_TO_DEVICE:
+ memcpy_toio(map.virt_addr, buf, map.pci_size);
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ pci_addr += map.pci_size;
+ buf += map.pci_size;
+ length -= map.pci_size;
+
+ nvmet_pci_epf_mem_unmap(nvme_epf, &map);
+ }
+
+unlock:
+ mutex_unlock(&nvme_epf->mmio_lock);
+
+ return ret;
+}
+
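+/*
+ * All host data transfers funnel through this helper: use a DMA channel when
+ * one was acquired at bind time, otherwise fall back to CPU copies through a
+ * mapped PCI window (slower, but always available).
+ */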
+static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf,
+ struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
+{
+ if (nvme_epf->dma_enabled)
+ return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir);
+
+ return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir);
+}
+
+static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl,
+ void *buf, u64 pci_addr, u32 length,
+ enum dma_data_direction dir)
+{
+ struct nvmet_pci_epf_segment seg = {
+ .buf = buf,
+ .pci_addr = pci_addr,
+ .length = length,
+ };
+
+ return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir);
+}
+
+static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ ctrl->irq_vectors = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_irq_vector),
+ GFP_KERNEL);
+ if (!ctrl->irq_vectors)
+ return -ENOMEM;
+
+	/* ctrl->irq_lock is initialized in nvmet_pci_epf_create_ctrl(). */
+	return 0;
+}
+
+static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ if (ctrl->irq_vectors) {
+ mutex_destroy(&ctrl->irq_lock);
+ kfree(ctrl->irq_vectors);
+ ctrl->irq_vectors = NULL;
+ }
+}
+
+static struct nvmet_pci_epf_irq_vector *
+nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+ int i;
+
+ lockdep_assert_held(&ctrl->irq_lock);
+
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ iv = &ctrl->irq_vectors[i];
+ if (iv->ref && iv->vector == vector)
+ return iv;
+ }
+
+ return NULL;
+}
+
+static struct nvmet_pci_epf_irq_vector *
+nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+ int i;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
+ if (iv) {
+ iv->ref++;
+ goto unlock;
+ }
+
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ iv = &ctrl->irq_vectors[i];
+ if (!iv->ref)
+ break;
+ }
+
+	/* A free slot must exist: vectors in use can never exceed queues. */
+	if (WARN_ON_ONCE(i == ctrl->nr_queues)) {
+		iv = NULL;
+		goto unlock;
+	}
+
+ iv->ref = 1;
+ iv->vector = vector;
+ iv->nr_irqs = 0;
+
+unlock:
+ mutex_unlock(&ctrl->irq_lock);
+
+ return iv;
+}
+
+static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl,
+ u16 vector)
+{
+ struct nvmet_pci_epf_irq_vector *iv;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
+ if (iv) {
+ iv->ref--;
+ if (!iv->ref) {
+ iv->vector = 0;
+ iv->nr_irqs = 0;
+ }
+ }
+
+ mutex_unlock(&ctrl->irq_lock);
+}
+
+static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *cq, bool force)
+{
+ struct nvmet_pci_epf_irq_vector *iv = cq->iv;
+ bool ret;
+
+ if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ return false;
+
+ /* IRQ coalescing for the admin queue is not allowed. */
+ if (!cq->qid)
+ return true;
+
+ if (iv->cd)
+ return true;
+
+ if (force) {
+ ret = iv->nr_irqs > 0;
+ } else {
+ iv->nr_irqs++;
+ ret = iv->nr_irqs >= ctrl->irq_vector_threshold;
+ }
+ if (ret)
+ iv->nr_irqs = 0;
+
+ return ret;
+}
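+/*
+ * For example, with the default threshold of 8, up to seven completions can
+ * be posted to an I/O CQ without an interrupt; the eighth one resets the
+ * counter and raises the IRQ. nvmet_pci_epf_cq_work() also calls this with
+ * force=true after posting a batch, so a short batch still gets an interrupt
+ * without waiting for the threshold to be reached.
+ */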
+
+static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *cq, bool force)
+{
+ struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
+ struct pci_epf *epf = nvme_epf->epf;
+ int ret = 0;
+
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return;
+
+ mutex_lock(&ctrl->irq_lock);
+
+ if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force))
+ goto unlock;
+
+ switch (nvme_epf->irq_type) {
+ case PCI_IRQ_MSIX:
+ case PCI_IRQ_MSI:
+ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
+ nvme_epf->irq_type, cq->vector + 1);
+ if (!ret)
+ break;
+ /*
+ * If we got an error, it is likely because the host is using
+ * legacy IRQs (e.g. BIOS, grub).
+ */
+ fallthrough;
+ case PCI_IRQ_INTX:
+ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
+ PCI_IRQ_INTX, 0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret);
+
+unlock:
+ mutex_unlock(&ctrl->irq_lock);
+}
+
+static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)
+{
+ return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
+}
+
+static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
+
+static struct nvmet_pci_epf_iod *
+nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl;
+ struct nvmet_pci_epf_iod *iod;
+
+ iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);
+ if (unlikely(!iod))
+ return NULL;
+
+ memset(iod, 0, sizeof(*iod));
+ iod->req.cmd = &iod->cmd;
+ iod->req.cqe = &iod->cqe;
+ iod->req.port = ctrl->port;
+ iod->ctrl = ctrl;
+ iod->sq = sq;
+ iod->cq = &ctrl->cq[sq->qid];
+ INIT_LIST_HEAD(&iod->link);
+ iod->dma_dir = DMA_NONE;
+ INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
+ init_completion(&iod->done);
+
+ return iod;
+}
+
+/*
+ * Allocate or grow a command table of PCI segments.
+ */
+static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod,
+ int nsegs)
+{
+ struct nvmet_pci_epf_segment *segs;
+ int nr_segs = iod->nr_data_segs + nsegs;
+
+ segs = krealloc(iod->data_segs,
+ nr_segs * sizeof(struct nvmet_pci_epf_segment),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!segs)
+ return -ENOMEM;
+
+ iod->nr_data_segs = nr_segs;
+ iod->data_segs = segs;
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
+{
+ int i;
+
+ if (iod->data_segs) {
+ for (i = 0; i < iod->nr_data_segs; i++)
+ kfree(iod->data_segs[i].buf);
+ if (iod->data_segs != &iod->data_seg)
+ kfree(iod->data_segs);
+ }
+ if (iod->data_sgt.nents > 1)
+ sg_free_table(&iod->data_sgt);
+ mempool_free(iod, &iod->ctrl->iod_pool);
+}
+
+static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
+ struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
+ int i, ret;
+
+ /* Split the data transfer according to the PCI segments. */
+ for (i = 0; i < iod->nr_data_segs; i++, seg++) {
+ ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
+ if (ret) {
+ iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl,
+ u64 prp)
+{
+ return prp & ctrl->mps_mask;
+}
+
+static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl,
+ u64 prp)
+{
+ return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp);
+}
+
+/*
+ * Transfer a PRP list from the host and return the number of prps.
+ */
+static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
+ size_t xfer_len, __le64 *prps)
+{
+ size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
+ u32 length;
+ int ret;
+
+	/*
+	 * Compute the number of PRPs required for the number of bytes to
+	 * transfer (xfer_len). If this number of entries does not fit in the
+	 * space remaining in the memory page pointed at by @prp, transfer
+	 * only the entries that fit: the last PRP entry of that page is then
+	 * a PRP list pointer to the remaining PRPs.
+	 */
+ length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
+ ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ return length >> 3;
+}
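+/*
+ * For example, with a 4 KB memory page size, a full PRP list page holds
+ * 4096 / 8 = 512 entries, the last of which may itself be a pointer to the
+ * next PRP list page for larger transfers.
+ */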
+
+static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ struct nvmet_pci_epf_segment *seg;
+ size_t size = 0, ofst, prp_size, xfer_len;
+ size_t transfer_len = iod->data_len;
+ int nr_segs, nr_prps = 0;
+ u64 pci_addr, prp;
+ int i = 0, ret;
+ __le64 *prps;
+
+ prps = kzalloc(ctrl->mps, GFP_KERNEL);
+ if (!prps)
+ goto err_internal;
+
+	/*
+	 * Allocate PCI segments for the command: this considers the worst
+	 * case scenario where all PRPs are discontiguous, so allocate as many
+	 * segments as there can be PRPs. In practice, we will most often end
+	 * up with far fewer PCI segments than PRPs.
+	 */
+ prp = le64_to_cpu(cmd->common.dptr.prp1);
+ if (!prp)
+ goto err_invalid_field;
+
+ ofst = nvmet_pci_epf_prp_ofst(ctrl, prp);
+ nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift;
+
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
+ if (ret)
+ goto err_internal;
+
+ /* Set the first segment using prp1. */
+ seg = &iod->data_segs[0];
+ seg->pci_addr = prp;
+ seg->length = nvmet_pci_epf_prp_size(ctrl, prp);
+
+ size = seg->length;
+ pci_addr = prp + size;
+ nr_segs = 1;
+
+ /*
+ * Now build the PCI address segments using the PRP lists, starting
+ * from prp2.
+ */
+ prp = le64_to_cpu(cmd->common.dptr.prp2);
+ if (!prp)
+ goto err_invalid_field;
+
+ while (size < transfer_len) {
+ xfer_len = transfer_len - size;
+
+ if (!nr_prps) {
+ nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
+ xfer_len, prps);
+ if (nr_prps < 0)
+ goto err_internal;
+
+ i = 0;
+ ofst = 0;
+ }
+
+ /* Current entry */
+ prp = le64_to_cpu(prps[i]);
+ if (!prp)
+ goto err_invalid_field;
+
+ /* Did we reach the last PRP entry of the list? */
+ if (xfer_len > ctrl->mps && i == nr_prps - 1) {
+ /* We need more PRPs: PRP is a list pointer. */
+ nr_prps = 0;
+ continue;
+ }
+
+ /* Only the first PRP is allowed to have an offset. */
+ if (nvmet_pci_epf_prp_ofst(ctrl, prp))
+ goto err_invalid_offset;
+
+ if (prp != pci_addr) {
+ /* Discontiguous prp: new segment. */
+ nr_segs++;
+ if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs))
+ goto err_internal;
+
+ seg++;
+ seg->pci_addr = prp;
+ seg->length = 0;
+ pci_addr = prp;
+ }
+
+ prp_size = min_t(size_t, ctrl->mps, xfer_len);
+ seg->length += prp_size;
+ pci_addr += prp_size;
+ size += prp_size;
+
+ i++;
+ }
+
+	iod->nr_data_segs = nr_segs;
+
+ if (size != transfer_len) {
+ dev_err(ctrl->dev,
+ "PRPs transfer length mismatch: got %zu B, need %zu B\n",
+ size, transfer_len);
+ goto err_internal;
+ }
+
+ kfree(prps);
+
+ return 0;
+
+err_invalid_offset:
+ dev_err(ctrl->dev, "PRPs list invalid offset\n");
+ iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ goto err;
+
+err_invalid_field:
+ dev_err(ctrl->dev, "PRPs list invalid field\n");
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ goto err;
+
+err_internal:
+ dev_err(ctrl->dev, "PRPs list internal error\n");
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+
+err:
+ kfree(prps);
+ return -EINVAL;
+}
+
+static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ size_t transfer_len = iod->data_len;
+ int ret, nr_segs = 1;
+ u64 prp1, prp2 = 0;
+ size_t prp1_size;
+
+ prp1 = le64_to_cpu(cmd->common.dptr.prp1);
+ prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1);
+
+ /* For commands crossing a page boundary, we should have prp2. */
+ if (transfer_len > prp1_size) {
+ prp2 = le64_to_cpu(cmd->common.dptr.prp2);
+ if (!prp2) {
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+ if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) {
+ iod->status =
+ NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+ if (prp2 != prp1 + prp1_size)
+ nr_segs = 2;
+ }
+
+ if (nr_segs == 1) {
+ iod->nr_data_segs = 1;
+ iod->data_segs = &iod->data_seg;
+ iod->data_segs[0].pci_addr = prp1;
+ iod->data_segs[0].length = transfer_len;
+ return 0;
+ }
+
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
+ if (ret) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return ret;
+ }
+
+ iod->data_segs[0].pci_addr = prp1;
+ iod->data_segs[0].length = prp1_size;
+ iod->data_segs[1].pci_addr = prp2;
+ iod->data_segs[1].length = transfer_len - prp1_size;
+
+ return 0;
+}
+
+static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1);
+ size_t ofst;
+
+ /* Get the PCI address segments for the command using its PRPs. */
+ ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1);
+ if (ofst & 0x3) {
+ iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+
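+	/*
+	 * For example, with mps = 4 KB, a 6 KB transfer starting at a 2 KB
+	 * offset into a page spans exactly two pages (data_len + ofst ==
+	 * 2 * mps), so the simple prp1/prp2 case below applies; any larger
+	 * transfer requires prp2 to be a PRP list pointer.
+	 */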
+ if (iod->data_len + ofst <= ctrl->mps * 2)
+ return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod);
+
+ return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod);
+}
+
+/*
+ * Transfer an SGL segment from the host and return the number of data
+ * descriptors and the next segment descriptor, if any.
+ */
+static struct nvme_sgl_desc *
+nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvme_sgl_desc *desc, unsigned int *nr_sgls)
+{
+ struct nvme_sgl_desc *sgls;
+ u32 length = le32_to_cpu(desc->length);
+ int nr_descs, ret;
+ void *buf;
+
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ kfree(buf);
+ return NULL;
+ }
+
+ sgls = buf;
+ nr_descs = length / sizeof(struct nvme_sgl_desc);
+ if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
+ sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
+ /*
+ * We have another SGL segment following this one: do not count
+ * it as a regular data SGL descriptor and return it to the
+ * caller.
+ */
+ *desc = sgls[nr_descs - 1];
+ nr_descs--;
+ } else {
+ /* We do not have another SGL segment after this one. */
+ desc->length = 0;
+ }
+
+ *nr_sgls = nr_descs;
+
+ return sgls;
+}
+
+static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_iod *iod)
+{
+ struct nvme_command *cmd = &iod->cmd;
+ struct nvme_sgl_desc seg = cmd->common.dptr.sgl;
+ struct nvme_sgl_desc *sgls = NULL;
+ int n = 0, i, nr_sgls;
+ int ret;
+
+	/*
+	 * We support neither inline data nor keyed SGLs, so we should only be
+	 * seeing segment descriptors here.
+	 */
+ if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
+ seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
+ iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
+ return -EIO;
+ }
+
+ while (seg.length) {
+ sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls);
+ if (!sgls) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return -EIO;
+ }
+
+ /* Grow the PCI segment table as needed. */
+ ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls);
+ if (ret) {
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto out;
+ }
+
+ /*
+ * Parse the SGL descriptors to build the PCI segment table,
+ * checking the descriptor type as we go.
+ */
+ for (i = 0; i < nr_sgls; i++) {
+ if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
+ iod->status = NVME_SC_SGL_INVALID_TYPE |
+ NVME_STATUS_DNR;
+ goto out;
+ }
+ iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
+ iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
+ n++;
+ }
+
+ kfree(sgls);
+ }
+
+ out:
+ if (iod->status != NVME_SC_SUCCESS) {
+ kfree(sgls);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl;
+
+ if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
+ /* Single data descriptor case. */
+ iod->nr_data_segs = 1;
+ iod->data_segs = &iod->data_seg;
+ iod->data_seg.pci_addr = le64_to_cpu(sgl->addr);
+ iod->data_seg.length = le32_to_cpu(sgl->length);
+ return 0;
+ }
+
+ return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod);
+}
+
+static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
+ struct nvmet_req *req = &iod->req;
+ struct nvmet_pci_epf_segment *seg;
+ struct scatterlist *sg;
+ int ret, i;
+
+ if (iod->data_len > ctrl->mdts) {
+ iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ return -EINVAL;
+ }
+
+ /*
+ * Get the PCI address segments for the command data buffer using either
+ * its SGLs or PRPs.
+ */
+ if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
+ ret = nvmet_pci_epf_iod_parse_sgls(iod);
+ else
+ ret = nvmet_pci_epf_iod_parse_prps(iod);
+ if (ret)
+ return ret;
+
+ /* Get a command buffer using SGLs matching the PCI segments. */
+ if (iod->nr_data_segs == 1) {
+ sg_init_table(&iod->data_sgl, 1);
+ iod->data_sgt.sgl = &iod->data_sgl;
+ iod->data_sgt.nents = 1;
+ iod->data_sgt.orig_nents = 1;
+ } else {
+ ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
+ GFP_KERNEL);
+ if (ret)
+ goto err_nomem;
+ }
+
+ for_each_sgtable_sg(&iod->data_sgt, sg, i) {
+ seg = &iod->data_segs[i];
+ seg->buf = kmalloc(seg->length, GFP_KERNEL);
+ if (!seg->buf)
+ goto err_nomem;
+ sg_set_buf(sg, seg->buf, seg->length);
+ }
+
+ req->transfer_len = iod->data_len;
+ req->sg = iod->data_sgt.sgl;
+ req->sg_cnt = iod->data_sgt.nents;
+
+ return 0;
+
+err_nomem:
+ iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ return -ENOMEM;
+}
+
+static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
+{
+ struct nvmet_pci_epf_queue *cq = iod->cq;
+ unsigned long flags;
+
+ /* Print an error message for failed commands, except AENs. */
+ iod->status = le16_to_cpu(iod->cqe.status) >> 1;
+ if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
+		dev_err(iod->ctrl->dev,
+			"CQ[%d]: Command %s (0x%x) status 0x%x\n",
+			iod->sq->qid, nvmet_pci_epf_iod_name(iod),
+			iod->cmd.common.opcode, iod->status);
+
+ /*
+ * Add the command to the list of completed commands and schedule the
+ * CQ work.
+ */
+ spin_lock_irqsave(&cq->lock, flags);
+ list_add_tail(&iod->link, &cq->list);
+ queue_delayed_work(system_highpri_wq, &cq->work, 0);
+ spin_unlock_irqrestore(&cq->lock, flags);
+}
+
+static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
+{
+ struct nvmet_pci_epf_iod *iod;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->lock, flags);
+ while (!list_empty(&queue->list)) {
+ iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
+ link);
+ list_del_init(&iod->link);
+ nvmet_pci_epf_free_iod(iod);
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static int nvmet_pci_epf_add_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_add_tail(&port->entry, &nvmet_pci_epf_ports);
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+ return 0;
+}
+
+static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
+{
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_del_init(&port->entry);
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+}
+
+static struct nvmet_port *
+nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
+{
+ struct nvmet_port *p, *port = NULL;
+
+ mutex_lock(&nvmet_pci_epf_ports_mutex);
+ list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
+ if (p->disc_addr.portid == portid) {
+ port = p;
+ break;
+ }
+ }
+ mutex_unlock(&nvmet_pci_epf_ports_mutex);
+
+ return port;
+}
+
+static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_pci_epf_iod *iod =
+ container_of(req, struct nvmet_pci_epf_iod, req);
+
+ iod->status = le16_to_cpu(req->cqe->status) >> 1;
+
+ /* If we have no data to transfer, directly complete the command. */
+ if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
+ nvmet_pci_epf_complete_iod(iod);
+ return;
+ }
+
+ complete(&iod->done);
+}
+
+static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;
+
+ return ilog2(ctrl->mdts) - page_shift;
+}
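+/*
+ * For example, with the default 512 KB mdts and a 4 KB minimum memory page
+ * size (page_shift == 12), this reports ilog2(512 << 10) - 12 = 19 - 12 = 7,
+ * that is, 2^7 * 4 KB = 512 KB, matching the MDTS encoding of the Identify
+ * Controller data structure.
+ */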
+
+static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
+ u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
+ u16 status;
+
+ if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+	if (!(flags & NVME_QUEUE_PHYS_CONTIG)) {
+		status = NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
+		goto err;
+	}
+
+ if (flags & NVME_CQ_IRQ_ENABLED)
+ set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
+
+ cq->pci_addr = pci_addr;
+ cq->qid = cqid;
+ cq->depth = qsize + 1;
+ cq->vector = vector;
+ cq->head = 0;
+ cq->tail = 0;
+ cq->phase = 1;
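+	/*
+	 * Doorbells follow the standard NVMe layout with a 4 B stride: the SQ
+	 * tail doorbell of queue pair qid is at NVME_REG_DBS + qid * 2 * 4
+	 * and its CQ head doorbell immediately after it.
+	 */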
+ cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
+ nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);
+
+ if (!cqid)
+ cq->qes = sizeof(struct nvme_completion);
+ else
+ cq->qes = ctrl->io_cqes;
+ cq->pci_size = cq->qes * cq->depth;
+
+ cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+ if (!cq->iv) {
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto err;
+ }
+
+	status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
+	if (status != NVME_SC_SUCCESS)
+		goto err_remove_irq;
+
+ dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
+ cqid, qsize, cq->qes, cq->vector);
+
+ return NVME_SC_SUCCESS;
+
+err_remove_irq:
+	nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+err:
+	clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
+	clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+	return status;
+}
+
+static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
+
+ if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ cancel_delayed_work_sync(&cq->work);
+ nvmet_pci_epf_drain_queue(cq);
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
+ u16 sqid, u16 flags, u16 qsize, u64 pci_addr)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
+ u16 status;
+
+ if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+	if (!(flags & NVME_QUEUE_PHYS_CONTIG)) {
+		status = NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
+		goto out_clear_bit;
+	}
+
+ sq->pci_addr = pci_addr;
+ sq->qid = sqid;
+ sq->depth = qsize + 1;
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 0;
+ sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32));
+ nvmet_pci_epf_bar_write32(ctrl, sq->db, 0);
+ if (!sqid)
+ sq->qes = 1UL << NVME_ADM_SQES;
+ else
+ sq->qes = ctrl->io_sqes;
+ sq->pci_size = sq->qes * sq->depth;
+
+ status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
+ if (status != NVME_SC_SUCCESS)
+ goto out_clear_bit;
+
+ sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
+ min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
+ if (!sq->iod_wq) {
+ dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid);
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ goto out_destroy_sq;
+ }
+
+ dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
+ sqid, qsize, sq->qes);
+
+ return NVME_SC_SUCCESS;
+
+out_destroy_sq:
+ nvmet_sq_destroy(&sq->nvme_sq);
+out_clear_bit:
+ clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+ return status;
+}
+
+static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
+
+ if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+
+ flush_workqueue(sq->iod_wq);
+ destroy_workqueue(sq->iod_wq);
+ sq->iod_wq = NULL;
+
+ nvmet_pci_epf_drain_queue(sq);
+
+ if (sq->nvme_sq.ctrl)
+ nvmet_sq_destroy(&sq->nvme_sq);
+
+ return NVME_SC_SUCCESS;
+}
+
+static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl,
+ u8 feat, void *data)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_feat_arbitration *arb;
+ struct nvmet_feat_irq_coalesce *irqc;
+ struct nvmet_feat_irq_config *irqcfg;
+ struct nvmet_pci_epf_irq_vector *iv;
+ u16 status;
+
+ switch (feat) {
+ case NVME_FEAT_ARBITRATION:
+ arb = data;
+ if (!ctrl->sq_ab)
+ arb->ab = 0x7;
+ else
+ arb->ab = ilog2(ctrl->sq_ab);
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_COALESCE:
+ irqc = data;
+ irqc->thr = ctrl->irq_vector_threshold;
+ irqc->time = 0;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_CONFIG:
+ irqcfg = data;
+ mutex_lock(&ctrl->irq_lock);
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
+ if (iv) {
+ irqcfg->cd = iv->cd;
+ status = NVME_SC_SUCCESS;
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+ mutex_unlock(&ctrl->irq_lock);
+ return status;
+
+ default:
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+}
+
+static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
+ u8 feat, void *data)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
+ struct nvmet_feat_arbitration *arb;
+ struct nvmet_feat_irq_coalesce *irqc;
+ struct nvmet_feat_irq_config *irqcfg;
+ struct nvmet_pci_epf_irq_vector *iv;
+ u16 status;
+
+ switch (feat) {
+ case NVME_FEAT_ARBITRATION:
+ arb = data;
+ if (arb->ab == 0x7)
+ ctrl->sq_ab = 0;
+ else
+ ctrl->sq_ab = 1 << arb->ab;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_COALESCE:
+ /*
+ * Since we do not implement precise IRQ coalescing timing,
+ * ignore the time field.
+ */
+ irqc = data;
+ ctrl->irq_vector_threshold = irqc->thr + 1;
+ return NVME_SC_SUCCESS;
+
+ case NVME_FEAT_IRQ_CONFIG:
+ irqcfg = data;
+ mutex_lock(&ctrl->irq_lock);
+ iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
+ if (iv) {
+ iv->cd = irqcfg->cd;
+ status = NVME_SC_SUCCESS;
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+ mutex_unlock(&ctrl->irq_lock);
+ return status;
+
+ default:
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+ }
+}
+
+static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_PCI,
+ .add_port = nvmet_pci_epf_add_port,
+ .remove_port = nvmet_pci_epf_remove_port,
+ .queue_response = nvmet_pci_epf_queue_response,
+ .get_mdts = nvmet_pci_epf_get_mdts,
+ .create_cq = nvmet_pci_epf_create_cq,
+ .delete_cq = nvmet_pci_epf_delete_cq,
+ .create_sq = nvmet_pci_epf_create_sq,
+ .delete_sq = nvmet_pci_epf_delete_sq,
+ .get_feature = nvmet_pci_epf_get_feat,
+ .set_feature = nvmet_pci_epf_set_feat,
+};
+
+static void nvmet_pci_epf_cq_work(struct work_struct *work);
+
+static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ unsigned int qid, bool sq)
+{
+ struct nvmet_pci_epf_queue *queue;
+
+ if (sq) {
+ queue = &ctrl->sq[qid];
+ set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags);
+ } else {
+ queue = &ctrl->cq[qid];
+ INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
+ }
+ queue->ctrl = ctrl;
+ queue->qid = qid;
+ spin_lock_init(&queue->lock);
+ INIT_LIST_HEAD(&queue->list);
+}
+
+static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ unsigned int qid;
+
+ ctrl->sq = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
+ if (!ctrl->sq)
+ return -ENOMEM;
+
+ ctrl->cq = kcalloc(ctrl->nr_queues,
+ sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
+ if (!ctrl->cq) {
+ kfree(ctrl->sq);
+ ctrl->sq = NULL;
+ return -ENOMEM;
+ }
+
+ for (qid = 0; qid < ctrl->nr_queues; qid++) {
+ nvmet_pci_epf_init_queue(ctrl, qid, true);
+ nvmet_pci_epf_init_queue(ctrl, qid, false);
+ }
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ kfree(ctrl->sq);
+ ctrl->sq = NULL;
+ kfree(ctrl->cq);
+ ctrl->cq = NULL;
+}
+
+static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *queue)
+{
+ struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
+ int ret;
+
+ ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr,
+ queue->pci_size, &queue->pci_map);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n",
+ queue->qid, ret);
+ return ret;
+ }
+
+ if (queue->pci_map.pci_size < queue->pci_size) {
+ dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
+ queue->qid);
+ nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *queue)
+{
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map);
+}
+
+static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_iod *iod =
+ container_of(work, struct nvmet_pci_epf_iod, work);
+ struct nvmet_req *req = &iod->req;
+ int ret;
+
+ if (!iod->ctrl->link_up) {
+ nvmet_pci_epf_free_iod(iod);
+ return;
+ }
+
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) {
+ iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ goto complete;
+ }
+
+ if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq,
+ &nvmet_pci_epf_fabrics_ops))
+ goto complete;
+
+ iod->data_len = nvmet_req_transfer_len(req);
+ if (iod->data_len) {
+ /*
+ * Get the data DMA transfer direction. Here "device" means the
+ * PCI root-complex host.
+ */
+ if (nvme_is_write(&iod->cmd))
+ iod->dma_dir = DMA_FROM_DEVICE;
+ else
+ iod->dma_dir = DMA_TO_DEVICE;
+
+ /*
+ * Setup the command data buffer and get the command data from
+ * the host if needed.
+ */
+ ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
+ if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
+ ret = nvmet_pci_epf_transfer_iod_data(iod);
+ if (ret) {
+ nvmet_req_uninit(req);
+ goto complete;
+ }
+ }
+
+ req->execute(req);
+
+ /*
+ * If we do not have data to transfer after the command execution
+ * finishes, nvmet_pci_epf_queue_response() will complete the command
+ * directly. No need to wait for the completion in this case.
+ */
+ if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
+ return;
+
+ wait_for_completion(&iod->done);
+
+ if (iod->status == NVME_SC_SUCCESS) {
+ WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
+ nvmet_pci_epf_transfer_iod_data(iod);
+ }
+
+complete:
+ nvmet_pci_epf_complete_iod(iod);
+}
+
+static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
+ struct nvmet_pci_epf_queue *sq)
+{
+ struct nvmet_pci_epf_iod *iod;
+ int ret, n = 0;
+
+ sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
+ while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
+ iod = nvmet_pci_epf_alloc_iod(sq);
+ if (!iod)
+ break;
+
+ /* Get the NVMe command submitted by the host. */
+ ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
+ sq->pci_addr + sq->head * sq->qes,
+ sq->qes, DMA_FROM_DEVICE);
+ if (ret) {
+ /* Not much we can do... */
+ nvmet_pci_epf_free_iod(iod);
+ break;
+ }
+
+ dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
+ sq->qid, sq->head, sq->tail,
+ nvmet_pci_epf_iod_name(iod));
+
+ sq->head++;
+ if (sq->head == sq->depth)
+ sq->head = 0;
+ n++;
+
+ queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
+
+ sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
+ }
+
+ return n;
+}
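+/*
+ * For example, with the default sq_ab of 8, nvmet_pci_epf_process_sq()
+ * fetches at most 8 commands per call, so an SQ with 20 pending commands is
+ * drained over three passes of the round-robin loop in
+ * nvmet_pci_epf_poll_sqs_work() below, preventing one busy queue from
+ * starving the others.
+ */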
+
+static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_ctrl *ctrl =
+ container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
+ struct nvmet_pci_epf_queue *sq;
+ unsigned long last = 0;
+ int i, nr_sqs;
+
+ while (ctrl->link_up && ctrl->enabled) {
+ nr_sqs = 0;
+ /* Do round-robin arbitration. */
+ for (i = 0; i < ctrl->nr_queues; i++) {
+ sq = &ctrl->sq[i];
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ continue;
+ if (nvmet_pci_epf_process_sq(ctrl, sq))
+ nr_sqs++;
+ }
+
+ if (nr_sqs) {
+ last = jiffies;
+ continue;
+ }
+
+		/*
+		 * If we have not received any command on any queue for more
+		 * than NVMET_PCI_EPF_SQ_POLL_IDLE, stop the tight polling
+		 * loop and reschedule with a delay. This avoids "burning" a
+		 * CPU while the controller stays idle for a long time.
+		 */
+ if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE))
+ break;
+
+ cpu_relax();
+ }
+
+ schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_cq_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_queue *cq =
+ container_of(work, struct nvmet_pci_epf_queue, work.work);
+ struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl;
+ struct nvme_completion *cqe;
+ struct nvmet_pci_epf_iod *iod;
+ unsigned long flags;
+ int ret, n = 0;
+
+ ret = nvmet_pci_epf_map_queue(ctrl, cq);
+ if (ret)
+ goto again;
+
+ while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
+
+		/* Check that the CQ is not full, accounting for wrap-around. */
+		cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db);
+		if (cq->head == (cq->tail + 1) % cq->depth) {
+			ret = -EAGAIN;
+			break;
+		}
+
+ spin_lock_irqsave(&cq->lock, flags);
+ iod = list_first_entry_or_null(&cq->list,
+ struct nvmet_pci_epf_iod, link);
+ if (iod)
+ list_del_init(&iod->link);
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (!iod)
+ break;
+
+ /* Post the IOD completion entry. */
+ cqe = &iod->cqe;
+ cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
+
+ dev_dbg(ctrl->dev,
+ "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
+ cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
+ le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
+ cq->phase);
+
+ memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes,
+ cqe, cq->qes);
+
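+		/*
+		 * Advance the tail, inverting the Phase Tag bit on wrap so
+		 * that the host can distinguish newly posted entries from
+		 * stale ones.
+		 */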
+ cq->tail++;
+ if (cq->tail >= cq->depth) {
+ cq->tail = 0;
+ cq->phase ^= 1;
+ }
+
+ nvmet_pci_epf_free_iod(iod);
+
+ /* Signal the host. */
+ nvmet_pci_epf_raise_irq(ctrl, cq, false);
+ n++;
+ }
+
+ nvmet_pci_epf_unmap_queue(ctrl, cq);
+
+	/*
+	 * We do not support the precise IRQ coalescing time (100 ns units as
+	 * per the NVMe specification). So if we posted completion entries
+	 * without reaching the interrupt coalescing threshold, force an
+	 * interrupt now rather than leave the host waiting.
+	 */
+ if (n)
+ nvmet_pci_epf_raise_irq(ctrl, cq, true);
+
+again:
+ if (ret < 0)
+ queue_delayed_work(system_highpri_wq, &cq->work,
+ NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
+}
+
+static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ u64 pci_addr, asq, acq;
+ u32 aqa;
+ u16 status, qsize;
+
+ if (ctrl->enabled)
+ return 0;
+
+ dev_info(ctrl->dev, "Enabling controller\n");
+
+ ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12;
+ ctrl->mps = 1UL << ctrl->mps_shift;
+ ctrl->mps_mask = ctrl->mps - 1;
+
+ ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc);
+ if (ctrl->io_sqes < sizeof(struct nvme_command)) {
+ dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
+ ctrl->io_sqes, sizeof(struct nvme_command));
+ return -EINVAL;
+ }
+
+ ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
+ if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
+		dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
+			ctrl->io_cqes, sizeof(struct nvme_completion));
+ return -EINVAL;
+ }
+
+ /* Create the admin queue. */
+ aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA);
+ asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
+ acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);
+
+ qsize = (aqa & 0x0fff0000) >> 16;
+ pci_addr = acq & GENMASK_ULL(63, 12);
+ status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0,
+ NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG,
+ qsize, pci_addr, 0);
+ if (status != NVME_SC_SUCCESS) {
+ dev_err(ctrl->dev, "Failed to create admin completion queue\n");
+ return -EINVAL;
+ }
+
+ qsize = aqa & 0x00000fff;
+ pci_addr = asq & GENMASK_ULL(63, 12);
+ status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG,
+ qsize, pci_addr);
+ if (status != NVME_SC_SUCCESS) {
+ dev_err(ctrl->dev, "Failed to create admin submission queue\n");
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+ return -EINVAL;
+ }
+
+ ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
+ ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
+ ctrl->enabled = true;
+
+ /* Start polling the controller SQs. */
+ schedule_delayed_work(&ctrl->poll_sqs, 0);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ int qid;
+
+ if (!ctrl->enabled)
+ return;
+
+ dev_info(ctrl->dev, "Disabling controller\n");
+
+ ctrl->enabled = false;
+ cancel_delayed_work_sync(&ctrl->poll_sqs);
+
+ /* Delete all I/O queues first. */
+ for (qid = 1; qid < ctrl->nr_queues; qid++)
+ nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);
+
+ for (qid = 1; qid < ctrl->nr_queues; qid++)
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);
+
+ /* Delete the admin queue last. */
+ nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
+ nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+}
+
+static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
+{
+ struct nvmet_pci_epf_ctrl *ctrl =
+ container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
+ u32 old_cc, new_cc;
+ int ret;
+
+ if (!ctrl->tctrl)
+ return;
+
+ old_cc = ctrl->cc;
+ new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
+ ctrl->cc = new_cc;
+
+	if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
+		ret = nvmet_pci_epf_enable_ctrl(ctrl);
+		if (ret) {
+			/*
+			 * Do not return without rescheduling: signal a fatal
+			 * error to the host instead, so that CC polling
+			 * continues and a later host retry is still seen.
+			 */
+			ctrl->csts |= NVME_CSTS_CFS;
+		} else {
+			ctrl->csts |= NVME_CSTS_RDY;
+		}
+	}
+
+ if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) {
+ nvmet_pci_epf_disable_ctrl(ctrl);
+ ctrl->csts &= ~NVME_CSTS_RDY;
+ }
+
+ if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
+ nvmet_pci_epf_disable_ctrl(ctrl);
+ ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+ }
+
+ if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
+ ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
+
+ nvmet_update_cc(ctrl->tctrl, ctrl->cc);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+
+ schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ struct nvmet_ctrl *tctrl = ctrl->tctrl;
+
+ ctrl->bar = ctrl->nvme_epf->reg_bar;
+
+ /* Copy the target controller capabilities as a base. */
+ ctrl->cap = tctrl->cap;
+
+ /* Contiguous Queues Required (CQR). */
+ ctrl->cap |= 0x1ULL << 16;
+
+	/* Set Doorbell stride to 4 B (DSTRD). */
+ ctrl->cap &= ~GENMASK_ULL(35, 32);
+
+ /* Clear NVM Subsystem Reset Supported (NSSRS). */
+ ctrl->cap &= ~(0x1ULL << 36);
+
+ /* Clear Boot Partition Support (BPS). */
+ ctrl->cap &= ~(0x1ULL << 45);
+
+ /* Clear Persistent Memory Region Supported (PMRS). */
+ ctrl->cap &= ~(0x1ULL << 56);
+
+ /* Clear Controller Memory Buffer Supported (CMBS). */
+ ctrl->cap &= ~(0x1ULL << 57);
+
+ /* Controller configuration. */
+ ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE);
+
+ /* Controller status. */
+ ctrl->csts = ctrl->tctrl->csts;
+
+ nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+}
+
+static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
+ unsigned int max_nr_queues)
+{
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+ struct nvmet_alloc_ctrl_args args = {};
+ char hostnqn[NVMF_NQN_SIZE];
+ uuid_t id;
+ int ret;
+
+ memset(ctrl, 0, sizeof(*ctrl));
+ ctrl->dev = &nvme_epf->epf->dev;
+ mutex_init(&ctrl->irq_lock);
+ ctrl->nvme_epf = nvme_epf;
+ ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
+ INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
+ INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);
+
+ ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
+ max_nr_queues * NVMET_MAX_QUEUE_SIZE,
+ sizeof(struct nvmet_pci_epf_iod));
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
+ return ret;
+ }
+
+ ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
+ if (!ctrl->port) {
+ dev_err(ctrl->dev, "Port not found\n");
+ ret = -EINVAL;
+ goto out_mempool_exit;
+ }
+
+ /* Create the target controller. */
+ uuid_gen(&id);
+ snprintf(hostnqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
+ args.port = ctrl->port;
+ args.subsysnqn = nvme_epf->subsysnqn;
+ memset(&id, 0, sizeof(uuid_t));
+ args.hostid = &id;
+ args.hostnqn = hostnqn;
+ args.ops = &nvmet_pci_epf_fabrics_ops;
+
+ ctrl->tctrl = nvmet_alloc_ctrl(&args);
+ if (!ctrl->tctrl) {
+ dev_err(ctrl->dev, "Failed to create target controller\n");
+ ret = -ENOMEM;
+ goto out_mempool_exit;
+ }
+ ctrl->tctrl->drvdata = ctrl;
+
+ /* We do not support protection information for now. */
+ if (ctrl->tctrl->pi_support) {
+ dev_err(ctrl->dev,
+ "Protection information (PI) is not supported\n");
+		ret = -EOPNOTSUPP;
+ goto out_put_ctrl;
+ }
+
+ /* Allocate our queues, up to the maximum number. */
+ ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
+ ret = nvmet_pci_epf_alloc_queues(ctrl);
+ if (ret)
+ goto out_put_ctrl;
+
+	/*
+	 * Allocate the IRQ vector descriptors: we can never have more
+	 * vectors in use than the maximum number of queues.
+	 */
+ ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
+ if (ret)
+ goto out_free_queues;
+
+ dev_info(ctrl->dev,
+ "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
+ ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
+ ctrl->mdts);
+
+ /* Initialize BAR 0 using the target controller CAP. */
+ nvmet_pci_epf_init_bar(ctrl);
+
+ return 0;
+
+out_free_queues:
+ nvmet_pci_epf_free_queues(ctrl);
+out_put_ctrl:
+ nvmet_ctrl_put(ctrl->tctrl);
+ ctrl->tctrl = NULL;
+out_mempool_exit:
+ mempool_exit(&ctrl->iod_pool);
+ return ret;
+}
+
+static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
+}
+
+static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ cancel_delayed_work_sync(&ctrl->poll_cc);
+
+ nvmet_pci_epf_disable_ctrl(ctrl);
+}
+
+static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ if (!ctrl->tctrl)
+ return;
+
+ dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
+ ctrl->tctrl->subsys->subsysnqn);
+
+ nvmet_pci_epf_stop_ctrl(ctrl);
+
+ nvmet_pci_epf_free_queues(ctrl);
+ nvmet_pci_epf_free_irq_vectors(ctrl);
+
+ nvmet_ctrl_put(ctrl->tctrl);
+ ctrl->tctrl = NULL;
+
+ mempool_exit(&ctrl->iod_pool);
+}
+
+static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ size_t reg_size, reg_bar_size;
+ size_t msix_table_size = 0;
+
+	/*
+	 * The first free BAR will be our register BAR and, per the NVMe
+	 * specification, it must be BAR 0.
+	 */
+ if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
+ dev_err(&epf->dev, "BAR 0 is not free\n");
+ return -ENODEV;
+ }
+
+ if (epc_features->bar[BAR_0].only_64bit)
+ epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+	/*
+	 * Calculate the size of the register BAR: NVMe registers first, with
+	 * enough space for the doorbells, followed by the MSI-X table if
+	 * supported.
+	 */
+ reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
+ reg_size = ALIGN(reg_size, 8);
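+	/*
+	 * For example, with NVMET_NR_QUEUES == 128, the doorbell array needs
+	 * 128 * 2 * 4 = 1024 B, so reg_size here is 0x1000 + 1024 = 5120 B
+	 * (the NVMe register block itself ends at NVME_REG_DBS == 0x1000).
+	 */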
+
+ if (epc_features->msix_capable) {
+ size_t pba_size;
+
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
+ nvme_epf->msix_table_offset = reg_size;
+ pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
+
+ reg_size += msix_table_size + pba_size;
+ }
+
+ if (epc_features->bar[BAR_0].type == BAR_FIXED) {
+ if (reg_size > epc_features->bar[BAR_0].fixed_size) {
+ dev_err(&epf->dev,
+ "BAR 0 size %llu B too small, need %zu B\n",
+ epc_features->bar[BAR_0].fixed_size,
+ reg_size);
+ return -ENOMEM;
+ }
+ reg_bar_size = epc_features->bar[BAR_0].fixed_size;
+ } else {
+ reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
+ }
+
+ nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
+ epc_features, PRIMARY_INTERFACE);
+ if (!nvme_epf->reg_bar) {
+ dev_err(&epf->dev, "Failed to allocate BAR 0\n");
+ return -ENOMEM;
+ }
+ memset(nvme_epf->reg_bar, 0, reg_bar_size);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ if (!nvme_epf->reg_bar)
+ return;
+
+ pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
+ nvme_epf->reg_bar = NULL;
+}
+
+static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
+{
+ struct pci_epf *epf = nvme_epf->epf;
+
+ pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[BAR_0]);
+}
+
+static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
+{
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ struct pci_epf *epf = nvme_epf->epf;
+ int ret;
+
+	/* Enable MSI-X if supported, otherwise fall back to MSI, then INTx. */
+ if (epc_features->msix_capable && epf->msix_interrupts) {
+ ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts, BAR_0,
+ nvme_epf->msix_table_offset);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to configure MSI-X\n");
+ return ret;
+ }
+
+ nvme_epf->nr_vectors = epf->msix_interrupts;
+ nvme_epf->irq_type = PCI_IRQ_MSIX;
+
+ return 0;
+ }
+
+ if (epc_features->msi_capable && epf->msi_interrupts) {
+ ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to configure MSI\n");
+ return ret;
+ }
+
+ nvme_epf->nr_vectors = epf->msi_interrupts;
+ nvme_epf->irq_type = PCI_IRQ_MSI;
+
+ return 0;
+ }
+
+ /* MSI and MSI-X are not supported: fall back to INTx. */
+ nvme_epf->nr_vectors = 1;
+ nvme_epf->irq_type = PCI_IRQ_INTX;
+
+ return 0;
+}
+
+static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ const struct pci_epc_features *epc_features = nvme_epf->epc_features;
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+ unsigned int max_nr_queues = NVMET_NR_QUEUES;
+ int ret;
+
+ /* For now, do not support virtual functions. */
+ if (epf->vfunc_no > 0) {
+ dev_err(&epf->dev, "Virtual functions are not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cap the maximum number of queues we can support on the controller
+ * with the number of IRQs we can use.
+ */
+ if (epc_features->msix_capable && epf->msix_interrupts) {
+ dev_info(&epf->dev,
+ "PCI endpoint controller supports MSI-X, %u vectors\n",
+ epf->msix_interrupts);
+ max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
+ } else if (epc_features->msi_capable && epf->msi_interrupts) {
+ dev_info(&epf->dev,
+ "PCI endpoint controller supports MSI, %u vectors\n",
+ epf->msi_interrupts);
+ max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
+ }
+
+ if (max_nr_queues < 2) {
+ dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
+ max_nr_queues);
+ return -EINVAL;
+ }
+
+ /* Create the target controller. */
+ ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to create NVMe PCI target controller (err=%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Set device ID, class, etc. */
+ epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
+ epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
+ ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
+ epf->header);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to write configuration header (err=%d)\n", ret);
+ goto out_destroy_ctrl;
+ }
+
+ ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[BAR_0]);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
+ goto out_destroy_ctrl;
+ }
+
+ /*
+ * Enable interrupts and start polling the controller BAR if we do not
+ * have a link up notifier.
+ */
+ ret = nvmet_pci_epf_init_irq(nvme_epf);
+ if (ret)
+ goto out_clear_bar;
+
+ if (!epc_features->linkup_notifier) {
+ ctrl->link_up = true;
+ nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
+ }
+
+ return 0;
+
+out_clear_bar:
+ nvmet_pci_epf_clear_bar(nvme_epf);
+out_destroy_ctrl:
+ nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
+ return ret;
+}
+
+static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = false;
+ nvmet_pci_epf_destroy_ctrl(ctrl);
+
+ nvmet_pci_epf_deinit_dma(nvme_epf);
+ nvmet_pci_epf_clear_bar(nvme_epf);
+}
+
+static int nvmet_pci_epf_link_up(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = true;
+ nvmet_pci_epf_start_ctrl(ctrl);
+
+ return 0;
+}
+
+static int nvmet_pci_epf_link_down(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
+
+ ctrl->link_up = false;
+ nvmet_pci_epf_stop_ctrl(ctrl);
+
+ return 0;
+}
+
+static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
+ .epc_init = nvmet_pci_epf_epc_init,
+ .epc_deinit = nvmet_pci_epf_epc_deinit,
+ .link_up = nvmet_pci_epf_link_up,
+ .link_down = nvmet_pci_epf_link_down,
+};
+
+static int nvmet_pci_epf_bind(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ const struct pci_epc_features *epc_features;
+ struct pci_epc *epc = epf->epc;
+ int ret;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
+ if (!epc_features) {
+ dev_err(&epf->dev, "epc_features not implemented\n");
+ return -EOPNOTSUPP;
+ }
+ nvme_epf->epc_features = epc_features;
+
+ ret = nvmet_pci_epf_configure_bar(nvme_epf);
+ if (ret)
+ return ret;
+
+ nvmet_pci_epf_init_dma(nvme_epf);
+
+ return 0;
+}
+
+static void nvmet_pci_epf_unbind(struct pci_epf *epf)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+
+ nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
+
+ if (epc->init_complete) {
+ nvmet_pci_epf_deinit_dma(nvme_epf);
+ nvmet_pci_epf_clear_bar(nvme_epf);
+ }
+
+ nvmet_pci_epf_free_bar(nvme_epf);
+}
+
+static struct pci_epf_header nvme_epf_pci_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .progif_code = 0x02, /* NVM Express */
+ .baseclass_code = PCI_BASE_CLASS_STORAGE,
+ .subclass_code = 0x08, /* Non-Volatile Memory controller */
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static int nvmet_pci_epf_probe(struct pci_epf *epf,
+ const struct pci_epf_device_id *id)
+{
+ struct nvmet_pci_epf *nvme_epf;
+ int ret;
+
+ nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
+ if (!nvme_epf)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
+ if (ret)
+ return ret;
+
+ nvme_epf->epf = epf;
+ nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;
+
+ epf->event_ops = &nvmet_pci_epf_event_ops;
+ epf->header = &nvme_epf_pci_header;
+ epf_set_drvdata(epf, nvme_epf);
+
+ return 0;
+}
+
+#define to_nvme_epf(epf_group) \
+ container_of(epf_group, struct nvmet_pci_epf, group)
+
+static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
+}
+
+static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+ u16 portid;
+
+ /* Do not allow setting this when the function is already started. */
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ if (!len)
+ return -EINVAL;
+
+ if (kstrtou16(page, 0, &portid))
+ return -EINVAL;
+
+ nvme_epf->portid = cpu_to_le16(portid);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, portid);
+
+static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
+}
+
+static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ /* Do not allow setting this when the function is already started. */
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ if (!len)
+ return -EINVAL;
+
+ strscpy(nvme_epf->subsysnqn, page, len);
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
+
+static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+
+ return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
+}
+
+static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
+ unsigned long mdts_kb;
+ int ret;
+
+ if (nvme_epf->ctrl.tctrl)
+ return -EBUSY;
+
+ ret = kstrtoul(page, 0, &mdts_kb);
+ if (ret)
+ return ret;
+ if (!mdts_kb)
+ mdts_kb = NVMET_PCI_EPF_MDTS_KB;
+ else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
+ mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;
+
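+	/*
+	 * NVMe expresses MDTS as a power-of-two exponent, so only
+	 * power-of-two sizes can be advertised; reject anything else.
+	 */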
+ if (!is_power_of_2(mdts_kb))
+ return -EINVAL;
+
+ nvme_epf->mdts_kb = mdts_kb;
+
+ return len;
+}
+
+CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);
+
+static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
+ &nvmet_pci_epf_attr_portid,
+ &nvmet_pci_epf_attr_subsysnqn,
+ &nvmet_pci_epf_attr_mdts_kb,
+ NULL,
+};
+
+static const struct config_item_type nvmet_pci_epf_group_type = {
+ .ct_attrs = nvmet_pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
+ struct config_group *group)
+{
+ struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
+
+ config_group_init_type_name(&nvme_epf->group, "nvme",
+ &nvmet_pci_epf_group_type);
+
+ return &nvme_epf->group;
+}
+
+static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
+ { .name = "nvmet_pci_epf" },
+ {},
+};
+
+static struct pci_epf_ops nvmet_pci_epf_ops = {
+ .bind = nvmet_pci_epf_bind,
+ .unbind = nvmet_pci_epf_unbind,
+ .add_cfs = nvmet_pci_epf_add_cfs,
+};
+
+static struct pci_epf_driver nvmet_pci_epf_driver = {
+ .driver.name = "nvmet_pci_epf",
+ .probe = nvmet_pci_epf_probe,
+ .id_table = nvmet_pci_epf_ids,
+ .ops = &nvmet_pci_epf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init nvmet_pci_epf_init_module(void)
+{
+ int ret;
+
+ ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
+ if (ret)
+ return ret;
+
+ ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
+ if (ret) {
+ pci_epf_unregister_driver(&nvmet_pci_epf_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit nvmet_pci_epf_cleanup_module(void)
+{
+ nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
+ pci_epf_unregister_driver(&nvmet_pci_epf_driver);
+}
+
+module_init(nvmet_pci_epf_init_module);
+module_exit(nvmet_pci_epf_cleanup_module);
+
+MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
+MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c
index 90e9f5bbe581..cd22d8333314 100644
--- a/drivers/nvme/target/pr.c
+++ b/drivers/nvme/target/pr.c
@@ -60,7 +60,7 @@ u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
goto success;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->pr.enable)
WRITE_ONCE(ns->pr.notify_mask, mask);
}
@@ -1056,7 +1056,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
* nvmet_pr_init_ns(), see more details in nvmet_ns_enable().
* So just check ns->pr.enable.
*/
- xa_for_each(&subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
&ctrl->hostid);
@@ -1067,7 +1067,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
return 0;
free_per_ctrl_refs:
- xa_for_each(&subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
if (pc_ref)
@@ -1087,7 +1087,7 @@ void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
kfifo_free(&ctrl->pr_log_mgr.log_queue);
mutex_destroy(&ctrl->pr_log_mgr.lock);
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
if (pc_ref)
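The nvmet_for_each_enabled_ns() iterator substituted above is defined elsewhere in this series, so it does not appear in this diff. Assuming it simply filters xa_for_each() on each namespace's enabled flag, a minimal sketch would be:

	#define nvmet_for_each_enabled_ns(xa, index, entry)	\
		xa_for_each(xa, index, entry)			\
			if ((entry)->enabled)

Each call site above still re-checks ns->pr.enable, since persistent reservations are a separate per-namespace opt-in.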
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3aef35b05111..29a60fabfcc8 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -586,8 +586,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
unsigned int len = sg->length;
- if (bio_add_pc_page(bdev_get_queue(bio->bi_bdev), bio,
- sg_page(sg), len, sg->offset) != len) {
+ if (bio_add_page(bio, sg_page(sg), len, sg->offset) != len) {
status = NVME_SC_INTERNAL;
goto out_put_bio;
}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c1f1c810e810..8770004d9b08 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -340,6 +340,15 @@ static int of_bus_default_flags_match(struct device_node *np)
return of_property_present(np, "#address-cells") && (of_bus_n_addr_cells(np) == 3);
}
+static int of_bus_default_match(struct device_node *np)
+{
+ /*
+ * Check for presence first since of_bus_n_addr_cells() will warn when
+ * walking parent nodes.
+ */
+ return of_property_present(np, "#address-cells");
+}
+
/*
* Array of bus specific translators
*/
@@ -384,7 +393,7 @@ static const struct of_bus of_busses[] = {
{
.name = "default",
.addresses = "reg",
- .match = NULL,
+ .match = of_bus_default_match,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.translate = of_bus_default_translate,
@@ -399,7 +408,6 @@ static const struct of_bus *of_match_bus(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(of_busses); i++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
- BUG();
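+	/*
+	 * No bus matched: with of_bus_default_match() in place, nodes
+	 * without #address-cells no longer match the default bus.
+	 */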
return NULL;
}
@@ -521,6 +529,8 @@ static u64 __of_translate_address(struct device_node *node,
if (parent == NULL)
return OF_BAD_ADDR;
bus = of_match_bus(parent);
+ if (!bus)
+ return OF_BAD_ADDR;
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
@@ -564,6 +574,8 @@ static u64 __of_translate_address(struct device_node *node,
/* Get new parent bus and counts */
pbus = of_match_bus(parent);
+ if (!pbus)
+ return OF_BAD_ADDR;
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %pOF\n", dev);
@@ -703,7 +715,7 @@ const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
/* match the parent's bus type */
bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci") && (bar_no >= 0))
+ if (!bus || (strcmp(bus->name, "pci") && (bar_no >= 0)))
return NULL;
/* Get "reg" or "assigned-addresses" property */
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index fa39611071b3..cd310b26b50c 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -34,5 +34,18 @@
};
};
};
+
+ platform-tests-2 {
+ // No #address-cells or #size-cells
+ node {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ test-device@100 {
+ compatible = "test-sub-device";
+ reg = <0x100 1>;
+ };
+ };
+ };
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 438fd70fa995..0fa0c0fd9a6a 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1380,6 +1380,7 @@ static void __init of_unittest_bus_3cell_ranges(void)
static void __init of_unittest_reg(void)
{
struct device_node *np;
+ struct resource res;
int ret;
u64 addr, size;
@@ -1396,6 +1397,19 @@ static void __init of_unittest_reg(void)
np, addr);
of_node_put(np);
+
+ np = of_find_node_by_path("/testcase-data/platform-tests-2/node/test-device@100");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ unittest(ret == -EINVAL, "of_address_to_resource(%pOF) expected error on untranslatable address\n",
+ np);
+
+ of_node_put(np);
+
}
struct of_unittest_expected_res {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 35270172c833..f57ea36d125d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1670,6 +1670,19 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
+/*
+ * pci_device_irq_get_affinity - get IRQ affinity mask for device
+ * @dev: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @dev and @irq_vec.
+ */
+static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
+ unsigned int irq_vec)
+{
+ return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
+}
+
const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@@ -1677,6 +1690,7 @@ const struct bus_type pci_bus_type = {
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
+ .irq_get_affinity = pci_device_irq_get_affinity,
.dev_groups = pci_dev_groups,
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
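With this op wired up, generic code can resolve a device's IRQ affinity without PCI-specific knowledge. A minimal sketch of such a dispatcher, under the assumption that struct bus_type now carries the irq_get_affinity callback (the helper name here is illustrative, not part of this diff):

	static const struct cpumask *dev_irq_get_affinity(struct device *dev,
							  unsigned int irq_vec)
	{
		/* Busses without an affinity callback simply report none. */
		if (!dev->bus || !dev->bus->irq_get_affinity)
			return NULL;

		return dev->bus->irq_get_affinity(dev, irq_vec);
	}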
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index b59cacc740fa..0a5e7efbce2c 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -303,14 +303,17 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
if (ret)
return ret;
- ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret)
- return ret;
-
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = no_free_ptr(data);
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
pcie_bwnotif_enable(srv);
}
}
@@ -331,11 +334,15 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
- pcie_bwnotif_disable(srv->port);
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
- scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem)
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem)
srv->port->link_bwctrl = NULL;
+ }
+ }
}
static int pcie_bwnotif_suspend(struct pcie_device *srv)
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 1d4d01e1275e..06fd317529fc 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -168,6 +168,8 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
[PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
};
/* sysfs definitions */
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index b20fa600e510..ef959e66db7c 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1713,8 +1713,8 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
goto done;
}
- for (i = 0; i < CMN_MAX_DTCS; i++)
- if (val->dtc_count[i] == CMN_DT_NUM_COUNTERS)
+ for_each_hw_dtc_idx(hw, dtc, idx)
+ if (val->dtc_count[dtc] == CMN_DT_NUM_COUNTERS)
goto done;
for_each_hw_dn(hw, dn, i) {
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index d0ef611240aa..8116c7846a46 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -54,65 +54,24 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(scf_cache_wb, 0xF3),
NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_rsp, 0x105),
NV_CSPMU_EVENT_ATTR_4(socket, wb_data, 0x109),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_rsp, 0x10d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_data, 0x111),
NV_CSPMU_EVENT_ATTR_4(socket, rd_outstanding, 0x115),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_outstanding, 0x119),
- NV_CSPMU_EVENT_ATTR_4(socket, wb_outstanding, 0x11d),
- NV_CSPMU_EVENT_ATTR_4(socket, wr_outstanding, 0x121),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_outstanding, 0x125),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_outstanding, 0x129),
NV_CSPMU_EVENT_ATTR_4(socket, rd_access, 0x12d),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_access, 0x131),
NV_CSPMU_EVENT_ATTR_4(socket, wb_access, 0x135),
NV_CSPMU_EVENT_ATTR_4(socket, wr_access, 0x139),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_access, 0x13d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_access, 0x141),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_data, 0x145),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_access, 0x149),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_access, 0x14d),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_outstanding, 0x151),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_outstanding, 0x155),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_data, 0x159),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_access, 0x15d),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_access, 0x161),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_outstanding, 0x165),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_outstanding, 0x169),
ARM_CSPMU_EVENT_ATTR(gmem_rd_data, 0x16d),
ARM_CSPMU_EVENT_ATTR(gmem_rd_access, 0x16e),
ARM_CSPMU_EVENT_ATTR(gmem_rd_outstanding, 0x16f),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_rsp, 0x170),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_access, 0x171),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_outstanding, 0x172),
ARM_CSPMU_EVENT_ATTR(gmem_wb_data, 0x173),
ARM_CSPMU_EVENT_ATTR(gmem_wb_access, 0x174),
- ARM_CSPMU_EVENT_ATTR(gmem_wb_outstanding, 0x175),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_rsp, 0x176),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_access, 0x177),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_outstanding, 0x178),
ARM_CSPMU_EVENT_ATTR(gmem_wr_data, 0x179),
- ARM_CSPMU_EVENT_ATTR(gmem_wr_outstanding, 0x17a),
ARM_CSPMU_EVENT_ATTR(gmem_wr_access, 0x17b),
NV_CSPMU_EVENT_ATTR_4(socket, wr_data, 0x17c),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_data, 0x180),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_data, 0x184),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_access, 0x188),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_outstanding, 0x18c),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_data, 0x190),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_data, 0x194),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_access, 0x198),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_outstanding, 0x19c),
-
ARM_CSPMU_EVENT_ATTR(gmem_wr_total_bytes, 0x1a0),
ARM_CSPMU_EVENT_ATTR(remote_socket_wr_total_bytes, 0x1a1),
ARM_CSPMU_EVENT_ATTR(remote_socket_rd_data, 0x1a2),
@@ -122,35 +81,12 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(cmem_rd_data, 0x1a5),
ARM_CSPMU_EVENT_ATTR(cmem_rd_access, 0x1a6),
ARM_CSPMU_EVENT_ATTR(cmem_rd_outstanding, 0x1a7),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_rsp, 0x1a8),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_access, 0x1a9),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_outstanding, 0x1aa),
ARM_CSPMU_EVENT_ATTR(cmem_wb_data, 0x1ab),
ARM_CSPMU_EVENT_ATTR(cmem_wb_access, 0x1ac),
- ARM_CSPMU_EVENT_ATTR(cmem_wb_outstanding, 0x1ad),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_rsp, 0x1ae),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_access, 0x1af),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_outstanding, 0x1b0),
ARM_CSPMU_EVENT_ATTR(cmem_wr_data, 0x1b1),
- ARM_CSPMU_EVENT_ATTR(cmem_wr_outstanding, 0x1b2),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_data, 0x1b3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_access, 0x1b7),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_access, 0x1bb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_outstanding, 0x1bf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_outstanding, 0x1c3),
-
- ARM_CSPMU_EVENT_ATTR(ocu_prb_access, 0x1c7),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_data, 0x1c8),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_outstanding, 0x1c9),
ARM_CSPMU_EVENT_ATTR(cmem_wr_access, 0x1ca),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_access, 0x1cb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_data, 0x1cf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_data, 0x1d3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_outstanding, 0x1d7),
-
ARM_CSPMU_EVENT_ATTR(cmem_wr_total_bytes, 0x1db),
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
@@ -194,6 +130,7 @@ static struct attribute *pcie_pmu_format_attrs[] = {
static struct attribute *nvlink_c2c_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
+ ARM_CSPMU_FORMAT_ATTR(port, "config1:0-1"),
NULL,
};
@@ -238,10 +175,12 @@ static u32 nv_cspmu_event_filter(const struct perf_event *event)
const struct nv_cspmu_ctx *ctx =
to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));
- if (ctx->filter_mask == 0)
+ const u32 filter_val = event->attr.config1 & ctx->filter_mask;
+
+ if (filter_val == 0)
return ctx->filter_default_val;
- return event->attr.config1 & ctx->filter_mask;
+ return filter_val;
}
enum nv_cspmu_name_fmt {
@@ -274,7 +213,7 @@ static const struct nv_cspmu_match nv_cspmu_match[] = {
{
.prodid = 0x104,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c1_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
@@ -284,7 +223,7 @@ static const struct nv_cspmu_match nv_cspmu_match[] = {
{
.prodid = 0x105,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c0_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index fd5b78732603..f5e6878db9d6 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -85,6 +85,7 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
+#define SPE_PMU_FEAT_DISCARD (1UL << 7)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
@@ -193,6 +194,9 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO 34
#define ATTR_CFG_FLD_store_filter_HI 34
+#define ATTR_CFG_FLD_discard_CFG config /* PMBLIMITR_EL1.FM = DISCARD */
+#define ATTR_CFG_FLD_discard_LO 35
+#define ATTR_CFG_FLD_discard_HI 35
#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO 0
@@ -216,6 +220,7 @@ GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
+GEN_PMU_FORMAT_ATTR(discard);
static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_ts_enable.attr,
@@ -228,6 +233,7 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_event_filter.attr,
&format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
+ &format_attr_discard.attr,
NULL,
};
@@ -238,6 +244,9 @@ static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+ if (attr == &format_attr_discard.attr && !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return 0;
+
if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return 0;
@@ -502,6 +511,12 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
u64 base, limit;
struct arm_spe_pmu_buf *buf;
+ if (ATTR_CFG_GET_FLD(&event->attr, discard)) {
+ limit = FIELD_PREP(PMBLIMITR_EL1_FM, PMBLIMITR_EL1_FM_DISCARD);
+ limit |= PMBLIMITR_EL1_E;
+ goto out_write_limit;
+ }
+
/* Start a new aux session */
buf = perf_aux_output_begin(handle, event);
if (!buf) {
@@ -743,6 +758,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ if (ATTR_CFG_GET_FLD(&event->attr, discard) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return -EOPNOTSUPP;
+
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
@@ -1027,6 +1046,9 @@ static void __arm_spe_pmu_dev_probe(void *info)
if (FIELD_GET(PMSIDR_EL1_ERND, reg))
spe_pmu->features |= SPE_PMU_FEAT_ERND;
+ if (spe_pmu->pmsver >= ID_AA64DFR0_EL1_PMSVer_V1P2)
+ spe_pmu->features |= SPE_PMU_FEAT_DISCARD;
+
/* This field has a spaced out encoding, so just use a look-up */
fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
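For illustration, a userspace consumer would request the new discard mode through config bit 35, per the ATTR_CFG_FLD_discard_* definitions above. A hypothetical sketch (the PMU type must be read from sysfs; the helper name and values are assumptions, not part of this diff):

	/* Hypothetical sketch: open an SPE event in discard mode. */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_spe_discard(int cpu, unsigned int spe_pmu_type)
	{
		struct perf_event_attr attr = {
			.type = spe_pmu_type,	/* /sys/bus/event_source/devices/arm_spe_0/type */
			.size = sizeof(attr),
			.config = 1ULL << 35,	/* discard=1: PMBLIMITR_EL1.FM = DISCARD */
			.sample_period = 1024,
			.disabled = 1,
		};

		return syscall(SYS_perf_event_open, &attr, -1, cpu, -1, 0);
	}

In discard mode the limit register is programmed without starting an AUX session, so profiling runs but no trace data is written to memory.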
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 9cbea9675e21..cccecae9823f 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -20,7 +20,6 @@
#include <linux/sysfs.h>
#include <linux/types.h>
-#define DWC_PCIE_VSEC_RAS_DES_ID 0x02
#define DWC_PCIE_EVENT_CNT_CTL 0x8
/*
@@ -100,14 +99,23 @@ struct dwc_pcie_dev_info {
struct list_head dev_node;
};
-struct dwc_pcie_vendor_id {
- int vendor_id;
+struct dwc_pcie_pmu_vsec_id {
+ u16 vendor_id;
+ u16 vsec_id;
+ u8 vsec_rev;
};
-static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
- {.vendor_id = PCI_VENDOR_ID_ALIBABA },
- {.vendor_id = PCI_VENDOR_ID_AMPERE },
- {.vendor_id = PCI_VENDOR_ID_QCOM },
+/*
+ * VSEC IDs are allocated by the vendor, so a given ID may mean different
+ * things to different vendors. See PCIe r6.0, sec 7.9.5.2.
+ */
+static const struct dwc_pcie_pmu_vsec_id dwc_pcie_pmu_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_ALIBABA,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_AMPERE,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
{} /* terminator */
};
@@ -199,8 +207,8 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
- DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x08),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x09),
/* Group #1 */
DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
@@ -519,31 +527,28 @@ static void dwc_pcie_unregister_pmu(void *data)
perf_pmu_unregister(&pcie_pmu->pmu);
}
-static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
+static u16 dwc_pcie_des_cap(struct pci_dev *pdev)
{
- const struct dwc_pcie_vendor_id *vid;
- u16 vsec = 0;
+ const struct dwc_pcie_pmu_vsec_id *vid;
+ u16 vsec;
u32 val;
if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
- return false;
+ return 0;
- for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
+ for (vid = dwc_pcie_pmu_vsec_ids; vid->vendor_id; vid++) {
vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
- DWC_PCIE_VSEC_RAS_DES_ID);
- if (vsec)
- break;
+ vid->vsec_id);
+ if (vsec) {
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER,
+ &val);
+ if (PCI_VNDR_HEADER_REV(val) == vid->vsec_rev) {
+ pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
+ return vsec;
+ }
+ }
}
- if (!vsec)
- return false;
-
- pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
- if (PCI_VNDR_HEADER_REV(val) != 0x04)
- return false;
-
- pci_dbg(pdev,
- "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
- return true;
+ return 0;
}
static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
@@ -587,7 +592,7 @@ static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- if (!dwc_pcie_match_des_cap(pdev))
+ if (!dwc_pcie_des_cap(pdev))
return NOTIFY_DONE;
if (dwc_pcie_register_dev(pdev))
return NOTIFY_BAD;
@@ -612,13 +617,14 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
struct pci_dev *pdev = plat_dev->dev.platform_data;
struct dwc_pcie_pmu *pcie_pmu;
char *name;
- u32 sbdf, val;
+ u32 sbdf;
u16 vsec;
int ret;
- vsec = pci_find_vsec_capability(pdev, pdev->vendor,
- DWC_PCIE_VSEC_RAS_DES_ID);
- pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ vsec = dwc_pcie_des_cap(pdev);
+ if (!vsec)
+ return -ENODEV;
+
sbdf = plat_dev->id;
name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
if (!name)
@@ -730,7 +736,7 @@ static int __init dwc_pcie_pmu_init(void)
int ret;
for_each_pci_dev(pdev) {
- if (!dwc_pcie_match_des_cap(pdev))
+ if (!dwc_pcie_des_cap(pdev))
continue;
ret = dwc_pcie_register_dev(pdev);
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 3c856d9a4e97..843f163e6c33 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -63,8 +63,21 @@
static DEFINE_IDA(ddr_ida);
+/*
+ * V1 supports one read transaction, one write transaction and one read
+ * beats event, corresponding respectively to counters 2, 3 and 4.
+ */
+#define DDR_PERF_AXI_FILTER_V1 0x1
+
+/*
+ * V2 supports one read beats event and three write beats events,
+ * corresponding respectively to counters 2-5.
+ */
+#define DDR_PERF_AXI_FILTER_V2 0x2
+
struct imx_ddr_devtype_data {
const char *identifier; /* system PMU identifier for userspace */
+ unsigned int filter_ver; /* AXI filter version */
};
struct ddr_pmu {
@@ -83,24 +96,27 @@ struct ddr_pmu {
static const struct imx_ddr_devtype_data imx91_devtype_data = {
.identifier = "imx91",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
};
static const struct imx_ddr_devtype_data imx93_devtype_data = {
.identifier = "imx93",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
};
static const struct imx_ddr_devtype_data imx95_devtype_data = {
.identifier = "imx95",
+ .filter_ver = DDR_PERF_AXI_FILTER_V2
};
-static inline bool is_imx93(struct ddr_pmu *pmu)
+static inline bool axi_filter_v1(struct ddr_pmu *pmu)
{
- return pmu->devtype_data == &imx93_devtype_data;
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V1;
}
-static inline bool is_imx95(struct ddr_pmu *pmu)
+static inline bool axi_filter_v2(struct ddr_pmu *pmu)
{
- return pmu->devtype_data == &imx95_devtype_data;
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V2;
}
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
@@ -155,7 +171,7 @@ static const struct attribute_group ddr_perf_cpumask_attr_group = {
struct imx9_pmu_events_attr {
struct device_attribute attr;
u64 id;
- const void *devtype_data;
+ const struct imx_ddr_devtype_data *devtype_data;
};
static ssize_t ddr_pmu_event_show(struct device *dev,
@@ -307,7 +323,8 @@ ddr_perf_events_attrs_is_visible(struct kobject *kobj,
if (!eattr->devtype_data)
return attr->mode;
- if (eattr->devtype_data != ddr_pmu->devtype_data)
+ if (eattr->devtype_data != ddr_pmu->devtype_data &&
+ eattr->devtype_data->filter_ver != ddr_pmu->devtype_data->filter_ver)
return 0;
return attr->mode;
@@ -624,11 +641,11 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
- if (is_imx93(pmu))
+ if (axi_filter_v1(pmu))
/* read trans, write trans, read beat */
imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
- if (is_imx95(pmu))
+ if (axi_filter_v2(pmu))
/* write beat, read beat2, read beat1, read beat */
imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index 3f3fb1de11f5..b879b81adfdd 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -180,20 +180,18 @@ MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);
static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *cpa_pmu)
{
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &cpa_pmu->sicl_id)) {
+ hisi_uncore_pmu_init_topology(cpa_pmu, &pdev->dev);
+
+ if (cpa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Can not read sicl-id\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &cpa_pmu->index_id)) {
+ if (cpa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id\n");
return -EINVAL;
}
- cpa_pmu->ccl_id = -1;
- cpa_pmu->sccl_id = -1;
cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cpa_pmu->base))
return PTR_ERR(cpa_pmu->base);
@@ -227,34 +225,11 @@ static const struct attribute_group hisi_cpa_pmu_events_group = {
.attrs = hisi_cpa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
- .attrs = hisi_cpa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_cpa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
- &hisi_cpa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_identifier_group = {
- .attrs = hisi_cpa_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
&hisi_cpa_pmu_format_group,
&hisi_cpa_pmu_events_group,
- &hisi_cpa_pmu_cpumask_attr_group,
- &hisi_cpa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -311,8 +286,8 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
- cpa_pmu->sicl_id, cpa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%d",
+ cpa_pmu->topo.sicl_id, cpa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -389,6 +364,7 @@ static void __exit hisi_cpa_pmu_module_exit(void)
}
module_exit(hisi_cpa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index a6ebf2ec99d3..7e490f8868f2 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -111,14 +111,14 @@ static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
* so there is no need to write event type, while it is programmable counter in
* PMU v2.
*/
-static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
u32 type)
{
u32 offset;
- if (hha_pmu->identifier >= HISI_PMU_V2) {
+ if (ddrc_pmu->identifier >= HISI_PMU_V2) {
offset = DDRC_V2_EVENT_TYPE + 4 * idx;
- writel(type, hha_pmu->base + offset);
+ writel(type, ddrc_pmu->base + offset);
}
}
@@ -297,23 +297,22 @@ MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
+ hisi_uncore_pmu_init_topology(ddrc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and DDRC channel ID to identify the
* DDRC PMU, while SCCL_ID is in MPIDR[aff2].
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
- &ddrc_pmu->index_id)) {
+ &ddrc_pmu->topo.index_id)) {
dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &ddrc_pmu->sccl_id)) {
+ if (ddrc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
return -EINVAL;
}
- /* DDRC PMUs only share the same SCCL */
- ddrc_pmu->ccl_id = -1;
ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddrc_pmu->base)) {
@@ -323,8 +322,7 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
if (ddrc_pmu->identifier >= HISI_PMU_V2) {
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &ddrc_pmu->sub_id)) {
+ if (ddrc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -382,42 +380,19 @@ static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
.attrs = hisi_ddrc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
- .attrs = hisi_ddrc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_ddrc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
- &hisi_ddrc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
- .attrs = hisi_ddrc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
&hisi_ddrc_pmu_v1_format_group,
&hisi_ddrc_pmu_v1_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
&hisi_ddrc_pmu_v2_format_group,
&hisi_ddrc_pmu_v2_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -501,13 +476,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
if (ddrc_pmu->identifier >= HISI_PMU_V2)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u_%u",
- ddrc_pmu->sccl_id, ddrc_pmu->index_id,
- ddrc_pmu->sub_id);
+ "hisi_sccl%d_ddrc%d_%d",
+ ddrc_pmu->topo.sccl_id, ddrc_pmu->topo.index_id,
+ ddrc_pmu->topo.sub_id);
else
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
- ddrc_pmu->index_id);
+ "hisi_sccl%d_ddrc%d", ddrc_pmu->topo.sccl_id,
+ ddrc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -575,10 +550,10 @@ static void __exit hisi_ddrc_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_ddrc_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
-
}
module_exit(hisi_ddrc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 32624872596f..ca609db86046 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -295,12 +295,13 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
unsigned long long id;
acpi_status status;
+ hisi_uncore_pmu_init_topology(hha_pmu, &pdev->dev);
+
/*
* Use SCCL_ID and UID to identify the HHA PMU, while
* SCCL_ID is in MPIDR[aff2].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &hha_pmu->sccl_id)) {
+ if (hha_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
return -EINVAL;
}
@@ -309,8 +310,7 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
* Early versions of the BIOS supported _UID by mistake, so we support
* both, preferring "hisilicon,idx-id" if available.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &hha_pmu->index_id)) {
+ if (hha_pmu->topo.index_id < 0) {
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
"_UID", NULL, &id);
if (ACPI_FAILURE(status)) {
@@ -318,10 +318,8 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
- hha_pmu->index_id = id;
+ hha_pmu->topo.index_id = id;
}
- /* HHA PMUs only share the same SCCL */
- hha_pmu->ccl_id = -1;
hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hha_pmu->base)) {
@@ -407,42 +405,19 @@ static const struct attribute_group hisi_hha_pmu_v2_events_group = {
.attrs = hisi_hha_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
- .attrs = hisi_hha_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_hha_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
- &hisi_hha_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_hha_pmu_identifier_group = {
- .attrs = hisi_hha_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
&hisi_hha_pmu_v1_format_group,
&hisi_hha_pmu_v1_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
&hisi_hha_pmu_v2_format_group,
&hisi_hha_pmu_v2_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -510,8 +485,8 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
- hha_pmu->sccl_id, hha_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_hha%d",
+ hha_pmu->topo.sccl_id, hha_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -581,6 +556,7 @@ static void __exit hisi_hha_pmu_module_exit(void)
}
module_exit(hisi_hha_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index c235b46ce873..412fc3a97963 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -355,18 +355,18 @@ MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
+ hisi_uncore_pmu_init_topology(l3c_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
* SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &l3c_pmu->sccl_id)) {
+ if (l3c_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &l3c_pmu->ccl_id)) {
+ if (l3c_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
return -EINVAL;
}
@@ -441,42 +441,19 @@ static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.attrs = hisi_l3c_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
- .attrs = hisi_l3c_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_l3c_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
- &hisi_l3c_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_l3c_pmu_identifier_group = {
- .attrs = hisi_l3c_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
&hisi_l3c_pmu_v1_format_group,
&hisi_l3c_pmu_v1_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
&hisi_l3c_pmu_v2_format_group,
&hisi_l3c_pmu_v2_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -544,8 +521,8 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
- l3c_pmu->sccl_id, l3c_pmu->ccl_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
if (!name)
return -ENOMEM;
@@ -615,6 +592,7 @@ static void __exit hisi_l3c_pmu_module_exit(void)
}
module_exit(hisi_l3c_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index c0f5d7c73e06..a0142684e379 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -269,25 +269,22 @@ static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
+ hisi_uncore_pmu_init_topology(pa_pmu, &pdev->dev);
+
/*
* As PA PMU is in a SICL, use the SICL_ID and the index ID
* to identify the PA PMU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &pa_pmu->sicl_id)) {
+ if (pa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Cannot read sicl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &pa_pmu->index_id)) {
+ if (pa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- pa_pmu->ccl_id = -1;
- pa_pmu->sccl_id = -1;
-
pa_pmu->dev_info = device_get_match_data(&pdev->dev);
if (!pa_pmu->dev_info)
return -ENODEV;
@@ -356,29 +353,6 @@ static const struct attribute_group hisi_h60pa_pmu_events_group = {
.attrs = hisi_h60pa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
- .attrs = hisi_pa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_pa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
- &hisi_pa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_identifier_group = {
- .attrs = hisi_pa_pmu_identifier_attrs,
-};
-
static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
.mask_offset = PA_INT_MASK,
.clear_offset = PA_INT_CLEAR,
@@ -388,8 +362,8 @@ static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v2_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -402,8 +376,8 @@ static const struct hisi_pmu_dev_info hisi_h32pa_v2 = {
static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v3_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -422,8 +396,8 @@ static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = {
static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_h60pa_pmu_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -488,9 +462,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%u",
- pa_pmu->sicl_id, pa_pmu->dev_info->name,
- pa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%d",
+ pa_pmu->topo.sicl_id, pa_pmu->dev_info->name,
+ pa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -569,6 +543,7 @@ static void __exit hisi_pa_pmu_module_exit(void)
}
module_exit(hisi_pa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 918cdc31de57..ef058b1dd509 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <asm/cputype.h>
#include <asm/local64.h>
@@ -34,7 +35,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
-EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_event_sysfs_show, "HISI_PMU");
/*
* sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
@@ -46,7 +47,52 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
-EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_cpumask_sysfs_show, "HISI_PMU");
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static ssize_t hisi_associated_cpus_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &hisi_pmu->associated_cpus);
+}
+static DEVICE_ATTR(associated_cpus, 0444, hisi_associated_cpus_sysfs_show, NULL);
+
+static struct attribute *hisi_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ &dev_attr_associated_cpus.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_cpumask_attr_group = {
+ .attrs = hisi_pmu_cpumask_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_cpumask_attr_group, "HISI_PMU");
+
+ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_identifier_attr_show, "HISI_PMU");
+
+static struct device_attribute hisi_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_pmu_identifier_attrs[] = {
+ &hisi_pmu_identifier_attr.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_identifier_group = {
+ .attrs = hisi_pmu_identifier_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_identifier_group, "HISI_PMU");
static bool hisi_validate_event_group(struct perf_event *event)
{
@@ -96,17 +142,7 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
return idx;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
-
-ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
-
- return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
-}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_get_event_idx, "HISI_PMU");
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
@@ -165,7 +201,7 @@ int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_irq, "HISI_PMU");
int hisi_uncore_pmu_event_init(struct perf_event *event)
{
@@ -219,7 +255,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_init, "HISI_PMU");
/*
* Set the counter to count the event that we're interested in,
@@ -273,7 +309,7 @@ void hisi_uncore_pmu_set_event_period(struct perf_event *event)
/* Write start value to the hardware event counter */
hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_set_event_period, "HISI_PMU");
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
@@ -294,7 +330,7 @@ void hisi_uncore_pmu_event_update(struct perf_event *event)
HISI_MAX_PERIOD(hisi_pmu->counter_bits);
local64_add(delta, &event->count);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_update, "HISI_PMU");
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
@@ -317,7 +353,7 @@ void hisi_uncore_pmu_start(struct perf_event *event, int flags)
hisi_uncore_pmu_enable_event(event);
perf_event_update_userpage(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_start, "HISI_PMU");
void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
{
@@ -334,7 +370,7 @@ void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
hisi_uncore_pmu_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_stop, "HISI_PMU");
int hisi_uncore_pmu_add(struct perf_event *event, int flags)
{
@@ -357,7 +393,7 @@ int hisi_uncore_pmu_add(struct perf_event *event, int flags)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_add, "HISI_PMU");
void hisi_uncore_pmu_del(struct perf_event *event, int flags)
{
@@ -369,14 +405,14 @@ void hisi_uncore_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_del, "HISI_PMU");
void hisi_uncore_pmu_read(struct perf_event *event)
{
/* Read hardware counter and update the perf counter statistics */
hisi_uncore_pmu_event_update(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_read, "HISI_PMU");
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
@@ -389,7 +425,7 @@ void hisi_uncore_pmu_enable(struct pmu *pmu)
hisi_pmu->ops->start_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_enable, "HISI_PMU");
void hisi_uncore_pmu_disable(struct pmu *pmu)
{
@@ -397,7 +433,7 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_disable, "HISI_PMU");
/*
@@ -444,22 +480,19 @@ static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
*/
static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
int sccl_id, ccl_id;
- /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
- if (hisi_pmu->sccl_id == -1)
- return true;
-
- if (hisi_pmu->ccl_id == -1) {
+ if (topo->ccl_id == -1) {
/* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
- return sccl_id == hisi_pmu->sccl_id;
+ return sccl_id == topo->sccl_id;
}
hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
- return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
+ return sccl_id == topo->sccl_id && ccl_id == topo->ccl_id;
}
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
@@ -467,13 +500,25 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
- if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
+ /*
+	 * If the CPU is not associated with the PMU, initialize hisi_pmu->on_cpu
+	 * based on NUMA locality if it hasn't been initialized yet. For PMUs
+	 * that do have associated CPUs, it will be updated later.
+ */
+ if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) {
+ if (hisi_pmu->on_cpu != -1)
+ return 0;
+
+ hisi_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(hisi_pmu->dev));
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(hisi_pmu->on_cpu)));
return 0;
+ }
cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
- /* If another CPU is already managing this PMU, simply return. */
- if (hisi_pmu->on_cpu != -1)
+ /* If another associated CPU is already managing this PMU, simply return. */
+ if (hisi_pmu->on_cpu != -1 &&
+ cpumask_test_cpu(hisi_pmu->on_cpu, &hisi_pmu->associated_cpus))
return 0;
/* Use this CPU in cpumask for event counting */
@@ -484,7 +529,7 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_online_cpu, "HISI_PMU");
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
@@ -492,9 +537,6 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
node);
unsigned int target;
- if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
- return 0;
-
/* Nothing to do if this CPU doesn't own the PMU */
if (hisi_pmu->on_cpu != cpu)
return 0;
@@ -502,10 +544,17 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
/* Give up ownership of the PMU */
hisi_pmu->on_cpu = -1;
- /* Choose a new CPU to migrate ownership of the PMU to */
+ /*
+	 * Migrate ownership of the PMU to a new CPU chosen from the PMU's
+	 * online associated CPUs if possible; if none of the associated CPUs
+	 * is online, migrate to any other online CPU.
+ */
target = cpumask_any_and_but(&hisi_pmu->associated_cpus,
cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
@@ -515,7 +564,36 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_offline_cpu, "HISI_PMU");
+
+/*
+ * Retrieve the topology information from the firmware for the hisi_pmu device.
+ * A topology ID will be -1 if it cannot be initialized, either because the
+ * PMU is not located at that level of the topology or because the firmware
+ * needs to be fixed.
+ */
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev)
+{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
+
+ topo->sccl_id = -1;
+ topo->ccl_id = -1;
+ topo->index_id = -1;
+ topo->sub_id = -1;
+
+ if (device_property_read_u32(dev, "hisilicon,scl-id", &topo->sccl_id))
+ dev_dbg(dev, "no scl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,ccl-id", &topo->ccl_id))
+ dev_dbg(dev, "no ccl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,idx-id", &topo->index_id))
+ dev_dbg(dev, "no idx-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,sub-id", &topo->sub_id))
+ dev_dbg(dev, "no sub-id present\n");
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_topology, "HISI_PMU");
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
@@ -535,7 +613,7 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
pmu->attr_groups = hisi_pmu->pmu_events.attr_groups;
pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
}
-EXPORT_SYMBOL_GPL(hisi_pmu_init);
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_init, "HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC uncore Performance Monitor driver framework");
MODULE_LICENSE("GPL v2");
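Since every export above now lives in the "HISI_PMU" symbol namespace, each consumer module must import it explicitly, as the driver modules later in this patch do. A minimal sketch of the requirement:

/*
 * Without this, modpost fails with something like:
 *   module hisi_uncore_sllc_pmu uses symbol hisi_pmu_init from namespace
 *   HISI_PMU, but does not import it.
 */
MODULE_IMPORT_NS("HISI_PMU");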
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 25b2d43b72bf..f4fed2544877 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -81,27 +81,55 @@ struct hisi_pmu_hwevents {
const struct attribute_group **attr_groups;
};
+/**
+ * struct hisi_pmu_topology - Describe the topology hierarchy on which the PMU
+ * is located.
+ * @sccl_id: ID of the SCCL on which the PMU is located.
+ * @sicl_id: ID of the SICL on which the PMU is located.
+ * @scl_id: ID used by cores which are unaware of the SCCL/SICL distinction.
+ * @ccl_id: ID of the CCL (CPU cluster) on which the PMU is located.
+ * @index_id: ID of the PMU module if there are several PMUs at a
+ * particular location in the topology.
+ * @sub_id: submodule ID of the PMU. For example, this is used for DDRC PMU v2,
+ * since each DDRC has more than one DMC.
+ *
+ * An ID will be -1 if the PMU isn't located at that level of the topology.
+ */
+struct hisi_pmu_topology {
+ /*
+ * SCCL (Super CPU Cluster) and SICL (Super I/O Cluster) are parallel,
+ * so a PMU cannot be located on both an SCCL and a SICL. If the SCCL/SICL
+ * distinction is not relevant, use scl_id instead.
+ */
+ union {
+ int sccl_id;
+ int sicl_id;
+ int scl_id;
+ };
+ int ccl_id;
+ int index_id;
+ int sub_id;
+};
+
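The three *cl_id members alias the same storage, so a driver fills in whichever name matches its hardware while generic code can read scl_id without caring. A minimal sketch, assuming an SICL-attached PMU and an illustrative device name:

static char *example_sicl_pmu_name(struct device *dev, struct hisi_pmu *pmu)
{
	/* sicl_id and scl_id read the same value thanks to the union */
	return devm_kasprintf(dev, GFP_KERNEL, "hisi_sicl%d_example%d",
			      pmu->topo.sicl_id, pmu->topo.index_id);
}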
/* Generic pmu struct for different pmu types */
struct hisi_pmu {
struct pmu pmu;
const struct hisi_uncore_ops *ops;
const struct hisi_pmu_dev_info *dev_info;
struct hisi_pmu_hwevents pmu_events;
- /* associated_cpus: All CPUs associated with the PMU */
+ struct hisi_pmu_topology topo;
+ /*
+ * CPUs associated with the PMU which are preferred for counting.
+ * May be empty if the PMU has no CPU association (e.g. a PMU on a SICL), in
+ * which case any online CPU will be used.
+ */
cpumask_t associated_cpus;
/* CPU used for counting */
int on_cpu;
int irq;
struct device *dev;
struct hlist_node node;
- int sccl_id;
- int sicl_id;
- int ccl_id;
void __iomem *base;
- /* the ID of the PMU modules */
- u32 index_id;
- /* For DDRC PMU v2: each DDRC has more than one DMC */
- u32 sub_id;
int num_counters;
int counter_bits;
/* check event code range */
@@ -109,6 +137,10 @@ struct hisi_pmu {
u32 identifier;
};
+/* Generic implementation of cpumask/identifier group */
+extern const struct attribute_group hisi_pmu_cpumask_attr_group;
+extern const struct attribute_group hisi_pmu_identifier_group;
+
int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
void hisi_uncore_pmu_read(struct perf_event *event);
int hisi_uncore_pmu_add(struct perf_event *event, int flags);
@@ -132,6 +164,7 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
char *page);
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index c5f4764ee888..dbd079016fc4 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -288,25 +288,22 @@ MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *sllc_pmu)
{
+ hisi_uncore_pmu_init_topology(sllc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and the index ID to identify the SLLC PMU,
* while SCCL_ID is from MPIDR_EL1 by CPU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &sllc_pmu->sccl_id)) {
+ if (sllc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Cannot read sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &sllc_pmu->index_id)) {
+ if (sllc_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- /* SLLC PMUs only share the same SCCL */
- sllc_pmu->ccl_id = -1;
-
sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sllc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n");
@@ -347,34 +344,11 @@ static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
.attrs = hisi_sllc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
- .attrs = hisi_sllc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_sllc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
- &hisi_sllc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_sllc_pmu_identifier_group = {
- .attrs = hisi_sllc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
&hisi_sllc_pmu_v2_format_group,
&hisi_sllc_pmu_v2_events_group,
- &hisi_sllc_pmu_cpumask_attr_group,
- &hisi_sllc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -433,8 +407,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
- sllc_pmu->sccl_id, sllc_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_sllc%d",
+ sllc_pmu->topo.sccl_id, sllc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -507,6 +481,7 @@ static void __exit hisi_sllc_pmu_module_exit(void)
}
module_exit(hisi_sllc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 481dcc9e8fbf..03cb9b564b99 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
-#include <linux/property.h>
#include "hisi_uncore_pmu.h"
@@ -366,25 +365,24 @@ static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
static int hisi_uc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *uc_pmu)
{
+ hisi_uncore_pmu_init_topology(uc_pmu, &pdev->dev);
+
/*
* Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
* identify the topology information of UC PMU devices in the chip.
* There are several CCLs per SCCL, and 4 UC PMUs per CCL.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &uc_pmu->sccl_id)) {
+ if (uc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &uc_pmu->ccl_id)) {
+ if (uc_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &uc_pmu->sub_id)) {
+ if (uc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -439,34 +437,11 @@ static const struct attribute_group hisi_uc_pmu_events_group = {
.attrs = hisi_uc_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
- .attrs = hisi_uc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_uc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
- &hisi_uc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_uc_pmu_identifier_group = {
- .attrs = hisi_uc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
&hisi_uc_pmu_format_group,
&hisi_uc_pmu_events_group,
- &hisi_uc_pmu_cpumask_attr_group,
- &hisi_uc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -538,8 +513,9 @@ static int hisi_uc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
- uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%d",
+ uc_pmu->topo.sccl_id, uc_pmu->topo.ccl_id,
+ uc_pmu->topo.sub_id);
if (!name)
return -ENOMEM;
@@ -613,6 +589,7 @@ static void __exit hisi_uc_pmu_module_exit(void)
}
module_exit(hisi_uc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 8860d9f687ae..039feded9152 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
+/*
+ * Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
*
- * Copyright (C) 2021 Marvell.
+ * Copyright (C) 2021-2024 Marvell.
*/
#include <linux/init.h>
@@ -14,24 +15,29 @@
#include <linux/platform_device.h>
/* Performance Counters Operating Mode Control Registers */
-#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
-#define OP_MODE_CTRL_VAL_MANNUAL 0x1
+#define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
+#define ODY_DDRC_PERF_CNT_OP_MODE_CTRL 0x20020
+#define OP_MODE_CTRL_VAL_MANUAL 0x1
/* Performance Counters Start Operation Control Registers */
-#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define CN10K_DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define ODY_DDRC_PERF_CNT_START_OP_CTRL 0x200A0
#define START_OP_CTRL_VAL_START 0x1ULL
#define START_OP_CTRL_VAL_ACTIVE 0x2
/* Performance Counters End Operation Control Registers */
-#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define CN10K_DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define ODY_DDRC_PERF_CNT_END_OP_CTRL 0x200E0
#define END_OP_CTRL_VAL_END 0x1ULL
/* Performance Counters End Status Registers */
-#define DDRC_PERF_CNT_END_STATUS 0x8038
+#define CN10K_DDRC_PERF_CNT_END_STATUS 0x8038
+#define ODY_DDRC_PERF_CNT_END_STATUS 0x20120
#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
/* Performance Counters Configuration Registers */
-#define DDRC_PERF_CFG_BASE 0x8040
+#define CN10K_DDRC_PERF_CFG_BASE 0x8040
+#define ODY_DDRC_PERF_CFG_BASE 0x20160
/* 8 Generic event counter + 2 fixed event counters */
#define DDRC_PERF_NUM_GEN_COUNTERS 8
@@ -42,18 +48,28 @@
DDRC_PERF_NUM_FIX_COUNTERS)
/* Generic event counter registers */
-#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
+#define DDRC_PERF_CFG(base, n) ((base) + 8 * (n))
#define EVENT_ENABLE BIT_ULL(63)
/* Two dedicated event counters for DDR reads and writes */
#define EVENT_DDR_READS 101
#define EVENT_DDR_WRITES 100
+#define DDRC_PERF_REG(base, n) ((base) + 8 * (n))
/*
* programmable events IDs in programmable event counters.
* DO NOT change these event-id numbers, they are used to
* program event bitmap in h/w.
*/
+#define EVENT_DFI_CMD_IS_RETRY 61
+#define EVENT_RD_UC_ECC_ERROR 60
+#define EVENT_RD_CRC_ERROR 59
+#define EVENT_CAPAR_ERROR 58
+#define EVENT_WR_CRC_ERROR 57
+#define EVENT_DFI_PARITY_POISON 56
+#define EVENT_RETRY_FIFO_FULL 46
+#define EVENT_DFI_CYCLES 45
+
#define EVENT_OP_IS_ZQLATCH 55
#define EVENT_OP_IS_ZQSTART 54
#define EVENT_OP_IS_TCR_MRR 53
@@ -102,28 +118,37 @@
#define EVENT_HIF_RD_OR_WR 1
/* Event counter value registers */
-#define DDRC_PERF_CNT_VALUE_BASE 0x8080
-#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
+#define CN10K_DDRC_PERF_CNT_VALUE_BASE 0x8080
+#define ODY_DDRC_PERF_CNT_VALUE_BASE 0x201C0
/* Fixed event counter enable/disable register */
-#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
+#define CN10K_DDRC_PERF_CNT_FREERUN_EN 0x80C0
#define DDRC_PERF_FREERUN_WRITE_EN 0x1
#define DDRC_PERF_FREERUN_READ_EN 0x2
/* Fixed event counter control register */
-#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define CN10K_DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define ODY_DDRC_PERF_CNT_FREERUN_CTRL 0x20240
#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
#define DDRC_FREERUN_READ_CNT_CLR 0x2
-/* Fixed event counter value register */
-#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
-#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+/* Fixed event counter clear register, defined only for Odyssey */
+#define ODY_DDRC_PERF_CNT_FREERUN_CLR 0x20248
+
#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
+/* Fixed event counter value register */
+#define CN10K_DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
+#define CN10K_DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+#define ODY_DDRC_PERF_CNT_VALUE_WR_OP 0x20250
+#define ODY_DDRC_PERF_CNT_VALUE_RD_OP 0x20258
+
struct cn10k_ddr_pmu {
struct pmu pmu;
void __iomem *base;
+ const struct ddr_pmu_platform_data *p_data;
+ const struct ddr_pmu_ops *ops;
unsigned int cpu;
struct device *dev;
int active_events;
@@ -132,8 +157,36 @@ struct cn10k_ddr_pmu {
struct hlist_node node;
};
+struct ddr_pmu_ops {
+ void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
+};
+
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
+struct ddr_pmu_platform_data {
+ u64 counter_overflow_val;
+ u64 counter_max_val;
+ u64 cnt_base;
+ u64 cfg_base;
+ u64 cnt_op_mode_ctrl;
+ u64 cnt_start_op_ctrl;
+ u64 cnt_end_op_ctrl;
+ u64 cnt_end_status;
+ u64 cnt_freerun_en;
+ u64 cnt_freerun_ctrl;
+ u64 cnt_freerun_clr;
+ u64 cnt_value_wr_op;
+ u64 cnt_value_rd_op;
+ bool is_cn10k;
+ bool is_ody;
+};
+
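With the offsets moved into per-platform data, DDRC_PERF_REG() reduces to plain base-plus-stride arithmetic over whichever register block the match data supplies. A worked sketch using the Odyssey defines above:

/* Counter 3's start-op control register on Odyssey: 0x200A0 + 8 * 3 = 0x200B8 */
u64 off = DDRC_PERF_REG(ODY_DDRC_PERF_CNT_START_OP_CTRL, 3);

writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base + off);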
static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -209,6 +262,85 @@ static struct attribute *cn10k_ddr_perf_events_attrs[] = {
NULL
};
+static struct attribute *odyssey_ddr_perf_events_attrs[] = {
+ /* Programmable */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_wr_data_access,
+ EVENT_DFI_WR_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_rd_data_access,
+ EVENT_DFI_RD_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
+ EVENT_HPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
+ EVENT_LPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
+ EVENT_WR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access,
+ EVENT_OP_IS_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access,
+ EVENT_OP_IS_RD_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr,
+ EVENT_PRECHARGE_FOR_RDWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
+ EVENT_PRECHARGE_FOR_OTHER),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown,
+ EVENT_OP_IS_ENTER_POWERDOWN),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cycles, EVENT_DFI_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_retry_fifo_full,
+ EVENT_RETRY_FIFO_FULL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_parity_poison,
+ EVENT_DFI_PARITY_POISON),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_crc_error, EVENT_WR_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_capar_error, EVENT_CAPAR_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_crc_error, EVENT_RD_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_uc_ecc_error, EVENT_RD_UC_ECC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cmd_is_retry, EVENT_DFI_CMD_IS_RETRY),
+ /* Free run event counters */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
+ NULL
+};
+
+static struct attribute_group odyssey_ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = odyssey_ddr_perf_events_attrs,
+};
+
static struct attribute_group cn10k_ddr_perf_events_attr_group = {
.name = "events",
.attrs = cn10k_ddr_perf_events_attrs,
@@ -254,6 +386,13 @@ static const struct attribute_group *cn10k_attr_groups[] = {
NULL,
};
+static const struct attribute_group *odyssey_attr_groups[] = {
+ &odyssey_ddr_perf_events_attr_group,
+ &cn10k_ddr_perf_format_attr_group,
+ &cn10k_ddr_perf_cpumask_attr_group,
+ NULL
+};
+
/* Default poll timeout is 100 sec, which is more than sufficient for
* a 48-bit counter incremented at a maximum of 5.6 GT/s, which would
* take many hours to overflow.
@@ -266,9 +405,18 @@ static ktime_t cn10k_ddr_pmu_timer_period(void)
return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC);
}
-static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
+static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap,
+ struct cn10k_ddr_pmu *ddr_pmu)
{
+ int err = 0;
+
switch (eventid) {
+ case EVENT_DFI_PARITY_POISON ... EVENT_DFI_CMD_IS_RETRY:
+ if (!ddr_pmu->p_data->is_ody) {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
*event_bitmap = (1ULL << (eventid - 1));
@@ -279,11 +427,12 @@ static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
*event_bitmap = (0xFULL << (eventid - 1));
break;
default:
- pr_err("%s Invalid eventid %d\n", __func__, eventid);
- return -EINVAL;
+ err = -EINVAL;
}
- return 0;
+ if (err)
+ pr_err("%s Invalid eventid %d\n", __func__, eventid);
+ return err;
}
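The guarded range plus fallthrough means an Odyssey-only event ID is rejected on CN10K but handled by the generic bitmap computation on Odyssey. A sketch of both outcomes:

u64 bitmap;
int err = ddr_perf_get_event_bitmap(EVENT_DFI_CMD_IS_RETRY, &bitmap, ddr_pmu);

/* CN10K: err == -EINVAL (event 61 is Odyssey-only) */
/* Odyssey: err == 0 and bitmap == 1ULL << 60, via the fallthrough */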
static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
@@ -351,9 +500,33 @@ static int cn10k_ddr_perf_event_init(struct perf_event *event)
return 0;
}
+static void cn10k_ddr_perf_counter_start(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_start_op_ctrl;
+
+ writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
+static void cn10k_ddr_perf_counter_stop(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_end_op_ctrl;
+
+ writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
int counter, bool enable)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl;
+ const struct ddr_pmu_ops *ops = pmu->ops;
+ bool is_ody = pmu->p_data->is_ody;
u32 reg;
u64 val;
@@ -363,7 +536,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
}
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
- reg = DDRC_PERF_CFG(counter);
+ reg = DDRC_PERF_CFG(p_data->cfg_base, counter);
val = readq_relaxed(pmu->base + reg);
if (enable)
@@ -372,40 +545,52 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
val &= ~EVENT_ENABLE;
writeq_relaxed(val, pmu->base + reg);
- } else {
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
- if (enable) {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val |= DDRC_PERF_FREERUN_READ_EN;
- else
- val |= DDRC_PERF_FREERUN_WRITE_EN;
- } else {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val &= ~DDRC_PERF_FREERUN_READ_EN;
- else
- val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ if (is_ody) {
+ if (enable) {
+ /*
+ * Setup the PMU counter to work in
+ * manual mode
+ */
+ reg = DDRC_PERF_REG(ctrl_reg, counter);
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL,
+ pmu->base + reg);
+
+ cn10k_ddr_perf_counter_start(pmu, counter);
+ } else {
+ cn10k_ddr_perf_counter_stop(pmu, counter);
+ }
}
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ } else {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ ops->enable_read_freerun_counter(pmu, enable);
+ else
+ ops->enable_write_freerun_counter(pmu, enable);
}
}
static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
u64 val;
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_rd_op);
if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_wr_op);
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
+ val = readq_relaxed(pmu->base +
+ DDRC_PERF_REG(p_data->cnt_base, counter));
return val;
}
static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
struct hw_perf_event *hwc = &event->hw;
u64 prev_count, new_count, mask;
@@ -414,7 +599,7 @@ static void cn10k_ddr_perf_event_update(struct perf_event *event)
new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
} while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
- mask = DDRC_PERF_CNT_MAX_VALUE;
+ mask = p_data->counter_max_val;
local64_add((new_count - prev_count) & mask, &event->count);
}
@@ -435,6 +620,8 @@ static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct hw_perf_event *hwc = &event->hw;
u8 config = event->attr.config;
int counter, ret;
@@ -454,8 +641,8 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
/* Generic counters, configure event id */
- reg_offset = DDRC_PERF_CFG(counter);
- ret = ddr_perf_get_event_bitmap(config, &val);
+ reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter);
+ ret = ddr_perf_get_event_bitmap(config, &val, pmu);
if (ret)
return ret;
@@ -463,11 +650,9 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
} else {
/* fixed event counter, clear counter value */
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val = DDRC_FREERUN_READ_CNT_CLR;
+ ops->clear_read_freerun_counter(pmu);
else
- val = DDRC_FREERUN_WRITE_CNT_CLR;
-
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
+ ops->clear_write_freerun_counter(pmu);
}
hwc->state |= PERF_HES_STOPPED;
@@ -512,17 +697,19 @@ static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
- DDRC_PERF_CNT_START_OP_CTRL);
+ p_data->cnt_start_op_ctrl);
}
static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
- DDRC_PERF_CNT_END_OP_CTRL);
+ p_data->cnt_end_op_ctrl);
}
static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
@@ -547,8 +734,123 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
}
}
+static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+ cn10k_ddr_perf_event_update_all(pmu);
+ cn10k_ddr_perf_pmu_disable(&pmu->pmu);
+ cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+}
+
+static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_overflow_handler(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+ /*
+ * When a counter reaches its maximum value it freezes there. The
+ * corresponding event is updated, and the counter is stopped and
+ * started again so that it resumes counting from zero.
+ */
+ cn10k_ddr_perf_event_update(pmu->events[evt_idx]);
+ cn10k_ddr_perf_counter_stop(pmu, evt_idx);
+ cn10k_ddr_perf_counter_start(pmu, evt_idx);
+}
+
static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct perf_event *event;
struct hw_perf_event *hwc;
u64 prev_count, new_count;
@@ -586,11 +888,9 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
continue;
value = cn10k_ddr_perf_read_counter(pmu, i);
- if (value == DDRC_PERF_CNT_MAX_VALUE) {
+ if (value == p_data->counter_max_val) {
pr_info("Counter-(%d) reached max value\n", i);
- cn10k_ddr_perf_event_update_all(pmu);
- cn10k_ddr_perf_pmu_disable(&pmu->pmu);
- cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+ ops->pmu_overflow_handler(pmu, i);
}
}
@@ -629,11 +929,68 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
+static const struct ddr_pmu_ops ddr_pmu_ops = {
+ .enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
+ .pmu_overflow_handler = ddr_pmu_overflow_handler,
+};
+
+#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
+static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
+ .counter_overflow_val = BIT_ULL(48),
+ .counter_max_val = GENMASK_ULL(48, 0),
+ .cnt_base = CN10K_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = CN10K_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = CN10K_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = CN10K_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = CN10K_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = CN10K_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = CN10K_DDRC_PERF_CNT_FREERUN_EN,
+ .cnt_freerun_ctrl = CN10K_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = 0,
+ .cnt_value_wr_op = CN10K_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = CN10K_DDRC_PERF_CNT_VALUE_RD_OP,
+ .is_cn10k = true,
+};
+#endif
+
+static const struct ddr_pmu_ops ddr_pmu_ody_ops = {
+ .enable_read_freerun_counter = ddr_pmu_ody_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_ody_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_ody_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_ody_write_clear_freerun,
+ .pmu_overflow_handler = ddr_pmu_ody_overflow_handler,
+};
+
+#ifdef CONFIG_ACPI
+static const struct ddr_pmu_platform_data odyssey_ddr_pmu_pdata = {
+ .counter_overflow_val = 0,
+ .counter_max_val = GENMASK_ULL(63, 0),
+ .cnt_base = ODY_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = ODY_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = ODY_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = ODY_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = ODY_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = ODY_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = 0,
+ .cnt_freerun_ctrl = ODY_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = ODY_DDRC_PERF_CNT_FREERUN_CLR,
+ .cnt_value_wr_op = ODY_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = ODY_DDRC_PERF_CNT_VALUE_RD_OP,
+ .is_ody = true,
+};
+#endif
+
static int cn10k_ddr_perf_probe(struct platform_device *pdev)
{
+ const struct ddr_pmu_platform_data *dev_data;
struct cn10k_ddr_pmu *ddr_pmu;
struct resource *res;
void __iomem *base;
+ bool is_cn10k;
+ bool is_ody;
char *name;
int ret;
@@ -644,30 +1001,60 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
ddr_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, ddr_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
ddr_pmu->base = base;
- /* Setup the PMU counter to work in manual mode */
- writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
- DDRC_PERF_CNT_OP_MODE_CTRL);
-
- ddr_pmu->pmu = (struct pmu) {
- .module = THIS_MODULE,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- .task_ctx_nr = perf_invalid_context,
- .attr_groups = cn10k_attr_groups,
- .event_init = cn10k_ddr_perf_event_init,
- .add = cn10k_ddr_perf_event_add,
- .del = cn10k_ddr_perf_event_del,
- .start = cn10k_ddr_perf_event_start,
- .stop = cn10k_ddr_perf_event_stop,
- .read = cn10k_ddr_perf_event_update,
- .pmu_enable = cn10k_ddr_perf_pmu_enable,
- .pmu_disable = cn10k_ddr_perf_pmu_disable,
- };
+ ddr_pmu->p_data = dev_data;
+ is_cn10k = ddr_pmu->p_data->is_cn10k;
+ is_ody = ddr_pmu->p_data->is_ody;
+
+ if (is_cn10k) {
+ ddr_pmu->ops = &ddr_pmu_ops;
+ /* Setup the PMU counter to work in manual mode */
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base +
+ ddr_pmu->p_data->cnt_op_mode_ctrl);
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cn10k_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ .pmu_enable = cn10k_ddr_perf_pmu_enable,
+ .pmu_disable = cn10k_ddr_perf_pmu_disable,
+ };
+ }
+
+ if (is_ody) {
+ ddr_pmu->ops = &ddr_pmu_ody_ops;
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = odyssey_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ };
+ }
/* Choose this cpu to collect perf data */
ddr_pmu->cpu = raw_smp_processor_id();
@@ -688,7 +1075,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (ret)
goto error;
- pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
+ pr_info("DDR PMU Driver for ddrc@%llx\n", res->start);
return 0;
error:
cpuhp_state_remove_instance_nocalls(
@@ -710,7 +1097,7 @@ static void cn10k_ddr_perf_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-ddr-pmu", },
+ { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
@@ -718,7 +1105,8 @@ MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
- {"MRVL000A", 0},
+ {"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata },
+ {"MRVL000C", (kernel_ulong_t)&odyssey_ddr_pmu_pdata},
{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index cda55ee35eee..51ccb0befa05 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -37,6 +37,15 @@ struct tad_pmu {
DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
};
+enum mrvl_tad_pmu_version {
+ TAD_PMU_V1 = 1,
+ TAD_PMU_V2,
+};
+
+struct tad_pmu_data {
+ int id;
+};
+
static int tad_pmu_cpuhp_state;
static void tad_pmu_event_counter_read(struct perf_event *event)
@@ -214,6 +223,24 @@ static const struct attribute_group tad_pmu_events_attr_group = {
.attrs = tad_pmu_event_attrs,
};
+static struct attribute *ody_tad_pmu_event_attrs[] = {
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
+ TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
+ TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
+ TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
+ TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
+ TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
+ TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
+ TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
+ TAD_PMU_EVENT_ATTR(tad_tot_cycle, 0xFF),
+ NULL
+};
+
+static const struct attribute_group ody_tad_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ody_tad_pmu_event_attrs,
+};
+
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *tad_pmu_format_attrs[] = {
@@ -252,8 +279,16 @@ static const struct attribute_group *tad_pmu_attr_groups[] = {
NULL
};
+static const struct attribute_group *ody_tad_pmu_attr_groups[] = {
+ &ody_tad_pmu_events_attr_group,
+ &tad_pmu_format_attr_group,
+ &tad_pmu_cpumask_attr_group,
+ NULL
+};
+
static int tad_pmu_probe(struct platform_device *pdev)
{
+ const struct tad_pmu_data *dev_data;
struct device *dev = &pdev->dev;
struct tad_region *regions;
struct tad_pmu *tad_pmu;
@@ -261,6 +296,7 @@ static int tad_pmu_probe(struct platform_device *pdev)
u32 tad_pmu_page_size;
u32 tad_page_size;
u32 tad_cnt;
+ int version;
int i, ret;
char *name;
@@ -270,6 +306,13 @@ static int tad_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tad_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+ version = dev_data->id;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Mem resource not found\n");
@@ -319,7 +362,6 @@ static int tad_pmu_probe(struct platform_device *pdev)
tad_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
- .attr_groups = tad_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE |
PERF_PMU_CAP_NO_INTERRUPT,
.task_ctx_nr = perf_invalid_context,
@@ -332,6 +374,11 @@ static int tad_pmu_probe(struct platform_device *pdev)
.read = tad_pmu_event_counter_read,
};
+ if (version == TAD_PMU_V1)
+ tad_pmu->pmu.attr_groups = tad_pmu_attr_groups;
+ else
+ tad_pmu->pmu.attr_groups = ody_tad_pmu_attr_groups;
+
tad_pmu->cpu = raw_smp_processor_id();
/* Register pmu instance for cpu hotplug */
@@ -360,16 +407,29 @@ static void tad_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
}
+#if defined(CONFIG_OF) || defined(CONFIG_ACPI)
+static const struct tad_pmu_data tad_pmu_data = {
+ .id = TAD_PMU_V1,
+};
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct tad_pmu_data tad_pmu_v2_data = {
+ .id = TAD_PMU_V2,
+};
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-tad-pmu", },
+ { .compatible = "marvell,cn10k-tad-pmu", .data = &tad_pmu_data },
{},
};
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id tad_pmu_acpi_match[] = {
- {"MRVL000B", 0},
+ {"MRVL000B", (kernel_ulong_t)&tad_pmu_data},
+ {"MRVL000D", (kernel_ulong_t)&tad_pmu_v2_data},
{},
};
MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 1aa303f76cc7..194c153e5d71 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -507,8 +507,7 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
- u64 raw_config_val;
- int ret;
+ int ret = -ENOENT;
/*
* Ensure we are finished checking standard hardware events for
@@ -528,21 +527,23 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
case PERF_TYPE_RAW:
/*
* As per SBI specification, the upper 16 bits must be unused
- * for a raw event.
+ * for a hardware raw event.
* Bits 63:62 are used to distinguish between raw events
* 00 - Hardware raw event
* 10 - SBI firmware event
* 11 - RISC-V platform-specific firmware event
*/
- raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
+
switch (config >> 62) {
case 0:
- ret = RISCV_PMU_RAW_EVENT_IDX;
- *econfig = raw_config_val;
+ /* Return an error if any of bits [48-63] are set, as the spec does not allow it */
+ if (!(config & ~RISCV_PMU_RAW_EVENT_MASK)) {
+ *econfig = config & RISCV_PMU_RAW_EVENT_MASK;
+ ret = RISCV_PMU_RAW_EVENT_IDX;
+ }
break;
case 2:
- ret = (raw_config_val & 0xFFFF) |
- (SBI_PMU_EVENT_TYPE_FW << 16);
+ ret = (config & 0xFFFF) | (SBI_PMU_EVENT_TYPE_FW << 16);
break;
case 3:
/*
@@ -551,12 +552,13 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
* Event data - raw event encoding
*/
ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
- *econfig = raw_config_val;
+ *econfig = config & RISCV_PMU_PLAT_FW_EVENT_MASK;
+ break;
+ default:
break;
}
break;
default:
- ret = -ENOENT;
break;
}
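For reference, a user-space sketch of the bits-63:62 encoding that this mapper now validates (needs <linux/perf_event.h>; the firmware event ID is illustrative):

struct perf_event_attr attr = {
	.type = PERF_TYPE_RAW,
	.size = sizeof(attr),
	/* 0b00 in bits 63:62: hardware raw event, bits [48:63] must be zero */
	.config = 0x123,
};

/* 0b10 in bits 63:62: SBI firmware event, low 16 bits carry the event ID */
attr.config = (2ULL << 62) | 0x6;	/* event ID 0x6 chosen for illustration */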
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index d66c3a3e8429..b96e6368a956 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -86,6 +86,7 @@ const struct regmap_config mcp23x08_regmap = {
.num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults),
.cache_type = REGCACHE_FLAT,
.max_register = MCP_OLAT,
+ .disable_locking = true, /* mcp->lock protects the regmap */
};
EXPORT_SYMBOL_GPL(mcp23x08_regmap);
@@ -132,6 +133,7 @@ const struct regmap_config mcp23x17_regmap = {
.num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
.cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .disable_locking = true, /* mcp->lock protects the regmap */
};
EXPORT_SYMBOL_GPL(mcp23x17_regmap);
@@ -228,7 +230,9 @@ static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
+ mutex_lock(&mcp->lock);
ret = mcp_read(mcp, MCP_GPPU, &data);
+ mutex_unlock(&mcp->lock);
if (ret < 0)
return ret;
status = (data & BIT(pin)) ? 1 : 0;
@@ -257,7 +261,9 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
+ mutex_lock(&mcp->lock);
ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
+ mutex_unlock(&mcp->lock);
break;
default:
dev_dbg(mcp->dev, "Invalid config param %04x\n", param);
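With regmap's internal locking disabled, every regmap access in this driver must now run under mcp->lock, which is the pattern the two hunks above establish. A condensed sketch of that pattern, reusing the driver's own accessors:

mutex_lock(&mcp->lock);
ret = mcp_read(mcp, MCP_GPPU, &data);		/* regmap access, externally locked */
if (!ret)
	ret = mcp_set_bit(mcp, MCP_GPPU, pin, true);
mutex_unlock(&mcp->lock);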
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 26b878ee5191..a254debb9256 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -947,6 +947,10 @@ static int amd_pmc_suspend_handler(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ /*
+ * This must be called from the same set of dev_pm_ops handlers that
+ * i8042_pm_suspend() is called from: currently only .suspend.
+ */
if (pdev->disable_8042_wakeup && !disable_workarounds) {
int rc = amd_pmc_wa_irq1(pdev);
@@ -959,7 +963,9 @@ static int amd_pmc_suspend_handler(struct device *dev)
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
+static const struct dev_pm_ops amd_pmc_pm = {
+ .suspend = amd_pmc_suspend_handler,
+};
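Open-coding the dev_pm_ops restricts the i8042 workaround to the .suspend path. For contrast, a sketch of what the replaced macro effectively wired up, including the hibernation callbacks the comment above rules out:

/* Roughly what DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, fn, NULL) produced: */
static const struct dev_pm_ops amd_pmc_pm_old = {
	.suspend  = amd_pmc_suspend_handler,
	.freeze   = amd_pmc_suspend_handler,	/* hibernation path: must be avoided */
	.poweroff = amd_pmc_suspend_handler,	/* hibernation path: must be avoided */
};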
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index 6e5dc7e3674f..bcc5c0f3bb4d 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -283,6 +283,9 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
init_waitqueue_head(&dell_bl->wait_queue);
dell_bl->dev = dev;
+ serdev_device_set_drvdata(serdev, dell_bl);
+ serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
@@ -290,8 +293,6 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
/* 9600 bps, no flow control, these are the default but set them to be sure */
serdev_device_set_baudrate(serdev, 9600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, dell_bl);
- serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
get_version[0] = DELL_SOF(GET_CMD_LEN);
get_version[1] = CMD_GET_VERSION;
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 81ccc96ffe40..20c55bab3b8c 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -64,7 +64,7 @@ static const char * const omen_thermal_profile_boards[] = {
"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
- "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42"
+ "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42", "8A15"
};
/* DMI Board names of Omen laptops that are specifically set to be thermal
@@ -80,7 +80,7 @@ static const char * const omen_thermal_profile_force_v0_boards[] = {
* "balanced" when reaching zero.
*/
static const char * const omen_timed_thermal_profile_boards[] = {
- "8BAD", "8A42"
+ "8BAD", "8A42", "8A15"
};
/* DMI Board names of Victus laptops */
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
index 50ebfd586d3f..739569803017 100644
--- a/drivers/platform/x86/intel/pmc/core_ssram.c
+++ b/drivers/platform/x86/intel/pmc/core_ssram.c
@@ -269,8 +269,12 @@ pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
/*
* The secondary PMC BARs (which are behind hidden PCI devices)
* are read from fixed offsets in MMIO of the primary PMC BAR.
+ * If a device is not present, the value will be 0.
*/
ssram_base = get_base(tmp_ssram, offset);
+ if (!ssram_base)
+ return 0;
+
ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
if (!ssram)
return -ENOMEM;
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 1e46e30dae96..dbcd3087aaa4 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -804,6 +804,7 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
static const struct x86_cpu_id isst_cpu_ids[] = {
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED),
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
index 603e7ded06a9..2f01cd22a6ee 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.c
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -81,6 +81,7 @@ static const struct x86_cpu_id tpmi_cpu_ids[] = {
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, NULL),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, NULL),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, NULL),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL),
X86_MATCH_VFM(INTEL_PANTHERCOVE_X, NULL),
{}
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
index d2699ca24f34..a96b215cd2c5 100644
--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -199,14 +199,15 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
if (ret)
return ret;
+ serdev_device_set_drvdata(serdev, fc);
+ serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
serdev_device_set_baudrate(serdev, 600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, fc);
- serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
ret = devm_extcon_register_notifier_all(dev, fc->extcon, &fc->nb);
if (ret)
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 671021cd1f59..9c7f30a47f1f 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -6237,6 +6237,7 @@ fail_pci_set_dma_mask:
fail_pci_request_regions:
pci_disable_device(pci_dev);
fail_pci_enable_device:
+ pci_dev_put(pci_dev);
return err;
}
@@ -6247,6 +6248,7 @@ mlxplat_pci_fpga_device_exit(struct pci_dev *pci_bridge,
iounmap(pci_bridge_addr);
pci_release_regions(pci_bridge);
pci_disable_device(pci_bridge);
+ pci_dev_put(pci_bridge);
}
static int
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 6371a9f765c1..2cfb2ac3f465 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -184,7 +184,8 @@ enum tpacpi_hkey_event_t {
*/
TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
TP_HKEY_EV_DOUBLETAP_TOGGLE = 0x131c, /* Toggle trackpoint doubletap on/off */
- TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile */
+ TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile in 2024 systems */
+ TP_HKEY_EV_PROFILE_TOGGLE2 = 0x1401, /* Toggle platform profile in 2025+ systems */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -11200,6 +11201,7 @@ static bool tpacpi_driver_event(const unsigned int hkey_event)
tp_features.trackpoint_doubletap = !tp_features.trackpoint_doubletap;
return true;
case TP_HKEY_EV_PROFILE_TOGGLE:
+ case TP_HKEY_EV_PROFILE_TOGGLE2:
platform_profile_cycle();
return true;
}
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index bb11f467dc78..20a9efebbcb7 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -2142,6 +2142,11 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
return 0;
}
+static void genpd_provider_release(struct device *dev)
+{
+ /* nothing to be done here */
+}
+
static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
struct genpd_governor_data *gd = NULL;
@@ -2173,6 +2178,7 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
genpd->gd = gd;
device_initialize(&genpd->dev);
+ genpd->dev.release = genpd_provider_release;
if (!genpd_is_dev_name_fw(genpd)) {
dev_set_name(&genpd->dev, "%s", genpd->name);
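The empty release callback satisfies the driver core's requirement that any device passed through device_initialize() provides one. A short sketch of the effect when the provider is torn down:

/*
 * Without a release callback the driver core complains on the final
 * put: "Device 'xxx' does not have a release() function, it is broken
 * and must be fixed." With the stub, the last reference drops cleanly.
 */
put_device(&genpd->dev);	/* ends up calling genpd_provider_release() */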
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index e67ecf99ef84..9bdb80fd7210 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1458,12 +1458,12 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
.max_register = SZ_4K,
};
struct device *dev = &pdev->dev;
- struct device_node *pgc_np;
+ struct device_node *pgc_np __free(device_node) =
+ of_get_child_by_name(dev->of_node, "pgc");
struct regmap *regmap;
void __iomem *base;
int ret;
- pgc_np = of_get_child_by_name(dev->of_node, "pgc");
if (!pgc_np) {
dev_err(dev, "No power domains specified in DT\n");
return -EINVAL;
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index e3a0f64c144c..3668fe66b22c 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -770,7 +770,7 @@ static void imx8mp_blk_ctrl_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
- for (i = 0; bc->onecell_data.num_domains; i++) {
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 12d0535a874b..8a7f167e405e 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -176,6 +176,7 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
vdev->dev.parent = dev;
priv->vdev = vdev;
+ device_set_of_node_from_dev(&vdev->dev, dev);
error = platform_device_add(vdev);
if (error)
goto err_device_put;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 6da47a65af61..28e92fad0ca1 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -56,7 +56,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
block->tag_set.nr_hw_queues = nr_hw_queues;
block->tag_set.queue_depth = queue_depth;
- block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 3fcfe029db1b..91bbe9d2e5ac 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -461,7 +461,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev->tag_set.cmd_size = sizeof(blk_status_t);
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
- bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
bdev->tag_set.numa_node = NUMA_NO_NODE;
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6c91e422927f..07a6e8a7f05a 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -85,13 +85,6 @@ typedef unsigned int sclp_cmdw_t;
typedef u64 sccb_mask_t;
-struct sccb_header {
- u16 length;
- u8 function_code;
- u8 control_mask[3];
- u16 response_code;
-} __attribute__((packed));
-
struct init_sccb {
struct sccb_header header;
u16 _reserved;
@@ -196,7 +189,9 @@ struct read_info_sccb {
u8 byte_134; /* 134 */
u8 cpudirq; /* 135 */
u16 cbl; /* 136-137 */
- u8 _pad_138[EXT_SCCB_READ_SCP - 138];
+ u8 byte_138; /* 138 */
+ u8 byte_139; /* 139 */
+ u8 _pad_140[EXT_SCCB_READ_SCP - 140];
} __packed __aligned(PAGE_SIZE);
struct read_storage_sccb {
@@ -238,13 +233,6 @@ struct gds_vector {
u16 gds_id;
} __attribute__((packed));
-struct evbuf_header {
- u16 length;
- u8 type;
- u8 flags;
- u16 _reserved;
-} __attribute__((packed));
-
struct sclp_req {
struct list_head list; /* list_head for request queueing. */
sclp_cmdw_t command; /* sclp command to execute */
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index f56ea9b60e08..ae5d28987177 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -128,7 +128,7 @@ out:
}
static ssize_t sysfs_ofb_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
int rc;
@@ -142,7 +142,7 @@ static const struct bin_attribute ofb_bin_attr = {
.name = "event_data",
.mode = S_IWUSR,
},
- .write = sysfs_ofb_data_write,
+ .write_new = sysfs_ofb_data_write,
};
#endif
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 29156455970e..d9d6edaf8de8 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -55,6 +55,7 @@ static void __init sclp_early_facilities_detect(void)
if (sccb->fac91 & 0x40)
get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_GUEST;
sclp.has_diag204_bif = !!(sccb->fac98 & 0x80);
+ sclp.has_diag310 = !!(sccb->fac91 & 0x80);
if (sccb->cpuoff > 134) {
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
sclp.has_diag320 = !!(sccb->byte_134 & 0x04);
@@ -64,6 +65,8 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_sipl = !!(sccb->cbl & 0x4000);
sclp.has_sipl_eckd = !!(sccb->cbl & 0x2000);
}
+ if (sccb->cpuoff > 139)
+ sclp.has_diag324 = !!(sccb->byte_139 & 0x80);
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
sclp.rzm <<= 20;
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index c3466a8c56bb..56400886f7fc 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -24,30 +24,11 @@
#define SCLP_ATYPE_PCI 2
-#define SCLP_ERRNOTIFY_AQ_RESET 0
-#define SCLP_ERRNOTIFY_AQ_REPAIR 1
-#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
-#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3
-
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
.send_mask = EVTYP_ERRNOTIFY_MASK,
};
-struct err_notify_evbuf {
- struct evbuf_header header;
- u8 action;
- u8 atype;
- u32 fh;
- u32 fid;
- u8 data[];
-} __packed;
-
-struct err_notify_sccb {
- struct sccb_header header;
- struct err_notify_evbuf evbuf;
-} __packed;
-
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
index c2dc9aadb7d2..8524c14affed 100644
--- a/drivers/s390/char/sclp_sd.c
+++ b/drivers/s390/char/sclp_sd.c
@@ -476,7 +476,7 @@ static struct kobj_type sclp_sd_file_ktype = {
* on EOF.
*/
static ssize_t data_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buffer,
+ const struct bin_attribute *attr, char *buffer,
loff_t off, size_t size)
{
struct sclp_sd_file *sd_file = to_sd_file(kobj);
@@ -539,7 +539,7 @@ static __init struct sclp_sd_file *sclp_sd_file_create(const char *name, u8 di)
sysfs_bin_attr_init(&sd_file->data_attr);
sd_file->data_attr.attr.name = "data";
sd_file->data_attr.attr.mode = 0444;
- sd_file->data_attr.read = data_read;
+ sd_file->data_attr.read_new = data_read;
rc = sysfs_create_bin_file(&sd_file->kobj, &sd_file->data_attr);
if (rc) {
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index acd6790dba4d..61c07b4a0fe8 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -445,7 +445,7 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
- return cdev->private->dma_area->senseid.ciw + ciw_cnt;
+ return &cdev->private->dma_area->senseid.ciw[ciw_cnt];
return NULL;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 641f0dbb65a9..4bd4c00c9c0c 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -210,11 +210,10 @@ struct qdio_q {
qdio_handler_t (*handler);
struct qdio_irq *irq_ptr;
+
+ /* memory page (PAGE_SIZE) used to place slib and sl on */
+ void *sl_page;
struct sl *sl;
- /*
- * A page is allocated under this pointer and used for slib and sl.
- * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
- */
struct slib *slib;
} __attribute__ ((aligned(256)));
@@ -266,7 +265,7 @@ struct qdio_irq {
#define is_thinint_irq(irq) \
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
- css_general_characteristics.aif_osa)
+ css_general_characteristics.aif_qdio)
#define qperf(__qdev, __attr) ((__qdev)->perf_stat.__attr)
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 99c0fd23022d..ea09aadaae4e 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -83,7 +83,7 @@ static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
for (i = 0; i < count; i++) {
q = queues[i];
- free_page((unsigned long) q->slib);
+ free_page((unsigned long)q->sl_page);
kmem_cache_free(qdio_q_cache, q);
}
}
@@ -109,12 +109,16 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
- q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
- if (!q->slib) {
+ q->sl_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!q->sl_page) {
kmem_cache_free(qdio_q_cache, q);
__qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
}
+ q->slib = q->sl_page;
+ /* As per architecture: SLIB is 2K bytes long, and SL 1K. */
+ q->sl = (struct sl *)(q->slib + 1);
+
irq_ptr_qs[i] = q;
}
return 0;
@@ -142,11 +146,15 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
- struct slib *slib = q->slib;
+ struct slib *const slib = q->slib;
+ void *const sl_page = q->sl_page;
+ struct sl *const sl = q->sl;
/* queue must be cleared for qdio_establish */
memset(q, 0, sizeof(*q));
- memset(slib, 0, PAGE_SIZE);
+ memset(sl_page, 0, PAGE_SIZE);
+ q->sl_page = sl_page;
+ q->sl = sl;
q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
@@ -161,7 +169,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
int j;
DBF_HEX(&q, sizeof(void *));
- q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
@@ -423,7 +430,7 @@ int __init qdio_setup_init(void)
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
- (css_general_characteristics.aif_osa) ? 1 : 0);
+ (css_general_characteristics.aif_qdio) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
index a4eb45803f5e..57edc97bafd2 100644
--- a/drivers/s390/crypto/pkey_sysfs.c
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -184,7 +184,7 @@ static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
static ssize_t protkey_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -194,7 +194,7 @@ static ssize_t protkey_aes_128_read(struct file *filp,
static ssize_t protkey_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -204,7 +204,7 @@ static ssize_t protkey_aes_192_read(struct file *filp,
static ssize_t protkey_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -214,7 +214,7 @@ static ssize_t protkey_aes_256_read(struct file *filp,
static ssize_t protkey_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -224,7 +224,7 @@ static ssize_t protkey_aes_128_xts_read(struct file *filp,
static ssize_t protkey_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -234,7 +234,7 @@ static ssize_t protkey_aes_256_xts_read(struct file *filp,
static ssize_t protkey_aes_xts_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -244,7 +244,7 @@ static ssize_t protkey_aes_xts_128_read(struct file *filp,
static ssize_t protkey_aes_xts_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -254,7 +254,7 @@ static ssize_t protkey_aes_xts_256_read(struct file *filp,
static ssize_t protkey_hmac_512_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -264,7 +264,7 @@ static ssize_t protkey_hmac_512_read(struct file *filp,
static ssize_t protkey_hmac_1024_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -272,17 +272,17 @@ static ssize_t protkey_hmac_1024_read(struct file *filp,
buf, off, count);
}
-static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
-static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
-static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
-static BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
-
-static struct bin_attribute *protkey_attrs[] = {
+static const BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
+static const BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
+
+static const struct bin_attribute *const protkey_attrs[] = {
&bin_attr_protkey_aes_128,
&bin_attr_protkey_aes_192,
&bin_attr_protkey_aes_256,
@@ -295,9 +295,9 @@ static struct bin_attribute *protkey_attrs[] = {
NULL
};
-static struct attribute_group protkey_attr_group = {
- .name = "protkey",
- .bin_attrs = protkey_attrs,
+static const struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs_new = protkey_attrs,
};
/*
@@ -341,7 +341,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
static ssize_t ccadata_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -351,7 +351,7 @@ static ssize_t ccadata_aes_128_read(struct file *filp,
static ssize_t ccadata_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -361,7 +361,7 @@ static ssize_t ccadata_aes_192_read(struct file *filp,
static ssize_t ccadata_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -371,7 +371,7 @@ static ssize_t ccadata_aes_256_read(struct file *filp,
static ssize_t ccadata_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -381,7 +381,7 @@ static ssize_t ccadata_aes_128_xts_read(struct file *filp,
static ssize_t ccadata_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -389,13 +389,13 @@ static ssize_t ccadata_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
-static struct bin_attribute *ccadata_attrs[] = {
+static const struct bin_attribute *const ccadata_attrs[] = {
&bin_attr_ccadata_aes_128,
&bin_attr_ccadata_aes_192,
&bin_attr_ccadata_aes_256,
@@ -404,9 +404,9 @@ static struct bin_attribute *ccadata_attrs[] = {
NULL
};
-static struct attribute_group ccadata_attr_group = {
- .name = "ccadata",
- .bin_attrs = ccadata_attrs,
+static const struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs_new = ccadata_attrs,
};
#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
@@ -455,7 +455,7 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
static ssize_t ccacipher_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -465,7 +465,7 @@ static ssize_t ccacipher_aes_128_read(struct file *filp,
static ssize_t ccacipher_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -475,7 +475,7 @@ static ssize_t ccacipher_aes_192_read(struct file *filp,
static ssize_t ccacipher_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -485,7 +485,7 @@ static ssize_t ccacipher_aes_256_read(struct file *filp,
static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -495,7 +495,7 @@ static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -503,13 +503,13 @@ static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
-static struct bin_attribute *ccacipher_attrs[] = {
+static const struct bin_attribute *const ccacipher_attrs[] = {
&bin_attr_ccacipher_aes_128,
&bin_attr_ccacipher_aes_192,
&bin_attr_ccacipher_aes_256,
@@ -518,9 +518,9 @@ static struct bin_attribute *ccacipher_attrs[] = {
NULL
};
-static struct attribute_group ccacipher_attr_group = {
- .name = "ccacipher",
- .bin_attrs = ccacipher_attrs,
+static const struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs_new = ccacipher_attrs,
};
/*
@@ -570,7 +570,7 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
static ssize_t ep11_aes_128_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -580,7 +580,7 @@ static ssize_t ep11_aes_128_read(struct file *filp,
static ssize_t ep11_aes_192_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -590,7 +590,7 @@ static ssize_t ep11_aes_192_read(struct file *filp,
static ssize_t ep11_aes_256_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -600,7 +600,7 @@ static ssize_t ep11_aes_256_read(struct file *filp,
static ssize_t ep11_aes_128_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -610,7 +610,7 @@ static ssize_t ep11_aes_128_xts_read(struct file *filp,
static ssize_t ep11_aes_256_xts_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off,
size_t count)
{
@@ -618,13 +618,13 @@ static ssize_t ep11_aes_256_xts_read(struct file *filp,
off, count);
}
-static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static struct bin_attribute *ep11_attrs[] = {
+static const struct bin_attribute *const ep11_attrs[] = {
&bin_attr_ep11_aes_128,
&bin_attr_ep11_aes_192,
&bin_attr_ep11_aes_256,
@@ -633,9 +633,9 @@ static struct bin_attribute *ep11_attrs[] = {
NULL
};
-static struct attribute_group ep11_attr_group = {
+static const struct attribute_group ep11_attr_group = {
.name = "ep11",
- .bin_attrs = ep11_attrs,
+ .bin_attrs_new = ep11_attrs,
};
const struct attribute_group *pkey_attr_groups[] = {
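The group side of the same constification, sketched with hypothetical names: a NULL-terminated array of pointers to const attributes, hung off .bin_attrs_new rather than .bin_attrs:

static const struct bin_attribute *const my_attrs[] = {
	&bin_attr_my_blob,	/* as produced by BIN_ATTR_RO(my_blob, ...) */
	NULL,
};

static const struct attribute_group my_attr_group = {
	.name = "my_group",
	.bin_attrs_new = my_attrs,	/* const-aware sibling of .bin_attrs */
};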
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index 5533bdcb0458..c424d36e89a6 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -4,10 +4,12 @@
#
config CXLFLASH
- tristate "Support for IBM CAPI Flash"
+ tristate "Support for IBM CAPI Flash (DEPRECATED)"
depends on PCI && SCSI && (CXL || OCXL) && EEH
select IRQ_POLL
- default m
help
+ The cxlflash driver is deprecated and will be removed in a future
+ kernel release.
+
Allows CAPI Accelerated IO to Flash
If unsure, say N.
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 60d62b93d624..62806f5e32e6 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3651,6 +3651,8 @@ static int cxlflash_probe(struct pci_dev *pdev,
int rc = 0;
int k;
+ dev_err_once(&pdev->dev, "DEPRECATION: cxlflash is deprecated and will be removed in a future kernel release\n");
+
dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
__func__, pdev->irq);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index adec0df24bc4..1cb517f731f4 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -16,7 +16,6 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -601,7 +600,7 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
return;
}
- blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
+ blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
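A minimal sketch of the converted callback shape, assuming a PCI driver: the bus-agnostic helper takes a plain struct device *, so callers pass &pdev->dev where the old helper wanted the pci_dev itself:

static void my_map_queues(struct Scsi_Host *shost)
{
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
	struct pci_dev *pdev = my_shost_to_pdev(shost);	/* hypothetical */

	/* generic replacement for blk_mq_pci_map_queues(qmap, pdev, 0) */
	blk_mq_map_hw_queues(qmap, &pdev->dev, 0);
}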
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index a44768bceb9a..4101447bb8eb 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -9,7 +9,6 @@
#include <linux/acpi.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dmapool.h>
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 5db931663ae4..35501d0aa655 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3328,8 +3328,8 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_map_hw_queues(qmap, hisi_hba->dev,
+ BASE_VECTORS_V3_HW);
qoff += qmap->nr_queues;
}
}
@@ -3345,7 +3345,7 @@ static const struct scsi_host_template sht_v3_hw = {
.slave_alloc = hisi_sas_slave_alloc,
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
.mq_poll = queue_complete_v3_hw,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 50f1dcb6d584..49abd7dd75a7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,7 +37,6 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -3193,7 +3192,7 @@ static void megasas_map_queues(struct Scsi_Host *shost)
map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
map->nr_queues = instance->msix_vectors - offset;
map->queue_offset = 0;
- blk_mq_pci_map_queues(map, instance->pdev, offset);
+ blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
qoff += map->nr_queues;
offset += map->nr_queues;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0c3e1ac076b5..0d72b5f1b69d 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -12,7 +12,6 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 1bef88130d0c..1e8735538b23 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -4042,7 +4042,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f2a55aa5fe65..9599d7a50028 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -53,7 +53,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
-#include <linux/blk-mq-pci.h>
#include <linux/unaligned.h>
#include "mpt3sas_base.h"
@@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c81e53e93f..2a7822fd613e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -105,7 +105,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (pm8001_ha->number_of_intr > 1) {
- blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+ blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
return;
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 42c7b3f7afbf..d3bd8683f344 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -56,7 +56,6 @@
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8f4cc136a9c9..8ee2e337c9e1 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,7 +8,6 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
{
struct scsi_qla_host *vha = lport->private;
- blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7ab717ed7232..31535beaaa16 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,7 +13,6 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
@@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
+ vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index adee6f60c966..4411426a7894 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -210,6 +210,9 @@ static int scsi_check_passthrough(struct scsi_cmnd *scmd,
struct scsi_sense_hdr sshdr;
enum sam_status status;
+ if (!scmd->result)
+ return 0;
+
if (!failures)
return 0;
@@ -2065,9 +2068,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
- tag_set->flags |=
- BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+ if (shost->hostt->tag_alloc_policy_rr)
+ tag_set->flags |= BLK_MQ_F_TAG_RR;
if (shost->queuecommand_may_block)
tag_set->flags |= BLK_MQ_F_BLOCKING;
tag_set->driver_data = shost;
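Driver-side, the old BLK_TAG_ALLOC_RR enum collapses into a single bool in the host template, which the core translates into BLK_MQ_F_TAG_RR as shown above. A sketch of the opt-in, for a hypothetical driver that wants round-robin tag allocation:

static const struct scsi_host_template my_sht = {
	.name			= "my_hba",		/* hypothetical */
	.queuecommand		= my_queuecommand,
	.can_queue		= 128,
	.tag_alloc_policy_rr	= true,	/* was .tag_alloc_policy = BLK_TAG_ALLOC_RR */
};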
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fde7de3b1e55..9b47f91c5b97 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -4104,7 +4104,7 @@ iscsi_if_rx(struct sk_buff *skb)
}
do {
/*
- * special case for GET_STATS:
+ * special case for GET_STATS, GET_CHAP and GET_HOST_STATS:
* on success - sending reply and stats from
* inside of if_recv_msg(),
* on error - fall through.
@@ -4113,6 +4113,8 @@ iscsi_if_rx(struct sk_buff *skb)
break;
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
break;
+ if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err)
+ break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
if (err == -EAGAIN && --retries < 0) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8947dab132d7..950d8c9fb884 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -177,9 +177,8 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_set_flush_flag(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
- ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ ret = queue_limits_commit_update_frozen(sdkp->disk->queue,
+ &lim);
if (ret)
return ret;
return count;
@@ -483,9 +482,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_discard(sdkp, &lim, mode);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
return count;
@@ -594,9 +591,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_write_same(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
return count;
@@ -814,14 +809,14 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
}
if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
scmd->prot_flags |= SCSI_PROT_REF_CHECK;
}
@@ -996,6 +991,7 @@ static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
lim->atomic_write_hw_boundary = 0;
lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
@@ -3803,9 +3799,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_config_write_same(sdkp, &lim);
kfree(buffer);
- blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
- blk_mq_unfreeze_queue(sdkp->disk->queue);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
if (err)
return err;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 870f37b70546..04fb24d77e9b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -19,7 +19,6 @@
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -6547,10 +6546,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
if (!ctrl_info->disable_managed_interrupts)
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ &ctrl_info->pci_dev->dev, 0);
else
- return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 198bec87bb8e..b17796d5ee66 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -797,10 +797,7 @@ static int get_sectorsize(struct scsi_cd *cd)
lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
- blk_mq_freeze_queue(q);
- err = queue_limits_commit_update(q, &lim);
- blk_mq_unfreeze_queue(q);
- return err;
+ return queue_limits_commit_update_frozen(q, &lim);
}
static int get_capabilities(struct scsi_cd *cd)
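The pattern this series keeps repeating, as a before/after sketch using the variables from the hunk above: the _frozen helper folds the freeze/commit/unfreeze triple into one call that brackets the commit with the queue freeze itself.

/* before: caller froze the queue by hand around the commit */
lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
blk_mq_freeze_queue(q);
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);

/* after: one helper owns the freeze/unfreeze bracket */
lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
err = queue_limits_commit_update_frozen(q, &lim);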
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8471f38b730e..60be1a0c6183 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,7 +29,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>
#include "sd.h"
@@ -746,7 +745,7 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(map);
else
- blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+ blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
}
}
diff --git a/drivers/staging/gpib/Kconfig b/drivers/staging/gpib/Kconfig
index 259f3ff33646..81510db3072e 100644
--- a/drivers/staging/gpib/Kconfig
+++ b/drivers/staging/gpib/Kconfig
@@ -65,6 +65,8 @@ config GPIB_NI_PCI_ISA
depends on ISA_BUS || PCI || PCMCIA
depends on HAS_IOPORT
depends on !X86_PAE
+ depends on PCMCIA || !PCMCIA
+ depends on HAS_IOPORT_MAP
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -89,6 +91,7 @@ config GPIB_CB7210
depends on HAS_IOPORT
depends on ISA_BUS || PCI || PCMCIA
depends on !X86_PAE
+ depends on PCMCIA || !PCMCIA
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -177,6 +180,7 @@ config GPIB_HP82341
config GPIB_INES
tristate "INES"
depends on PCI || ISA_BUS || PCMCIA
+ depends on PCMCIA || !PCMCIA
depends on HAS_IOPORT
depends on !X86_PAE
select GPIB_COMMON
@@ -199,8 +203,8 @@ config GPIB_INES
called cb7210.
config GPIB_PCMCIA
- bool "PCMCIA/Cardbus support for NI MC and Ines boards"
- depends on PCCARD && (GPIB_NI_PCI_ISA || GPIB_CB7210 || GPIB_INES)
+ def_bool y
+ depends on PCMCIA && (GPIB_NI_PCI_ISA || GPIB_CB7210 || GPIB_INES)
help
Enable PCMCIA/Cardbus support for National Instruments,
Measurement Computing boards and Ines boards.
diff --git a/drivers/staging/gpib/agilent_82350b/Makefile b/drivers/staging/gpib/agilent_82350b/Makefile
index d9236c92e04b..f24e1e713a63 100644
--- a/drivers/staging/gpib/agilent_82350b/Makefile
+++ b/drivers/staging/gpib/agilent_82350b/Makefile
@@ -1,2 +1,2 @@
-obj-m += agilent_82350b.o
+obj-$(CONFIG_GPIB_AGILENT_82350B) += agilent_82350b.o
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 53006d0cc79c..8e2334fe5c9b 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -700,7 +700,7 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
GPIB_82350A_REGION));
dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
driver_name, a_priv->gpib_base);
- tms_priv->iobase = a_priv->gpib_base + TMS9914_BASE_REG;
+ tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device,
SRAM_82350A_REGION),
pci_resource_len(a_priv->pci_device,
@@ -724,7 +724,7 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
pci_resource_len(a_priv->pci_device, GPIB_REGION));
dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
driver_name, a_priv->gpib_base);
- tms_priv->iobase = a_priv->gpib_base + TMS9914_BASE_REG;
+ tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device, SRAM_REGION),
pci_resource_len(a_priv->pci_device, SRAM_REGION));
dev_dbg(board->gpib_dev, "%s: sram base address remapped to 0x%p\n",
diff --git a/drivers/staging/gpib/agilent_82357a/Makefile b/drivers/staging/gpib/agilent_82357a/Makefile
index 4a1d940fce2b..81a55c257a6e 100644
--- a/drivers/staging/gpib/agilent_82357a/Makefile
+++ b/drivers/staging/gpib/agilent_82357a/Makefile
@@ -1,4 +1,4 @@
-obj-m += agilent_82357a.o
+obj-$(CONFIG_GPIB_AGILENT_82357A) += agilent_82357a.o
diff --git a/drivers/staging/gpib/cb7210/Makefile b/drivers/staging/gpib/cb7210/Makefile
index 22e0214fc17d..cda0725d6487 100644
--- a/drivers/staging/gpib/cb7210/Makefile
+++ b/drivers/staging/gpib/cb7210/Makefile
@@ -1,4 +1,4 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += cb7210.o
+obj-$(CONFIG_GPIB_CB7210) += cb7210.o
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c
index 63df7f3eb3f3..59e41c97f518 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/staging/gpib/cb7210/cb7210.c
@@ -971,12 +971,12 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
switch (cb_priv->pci_chip) {
case PCI_CHIP_AMCC_S5933:
cb_priv->amcc_iobase = pci_resource_start(cb_priv->pci_device, 0);
- nec_priv->iobase = (void *)(pci_resource_start(cb_priv->pci_device, 1));
+ nec_priv->iobase = pci_resource_start(cb_priv->pci_device, 1);
cb_priv->fifo_iobase = pci_resource_start(cb_priv->pci_device, 2);
break;
case PCI_CHIP_QUANCOM:
- nec_priv->iobase = (void *)(pci_resource_start(cb_priv->pci_device, 0));
- cb_priv->fifo_iobase = (unsigned long)nec_priv->iobase;
+ nec_priv->iobase = pci_resource_start(cb_priv->pci_device, 0);
+ cb_priv->fifo_iobase = nec_priv->iobase;
break;
default:
pr_err("cb7210: bug! unhandled pci_chip=%i\n", cb_priv->pci_chip);
@@ -1040,8 +1040,8 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (request_region((unsigned long)config->ibbase, cb7210_iosize, "cb7210") == 0) {
- pr_err("gpib: ioports starting at 0x%p are already in use\n", config->ibbase);
+ if (request_region(config->ibbase, cb7210_iosize, "cb7210") == 0) {
+ pr_err("gpib: ioports starting at 0x%u are already in use\n", config->ibbase);
return -EIO;
}
nec_priv->iobase = config->ibbase;
@@ -1471,7 +1471,7 @@ int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
(unsigned long)curr_dev->resource[0]->start);
return -EIO;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->iobase = curr_dev->resource[0]->start;
cb_priv->fifo_iobase = curr_dev->resource[0]->start;
if (request_irq(curr_dev->irq, cb7210_interrupt, IRQF_SHARED,
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/staging/gpib/cb7210/cb7210.h
index 4ad976de2b68..c17cb22585f7 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/staging/gpib/cb7210/cb7210.h
@@ -113,9 +113,9 @@ enum hs_regs {
HS_STATUS = 0x8, /* HS_STATUS register */
};
-static inline unsigned long nec7210_iobase(const struct cb7210_priv *cb_priv)
+static inline u32 nec7210_iobase(const struct cb7210_priv *cb_priv)
{
- return (unsigned long)(cb_priv->nec7210_priv.iobase);
+ return cb_priv->nec7210_priv.iobase;
}
static inline int cb7210_page_in_bits(unsigned int page)
diff --git a/drivers/staging/gpib/cec/Makefile b/drivers/staging/gpib/cec/Makefile
index f4638628ff29..b7141e23d4e0 100644
--- a/drivers/staging/gpib/cec/Makefile
+++ b/drivers/staging/gpib/cec/Makefile
@@ -1,3 +1,3 @@
-obj-m += cec_gpib.o
+obj-$(CONFIG_GPIB_CEC_PCI) += cec_gpib.o
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/staging/gpib/cec/cec_gpib.c
index 3dc933deb401..9c00a874468c 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/staging/gpib/cec/cec_gpib.c
@@ -297,8 +297,8 @@ int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
cec_priv->plx_iobase = pci_resource_start(cec_priv->pci_device, 1);
pr_info(" plx9050 base address 0x%lx\n", cec_priv->plx_iobase);
- nec_priv->iobase = (void *)(pci_resource_start(cec_priv->pci_device, 3));
- pr_info(" nec7210 base address 0x%p\n", nec_priv->iobase);
+ nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
+ pr_info(" nec7210 base address 0x%x\n", nec_priv->iobase);
isr_flags |= IRQF_SHARED;
if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, "pci-gpib", board)) {
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index 405237d8cb47..0962729d7dfe 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -116,56 +116,6 @@ int io_timed_out(gpib_board_t *board)
return 0;
}
-void writeb_wrapper(unsigned int value, void *address)
-{
- writeb(value, address);
-};
-EXPORT_SYMBOL(writeb_wrapper);
-
-void writew_wrapper(unsigned int value, void *address)
-{
- writew(value, address);
-};
-EXPORT_SYMBOL(writew_wrapper);
-
-unsigned int readb_wrapper(void *address)
-{
- return readb(address);
-};
-EXPORT_SYMBOL(readb_wrapper);
-
-unsigned int readw_wrapper(void *address)
-{
- return readw(address);
-};
-EXPORT_SYMBOL(readw_wrapper);
-
-#ifdef CONFIG_HAS_IOPORT
-void outb_wrapper(unsigned int value, void *address)
-{
- outb(value, (unsigned long)(address));
-};
-EXPORT_SYMBOL(outb_wrapper);
-
-void outw_wrapper(unsigned int value, void *address)
-{
- outw(value, (unsigned long)(address));
-};
-EXPORT_SYMBOL(outw_wrapper);
-
-unsigned int inb_wrapper(void *address)
-{
- return inb((unsigned long)(address));
-};
-EXPORT_SYMBOL(inb_wrapper);
-
-unsigned int inw_wrapper(void *address)
-{
- return inw((unsigned long)(address));
-};
-EXPORT_SYMBOL(inw_wrapper);
-#endif
-
/* this is a function instead of a constant because of Suse
* defining HZ to be a function call to get_hz()
*/
@@ -536,7 +486,7 @@ int dvrsp(gpib_board_t *board, unsigned int pad, int sad,
return -1;
}
- if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS) {
+ if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS || sad < -1) {
pr_err("gpib: bad address for serial poll");
return -1;
}
@@ -1623,7 +1573,7 @@ static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg)
if (WARN_ON_ONCE(sizeof(void *) > sizeof(base_addr)))
return -EFAULT;
- config->ibbase = (void *)(unsigned long)(base_addr);
+ config->ibbase = base_addr;
return 0;
}
diff --git a/drivers/staging/gpib/eastwood/Makefile b/drivers/staging/gpib/eastwood/Makefile
index c74056f959d0..384825195f77 100644
--- a/drivers/staging/gpib/eastwood/Makefile
+++ b/drivers/staging/gpib/eastwood/Makefile
@@ -1,3 +1,3 @@
-obj-m += fluke_gpib.o
+obj-$(CONFIG_GPIB_FLUKE) += fluke_gpib.o
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/staging/gpib/eastwood/fluke_gpib.c
index 3f938ab0c84d..5e59d38beb35 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.c
@@ -1011,12 +1011,12 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
}
e_priv->gpib_iomem_res = res;
- nec_priv->iobase = ioremap(e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
resource_size(e_priv->gpib_iomem_res));
- pr_info("gpib: iobase %lx remapped to %p, length=%d\n",
- (unsigned long)e_priv->gpib_iomem_res->start,
- nec_priv->iobase, (int)resource_size(e_priv->gpib_iomem_res));
- if (!nec_priv->iobase) {
+ pr_info("gpib: mmiobase %llx remapped to %p, length=%d\n",
+ (u64)e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase, (int)resource_size(e_priv->gpib_iomem_res));
+ if (!nec_priv->mmiobase) {
dev_err(&fluke_gpib_pdev->dev, "Could not map I/O memory\n");
return -ENOMEM;
}
@@ -1107,7 +1107,7 @@ void fluke_detach(gpib_board_t *board)
gpib_free_pseudo_irq(board);
nec_priv = &e_priv->nec7210_priv;
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
fluke_paged_write_byte(e_priv, 0, ISR0_IMR0, ISR0_IMR0_PAGE);
nec7210_board_reset(nec_priv, board);
}
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.h b/drivers/staging/gpib/eastwood/fluke_gpib.h
index fcbd42f8f9af..4e2144d45270 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.h
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.h
@@ -72,7 +72,7 @@ static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
{
u8 retval;
- retval = readl(nec_priv->iobase + register_num * nec_priv->offset);
+ retval = readl(nec_priv->mmiobase + register_num * nec_priv->offset);
return retval;
}
@@ -80,7 +80,7 @@ static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
static inline void fluke_write_byte_nolock(struct nec7210_priv *nec_priv, uint8_t data,
int register_num)
{
- writel(data, nec_priv->iobase + register_num * nec_priv->offset);
+ writel(data, nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline uint8_t fluke_paged_read_byte(struct fluke_priv *e_priv,
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 62791db1c34a..0662b20a45e7 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GPIB Driver for fmh_gpib_core");
+MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
static irqreturn_t fmh_gpib_interrupt(int irq, void *arg);
static int fmh_gpib_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
@@ -1419,15 +1421,14 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
}
e_priv->gpib_iomem_res = res;
- nec_priv->iobase = ioremap(e_priv->gpib_iomem_res->start,
+ nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
resource_size(e_priv->gpib_iomem_res));
- if (!nec_priv->iobase) {
+ if (!nec_priv->mmiobase) {
dev_err(board->dev, "Could not map I/O memory for gpib\n");
return -ENOMEM;
}
- dev_info(board->dev, "iobase 0x%lx remapped to %p, length=%ld\n",
- (unsigned long)e_priv->gpib_iomem_res->start,
- nec_priv->iobase, (unsigned long)resource_size(e_priv->gpib_iomem_res));
+ dev_info(board->dev, "iobase %pr remapped to %p\n",
+ e_priv->gpib_iomem_res, nec_priv->mmiobase);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma_fifos");
if (!res) {
@@ -1507,14 +1508,14 @@ void fmh_gpib_detach(gpib_board_t *board)
free_irq(e_priv->irq, board);
if (e_priv->fifo_base)
fifos_write(e_priv, 0, FIFO_CONTROL_STATUS_REG);
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
write_byte(nec_priv, 0, ISR0_IMR0_REG);
nec7210_board_reset(nec_priv, board);
}
if (e_priv->fifo_base)
iounmap(e_priv->fifo_base);
- if (nec_priv->iobase)
- iounmap(nec_priv->iobase);
+ if (nec_priv->mmiobase)
+ iounmap(nec_priv->mmiobase);
if (e_priv->dma_port_res) {
release_mem_region(e_priv->dma_port_res->start,
resource_size(e_priv->dma_port_res));
@@ -1564,12 +1565,12 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
e_priv->gpib_iomem_res = &pci_device->resource[gpib_control_status_pci_resource_index];
e_priv->dma_port_res = &pci_device->resource[gpib_fifo_pci_resource_index];
- nec_priv->iobase = ioremap(pci_resource_start(pci_device,
+ nec_priv->mmiobase = ioremap(pci_resource_start(pci_device,
gpib_control_status_pci_resource_index),
pci_resource_len(pci_device,
gpib_control_status_pci_resource_index));
dev_info(board->dev, "base address for gpib control/status registers remapped to 0x%p\n",
- nec_priv->iobase);
+ nec_priv->mmiobase);
if (e_priv->dma_port_res->flags & IORESOURCE_MEM) {
e_priv->fifo_base = ioremap(pci_resource_start(pci_device,
@@ -1632,14 +1633,14 @@ void fmh_gpib_pci_detach(gpib_board_t *board)
free_irq(e_priv->irq, board);
if (e_priv->fifo_base)
fifos_write(e_priv, 0, FIFO_CONTROL_STATUS_REG);
- if (nec_priv->iobase) {
+ if (nec_priv->mmiobase) {
write_byte(nec_priv, 0, ISR0_IMR0_REG);
nec7210_board_reset(nec_priv, board);
}
if (e_priv->fifo_base)
iounmap(e_priv->fifo_base);
- if (nec_priv->iobase)
- iounmap(nec_priv->iobase);
+ if (nec_priv->mmiobase)
+ iounmap(nec_priv->mmiobase);
if (e_priv->dma_port_res || e_priv->gpib_iomem_res)
pci_release_regions(to_pci_dev(board->dev));
if (board->dev)
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
index 43bfc89d2a6f..60b1bd6d3c15 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
@@ -127,13 +127,13 @@ static const unsigned int fifo_max_burst_length_mask = 0x00ff;
static inline uint8_t gpib_cs_read_byte(struct nec7210_priv *nec_priv,
unsigned int register_num)
{
- return readb(nec_priv->iobase + register_num * nec_priv->offset);
+ return readb(nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline void gpib_cs_write_byte(struct nec7210_priv *nec_priv, uint8_t data,
unsigned int register_num)
{
- writeb(data, nec_priv->iobase + register_num * nec_priv->offset);
+ writeb(data, nec_priv->mmiobase + register_num * nec_priv->offset);
}
static inline uint16_t fifos_read(struct fmh_priv *fmh_priv, int register_num)
diff --git a/drivers/staging/gpib/gpio/Makefile b/drivers/staging/gpib/gpio/Makefile
index a31ded6e5924..00ea52abdda7 100644
--- a/drivers/staging/gpib/gpio/Makefile
+++ b/drivers/staging/gpib/gpio/Makefile
@@ -1,4 +1,4 @@
-obj-m += gpib_bitbang.o
+obj-$(CONFIG_GPIB_GPIO) += gpib_bitbang.o
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/staging/gpib/gpio/gpib_bitbang.c
index a2d562cbd65b..23550502e012 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/staging/gpib/gpio/gpib_bitbang.c
@@ -315,7 +315,7 @@ struct bb_priv {
enum listener_function_state listener_state;
};
-inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
+static inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int eoi);
static void set_data_lines(u8 byte);
static u8 get_data_lines(void);
diff --git a/drivers/staging/gpib/hp_82335/Makefile b/drivers/staging/gpib/hp_82335/Makefile
index 8b7a552e9355..305ce44ee48a 100644
--- a/drivers/staging/gpib/hp_82335/Makefile
+++ b/drivers/staging/gpib/hp_82335/Makefile
@@ -1,4 +1,4 @@
-obj-m += hp82335.o
+obj-$(CONFIG_GPIB_HP82335) += hp82335.o
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/staging/gpib/hp_82335/hp82335.c
index 40afe42aea47..ea78143c7ab6 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/staging/gpib/hp_82335/hp82335.c
@@ -9,6 +9,7 @@
*/
#include "hp82335.h"
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -233,7 +234,7 @@ static void hp82335_clear_interrupt(struct hp82335_priv *hp_priv)
{
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
- writeb(0, tms_priv->iobase + HPREG_INTR_CLEAR);
+ writeb(0, tms_priv->mmiobase + HPREG_INTR_CLEAR);
}
int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
@@ -241,7 +242,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
struct hp82335_priv *hp_priv;
struct tms9914_priv *tms_priv;
int retval;
- const unsigned long upper_iomem_base = (unsigned long)config->ibbase + hp82335_rom_size;
+ const unsigned long upper_iomem_base = config->ibbase + hp82335_rom_size;
board->status = 0;
@@ -253,7 +254,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
tms_priv->write_byte = hp82335_write_byte;
tms_priv->offset = 1;
- switch ((unsigned long)(config->ibbase)) {
+ switch (config->ibbase) {
case 0xc4000:
case 0xc8000:
case 0xcc000:
@@ -271,7 +272,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
case 0xfc000:
break;
default:
- pr_err("hp82335: invalid base io address 0x%p\n", config->ibbase);
+ pr_err("hp82335: invalid base io address 0x%u\n", config->ibbase);
return -EINVAL;
}
if (!request_mem_region(upper_iomem_base, hp82335_upper_iomem_size, "hp82335")) {
@@ -280,9 +281,9 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
return -EBUSY;
}
hp_priv->raw_iobase = upper_iomem_base;
- tms_priv->iobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
+ tms_priv->mmiobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
pr_info("hp82335: upper half of 82335 iomem region 0x%lx remapped to 0x%p\n",
- hp_priv->raw_iobase, tms_priv->iobase);
+ hp_priv->raw_iobase, tms_priv->mmiobase);
retval = request_irq(config->ibirq, hp82335_interrupt, 0, "hp82335", board);
if (retval) {
@@ -296,7 +297,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp82335_clear_interrupt(hp_priv);
- writeb(INTR_ENABLE, tms_priv->iobase + HPREG_CCR);
+ writeb(INTR_ENABLE, tms_priv->mmiobase + HPREG_CCR);
tms9914_online(board, tms_priv);
@@ -312,10 +313,10 @@ void hp82335_detach(gpib_board_t *board)
tms_priv = &hp_priv->tms9914_priv;
if (hp_priv->irq)
free_irq(hp_priv->irq, board);
- if (tms_priv->iobase) {
- writeb(0, tms_priv->iobase + HPREG_CCR);
+ if (tms_priv->mmiobase) {
+ writeb(0, tms_priv->mmiobase + HPREG_CCR);
tms9914_board_reset(tms_priv);
- iounmap((void *)tms_priv->iobase);
+ iounmap(tms_priv->mmiobase);
}
if (hp_priv->raw_iobase)
release_mem_region(hp_priv->raw_iobase, hp82335_upper_iomem_size);
diff --git a/drivers/staging/gpib/hp_82341/Makefile b/drivers/staging/gpib/hp_82341/Makefile
index 1fe7db4f8ca4..21367310a17e 100644
--- a/drivers/staging/gpib/hp_82341/Makefile
+++ b/drivers/staging/gpib/hp_82341/Makefile
@@ -1,2 +1,2 @@
-obj-m += hp_82341.o
+obj-$(CONFIG_GPIB_HP82341) += hp_82341.o
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c
index 8ad1c885a9fb..71d481e88bd9 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/staging/gpib/hp_82341/hp_82341.c
@@ -473,12 +473,12 @@ void hp_82341_free_private(gpib_board_t *board)
static uint8_t hp_82341_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num);
+ return inb(priv->iobase + register_num);
}
static void hp_82341_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- outb(data, (unsigned long)(priv->iobase) + register_num);
+ outb(data, priv->iobase + register_num);
}
static int hp_82341_find_isapnp_board(struct pnp_dev **dev)
@@ -682,8 +682,8 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
{
struct hp_82341_priv *hp_priv;
struct tms9914_priv *tms_priv;
- unsigned long start_addr;
- void *iobase;
+ u32 start_addr;
+ u32 iobase;
int irq;
int i;
int retval;
@@ -704,7 +704,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (retval < 0)
return retval;
hp_priv->pnp_dev = dev;
- iobase = (void *)(pnp_port_start(dev, 0));
+ iobase = pnp_port_start(dev, 0);
irq = pnp_irq(dev, 0);
hp_priv->hw_version = HW_VERSION_82341D;
hp_priv->io_region_offset = 0x8;
@@ -714,9 +714,9 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp_priv->hw_version = HW_VERSION_82341C;
hp_priv->io_region_offset = 0x400;
}
- pr_info("hp_82341: base io 0x%p\n", iobase);
+ pr_info("hp_82341: base io 0x%u\n", iobase);
for (i = 0; i < hp_82341_num_io_regions; ++i) {
- start_addr = (unsigned long)(iobase) + i * hp_priv->io_region_offset;
+ start_addr = iobase + i * hp_priv->io_region_offset;
if (!request_region(start_addr, hp_82341_region_iosize, "hp_82341")) {
pr_err("hp_82341: failed to allocate io ports 0x%lx-0x%lx\n",
start_addr,
@@ -725,7 +725,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
}
hp_priv->iobase[i] = start_addr;
}
- tms_priv->iobase = (void *)(hp_priv->iobase[2]);
+ tms_priv->iobase = hp_priv->iobase[2];
if (hp_priv->hw_version == HW_VERSION_82341D) {
retval = isapnp_cfg_begin(hp_priv->pnp_dev->card->number,
hp_priv->pnp_dev->number);
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/staging/gpib/include/gpibP.h
index 5fc42b645ab7..b97da577ba33 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/staging/gpib/include/gpibP.h
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
void gpib_register_driver(gpib_interface_t *interface, struct module *mod);
void gpib_unregister_driver(gpib_interface_t *interface);
@@ -35,16 +36,5 @@ extern gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
extern struct list_head registered_drivers;
-#include <linux/io.h>
-
-void writeb_wrapper(unsigned int value, void *address);
-unsigned int readb_wrapper(void *address);
-void outb_wrapper(unsigned int value, void *address);
-unsigned int inb_wrapper(void *address);
-void writew_wrapper(unsigned int value, void *address);
-unsigned int readw_wrapper(void *address);
-void outw_wrapper(unsigned int value, void *address);
-unsigned int inw_wrapper(void *address);
-
#endif // _GPIB_P_H
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/staging/gpib/include/gpib_types.h
index ee2643da6d71..b41781a55a60 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/staging/gpib/include/gpib_types.h
@@ -31,7 +31,8 @@ typedef struct {
void *init_data;
int init_data_length;
/* IO base address to use for non-pnp cards (set by core, driver should make local copy) */
- void *ibbase;
+ u32 ibbase;
+ void __iomem *mmibbase;
/* IRQ to use for non-pnp cards (set by core, driver should make local copy) */
unsigned int ibirq;
/* dma channel to use for non-pnp cards (set by core, driver should make local copy) */
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/staging/gpib/include/nec7210.h
index c00aba4ce846..ca998c4a84bf 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/staging/gpib/include/nec7210.h
@@ -18,7 +18,10 @@
/* struct used to provide variables local to a nec7210 chip */
struct nec7210_priv {
- void *iobase;
+#ifdef CONFIG_HAS_IOPORT
+ u32 iobase;
+#endif
+ void __iomem *mmiobase;
unsigned int offset; // offset between successive nec7210 io addresses
unsigned int dma_channel;
u8 *dma_buffer;
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/staging/gpib/include/tms9914.h
index 456b488212d2..d8c8d1c9b131 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/staging/gpib/include/tms9914.h
@@ -20,7 +20,10 @@ enum tms9914_holdoff_mode {
/* struct used to provide variables local to a tms9914 chip */
struct tms9914_priv {
- void *iobase;
+#ifdef CONFIG_HAS_IOPORT
+ u32 iobase;
+#endif
+ void __iomem *mmiobase;
unsigned int offset; // offset between successive tms9914 io addresses
unsigned int dma_channel;
// software copy of bits written to interrupt mask registers
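With the split, each chip-private struct carries a plain port number (only built when the kernel has port I/O at all) plus an __iomem MMIO base, and each board's accessors pick exactly one path. A sketch of the two read paths, with hypothetical wrapper names:

/* MMIO path: boards with memory-mapped registers use mmiobase */
static inline u8 my_mmio_read(struct tms9914_priv *priv, unsigned int reg)
{
	return readb(priv->mmiobase + reg * priv->offset);
}

#ifdef CONFIG_HAS_IOPORT
/* port-I/O path: iobase is now a port number, not a pointer */
static inline u8 my_port_read(struct tms9914_priv *priv, unsigned int reg)
{
	return inb(priv->iobase + reg * priv->offset);
}
#endif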
diff --git a/drivers/staging/gpib/ines/Makefile b/drivers/staging/gpib/ines/Makefile
index cdcaa59a4e39..6b6e480fd811 100644
--- a/drivers/staging/gpib/ines/Makefile
+++ b/drivers/staging/gpib/ines/Makefile
@@ -1,4 +1,4 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += ines_gpib.o
+obj-$(CONFIG_GPIB_INES) += ines_gpib.o
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/staging/gpib/ines/ines.h
index 7e8302619998..eed038fd3f28 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/staging/gpib/ines/ines.h
@@ -83,14 +83,14 @@ void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count);
/* inb/outb wrappers */
static inline unsigned int ines_inb(struct ines_priv *priv, unsigned int register_number)
{
- return inb((unsigned long)(priv->nec7210_priv.iobase) +
+ return inb(priv->nec7210_priv.iobase +
register_number * priv->nec7210_priv.offset);
}
static inline void ines_outb(struct ines_priv *priv, unsigned int value,
unsigned int register_number)
{
- outb(value, (unsigned long)(priv->nec7210_priv.iobase) +
+ outb(value, priv->nec7210_priv.iobase +
register_number * priv->nec7210_priv.offset);
}
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/staging/gpib/ines/ines_gpib.c
index 9d8387c3bf01..e18455ba842f 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/staging/gpib/ines/ines_gpib.c
@@ -273,10 +273,10 @@ irqreturn_t ines_pci_interrupt(int irq, void *arg)
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
if (priv->pci_chip_type == PCI_CHIP_QUANCOM) {
- if ((inb((unsigned long)nec_priv->iobase +
+ if ((inb(nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG) &
QUANCOM_IRQ_ASSERTED_BIT))
- outb(QUANCOM_IRQ_ENABLE_BIT, (unsigned long)(nec_priv->iobase) +
+ outb(QUANCOM_IRQ_ENABLE_BIT, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
}
@@ -780,8 +780,8 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
if (pci_request_regions(ines_priv->pci_device, "ines-gpib"))
return -1;
- nec_priv->iobase = (void *)(pci_resource_start(ines_priv->pci_device,
- found_id.gpib_region));
+ nec_priv->iobase = pci_resource_start(ines_priv->pci_device,
+ found_id.gpib_region);
ines_priv->pci_chip_type = found_id.pci_chip_type;
nec_priv->offset = found_id.io_offset;
@@ -840,7 +840,7 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
}
break;
case PCI_CHIP_QUANCOM:
- outb(QUANCOM_IRQ_ENABLE_BIT, (unsigned long)(nec_priv->iobase) +
+ outb(QUANCOM_IRQ_ENABLE_BIT, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
break;
case PCI_CHIP_QUICKLOGIC5030:
@@ -899,8 +899,8 @@ int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
ines_priv = board->private_data;
nec_priv = &ines_priv->nec7210_priv;
- if (!request_region((unsigned long)config->ibbase, ines_isa_iosize, "ines_gpib")) {
- pr_err("ines_gpib: ioports at 0x%p already in use\n", config->ibbase);
+ if (!request_region(config->ibbase, ines_isa_iosize, "ines_gpib")) {
+ pr_err("ines_gpib: ioports at 0x%x already in use\n", config->ibbase);
return -1;
}
nec_priv->iobase = config->ibbase;
@@ -931,7 +931,7 @@ void ines_pci_detach(gpib_board_t *board)
break;
case PCI_CHIP_QUANCOM:
if (nec_priv->iobase)
- outb(0, (unsigned long)(nec_priv->iobase) +
+ outb(0, nec_priv->iobase +
QUANCOM_IRQ_CONTROL_STATUS_REG);
break;
default:
@@ -960,7 +960,7 @@ void ines_isa_detach(gpib_board_t *board)
free_irq(ines_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), ines_isa_iosize);
+ release_region(nec_priv->iobase, ines_isa_iosize);
}
}
ines_free_private(board);
@@ -1355,7 +1355,7 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
return -1;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->iobase = curr_dev->resource[0]->start;
nec7210_board_reset(nec_priv, board);
@@ -1410,7 +1410,7 @@ void ines_pcmcia_detach(gpib_board_t *board)
free_irq(ines_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), ines_pcmcia_iosize);
+ release_region(nec_priv->iobase, ines_pcmcia_iosize);
}
}
ines_free_private(board);
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/Makefile b/drivers/staging/gpib/lpvo_usb_gpib/Makefile
index 137511acce63..360553488e6d 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/Makefile
+++ b/drivers/staging/gpib/lpvo_usb_gpib/Makefile
@@ -1,3 +1,3 @@
-obj-m += lpvo_usb_gpib.o
+obj-$(CONFIG_GPIB_LPVO) += lpvo_usb_gpib.o
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 267651a15fa0..93a05c792816 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -99,8 +99,8 @@ module_param(debug, int, 0644);
#define USB_GPIB_DEBUG_ON "\nIBDE\xAA\n"
#define USB_GPIB_SET_LISTEN "\nIBDT0\n"
#define USB_GPIB_SET_TALK "\nIBDT1\n"
-#define USB_GPIB_SET_LINES "\nIBDC\n"
-#define USB_GPIB_SET_DATA "\nIBDM\n"
+#define USB_GPIB_SET_LINES "\nIBDC.\n"
+#define USB_GPIB_SET_DATA "\nIBDM.\n"
#define USB_GPIB_READ_LINES "\nIBD?C\n"
#define USB_GPIB_READ_DATA "\nIBD?M\n"
#define USB_GPIB_READ_BUS "\nIBD??\n"
@@ -210,7 +210,7 @@ static int skel_do_release(gpib_board_t *);
* (unix time in sec and NANOsec)
*/
-inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
+static inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
{
return ((a->tv_sec - b->tv_sec) * 1000000 +
(a->tv_nsec - b->tv_nsec) / 1000);
@@ -436,7 +436,7 @@ static void set_timeout(gpib_board_t *board)
static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *config)
{
int retval, j;
- int base = (long)config->ibbase;
+ u32 base = config->ibbase;
char *device_path;
int match;
struct usb_device *udev;
@@ -589,7 +589,7 @@ static int usb_gpib_command(gpib_board_t *board,
size_t *bytes_written)
{
int i, retval;
- char command[6] = "IBc\n";
+ char command[6] = "IBc.\n";
DIA_LOG(1, "enter %p\n", board);
@@ -608,7 +608,7 @@ static int usb_gpib_command(gpib_board_t *board,
}
/**
- * disable_eos() - Disable END on eos byte (END on EOI only)
+ * usb_gpib_disable_eos() - Disable END on eos byte (END on EOI only)
*
* @board: the gpib_board data area for this gpib interface
*
@@ -624,7 +624,7 @@ static void usb_gpib_disable_eos(gpib_board_t *board)
}
/**
- * enable_eos() - Enable END for reads when eos byte is received.
+ * usb_gpib_enable_eos() - Enable END for reads when eos byte is received.
*
* @board: the gpib_board data area for this gpib interface
* @eos_byte: the 'eos' byte
@@ -647,7 +647,7 @@ static int usb_gpib_enable_eos(gpib_board_t *board,
}
/**
- * go_to_standby() - De-assert ATN
+ * usb_gpib_go_to_standby() - De-assert ATN
*
* @board: the gpib_board data area for this gpib interface
*/
@@ -664,7 +664,7 @@ static int usb_gpib_go_to_standby(gpib_board_t *board)
}
/**
- * interface_clear() - Assert or de-assert IFC
+ * usb_gpib_interface_clear() - Assert or de-assert IFC
*
* @board: the gpib_board data area for this gpib interface
* assert: 1: assert IFC; 0: de-assert IFC
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/staging/gpib/nec7210/nec7210.c
index 1d9951035497..c9a837fad96e 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/staging/gpib/nec7210/nec7210.c
@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(nec7210_board_online);
/* wrappers for io */
uint8_t nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_ioport_read_byte);
@@ -1047,7 +1047,7 @@ void nec7210_ioport_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
*/
nec7210_locking_ioport_write_byte(priv, data, register_num);
else
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_ioport_write_byte);
@@ -1058,7 +1058,7 @@ uint8_t nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int
unsigned long flags;
spin_lock_irqsave(&priv->register_page_lock, flags);
- retval = inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ retval = inb(priv->iobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
return retval;
}
@@ -1072,7 +1072,7 @@ void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, uint8_t data,
spin_lock_irqsave(&priv->register_page_lock, flags);
if (register_num == AUXMR)
udelay(1);
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
}
EXPORT_SYMBOL(nec7210_locking_ioport_write_byte);
@@ -1080,7 +1080,7 @@ EXPORT_SYMBOL(nec7210_locking_ioport_write_byte);
uint8_t nec7210_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
- return readb(priv->iobase + register_num * priv->offset);
+ return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_iomem_read_byte);
@@ -1092,7 +1092,7 @@ void nec7210_iomem_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
*/
nec7210_locking_iomem_write_byte(priv, data, register_num);
else
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_iomem_write_byte);
@@ -1102,7 +1102,7 @@ uint8_t nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int
unsigned long flags;
spin_lock_irqsave(&priv->register_page_lock, flags);
- retval = readb(priv->iobase + register_num * priv->offset);
+ retval = readb(priv->mmiobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
return retval;
}
@@ -1116,7 +1116,7 @@ void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, uint8_t data,
spin_lock_irqsave(&priv->register_page_lock, flags);
if (register_num == AUXMR)
udelay(1);
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
spin_unlock_irqrestore(&priv->register_page_lock, flags);
}
EXPORT_SYMBOL(nec7210_locking_iomem_write_byte);
diff --git a/drivers/staging/gpib/ni_usb/Makefile b/drivers/staging/gpib/ni_usb/Makefile
index e22b3b21a62c..469c5d16add3 100644
--- a/drivers/staging/gpib/ni_usb/Makefile
+++ b/drivers/staging/gpib/ni_usb/Makefile
@@ -1,4 +1,4 @@
-obj-m += ni_usb_gpib.o
+obj-$(CONFIG_GPIB_NI_USB) += ni_usb_gpib.o
diff --git a/drivers/staging/gpib/pc2/Makefile b/drivers/staging/gpib/pc2/Makefile
index 8148425e0f87..481ee4296e1b 100644
--- a/drivers/staging/gpib/pc2/Makefile
+++ b/drivers/staging/gpib/pc2/Makefile
@@ -1,5 +1,5 @@
-obj-m += pc2_gpib.o
+obj-$(CONFIG_GPIB_PC2) += pc2_gpib.o
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/staging/gpib/pc2/pc2_gpib.c
index 7b3b34f47341..d22af25125b1 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/staging/gpib/pc2/pc2_gpib.c
@@ -426,7 +426,7 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv = &pc2_priv->nec7210_priv;
nec_priv->offset = pc2_reg_offset;
- if (request_region((unsigned long)config->ibbase, pc2_iosize, "pc2") == 0) {
+ if (request_region(config->ibbase, pc2_iosize, "pc2") == 0) {
pr_err("gpib: ioports are already in use\n");
return -1;
}
@@ -471,7 +471,7 @@ void pc2_detach(gpib_board_t *board)
free_irq(pc2_priv->irq, board);
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
- release_region((unsigned long)(nec_priv->iobase), pc2_iosize);
+ release_region(nec_priv->iobase, pc2_iosize);
}
if (nec_priv->dma_buffer) {
dma_free_coherent(board->dev, nec_priv->dma_buffer_length,
@@ -498,14 +498,14 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
nec_priv = &pc2_priv->nec7210_priv;
nec_priv->offset = pc2a_reg_offset;
- switch ((unsigned long)(config->ibbase)) {
+ switch (config->ibbase) {
case 0x02e1:
case 0x22e1:
case 0x42e1:
case 0x62e1:
break;
default:
- pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%p\n",
+ pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%d\n",
config->ibbase);
return -1;
}
@@ -522,7 +522,7 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
unsigned int err = 0;
for (i = 0; i < num_registers; i++) {
- if (check_region((unsigned long)config->ibbase + i * pc2a_reg_offset, 1))
+ if (check_region(config->ibbase + i * pc2a_reg_offset, 1))
err++;
}
if (config->ibirq && check_region(pc2a_clear_intr_iobase + config->ibirq, 1))
@@ -533,11 +533,11 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
}
#endif
for (i = 0; i < num_registers; i++) {
- if (!request_region((unsigned long)config->ibbase +
+ if (!request_region(config->ibbase +
i * pc2a_reg_offset, 1, "pc2a")) {
pr_err("gpib: ioports are already in use");
for (j = 0; j < i; j++)
- release_region((unsigned long)(config->ibbase) +
+ release_region(config->ibbase +
j * pc2a_reg_offset, 1);
return -1;
}
@@ -608,7 +608,7 @@ static void pc2a_common_detach(gpib_board_t *board, unsigned int num_registers)
if (nec_priv->iobase) {
nec7210_board_reset(nec_priv, board);
for (i = 0; i < num_registers; i++)
- release_region((unsigned long)nec_priv->iobase +
+ release_region(nec_priv->iobase +
i * pc2a_reg_offset, 1);
}
if (pc2_priv->clear_intr_addr)
diff --git a/drivers/staging/gpib/tms9914/Makefile b/drivers/staging/gpib/tms9914/Makefile
index 81b7e3cf104c..4705ab07f413 100644
--- a/drivers/staging/gpib/tms9914/Makefile
+++ b/drivers/staging/gpib/tms9914/Makefile
@@ -1,5 +1,5 @@
-obj-m += tms9914.o
+obj-$(CONFIG_GPIB_TMS9914) += tms9914.o
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/staging/gpib/tms9914/tms9914.c
index 152b243b845b..ec8e1d4d762f 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/staging/gpib/tms9914/tms9914.c
@@ -866,14 +866,14 @@ EXPORT_SYMBOL_GPL(tms9914_online);
// wrapper for inb
uint8_t tms9914_ioport_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return inb((unsigned long)(priv->iobase) + register_num * priv->offset);
+ return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_ioport_read_byte);
// wrapper for outb
void tms9914_ioport_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- outb(data, (unsigned long)(priv->iobase) + register_num * priv->offset);
+ outb(data, priv->iobase + register_num * priv->offset);
if (register_num == AUXCR)
udelay(1);
}
@@ -883,14 +883,14 @@ EXPORT_SYMBOL_GPL(tms9914_ioport_write_byte);
// wrapper for readb
uint8_t tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
- return readb(priv->iobase + register_num * priv->offset);
+ return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_iomem_read_byte);
// wrapper for writeb
void tms9914_iomem_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
{
- writeb(data, priv->iobase + register_num * priv->offset);
+ writeb(data, priv->mmiobase + register_num * priv->offset);
if (register_num == AUXCR)
udelay(1);
}
diff --git a/drivers/staging/gpib/tnt4882/Makefile b/drivers/staging/gpib/tnt4882/Makefile
index f767c990db7a..a3c3fb96d5ed 100644
--- a/drivers/staging/gpib/tnt4882/Makefile
+++ b/drivers/staging/gpib/tnt4882/Makefile
@@ -1,5 +1,5 @@
ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
-obj-m += tnt4882.o
+obj-$(CONFIG_GPIB_NI_PCI_ISA) += tnt4882.o
tnt4882-objs := tnt4882_gpib.o mite.o
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/staging/gpib/tnt4882/mite.c
index 0edf34d243e9..4bd352967616 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/staging/gpib/tnt4882/mite.c
@@ -148,72 +148,3 @@ void mite_list_devices(void)
}
pr_info("\n");
}
-
-int mite_bytes_transferred(struct mite_struct *mite, int chan)
-{
- int dar, fcr;
-
- dar = readl(mite->mite_io_addr + MITE_DAR + CHAN_OFFSET(chan));
- fcr = readl(mite->mite_io_addr + MITE_FCR + CHAN_OFFSET(chan)) & 0x000000FF;
- return dar - fcr;
-}
-
-int mite_dma_tcr(struct mite_struct *mite)
-{
- int tcr;
- int lkar;
-
- lkar = readl(mite->mite_io_addr + CHAN_OFFSET(0) + MITE_LKAR);
- tcr = readl(mite->mite_io_addr + CHAN_OFFSET(0) + MITE_TCR);
- MDPRINTK("lkar=0x%08x tcr=%d\n", lkar, tcr);
-
- return tcr;
-}
-
-void mite_dma_disarm(struct mite_struct *mite)
-{
- int chor;
-
- /* disarm */
- chor = CHOR_ABORT;
- writel(chor, mite->mite_io_addr + CHAN_OFFSET(0) + MITE_CHOR);
-}
-
-void mite_dump_regs(struct mite_struct *mite)
-{
- void *addr = 0;
- unsigned long temp = 0;
-
- pr_info("mite address is =0x%p\n", mite->mite_io_addr);
-
- addr = mite->mite_io_addr + MITE_CHOR + CHAN_OFFSET(0);
- pr_info("mite status[CHOR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHOR_strings,temp);
- addr = mite->mite_io_addr + MITE_CHCR + CHAN_OFFSET(0);
- pr_info("mite status[CHCR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHCR_strings,temp);
- addr = mite->mite_io_addr + MITE_TCR + CHAN_OFFSET(0);
- pr_info("mite status[TCR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_MCR + CHAN_OFFSET(0);
- pr_info("mite status[MCR] at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_MCR_strings,temp);
- addr = mite->mite_io_addr + MITE_MAR + CHAN_OFFSET(0);
- pr_info("mite status[MAR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_DCR + CHAN_OFFSET(0);
- pr_info("mite status[DCR] at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CR_strings,temp);
- addr = mite->mite_io_addr + MITE_DAR + CHAN_OFFSET(0);
- pr_info("mite status[DAR] at 0x%p =0x%08x\n", addr, readl(addr));
- addr = mite->mite_io_addr + MITE_LKCR + CHAN_OFFSET(0);
- pr_info("mite status[LKCR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CR_strings,temp);
- addr = mite->mite_io_addr + MITE_LKAR + CHAN_OFFSET(0);
- pr_info("mite status[LKAR]at 0x%p =0x%08x\n", addr, readl(addr));
-
- addr = mite->mite_io_addr + MITE_CHSR + CHAN_OFFSET(0);
- pr_info("mite status[CHSR]at 0x%p =0x%08lx\n", addr, temp = readl(addr));
- //mite_decode(mite_CHSR_strings,temp);
- addr = mite->mite_io_addr + MITE_FCR + CHAN_OFFSET(0);
- pr_info("mite status[FCR] at 0x%p =0x%08x\n\n", addr, readl(addr));
-}
-
diff --git a/drivers/staging/gpib/tnt4882/mite.h b/drivers/staging/gpib/tnt4882/mite.h
index 6454d069b8cc..edb873435b51 100644
--- a/drivers/staging/gpib/tnt4882/mite.h
+++ b/drivers/staging/gpib/tnt4882/mite.h
@@ -34,9 +34,9 @@ struct mite_struct {
struct pci_dev *pcidev;
unsigned long mite_phys_addr;
- void *mite_io_addr;
+ void __iomem *mite_io_addr;
unsigned long daq_phys_addr;
- void *daq_io_addr;
+ void __iomem *daq_io_addr;
int DMA_CheckNearEnd;
@@ -61,15 +61,6 @@ int mite_setup(struct mite_struct *mite);
void mite_unsetup(struct mite_struct *mite);
void mite_list_devices(void);
-int mite_dma_tcr(struct mite_struct *mite);
-
-void mite_dma_arm(struct mite_struct *mite);
-void mite_dma_disarm(struct mite_struct *mite);
-
-void mite_dump_regs(struct mite_struct *mite);
-void mite_setregs(struct mite_struct *mite, unsigned long ll_start, int chan, int dir);
-int mite_bytes_transferred(struct mite_struct *mite, int chan);
-
#define CHAN_OFFSET(x) (0x100 * (x))
/* DMA base for chan 0 is 0x500, chan 1 is 0x600 */
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
index e49a952fa0d8..ce91c3eb768c 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
@@ -45,10 +45,6 @@ struct tnt4882_priv {
unsigned short imr0_bits;
unsigned short imr3_bits;
unsigned short auxg_bits; // bits written to auxiliary register G
- void (*io_writeb)(unsigned int value, void *address);
- void (*io_writew)(unsigned int value, void *address);
- unsigned int (*io_readb)(void *address);
- unsigned int (*io_readw)(void *address);
};
// interface functions
@@ -104,23 +100,23 @@ static const int atgpib_iosize = 32;
/* paged io */
static inline unsigned int tnt_paged_readb(struct tnt4882_priv *priv, unsigned long offset)
{
- priv->io_writeb(AUX_PAGEIN, priv->nec7210_priv.iobase + AUXMR * priv->nec7210_priv.offset);
+ iowrite8(AUX_PAGEIN, priv->nec7210_priv.mmiobase + AUXMR * priv->nec7210_priv.offset);
udelay(1);
- return priv->io_readb(priv->nec7210_priv.iobase + offset);
+ return ioread8(priv->nec7210_priv.mmiobase + offset);
}
static inline void tnt_paged_writeb(struct tnt4882_priv *priv, unsigned int value,
unsigned long offset)
{
- priv->io_writeb(AUX_PAGEIN, priv->nec7210_priv.iobase + AUXMR * priv->nec7210_priv.offset);
+ iowrite8(AUX_PAGEIN, priv->nec7210_priv.mmiobase + AUXMR * priv->nec7210_priv.offset);
udelay(1);
- priv->io_writeb(value, priv->nec7210_priv.iobase + offset);
+ iowrite8(value, priv->nec7210_priv.mmiobase + offset);
}
/* readb/writeb wrappers */
static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long offset)
{
- void *address = priv->nec7210_priv.iobase + offset;
+ void __iomem *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
unsigned short retval;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
@@ -134,7 +130,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
switch (priv->nec7210_priv.type) {
case TNT4882:
case TNT5004:
- retval = priv->io_readb(address);
+ retval = ioread8(address);
break;
case NAT4882:
retval = tnt_paged_readb(priv, offset - tnt_pagein_offset);
@@ -149,7 +145,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
}
break;
default:
- retval = priv->io_readb(address);
+ retval = ioread8(address);
break;
}
spin_unlock_irqrestore(register_lock, flags);
@@ -158,7 +154,7 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, unsigned long offset)
{
- void *address = priv->nec7210_priv.iobase + offset;
+ void __iomem *address = priv->nec7210_priv.mmiobase + offset;
unsigned long flags;
spinlock_t *register_lock = &priv->nec7210_priv.register_page_lock;
@@ -170,7 +166,7 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
switch (priv->nec7210_priv.type) {
case TNT4882:
case TNT5004:
- priv->io_writeb(value, address);
+ iowrite8(value, address);
break;
case NAT4882:
tnt_paged_writeb(priv, value, offset - tnt_pagein_offset);
@@ -183,7 +179,7 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
}
break;
default:
- priv->io_writeb(value, address);
+ iowrite8(value, address);
break;
}
spin_unlock_irqrestore(register_lock, flags);
@@ -288,7 +284,7 @@ static int drain_fifo_words(struct tnt4882_priv *tnt_priv, uint8_t *buffer, int
while (fifo_word_available(tnt_priv) && count + 2 <= num_bytes) {
short word;
- word = tnt_priv->io_readw(nec_priv->iobase + FIFOB);
+ word = ioread16(nec_priv->mmiobase + FIFOB);
buffer[count++] = word & 0xff;
buffer[count++] = (word >> 8) & 0xff;
}
@@ -573,7 +569,7 @@ static int generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
word = buffer[count++] & 0xff;
if (count < length)
word |= (buffer[count++] << 8) & 0xff00;
- tnt_priv->io_writew(word, nec_priv->iobase + FIFOB);
+ iowrite16(word, nec_priv->mmiobase + FIFOB);
}
// avoid unnecessary HR_NFF interrupts
// tnt_priv->imr3_bits |= HR_NFF;
@@ -1269,10 +1265,6 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = writeb_wrapper;
- tnt_priv->io_readb = readb_wrapper;
- tnt_priv->io_writew = writew_wrapper;
- tnt_priv->io_readw = readw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
nec_priv->read_byte = nec7210_locking_iomem_read_byte;
@@ -1324,7 +1316,7 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
}
- nec_priv->iobase = tnt_priv->mite->daq_io_addr;
+ nec_priv->mmiobase = tnt_priv->mite->daq_io_addr;
// get irq
if (request_irq(mite_irq(tnt_priv->mite), tnt4882_interrupt, isr_flags,
@@ -1359,7 +1351,7 @@ void ni_pci_detach(gpib_board_t *board)
if (tnt_priv) {
nec_priv = &tnt_priv->nec7210_priv;
- if (nec_priv->iobase)
+ if (nec_priv->mmiobase)
tnt4882_board_reset(tnt_priv, board);
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
@@ -1400,7 +1392,7 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
int isr_flags = 0;
- void *iobase;
+ u32 iobase;
int irq;
board->status = 0;
@@ -1408,10 +1400,6 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = outb_wrapper;
- tnt_priv->io_readb = inb_wrapper;
- tnt_priv->io_writew = outw_wrapper;
- tnt_priv->io_readw = inw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = chipset;
nec_priv->read_byte = nec7210_locking_ioport_read_byte;
@@ -1427,18 +1415,20 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
if (retval < 0)
return retval;
tnt_priv->pnp_dev = dev;
- iobase = (void *)(pnp_port_start(dev, 0));
+ iobase = pnp_port_start(dev, 0);
irq = pnp_irq(dev, 0);
} else {
iobase = config->ibbase;
irq = config->ibirq;
}
// allocate ioports
- if (!request_region((unsigned long)(iobase), atgpib_iosize, "atgpib")) {
+ if (!request_region(iobase, atgpib_iosize, "atgpib")) {
pr_err("tnt4882: failed to allocate ioports\n");
return -1;
}
- nec_priv->iobase = iobase;
+ nec_priv->iobase = iobase;
+ nec_priv->mmiobase = ioport_map(iobase, atgpib_iosize);
+ if (!nec_priv->mmiobase)
+ return -1;
// get irq
if (request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board)) {
@@ -1478,8 +1468,10 @@ void ni_isa_detach(gpib_board_t *board)
tnt4882_board_reset(tnt_priv, board);
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
+ if (nec_priv->mmiobase)
+ ioport_unmap(nec_priv->mmiobase);
if (nec_priv->iobase)
- release_region((unsigned long)(nec_priv->iobase), atgpib_iosize);
+ release_region(nec_priv->iobase, atgpib_iosize);
if (tnt_priv->pnp_dev)
pnp_device_detach(tnt_priv->pnp_dev);
}
@@ -1817,10 +1809,6 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
tnt_priv = board->private_data;
- tnt_priv->io_writeb = outb_wrapper;
- tnt_priv->io_readb = inb_wrapper;
- tnt_priv->io_writew = outw_wrapper;
- tnt_priv->io_readw = inw_wrapper;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
nec_priv->read_byte = nec7210_locking_ioport_read_byte;
@@ -1835,7 +1823,10 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
return -EIO;
}
- nec_priv->iobase = (void *)(unsigned long)curr_dev->resource[0]->start;
+ nec_priv->iobase = curr_dev->resource[0]->start;
+ nec_priv->mmiobase = ioport_map(curr_dev->resource[0]->start,
+ resource_size(curr_dev->resource[0]));
+ if (!nec_priv->mmiobase)
+ return -1;
// get irq
if (request_irq(curr_dev->irq, tnt4882_interrupt, isr_flags, "tnt4882", board)) {
@@ -1860,9 +1851,11 @@ void ni_pcmcia_detach(gpib_board_t *board)
nec_priv = &tnt_priv->nec7210_priv;
if (tnt_priv->irq)
free_irq(tnt_priv->irq, board);
+ if (nec_priv->mmiobase)
+ ioport_unmap(nec_priv->mmiobase);
if (nec_priv->iobase) {
tnt4882_board_reset(tnt_priv, board);
- release_region((unsigned long)nec_priv->iobase, pcmcia_gpib_iosize);
+ release_region(nec_priv->iobase, pcmcia_gpib_iosize);
}
}
tnt4882_free_private(board);
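The tnt4882 conversion hinges on ioport_map(): it wraps a claimed I/O-port range in an
__iomem cookie so ioread8()/iowrite8() serve ISA, PCMCIA and PCI boards alike, which is
what lets the per-board io_readb/io_writeb hooks go away. A hedged sketch of that
pattern (the "example" name is hypothetical):

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map_ports(unsigned long base, unsigned int len)
{
	if (!request_region(base, len, "example"))
		return NULL;		/* ports already claimed by someone else */
	return ioport_map(base, len);	/* now usable with ioread8/iowrite8 */
}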
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 492612e8f8ba..140ee4f9c137 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -158,7 +158,7 @@ static int ad9832_write_frequency(struct ad9832_state *st,
static int ad9832_write_phase(struct ad9832_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9832_PHASE_BITS))
+ if (phase >= BIT(AD9832_PHASE_BITS))
return -EINVAL;
st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << CMD_SHIFT) |
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 47e7d7e6d920..6e99e008c5f4 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -131,7 +131,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
static int ad9834_write_phase(struct ad9834_state *st,
unsigned long addr, unsigned long phase)
{
- if (phase > BIT(AD9834_PHASE_BITS))
+ if (phase >= BIT(AD9834_PHASE_BITS))
return -EINVAL;
st->data = cpu_to_be16(addr | phase);
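Both ad9832 and ad9834 get the same off-by-one fix: BIT(n) is the first value that no
longer fits in an n-bit field, so an n-bit phase register accepts 0..BIT(n)-1 and the
guard must use >=, not >. A small illustration, assuming a hypothetical 12-bit field:

#include <linux/bits.h>
#include <linux/errno.h>

#define EXAMPLE_PHASE_BITS	12	/* hypothetical register width */

static int example_check_phase(unsigned long phase)
{
	/* BIT(12) == 4096; the valid values are 0..4095 */
	if (phase >= BIT(EXAMPLE_PHASE_BITS))
		return -EINVAL;
	return 0;
}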
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 287ac5b0495f..f991cf759836 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -823,7 +823,6 @@ static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct request *req)
{
- struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL;
struct page *page;
struct scatterlist *sg;
@@ -871,12 +870,11 @@ new_bio:
(rw) ? "rw" : "r", nr_vecs);
}
- pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
+ pr_debug("PSCSI: Calling bio_add_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
- rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
- bio, page, bytes, off);
+ rc = bio_add_page(bio, page, bytes, off);
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio_segments(bio), nr_vecs);
if (rc != bytes) {
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index fab11b98ca49..5ab4ce4daaeb 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -160,6 +160,7 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
return ERR_PTR(ret);
}
+ of_node_put(sensor_specs.np);
if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
sensor_specs.args[0] : 0)) {
pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 5f9f06911795..68baf75bdadc 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -812,6 +812,9 @@ int serial8250_register_8250_port(const struct uart_8250_port *up)
uart->dl_write = up->dl_write;
if (uart->port.type != PORT_8250_CIR) {
+ if (uart_console_registered(&uart->port))
+ pm_runtime_get_sync(uart->port.dev);
+
if (serial8250_isa_config != NULL)
serial8250_isa_config(0, &uart->port,
&uart->capabilities);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 17f70e4bee43..9c59ec128bb4 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2692,7 +2692,7 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
{
u32 ucr3;
- uart_port_lock(&sport->port);
+ uart_port_lock_irq(&sport->port);
ucr3 = imx_uart_readl(sport, UCR3);
if (on) {
@@ -2714,7 +2714,7 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
imx_uart_writel(sport, ucr1, UCR1);
}
- uart_port_unlock(&sport->port);
+ uart_port_unlock_irq(&sport->port);
}
static int imx_uart_suspend_noirq(struct device *dev)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 7dc254546075..1ec5d8c3aef8 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -1051,14 +1051,14 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (break_state)
stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
else
stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static int stm32_usart_startup(struct uart_port *port)
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 9ffd94ddf8c7..786f20ef2238 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -237,12 +237,6 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
-static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->reinit_notify)
- hba->vops->reinit_notify(hba);
-}
-
static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->mcq_config_resource)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8a01e4393159..3094f3c89e82 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8858,7 +8858,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
ufshcd_device_reset(hba);
ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
- ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
if (ret) {
dev_err(hba->dev, "Host controller enable failed\n");
@@ -10412,7 +10411,6 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
.ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
@@ -10591,14 +10589,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/*
- * Set the default power management level for runtime and system PM.
+ * Set the default power management level for runtime and system PM if
+ * not set by the host controller drivers.
* Default power saving mode is to keep UFS link in Hibern8 state
* and UFS device in sleep state.
*/
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ if (!hba->rpm_lvl)
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ if (!hba->spm_lvl)
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
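These guards make ufshcd_init() treat a non-zero rpm_lvl/spm_lvl as a host-driver preset
and apply the Hibern8 defaults only otherwise; the ufs-qcom change further down relies on
exactly this to force a deeper suspend level on parts without PHY retention. A hedged
sketch of a vendor hook using it (condition and function name hypothetical):

#include <ufs/ufshcd.h>

static int example_vendor_init(struct ufs_hba *hba, bool no_phy_retention)
{
	if (no_phy_retention)
		hba->spm_lvl = UFS_PM_LVL_5;	/* link off + device powerdown */
	return 0;
}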
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 68040b2ab5f8..91e94fe990b4 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -368,6 +368,11 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
if (ret)
return ret;
+ if (phy->power_count) {
+ phy_power_off(phy);
+ phy_exit(phy);
+ }
+
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
@@ -866,6 +871,7 @@ static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
*/
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
+ const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (host->hw_ver.major == 0x2)
@@ -874,9 +880,8 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
if (host->hw_ver.major > 0x3)
hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
- if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc") ||
- of_device_is_compatible(hba->dev->of_node, "qcom,sm8650-ufshc"))
- hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP;
+ if (drvdata && drvdata->quirks)
+ hba->quirks |= drvdata->quirks;
}
static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
@@ -1064,6 +1069,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct ufs_qcom_host *host;
struct ufs_clk_info *clki;
+ const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev);
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
@@ -1143,6 +1149,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
+ if (drvdata && drvdata->no_phy_retention)
+ hba->spm_lvl = UFS_PM_LVL_5;
+
return 0;
out_variant_clear:
@@ -1579,13 +1588,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
}
#endif
-static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- phy_power_off(host->generic_phy);
-}
-
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
{.name = "ufs_mem",},
@@ -1825,7 +1827,6 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
- .reinit_notify = ufs_qcom_reinit_notify,
.mcq_config_resource = ufs_qcom_mcq_config_resource,
.get_hba_mac = ufs_qcom_get_hba_mac,
.op_runtime_config = ufs_qcom_op_runtime_config,
@@ -1868,9 +1869,15 @@ static void ufs_qcom_remove(struct platform_device *pdev)
platform_device_msi_free_irqs_all(hba->dev);
}
+static const struct ufs_qcom_drvdata ufs_qcom_sm8550_drvdata = {
+ .quirks = UFSHCD_QUIRK_BROKEN_LSDBS_CAP,
+ .no_phy_retention = true,
+};
+
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
{ .compatible = "qcom,ufshc" },
- { .compatible = "qcom,sm8550-ufshc" },
+ { .compatible = "qcom,sm8550-ufshc", .data = &ufs_qcom_sm8550_drvdata },
+ { .compatible = "qcom,sm8650-ufshc", .data = &ufs_qcom_sm8550_drvdata },
{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index b9de170983c9..919f53682beb 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -217,6 +217,11 @@ struct ufs_qcom_host {
bool esi_enabled;
};
+struct ufs_qcom_drvdata {
+ enum ufshcd_quirks quirks;
+ bool no_phy_retention;
+};
+
static inline u32
ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
{
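The new struct ufs_qcom_drvdata replaces compatible-string sniffing with per-SoC data
hung off the OF match table, the standard of_device_get_match_data() pattern. A condensed
sketch (vendor names hypothetical, fields as defined above):

#include <linux/of_device.h>

static const struct ufs_qcom_drvdata example_drvdata = {
	.quirks = UFSHCD_QUIRK_BROKEN_LSDBS_CAP,
	.no_phy_retention = true,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-ufshc", .data = &example_drvdata },
	{ }
};

static void example_apply_quirks(struct ufs_hba *hba)
{
	const struct ufs_qcom_drvdata *dd = of_device_get_match_data(hba->dev);

	if (dd && dd->quirks)
		hba->quirks |= dd->quirks;
}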
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index f2801700be8e..1a7fc638213e 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -370,25 +370,29 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->pinctrl = devm_pinctrl_get(dev);
if (PTR_ERR(data->pinctrl) == -ENODEV)
data->pinctrl = NULL;
- else if (IS_ERR(data->pinctrl))
- return dev_err_probe(dev, PTR_ERR(data->pinctrl),
+ else if (IS_ERR(data->pinctrl)) {
+ ret = dev_err_probe(dev, PTR_ERR(data->pinctrl),
"pinctrl get failed\n");
+ goto err_put;
+ }
data->hsic_pad_regulator =
devm_regulator_get_optional(dev, "hsic");
if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
/* no pad regulator is needed */
data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator))
- return dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
+ } else if (IS_ERR(data->hsic_pad_regulator)) {
+ ret = dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator),
"Get HSIC pad regulator error\n");
+ goto err_put;
+ }
if (data->hsic_pad_regulator) {
ret = regulator_enable(data->hsic_pad_regulator);
if (ret) {
dev_err(dev,
"Failed to enable HSIC pad regulator\n");
- return ret;
+ goto err_put;
}
}
}
@@ -402,13 +406,14 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(dev,
"pinctrl_hsic_idle lookup failed, err=%ld\n",
PTR_ERR(pinctrl_hsic_idle));
- return PTR_ERR(pinctrl_hsic_idle);
+ ret = PTR_ERR(pinctrl_hsic_idle);
+ goto err_put;
}
ret = pinctrl_select_state(data->pinctrl, pinctrl_hsic_idle);
if (ret) {
dev_err(dev, "hsic_idle select failed, err=%d\n", ret);
- return ret;
+ goto err_put;
}
data->pinctrl_hsic_active = pinctrl_lookup_state(data->pinctrl,
@@ -417,7 +422,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(dev,
"pinctrl_hsic_active lookup failed, err=%ld\n",
PTR_ERR(data->pinctrl_hsic_active));
- return PTR_ERR(data->pinctrl_hsic_active);
+ ret = PTR_ERR(data->pinctrl_hsic_active);
+ goto err_put;
}
}
@@ -527,6 +533,8 @@ disable_hsic_regulator:
if (pdata.flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
data->ci_pdev = NULL;
+err_put:
+ put_device(data->usbmisc_data->dev);
return ret;
}
@@ -551,6 +559,7 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
if (data->hsic_pad_regulator)
regulator_disable(data->hsic_pad_regulator);
}
+ put_device(data->usbmisc_data->dev);
}
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 5a2e43331064..ff1a941fd2ed 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1337,11 +1337,12 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+
/* Don't unnecessarily set the interface if there's a single alt. */
if (usblp->intf->num_altsetting > 1) {
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
if (r < 0) {
printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4b93c0bd1d4b..21ac9b464696 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2663,13 +2663,13 @@ int usb_new_device(struct usb_device *udev)
err = sysfs_create_link(&udev->dev.kobj,
&port_dev->dev.kobj, "port");
if (err)
- goto fail;
+ goto out_del_dev;
err = sysfs_create_link(&port_dev->dev.kobj,
&udev->dev.kobj, "device");
if (err) {
sysfs_remove_link(&udev->dev.kobj, "port");
- goto fail;
+ goto out_del_dev;
}
if (!test_and_set_bit(port1, hub->child_usage_bits))
@@ -2683,6 +2683,8 @@ int usb_new_device(struct usb_device *udev)
pm_runtime_put_sync_autosuspend(&udev->dev);
return err;
+out_del_dev:
+ device_del(&udev->dev);
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
pm_runtime_disable(&udev->dev);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 45d7af00f8d1..e857e532b35a 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -453,10 +453,11 @@ static int usb_port_runtime_suspend(struct device *dev)
static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *udev = port_dev->child;
- if (port_dev->child) {
- usb_disable_usb2_hardware_lpm(port_dev->child);
- usb_unlocked_disable_lpm(port_dev->child);
+ if (udev && !udev->port_is_suspended) {
+ usb_disable_usb2_hardware_lpm(udev);
+ usb_unlocked_disable_lpm(udev);
}
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ee73789326bc..f11570c8ffd0 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -464,6 +464,7 @@
#define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6))
/* These apply for core versions 1.94a and later */
+#define DWC3_DCTL_NYET_THRES_MASK (0xf << 20)
#define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20)
#define DWC3_DCTL_KEEP_CONNECT BIT(19)
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 5e3d1741701f..7d43da5f2897 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -309,6 +309,7 @@ static void dwc3_ti_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_set_suspended(dev);
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 83dc7304d701..31a654c6f15b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4195,8 +4195,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
"LPM Erratum not available on dwc3 revisions < 2.40a\n");
- if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
+ if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A)) {
+ reg &= ~DWC3_DCTL_NYET_THRES_MASK;
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
+ }
dwc3_gadget_dctl_write_safe(dwc, reg);
} else {
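The new DWC3_DCTL_NYET_THRES_MASK exists because DCTL is updated read-modify-write:
without clearing bits 23:20 first, whatever threshold the core's reset value left there
gets OR-merged with the requested one. The generic pattern, as a small sketch:

#include <linux/types.h>

#define EXAMPLE_FIELD_MASK	(0xf << 20)
#define EXAMPLE_FIELD(n)	(((n) & 0xf) << 20)

static u32 example_set_field(u32 reg, unsigned int n)
{
	reg &= ~EXAMPLE_FIELD_MASK;	/* clear stale field bits */
	reg |= EXAMPLE_FIELD(n);	/* install the new value */
	return reg;
}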
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 566ff0b1282a..76521555e3c1 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -211,6 +211,8 @@ config USB_F_MIDI
config USB_F_MIDI2
tristate
+ select SND_UMP
+ select SND_UMP_LEGACY_RAWMIDI
config USB_F_HID
tristate
@@ -445,8 +447,6 @@ config USB_CONFIGFS_F_MIDI2
depends on USB_CONFIGFS
depends on SND
select USB_LIBCOMPOSITE
- select SND_UMP
- select SND_UMP_LEGACY_RAWMIDI
select USB_F_MIDI2
help
The MIDI 2.0 function driver provides the generic emulated
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 6499a88d346c..fba2a56dae97 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -827,11 +827,15 @@ static ssize_t gadget_string_s_store(struct config_item *item, const char *page,
{
struct gadget_string *string = to_gadget_string(item);
int size = min(sizeof(string->string), len + 1);
+ ssize_t cpy_len;
if (len > USB_MAX_STRING_LEN)
return -EINVAL;
- return strscpy(string->string, page, size);
+ cpy_len = strscpy(string->string, page, size);
+ if (cpy_len > 0 && string->string[cpy_len - 1] == '\n')
+ string->string[cpy_len - 1] = 0;
+ return len;
}
CONFIGFS_ATTR(gadget_string_, s);
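strscpy() returns the number of characters copied (excluding the NUL), which is what lets
the store hook trim a trailing newline from echo-style writes while still returning len
so configfs sees the whole input consumed. The trimming step in isolation (helper name
hypothetical):

#include <linux/string.h>
#include <linux/types.h>

static void example_trim_newline(char *buf, ssize_t copied)
{
	if (copied > 0 && buf[copied - 1] == '\n')
		buf[copied - 1] = '\0';
}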
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ad79eb0f729b..2dea9e42a0f8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2285,7 +2285,7 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
struct usb_gadget_strings **lang;
int first_id;
- if (WARN_ON(ffs->state != FFS_ACTIVE
+ if ((ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index ce5b77f89190..9b324821c93b 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1185,6 +1185,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->as_in_alt = 0;
}
+ std_ac_if_desc.bNumEndpoints = 0;
if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts)) {
uac2->int_ep = usb_ep_autoconfig(gadget, &fs_ep_int_desc);
if (!uac2->int_ep) {
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 53d9fc41acc5..bc143a86c2dd 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1420,6 +1420,10 @@ void gserial_disconnect(struct gserial *gser)
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
+ /* disable endpoints, aborting down any active I/O */
+ usb_ep_disable(gser->out);
+ usb_ep_disable(gser->in);
+
port->port_usb = NULL;
gser->ioport = NULL;
if (port->port.count > 0) {
@@ -1431,10 +1435,6 @@ void gserial_disconnect(struct gserial *gser)
spin_unlock(&port->port_lock);
spin_unlock_irqrestore(&serial_port_lock, flags);
- /* disable endpoints, aborting down any active I/O */
- usb_ep_disable(gser->out);
- usb_ep_disable(gser->in);
-
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
if (port->port.count == 0)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index e6c9006bd568..db109b570c5c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -290,7 +290,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+ if ((priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) ||
+ (xhci->quirks & XHCI_SKIP_PHY_INIT))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c24101f0a07a..9960ac2b10b7 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -223,6 +223,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ { USB_DEVICE(0x1B93, 0x1013) }, /* Phoenix Contact UPS Device */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 64317b390d22..1e2ae0c6c41c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -621,7 +621,7 @@ static void option_instat_callback(struct urb *urb);
/* MeiG Smart Technology products */
#define MEIGSMART_VENDOR_ID 0x2dee
-/* MeiG Smart SRM825L based on Qualcomm 315 */
+/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
#define MEIGSMART_PRODUCT_SRM825L 0x4d22
/* MeiG Smart SLM320 based on UNISOC UIS8910 */
#define MEIGSMART_PRODUCT_SLM320 0x4d41
@@ -2405,6 +2405,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
@@ -2412,6 +2413,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(1) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */
.driver_info = NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2949, 0x8700, 0xff) }, /* Neoway N723-EA */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 8c8b5e6041cc..dc98ceecb724 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -592,12 +592,9 @@ static ssize_t max_sectors_store(struct device *dev, struct device_attribute *at
if (sscanf(buf, "%hu", &ms) <= 0)
return -EINVAL;
- blk_mq_freeze_queue(sdev->request_queue);
lim = queue_limits_start_update(sdev->request_queue);
lim.max_hw_sectors = ms;
- ret = queue_limits_commit_update(sdev->request_queue, &lim);
- blk_mq_unfreeze_queue(sdev->request_queue);
-
+ ret = queue_limits_commit_update_frozen(sdev->request_queue, &lim);
if (ret)
return ret;
return count;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index e5ad23d86833..54f0b1c83317 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -255,6 +255,13 @@ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Added by Lubomir Rintel <lkundrak@v3.sk>, a very fine chap */
+UNUSUAL_DEV( 0x0421, 0x06c2, 0x0000, 0x0406,
+ "Nokia",
+ "Nokia 208",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
#ifdef NO_SDDR09
UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
"Microtech",
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
index 22163d8f9eb0..0cdda06592fd 100644
--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -135,7 +135,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
if (mv < 0)
- return ret;
+ return mv;
/* OVP enable */
ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, 0);
@@ -157,7 +157,7 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
if (mv < 0)
- return ret;
+ return mv;
/* Disable current source */
ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, 0);
if (ret < 0)
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index ed32583829be..24a6a4354df8 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -700,7 +700,7 @@ static int tcpci_init(struct tcpc_dev *tcpc)
tcpci->alert_mask = reg;
- return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
+ return 0;
}
irqreturn_t tcpci_irq(struct tcpci *tcpci)
@@ -923,22 +923,27 @@ static int tcpci_probe(struct i2c_client *client)
chip->data.set_orientation = err;
+ chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
+ if (IS_ERR(chip->tcpci))
+ return PTR_ERR(chip->tcpci);
+
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&client->dev), chip);
if (err < 0)
- return err;
+ goto unregister_port;
- /*
- * Disable irq while registering port. If irq is configured as an edge
- * irq this allow to keep track and process the irq as soon as it is enabled.
- */
- disable_irq(client->irq);
- chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
- enable_irq(client->irq);
+ /* Finally, enable chip interrupts */
+ err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, chip->tcpci->alert_mask);
+ if (err < 0)
+ goto unregister_port;
- return PTR_ERR_OR_ZERO(chip->tcpci);
+ return 0;
+
+unregister_port:
+ tcpci_unregister_port(chip->tcpci);
+ return err;
}
static void tcpci_remove(struct i2c_client *client)
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index fcb8e61136cf..740171f24ef9 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -646,7 +646,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
UCSI_CMD_CONNECTOR_MASK;
if (con_index == 0) {
ret = -EINVAL;
- goto unlock;
+ goto err_put;
}
con = &uc->ucsi->connector[con_index - 1];
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
@@ -654,8 +654,8 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
ret = ucsi_sync_control_common(ucsi, command);
+err_put:
pm_runtime_put_sync(uc->dev);
-unlock:
mutex_unlock(&uc->lock);
return ret;
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 90948cd6d297..fed39d458090 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -185,6 +185,11 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
int orientation;
+ if (!UCSI_CONSTAT(con, CONNECTED)) {
+ typec_set_orientation(con->port, TYPEC_ORIENTATION_NONE);
+ return;
+ }
+
if (con->num > PMIC_GLINK_MAX_PORTS ||
!ucsi->port_orientation[con->num - 1])
return;
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1ab58da9f38a..1a4ed5a357d3 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1661,14 +1661,15 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
vm_fault_t ret = VM_FAULT_SIGBUS;
- if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+ pfn = vma_to_pfn(vma) + pgoff;
+
+ if (order && (pfn & ((1 << order) - 1) ||
+ vmf->address & ((PAGE_SIZE << order) - 1) ||
vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
ret = VM_FAULT_FALLBACK;
goto out;
}
- pfn = vma_to_pfn(vma);
-
down_read(&vdev->memory_lock);
if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
@@ -1676,18 +1677,18 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
switch (order) {
case 0:
- ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
break;
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
case PMD_ORDER:
- ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
- PFN_DEV), false);
+ ret = vmf_insert_pfn_pmd(vmf,
+ __pfn_to_pfn_t(pfn, PFN_DEV), false);
break;
#endif
#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
case PUD_ORDER:
- ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
- PFN_DEV), false);
+ ret = vmf_insert_pfn_pud(vmf,
+ __pfn_to_pfn_t(pfn, PFN_DEV), false);
break;
#endif
default:
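Folding pgoff into pfn before the alignment test matters because a PMD/PUD-sized insert
needs the physical side aligned to the mapping order too, not just the faulting address;
an unaligned pfn now falls back to smaller mappings instead of mapping the wrong range.
The check in isolation (helper name hypothetical):

#include <linux/mm.h>

/* both the PFN and the VA must be order-aligned for a huge insert */
static bool example_can_map_order(unsigned long pfn, unsigned long addr,
				  unsigned int order)
{
	return !(pfn & ((1UL << order) - 1)) &&
	       !(addr & ((PAGE_SIZE << order) - 1));
}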
diff --git a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
index 488153879ec9..87f162736b2e 100644
--- a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
+++ b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
@@ -6,6 +6,7 @@
#include <linux/arm-smccc.h>
#include <linux/cc_platform.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/tsm.h>
@@ -219,6 +220,13 @@ static void __exit arm_cca_guest_exit(void)
}
module_exit(arm_cca_guest_exit);
+/* Provide a modalias so that userspace can autoload this module when RSI is available */
+static const struct platform_device_id arm_cca_match[] __maybe_unused = {
+ { RSI_PDEV_NAME, 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, arm_cca_match);
MODULE_AUTHOR("Sami Mujawar <sami.mujawar@arm.com>");
MODULE_DESCRIPTION("Arm CCA Guest TSM Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
index cc329887bfae..11b153e7454e 100644
--- a/drivers/virt/vboxguest/Kconfig
+++ b/drivers/virt/vboxguest/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VBOXGUEST
tristate "Virtual Box Guest integration support"
- depends on X86 && PCI && INPUT
+ depends on (ARM64 || X86) && PCI && INPUT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b9095751e43b..b10ed9f5b543 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -377,6 +377,24 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}
+/**
+ * virtio_irq_get_affinity - get IRQ affinity mask for device
+ * @_d: pointer to the device structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @_d and @irq_vec.
+ */
+static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
+ unsigned int irq_vec)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+
+ if (!dev->config->get_vq_affinity)
+ return NULL;
+
+ return dev->config->get_vq_affinity(dev, irq_vec);
+}
+
static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
@@ -384,6 +402,7 @@ static const struct bus_type virtio_bus = {
.uevent = virtio_uevent,
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
+ .irq_get_affinity = virtio_irq_get_affinity,
};
int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index d700e0d49bb9..8ad06b54c5ad 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -286,7 +286,7 @@ static int stm32_iwdg_irq_init(struct platform_device *pdev,
if (!wdt->data->has_early_wakeup)
return 0;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
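+ /* The early-wakeup interrupt is optional; continue without it */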
if (irq <= 0)
return 0;
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 3d34dba9bb2d..10aedcd21363 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -81,7 +81,7 @@ static struct attribute *zorro_device_attrs[] = {
};
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
@@ -98,23 +98,23 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}
-static struct bin_attribute zorro_config_attr = {
+static const struct bin_attribute zorro_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO,
},
.size = sizeof(struct ConfigDev),
- .read = zorro_read_config,
+ .read_new = zorro_read_config,
};
-static struct bin_attribute *zorro_device_bin_attrs[] = {
+static const struct bin_attribute *const zorro_device_bin_attrs[] = {
&zorro_config_attr,
NULL
};
static const struct attribute_group zorro_device_attr_group = {
.attrs = zorro_device_attrs,
- .bin_attrs = zorro_device_bin_attrs,
+ .bin_attrs_new = zorro_device_bin_attrs,
};
const struct attribute_group *zorro_device_attribute_groups[] = {