Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_idle.c | 3
-rw-r--r--  drivers/acpi/scan.c | 13
-rw-r--r--  drivers/atm/Kconfig | 54
-rw-r--r--  drivers/atm/Makefile | 3
-rw-r--r--  drivers/atm/firestream.c | 2057
-rw-r--r--  drivers/atm/firestream.h | 502
-rw-r--r--  drivers/atm/horizon.c | 2853
-rw-r--r--  drivers/atm/horizon.h | 492
-rw-r--r--  drivers/atm/nicstarmac.c | 5
-rw-r--r--  drivers/atm/uPD98401.h | 293
-rw-r--r--  drivers/atm/uPD98402.c | 266
-rw-r--r--  drivers/atm/uPD98402.h | 107
-rw-r--r--  drivers/atm/zatm.c | 1652
-rw-r--r--  drivers/atm/zatm.h | 104
-rw-r--r--  drivers/base/dd.c | 1
-rw-r--r--  drivers/block/drbd/drbd_int.h | 8
-rw-r--r--  drivers/block/drbd/drbd_main.c | 7
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 41
-rw-r--r--  drivers/block/drbd/drbd_state.c | 18
-rw-r--r--  drivers/block/drbd/drbd_state_change.h | 8
-rw-r--r--  drivers/block/null_blk/main.c | 2
-rw-r--r--  drivers/cdrom/cdrom.c | 3
-rw-r--r--  drivers/char/random.c | 44
-rw-r--r--  drivers/cxl/pci.c | 1
-rw-r--r--  drivers/dma-buf/Makefile | 1
-rw-r--r--  drivers/dma-buf/dma-fence-array.c | 32
-rw-r--r--  drivers/dma-buf/selftests.h | 1
-rw-r--r--  drivers/dma-buf/st-dma-fence-unwrap.c | 261
-rw-r--r--  drivers/dma-buf/sync_file.c | 141
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 5
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 3
-rw-r--r--  drivers/firmware/arm_scmi/optee.c | 8
-rw-r--r--  drivers/gpio/gpio-sim.c | 4
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 22
-rw-r--r--  drivers/gpio/gpiolib.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 107
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/drm_of.c | 99
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 8
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 80
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 34
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.c | 20
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_panel.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 4
-rw-r--r--  drivers/gpu/ipu-v3/ipu-di.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 33
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-core.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 6
-rw-r--r--  drivers/i2c/i2c-dev.c | 17
-rw-r--r--  drivers/infiniband/core/cm.c | 3
-rw-r--r--  drivers/infiniband/core/device.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_flow_action.c | 383
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 223
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 31
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 5
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 6
-rw-r--r--  drivers/iommu/omap-iommu.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 1
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 28
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 14
-rw-r--r--  drivers/irqchip/irq-gic.c | 6
-rw-r--r--  drivers/irqchip/irq-qcom-mpm.c | 2
-rw-r--r--  drivers/md/dm-integrity.c | 7
-rw-r--r--  drivers/md/dm-ps-historical-service-time.c | 11
-rw-r--r--  drivers/md/dm-zone.c | 49
-rw-r--r--  drivers/md/dm.c | 18
-rw-r--r--  drivers/media/platform/nxp/Kconfig | 1
-rw-r--r--  drivers/media/platform/rockchip/rga/rga.c | 2
-rw-r--r--  drivers/media/tuners/si2157.c | 22
-rw-r--r--  drivers/memory/atmel-ebi.c | 23
-rw-r--r--  drivers/memory/fsl_ifc.c | 3
-rw-r--r--  drivers/memory/renesas-rpc-if.c | 10
-rw-r--r--  drivers/message/fusion/mptbase.c | 4
-rw-r--r--  drivers/misc/habanalabs/common/memory.c | 16
-rw-r--r--  drivers/mmc/core/block.c | 48
-rw-r--r--  drivers/mmc/core/core.c | 5
-rw-r--r--  drivers/mmc/core/mmc_test.c | 3
-rw-r--r--  drivers/mmc/host/mmci_stm32_sdmmc.c | 6
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 10
-rw-r--r--  drivers/net/bonding/bond_main.c | 15
-rw-r--r--  drivers/net/can/Kconfig | 1
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/ctucanfd/Kconfig | 34
-rw-r--r--  drivers/net/can/ctucanfd/Makefile | 10
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd.h | 82
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_base.c | 1490
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_kframe.h | 77
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_kregs.h | 325
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_pci.c | 304
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_platform.c | 132
-rw-r--r--  drivers/net/can/dev/bittiming.c | 2
-rw-r--r--  drivers/net/can/dev/rx-offload.c | 6
-rw-r--r--  drivers/net/can/flexcan/flexcan-core.c | 16
-rw-r--r--  drivers/net/can/m_can/m_can.c | 2
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 2
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 2
-rw-r--r--  drivers/net/can/sja1000/tscan1.c | 7
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 25
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 12
-rw-r--r--  drivers/net/can/ti_hecc.c | 4
-rw-r--r--  drivers/net/can/xilinx_can.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 35
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 131
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 136
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 10
-rw-r--r--  drivers/net/dsa/mt7530.c | 332
-rw-r--r--  drivers/net/dsa/mt7530.h | 26
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 23
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 2
-rw-r--r--  drivers/net/dsa/qca8k.c | 145
-rw-r--r--  drivers/net/dsa/qca8k.h | 12
-rw-r--r--  drivers/net/dsa/realtek/Kconfig | 30
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 1
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 9
-rw-r--r--  drivers/net/eql.c | 3
-rw-r--r--  drivers/net/ethernet/Kconfig | 26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 87
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 144
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 409
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 47
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 32
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 22
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 3
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 141
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 8
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 310
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 54
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/Makefile | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 737
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_config.h | 204
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 245
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 170
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 194
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 299
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c | 463
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 1176
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 357
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 367
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 508
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 199
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 335
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 284
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router.c | 11
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 179
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c | 125
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 156
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c) | 95
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 245
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c) | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 247
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 132
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 390
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 1582
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 622
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 681
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.h | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 311
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_linecards.c | 1373
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 250
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 544
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 282
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 6
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/Makefile | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 842
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_mac.c | 6
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 85
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 121
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_port.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c | 284
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_regs.h | 146
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.c | 4
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 43
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c | 238
-rw-r--r--  drivers/net/ethernet/realtek/atp.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 52
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 24
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 33
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c | 180
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.h | 8
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 38
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 66
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 39
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 223
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 55
-rw-r--r--  drivers/net/ethernet/xscale/ptp_ixp46x.c | 2
-rw-r--r--  drivers/net/geneve.c | 10
-rw-r--r--  drivers/net/hamradio/Kconfig | 34
-rw-r--r--  drivers/net/hamradio/Makefile | 1
-rw-r--r--  drivers/net/hamradio/dmascc.c | 1450
-rw-r--r--  drivers/net/hippi/rrunner.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 69
-rw-r--r--  drivers/net/hyperv/netvsc.c | 8
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c | 95
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 150
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 9
-rw-r--r--  drivers/net/macvlan.c | 8
-rw-r--r--  drivers/net/mdio/fwnode_mdio.c | 5
-rw-r--r--  drivers/net/mdio/mdio-mscc-miim.c | 29
-rw-r--r--  drivers/net/netdevsim/fib.c | 9
-rw-r--r--  drivers/net/phy/microchip_t1.c | 56
-rw-r--r--  drivers/net/phy/phylink.c | 28
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 3
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 8
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/rndis_host.c | 47
-rw-r--r--  drivers/net/usb/sr9800.h | 2
-rw-r--r--  drivers/net/veth.c | 2
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 4
-rw-r--r--  drivers/net/wan/Kconfig | 44
-rw-r--r--  drivers/net/wan/Makefile | 3
-rw-r--r--  drivers/net/wan/cosa.c | 2052
-rw-r--r--  drivers/net/wan/cosa.h | 104
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 336
-rw-r--r--  drivers/net/wan/sealevel.c | 352
-rw-r--r--  drivers/net/wan/z85230.c | 1641
-rw-r--r--  drivers/net/wan/z85230.h | 407
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/sdio.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 33
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 4
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/sdio.c | 2
-rw-r--r--  drivers/net/wwan/wwan_hwsim.c | 22
-rw-r--r--  drivers/nvme/host/core.c | 27
-rw-r--r--  drivers/nvme/host/nvme.h | 5
-rw-r--r--  drivers/nvme/host/pci.c | 9
-rw-r--r--  drivers/perf/Kconfig | 2
-rw-r--r--  drivers/perf/fsl_imx8_ddr_perf.c | 2
-rw-r--r--  drivers/perf/qcom_l2_pmu.c | 6
-rw-r--r--  drivers/platform/x86/acerhdf.c | 21
-rw-r--r--  drivers/platform/x86/amd-pmc.c | 14
-rw-r--r--  drivers/platform/x86/barco-p50-gpio.c | 1
-rw-r--r--  drivers/platform/x86/samsung-laptop.c | 2
-rw-r--r--  drivers/platform/x86/think-lmi.c | 44
-rw-r--r--  drivers/platform/x86/think-lmi.h | 1
-rw-r--r--  drivers/power/supply/power_supply_core.c | 6
-rw-r--r--  drivers/power/supply/samsung-sdi-battery.c | 2
-rw-r--r--  drivers/regulator/atc260x-regulator.c | 1
-rw-r--r--  drivers/regulator/rtq2134-regulator.c | 1
-rw-r--r--  drivers/regulator/wm8994-regulator.c | 42
-rw-r--r--  drivers/reset/reset-rzg2l-usbphy-ctrl.c | 4
-rw-r--r--  drivers/reset/tegra/reset-bpmp.c | 9
-rw-r--r--  drivers/scsi/aha152x.c | 235
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 6
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 2
-rw-r--r--  drivers/scsi/isci/host.c | 6
-rw-r--r--  drivers/scsi/libiscsi.c | 28
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 120
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 88
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 27
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 75
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 3
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 5
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 52
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 33
-rw-r--r--  drivers/scsi/pmcraid.c | 491
-rw-r--r--  drivers/scsi/pmcraid.h | 33
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 69
-rw-r--r--  drivers/scsi/scsi_debug.c | 205
-rw-r--r--  drivers/scsi/scsi_logging.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 5
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 239
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 15
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 17
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r--  drivers/scsi/ufs/ufshpb.c | 11
-rw-r--r--  drivers/scsi/virtio_scsi.c | 8
-rw-r--r--  drivers/scsi/zorro7xx.c | 2
-rw-r--r--  drivers/spi/atmel-quadspi.c | 3
-rw-r--r--  drivers/spi/spi-bcm-qspi.c | 4
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c | 65
-rw-r--r--  drivers/spi/spi-intel-pci.c | 1
-rw-r--r--  drivers/spi/spi-mtk-nor.c | 12
-rw-r--r--  drivers/spi/spi-mxic.c | 1
-rw-r--r--  drivers/spi/spi-rpc-if.c | 8
-rw-r--r--  drivers/spi/spi.c | 7
-rw-r--r--  drivers/staging/r8188eu/core/rtw_br_ext.c | 2
-rw-r--r--  drivers/target/target_core_user.c | 3
-rw-r--r--  drivers/tty/serial/mpc52xx_uart.c | 20
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c | 124
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 9
-rw-r--r--  drivers/xen/balloon.c | 54
-rw-r--r--  drivers/xen/unpopulated-alloc.c | 33
505 files changed, 18551 insertions, 23857 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 32b20efff5f8..4556c86c3465 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -570,8 +570,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
- if (cx->type == ACPI_STATE_C3)
- ACPI_FLUSH_CPU_CACHE();
+ ACPI_FLUSH_CPU_CACHE();
while (1) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 9efbfe087de7..762b61f67e6c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -588,19 +588,6 @@ static struct acpi_device *handle_to_device(acpi_handle handle,
return adev;
}
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
-{
- if (!device)
- return -EINVAL;
-
- *device = handle_to_device(handle, NULL);
- if (!*device)
- return -ENODEV;
-
- return 0;
-}
-EXPORT_SYMBOL(acpi_bus_get_device);
-
/**
* acpi_fetch_acpi_dev - Retrieve ACPI device object.
* @handle: ACPI handle associated with the requested ACPI device object.
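
Callers of the removed acpi_bus_get_device() are expected to move to
acpi_fetch_acpi_dev(), whose kerneldoc begins above. A minimal migration
sketch; the surrounding error handling is hypothetical, only the two helpers
and their signatures come from this diff:

	struct acpi_device *adev;

	/* Old interface (removed by this commit): returned 0 or -errno
	 * and stored the device through an output pointer. */
	if (acpi_bus_get_device(handle, &adev))
		return -ENODEV;

	/* New interface: returns the struct acpi_device directly, or
	 * NULL when no device is associated with the handle. */
	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		return -ENODEV;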
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 7be08e24955c..63cdb46a3439 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -146,36 +146,6 @@ config ATM_ENI_BURST_RX_2W
try this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or
8W are also set may or may not improve throughput.
-config ATM_FIRESTREAM
- tristate "Fujitsu FireStream (FS50/FS155) "
- depends on PCI && VIRT_TO_BUS
- help
- Driver for the Fujitsu FireStream 155 (MB86697) and
- FireStream 50 (MB86695) ATM PCI chips.
-
- To compile this driver as a module, choose M here: the module will
- be called firestream.
-
-config ATM_ZATM
- tristate "ZeitNet ZN1221/ZN1225"
- depends on PCI && VIRT_TO_BUS
- help
- Driver for the ZeitNet ZN1221 (MMF) and ZN1225 (UTP-5) 155 Mbps ATM
- adapters.
-
- To compile this driver as a module, choose M here: the module will
- be called zatm.
-
-config ATM_ZATM_DEBUG
- bool "Enable extended debugging"
- depends on ATM_ZATM
- help
- Extended debugging records various events and displays that list
- when an inconsistency is detected. This mechanism is faster than
- generally using printks, but still has some impact on performance.
- Note that extended debugging may create certain race conditions
- itself. Enable this ONLY if you suspect problems with the driver.
-
config ATM_NICSTAR
tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
depends on PCI
@@ -244,30 +214,6 @@ config ATM_IDT77252_USE_SUNI
depends on ATM_IDT77252
default y
-config ATM_HORIZON
- tristate "Madge Horizon [Ultra] (Collage PCI 25 and Collage PCI 155 Client)"
- depends on PCI && VIRT_TO_BUS
- help
- This is a driver for the Horizon chipset ATM adapter cards once
- produced by Madge Networks Ltd. Say Y (or M to compile as a module
- named horizon) here if you have one of these cards.
-
-config ATM_HORIZON_DEBUG
- bool "Enable debugging messages"
- depends on ATM_HORIZON
- help
- Somewhat useful debugging messages are available. The choice of
- messages is controlled by a bitmap. This may be specified as a
- module argument (kernel command line argument as well?), changed
- dynamically using an ioctl (not yet) or changed by sending the
- string "Dxxxx" to VCI 1023 (where x is a hex digit). See the file
- <file:drivers/atm/horizon.h> for the meanings of the bits in the
- mask.
-
- When active, these messages can have a significant impact on the
- speed of the driver, and the size of your syslog files! When
- inactive, they will have only a modest impact on performance.
-
config ATM_IA
tristate "Interphase ATM PCI x575/x525/x531"
depends on PCI
diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile
index 99ecbc280643..c9eade92019b 100644
--- a/drivers/atm/Makefile
+++ b/drivers/atm/Makefile
@@ -5,9 +5,7 @@
fore_200e-y := fore200e.o
-obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o
obj-$(CONFIG_ATM_NICSTAR) += nicstar.o
-obj-$(CONFIG_ATM_HORIZON) += horizon.o
obj-$(CONFIG_ATM_IA) += iphase.o suni.o
obj-$(CONFIG_ATM_FORE200E) += fore_200e.o
obj-$(CONFIG_ATM_ENI) += eni.o suni.o
@@ -26,7 +24,6 @@ endif
obj-$(CONFIG_ATM_DUMMY) += adummy.o
obj-$(CONFIG_ATM_TCP) += atmtcp.o
-obj-$(CONFIG_ATM_FIRESTREAM) += firestream.o
obj-$(CONFIG_ATM_LANAI) += lanai.o
obj-$(CONFIG_ATM_HE) += he.o
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
deleted file mode 100644
index 4f67404fe64c..000000000000
--- a/drivers/atm/firestream.c
+++ /dev/null
@@ -1,2057 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-/* drivers/atm/firestream.c - FireStream 155 (MB86697) and
- * FireStream 50 (MB86695) device driver
- */
-
-/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl
- * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA
- * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd
- */
-
-/*
-*/
-
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/poison.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/ioport.h> /* for request_region */
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/wait.h>
-
-#include "firestream.h"
-
-static int loopback = 0;
-static int num=0x5a;
-
-/* According to measurements (but they look suspicious to me!) done in
- * '97, 37% of the packets are one cell in size. So it pays to have
- * buffers allocated at that size. A large jump in percentage of
- * packets occurs at packets around 536 bytes in length. So it also
- * pays to have those pre-allocated. Unfortunately, we can't fully
- * take advantage of this as the majority of the packets is likely to
- * be TCP/IP (which is obviously where the measurement comes from). There
- * the link would be opened with, say, a 1500 byte MTU, and we can't handle
- * smaller buffers more efficiently than the larger ones. -- REW
- */
-
-/* Due to the way Linux memory management works, specifying "576" as
- * an allocation size here isn't going to help. They are allocated
- * from 1024-byte regions anyway. With the size of the sk_buffs (quite
- * large), it doesn't pay to allocate the smallest size (64) -- REW */
-
-/* This is all guesswork. Hard numbers to back this up or disprove this,
- * are appreciated. -- REW */
-
-/* The last entry should be about 64k. However, the "buffer size" is
- * passed to the chip in a 16 bit field. I don't know how "65536"
- * would be interpreted. -- REW */
-
-#define NP FS_NR_FREE_POOLS
-static int rx_buf_sizes[NP] = {128, 256, 512, 1024, 2048, 4096, 16384, 65520};
-/* log2: 7 8 9 10 11 12 14 16 */
-
-#if 0
-static int rx_pool_sizes[NP] = {1024, 1024, 512, 256, 128, 64, 32, 32};
-#else
-/* debug */
-static int rx_pool_sizes[NP] = {128, 128, 128, 64, 64, 64, 32, 32};
-#endif
-/* log2: 10 10 9 8 7 6 5 5 */
-/* sumlog2: 17 18 18 18 18 18 19 21 */
-/* mem allocated: 128k 256k 256k 256k 256k 256k 512k 2M */
-/* tot mem: almost 4M */
-
-/* NP is shorter, so that it fits on a single line. */
-#undef NP
-
-
-/* Small hardware gotcha:
-
- The FS50 CAM (VP/VC match registers) always takes the lowest channel
- number that matches. This is not a problem.
-
- However, they also ignore whether the channel is enabled or
- not. This means that if you allocate channel 0 to 1.2 and then
- channel 1 to 0.0, then disabling channel 0 and writing 0 to the
- match channel for channel 0 will "steal" the traffic from channel
- 1, even if you correctly disable channel 0.
-
- Workaround:
-
- - When disabling channels, write an invalid VP/VC value to the
- match register. (We use 0xffffffff, which in the worst case
- matches VP/VC = <maxVP>/<maxVC>, but I expect it not to match
- anything as some "when not in use, program to 0" bits are now
- programmed to 1...)
-
- - Don't initialize the match registers to 0, as 0.0 is a valid
- channel.
-*/
-
-
-/* Optimization hints and tips.
-
- The FireStream chips are very capable of reducing the amount of
- "interrupt-traffic" for the CPU. This driver requests an interrupt on EVERY
- action. You could try to minimize this a bit.
-
- Besides that, the userspace->kernel copy and the PCI bus are the
- performance limiting issues for this driver.
-
- You could queue up a bunch of outgoing packets without telling the
- FireStream. I'm not sure that's going to win you much though. The
- Linux layer won't tell us in advance when it's not going to give us
- any more packets in a while. So this is tricky to implement right without
- introducing extra delays.
-
- -- REW
- */
-
-
-
-
-/* The strings that define what the RX queue entry is all about. */
-/* Fujitsu: Please tell me which ones can have a pointer to a
- freepool descriptor! */
-static char *res_strings[] = {
- "RX OK: streaming not EOP",
- "RX OK: streaming EOP",
- "RX OK: Single buffer packet",
- "RX OK: packet mode",
- "RX OK: F4 OAM (end to end)",
- "RX OK: F4 OAM (Segment)",
- "RX OK: F5 OAM (end to end)",
- "RX OK: F5 OAM (Segment)",
- "RX OK: RM cell",
- "RX OK: TRANSP cell",
- "RX OK: TRANSPC cell",
- "Unmatched cell",
- "reserved 12",
- "reserved 13",
- "reserved 14",
- "Unrecognized cell",
- "reserved 16",
- "reassembly abort: AAL5 abort",
- "packet purged",
- "packet ageing timeout",
- "channel ageing timeout",
- "calculated length error",
- "programmed length limit error",
- "aal5 crc32 error",
- "oam transp or transpc crc10 error",
- "reserved 25",
- "reserved 26",
- "reserved 27",
- "reserved 28",
- "reserved 29",
- "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
- "reassembly abort: no buffers",
- "receive buffer overflow",
- "change in GFC",
- "receive buffer full",
- "low priority discard - no receive descriptor",
- "low priority discard - missing end of packet",
- "reserved 37",
- "reserved 38",
- "reserved 39",
- "reserved 40",
- "reserved 41",
- "reserved 42",
- "reserved 43",
- "reserved 44",
- "reserved 45",
- "reserved 46",
- "reserved 47",
- "reserved 48",
- "reserved 49",
- "reserved 50",
- "reserved 51",
- "reserved 52",
- "reserved 53",
- "reserved 54",
- "reserved 55",
- "reserved 56",
- "reserved 57",
- "reserved 58",
- "reserved 59",
- "reserved 60",
- "reserved 61",
- "reserved 62",
- "reserved 63",
-};
-
-static char *irq_bitname[] = {
- "LPCO",
- "DPCO",
- "RBRQ0_W",
- "RBRQ1_W",
- "RBRQ2_W",
- "RBRQ3_W",
- "RBRQ0_NF",
- "RBRQ1_NF",
- "RBRQ2_NF",
- "RBRQ3_NF",
- "BFP_SC",
- "INIT",
- "INIT_ERR",
- "USCEO",
- "UPEC0",
- "VPFCO",
- "CRCCO",
- "HECO",
- "TBRQ_W",
- "TBRQ_NF",
- "CTPQ_E",
- "GFC_C0",
- "PCI_FTL",
- "CSQ_W",
- "CSQ_NF",
- "EXT_INT",
- "RXDMA_S"
-};
-
-
-#define PHY_EOF -1
-#define PHY_CLEARALL -2
-
-struct reginit_item {
- int reg, val;
-};
-
-
-static struct reginit_item PHY_NTC_INIT[] = {
- { PHY_CLEARALL, 0x40 },
- { 0x12, 0x0001 },
- { 0x13, 0x7605 },
- { 0x1A, 0x0001 },
- { 0x1B, 0x0005 },
- { 0x38, 0x0003 },
- { 0x39, 0x0006 }, /* changed here to make loopback */
- { 0x01, 0x5262 },
- { 0x15, 0x0213 },
- { 0x00, 0x0003 },
- { PHY_EOF, 0}, /* -1 signals end of list */
-};
-
-
-/* Safety feature: If the card interrupts more than this number of times
- in a jiffy (1/100th of a second) then we just disable the interrupt and
- print a message. This prevents the system from hanging.
-
- 150000 packets per second is close to the limit a PC is going to have
- anyway. We therefore have to disable this for production. -- REW */
-#undef IRQ_RATE_LIMIT // 100
-
-/* Interrupts work now. Unlike serial cards, ATM cards don't work all
- that great without interrupts. -- REW */
-#undef FS_POLL_FREQ // 100
-
-/*
- This driver can spew a whole lot of debugging output at you. If you
- need maximum performance, you should disable the DEBUG define. To
- aid in debugging in the field, I'm leaving the compile-time debug
- features enabled, and disable them "runtime". That allows me to
- instruct people with problems to enable debugging without requiring
- them to recompile... -- REW
-*/
-#define DEBUG
-
-#ifdef DEBUG
-#define fs_dprintk(f, str...) if (fs_debug & f) printk (str)
-#else
-#define fs_dprintk(f, str...) /* nothing */
-#endif
-
-
-static int fs_keystream = 0;
-
-#ifdef DEBUG
-/* I didn't forget to set this to zero before shipping. Hit me with a stick
- if you get this with the debug default not set to zero again. -- REW */
-static int fs_debug = 0;
-#else
-#define fs_debug 0
-#endif
-
-#ifdef MODULE
-#ifdef DEBUG
-module_param(fs_debug, int, 0644);
-#endif
-module_param(loopback, int, 0);
-module_param(num, int, 0);
-module_param(fs_keystream, int, 0);
-/* XXX Add rx_buf_sizes, and rx_pool_sizes As per request Amar. -- REW */
-#endif
-
-
-#define FS_DEBUG_FLOW 0x00000001
-#define FS_DEBUG_OPEN 0x00000002
-#define FS_DEBUG_QUEUE 0x00000004
-#define FS_DEBUG_IRQ 0x00000008
-#define FS_DEBUG_INIT 0x00000010
-#define FS_DEBUG_SEND 0x00000020
-#define FS_DEBUG_PHY 0x00000040
-#define FS_DEBUG_CLEANUP 0x00000080
-#define FS_DEBUG_QOS 0x00000100
-#define FS_DEBUG_TXQ 0x00000200
-#define FS_DEBUG_ALLOC 0x00000400
-#define FS_DEBUG_TXMEM 0x00000800
-#define FS_DEBUG_QSIZE 0x00001000
-
-
-#define func_enter() fs_dprintk(FS_DEBUG_FLOW, "fs: enter %s\n", __func__)
-#define func_exit() fs_dprintk(FS_DEBUG_FLOW, "fs: exit %s\n", __func__)
-
-
-static struct fs_dev *fs_boards = NULL;
-
-#ifdef DEBUG
-
-static void my_hd (void *addr, int len)
-{
- int j, ch;
- unsigned char *ptr = addr;
-
- while (len > 0) {
- printk ("%p ", ptr);
- for (j=0;j < ((len < 16)?len:16);j++) {
- printk ("%02x %s", ptr[j], (j==7)?" ":"");
- }
- for ( ;j < 16;j++) {
- printk (" %s", (j==7)?" ":"");
- }
- for (j=0;j < ((len < 16)?len:16);j++) {
- ch = ptr[j];
- printk ("%c", (ch < 0x20)?'.':((ch > 0x7f)?'.':ch));
- }
- printk ("\n");
- ptr += 16;
- len -= 16;
- }
-}
-#else /* DEBUG */
-static void my_hd (void *addr, int len){}
-#endif /* DEBUG */
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-/* Hmm. If this is ATM specific, why isn't there an ATM routine for this?
- * I copied it over from the ambassador driver. -- REW */
-
-static inline void fs_kfree_skb (struct sk_buff * skb)
-{
- if (ATM_SKB(skb)->vcc->pop)
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- else
- dev_kfree_skb_any (skb);
-}
-
-
-
-
-/* It seems the ATM forum recommends this horribly complicated 16bit
- * floating point format. Turns out the Ambassador uses the exact same
- * encoding. I just copied it over. If Mitch agrees, I'll move it over
- * to the atm_misc file or something like that. (and remove it from
- * here and the ambassador driver) -- REW
- */
-
-/* The good thing about this format is that it is monotonic. So,
- a conversion routine need not be very complicated. To be able to
- round "nearest" we need to take along a few extra bits. Lets
- put these after 16 bits, so that we can just return the top 16
- bits of the 32bit number as the result:
-
- int mr (unsigned int rate, int r)
- {
- int e = 16+9;
- static int round[4]={0, 0, 0xffff, 0x8000};
- if (!rate) return 0;
- while (rate & 0xfc000000) {
- rate >>= 1;
- e++;
- }
- while (! (rate & 0xfe000000)) {
- rate <<= 1;
- e--;
- }
-
-// Now the mantissa is in bit positions 16-25. Except for the "hidden 1" that's in bit 26.
- rate &= ~0x02000000;
-// Next add in the exponent
- rate |= e << (16+9);
-// And perform the rounding:
- return (rate + round[r]) >> 16;
- }
-
- 14 lines-of-code. Compare that with the 120 that the Ambassador
- guys needed. (would be 8 lines shorter if I'd try to really reduce
- the number of lines:
-
- int mr (unsigned int rate, int r)
- {
- int e = 16+9;
- static int round[4]={0, 0, 0xffff, 0x8000};
- if (!rate) return 0;
- for (; rate & 0xfc000000 ;rate >>= 1, e++);
- for (;!(rate & 0xfe000000);rate <<= 1, e--);
- return ((rate & ~0x02000000) | (e << (16+9)) + round[r]) >> 16;
- }
-
- Exercise for the reader: Remove one more line-of-code, without
- cheating. (Just joining two lines is cheating). (I know it's
- possible, don't think you've beat me if you found it... If you
- manage to lose two lines or more, keep me updated! ;-)
-
- -- REW */
-
-
-#define ROUND_UP 1
-#define ROUND_DOWN 2
-#define ROUND_NEAREST 3
-/********** make rate (not quite as much fun as Horizon) **********/
-
-static int make_rate(unsigned int rate, int r,
- u16 *bits, unsigned int *actual)
-{
- unsigned char exp = -1; /* hush gcc */
- unsigned int man = -1; /* hush gcc */
-
- fs_dprintk (FS_DEBUG_QOS, "make_rate %u", rate);
-
- /* rates in cells per second, ITU format (nasty 16-bit floating-point)
- given 5-bit e and 9-bit m:
- rate = EITHER (1+m/2^9)*2^e OR 0
- bits = EITHER 1<<14 | e<<9 | m OR 0
- (bit 15 is "reserved", bit 14 "non-zero")
- smallest rate is 0 (special representation)
- largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
- smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
- simple algorithm:
- find position of top bit, this gives e
- remove top bit and shift (rounding if feeling clever) by 9-e
- */
- /* Ambassador ucode bug: please don't set bit 14! so 0 rate not
- representable. // This should move into the ambassador driver
- when properly merged. -- REW */
-
- if (rate > 0xffc00000U) {
- /* larger than largest representable rate */
-
- if (r == ROUND_UP) {
- return -EINVAL;
- } else {
- exp = 31;
- man = 511;
- }
-
- } else if (rate) {
- /* representable rate */
-
- exp = 31;
- man = rate;
-
- /* invariant: rate = man*2^(exp-31) */
- while (!(man & (1<<31))) {
- exp = exp - 1;
- man = man<<1;
- }
-
- /* man has top bit set
- rate = (2^31+(man-2^31))*2^(exp-31)
- rate = (1+(man-2^31)/2^31)*2^exp
- */
- man = man<<1;
- man &= 0xffffffffU; /* a nop on 32-bit systems */
- /* rate = (1+man/2^32)*2^exp
-
- exp is in the range 0 to 31, man is in the range 0 to 2^32-1
- time to lose significance... we want m in the range 0 to 2^9-1
- rounding presents a minor problem... we first decide which way
- we are rounding (based on given rounding direction and possibly
- the bits of the mantissa that are to be discarded).
- */
-
- switch (r) {
- case ROUND_DOWN: {
- /* just truncate */
- man = man>>(32-9);
- break;
- }
- case ROUND_UP: {
- /* check all bits that we are discarding */
- if (man & (~0U>>9)) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- /* no need to check for round up outside of range */
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- case ROUND_NEAREST: {
- /* check msb that we are discarding */
- if (man & (1<<(32-9-1))) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- /* no need to check for round up outside of range */
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- }
-
- } else {
- /* zero rate - not representable */
-
- if (r == ROUND_DOWN) {
- return -EINVAL;
- } else {
- exp = 0;
- man = 0;
- }
- }
-
- fs_dprintk (FS_DEBUG_QOS, "rate: man=%u, exp=%hu", man, exp);
-
- if (bits)
- *bits = /* (1<<14) | */ (exp<<9) | man;
-
- if (actual)
- *actual = (exp >= 9)
- ? (1 << exp) + (man << (exp-9))
- : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
-
- return 0;
-}
-
-
-
-
-/* FireStream access routines */
-/* For DEEP-DOWN debugging these can be rigged to intercept accesses to
- certain registers or to just log all accesses. */
-
-static inline void write_fs (struct fs_dev *dev, int offset, u32 val)
-{
- writel (val, dev->base + offset);
-}
-
-
-static inline u32 read_fs (struct fs_dev *dev, int offset)
-{
- return readl (dev->base + offset);
-}
-
-
-
-static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q)
-{
- return bus_to_virt (read_fs (dev, Q_WP(q->offset)) & Q_ADDR_MASK);
-}
-
-
-static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe)
-{
- u32 wp;
- struct FS_QENTRY *cqe;
-
- /* XXX Sanity check: the write pointer can be checked to be
- still the same as the value passed as qe... -- REW */
- /* udelay (5); */
- while ((wp = read_fs (dev, Q_WP (q->offset))) & Q_FULL) {
- fs_dprintk (FS_DEBUG_TXQ, "Found queue at %x full. Waiting.\n",
- q->offset);
- schedule ();
- }
-
- wp &= ~0xf;
- cqe = bus_to_virt (wp);
- if (qe != cqe) {
- fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe);
- }
-
- write_fs (dev, Q_WP(q->offset), Q_INCWRAP);
-
- {
- static int c;
- if (!(c++ % 100))
- {
- int rp, wp;
- rp = read_fs (dev, Q_RP(q->offset));
- wp = read_fs (dev, Q_WP(q->offset));
- fs_dprintk (FS_DEBUG_TXQ, "q at %d: %x-%x: %x entries.\n",
- q->offset, rp, wp, wp-rp);
- }
- }
-}
-
-#ifdef DEBUG_EXTRA
-static struct FS_QENTRY pq[60];
-static int qp;
-
-static struct FS_BPENTRY dq[60];
-static int qd;
-static void *da[60];
-#endif
-
-static void submit_queue (struct fs_dev *dev, struct queue *q,
- u32 cmd, u32 p1, u32 p2, u32 p3)
-{
- struct FS_QENTRY *qe;
-
- qe = get_qentry (dev, q);
- qe->cmd = cmd;
- qe->p0 = p1;
- qe->p1 = p2;
- qe->p2 = p3;
- submit_qentry (dev, q, qe);
-
-#ifdef DEBUG_EXTRA
- pq[qp].cmd = cmd;
- pq[qp].p0 = p1;
- pq[qp].p1 = p2;
- pq[qp].p2 = p3;
- qp++;
- if (qp >= 60) qp = 0;
-#endif
-}
-
-/* Test the "other" way one day... -- REW */
-#if 1
-#define submit_command submit_queue
-#else
-
-static void submit_command (struct fs_dev *dev, struct queue *q,
- u32 cmd, u32 p1, u32 p2, u32 p3)
-{
- write_fs (dev, CMDR0, cmd);
- write_fs (dev, CMDR1, p1);
- write_fs (dev, CMDR2, p2);
- write_fs (dev, CMDR3, p3);
-}
-#endif
-
-
-
-static void process_return_queue (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- struct FS_QENTRY *qe;
- void *tc;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping return queue entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. (%d)\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
- switch (STATUS_CODE (qe)) {
- case 5:
- tc = bus_to_virt (qe->p0);
- fs_dprintk (FS_DEBUG_ALLOC, "Free tc: %p\n", tc);
- kfree (tc);
- break;
- }
-
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- long tmp;
- struct FS_QENTRY *qe;
- struct sk_buff *skb;
- struct FS_BPENTRY *td;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping txdone entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x: %d\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
- if (STATUS_CODE (qe) != 2)
- fs_dprintk (FS_DEBUG_TXMEM, "queue entry: %08x %08x %08x %08x: %d\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
-
- switch (STATUS_CODE (qe)) {
- case 0x01: /* This is for AAL0 where we put the chip in streaming mode */
- fallthrough;
- case 0x02:
- /* Process a real txdone entry. */
- tmp = qe->p0;
- if (tmp & 0x0f)
- printk (KERN_WARNING "td not aligned: %ld\n", tmp);
- tmp &= ~0x0f;
- td = bus_to_virt (tmp);
-
- fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p.\n",
- td->flags, td->next, td->bsa, td->aal_bufsize, td->skb );
-
- skb = td->skb;
- if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
- FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
- wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
- }
- td->dev->ntxpckts--;
-
- {
- static int c=0;
-
- if (!(c++ % 100)) {
- fs_dprintk (FS_DEBUG_QSIZE, "[%d]", td->dev->ntxpckts);
- }
- }
-
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- fs_dprintk (FS_DEBUG_TXMEM, "i");
- fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
- fs_kfree_skb (skb);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td);
- memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY));
- kfree (td);
- break;
- default:
- /* Here we get the tx purge inhibit command ... */
- /* Action, I believe, is "don't do anything". -- REW */
- ;
- }
-
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-static void process_incoming (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- struct FS_QENTRY *qe;
- struct FS_BPENTRY *pe;
- struct sk_buff *skb;
- unsigned int channo;
- struct atm_vcc *atm_vcc;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping incoming queue entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. ",
- qe->cmd, qe->p0, qe->p1, qe->p2);
-
- fs_dprintk (FS_DEBUG_QUEUE, "-> %x: %s\n",
- STATUS_CODE (qe),
- res_strings[STATUS_CODE(qe)]);
-
- pe = bus_to_virt (qe->p0);
- fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p %p.\n",
- pe->flags, pe->next, pe->bsa, pe->aal_bufsize,
- pe->skb, pe->fp);
-
- channo = qe->cmd & 0xffff;
-
- if (channo < dev->nchannels)
- atm_vcc = dev->atm_vccs[channo];
- else
- atm_vcc = NULL;
-
- /* Single buffer packet */
- switch (STATUS_CODE (qe)) {
- case 0x1:
- /* Fall through for streaming mode */
- fallthrough;
- case 0x2:/* Packet received OK.... */
- if (atm_vcc) {
- skb = pe->skb;
- pe->fp->n--;
-#if 0
- fs_dprintk (FS_DEBUG_QUEUE, "Got skb: %p\n", skb);
- if (FS_DEBUG_QUEUE & fs_debug) my_hd (bus_to_virt (pe->bsa), 0x20);
-#endif
- skb_put (skb, qe->p1 & 0xffff);
- ATM_SKB(skb)->vcc = atm_vcc;
- atomic_inc(&atm_vcc->stats->rx);
- __net_timestamp(skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
- atm_vcc->push (atm_vcc, skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
- kfree (pe);
- } else {
- printk (KERN_ERR "Got a receive on a non-open channel %d.\n", channo);
- }
- break;
- case 0x17:/* AAL 5 CRC32 error. IFF the length field is nonzero, a buffer
- has been consumed and needs to be processed. -- REW */
- if (qe->p1 & 0xffff) {
- pe = bus_to_virt (qe->p0);
- pe->fp->n--;
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", pe->skb);
- dev_kfree_skb_any (pe->skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
- kfree (pe);
- }
- if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
- break;
- case 0x1f: /* Reassembly abort: no buffers. */
- /* Silently increment error counter. */
- if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
- break;
- default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
- printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
- STATUS_CODE(qe), res_strings[STATUS_CODE (qe)]);
- }
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-
-#define DO_DIRECTION(tp) ((tp)->traffic_class != ATM_NONE)
-
-static int fs_open(struct atm_vcc *atm_vcc)
-{
- struct fs_dev *dev;
- struct fs_vcc *vcc;
- struct fs_transmit_config *tc;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
- /* struct fs_receive_config *rc;*/
- /* struct FS_QENTRY *qe; */
- int error;
- int bfp;
- int to;
- unsigned short tmc0;
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
-
- func_enter ();
-
- dev = FS_DEV(atm_vcc->dev);
- fs_dprintk (FS_DEBUG_OPEN, "fs: open on dev: %p, vcc at %p\n",
- dev, atm_vcc);
-
- if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
- set_bit(ATM_VF_ADDR, &atm_vcc->flags);
-
- if ((atm_vcc->qos.aal != ATM_AAL5) &&
- (atm_vcc->qos.aal != ATM_AAL2))
- return -EINVAL; /* XXX AAL0 */
-
- fs_dprintk (FS_DEBUG_OPEN, "fs: (itf %d): open %d.%d\n",
- atm_vcc->dev->number, atm_vcc->vpi, atm_vcc->vci);
-
- /* XXX handle qos parameters (rate limiting) ? */
-
- vcc = kmalloc(sizeof(struct fs_vcc), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc VCC: %p(%zd)\n", vcc, sizeof(struct fs_vcc));
- if (!vcc) {
- clear_bit(ATM_VF_ADDR, &atm_vcc->flags);
- return -ENOMEM;
- }
-
- atm_vcc->dev_data = vcc;
- vcc->last_skb = NULL;
-
- init_waitqueue_head (&vcc->close_wait);
-
- txtp = &atm_vcc->qos.txtp;
- rxtp = &atm_vcc->qos.rxtp;
-
- if (!test_bit(ATM_VF_PARTIAL, &atm_vcc->flags)) {
- if (IS_FS50(dev)) {
- /* Increment the channel number: take a free one next time. */
- for (to=33;to;to--, dev->channo++) {
- /* We only have 32 channels */
- if (dev->channo >= 32)
- dev->channo = 0;
- /* If we need to do RX, AND the RX is inuse, try the next */
- if (DO_DIRECTION(rxtp) && dev->atm_vccs[dev->channo])
- continue;
- /* If we need to do TX, AND the TX is inuse, try the next */
- if (DO_DIRECTION(txtp) && test_bit (dev->channo, dev->tx_inuse))
- continue;
- /* Ok, both are free! (or not needed) */
- break;
- }
- if (!to) {
- printk ("No more free channels for FS50..\n");
- kfree(vcc);
- return -EBUSY;
- }
- vcc->channo = dev->channo;
- dev->channo &= dev->channel_mask;
-
- } else {
- vcc->channo = (vpi << FS155_VCI_BITS) | (vci);
- if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
- ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
- printk ("Channel is in use for FS155.\n");
- kfree(vcc);
- return -EBUSY;
- }
- }
- fs_dprintk (FS_DEBUG_OPEN, "OK. Allocated channel %x(%d).\n",
- vcc->channo, vcc->channo);
- }
-
- if (DO_DIRECTION (txtp)) {
- tc = kmalloc (sizeof (struct fs_transmit_config), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc tc: %p(%zd)\n",
- tc, sizeof (struct fs_transmit_config));
- if (!tc) {
- fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
- kfree(vcc);
- return -ENOMEM;
- }
-
- /* Allocate the "open" entry from the high priority txq. This makes
- it most likely that the chip will notice it. It also prevents us
- from having to wait for completion. On the other hand, we may
- need to wait for completion anyway, to see if it completed
- successfully. */
-
- switch (atm_vcc->qos.aal) {
- case ATM_AAL2:
- case ATM_AAL0:
- tc->flags = 0
- | TC_FLAGS_TRANSPARENT_PAYLOAD
- | TC_FLAGS_PACKET
- | (1 << 28)
- | TC_FLAGS_TYPE_UBR /* XXX Change to VBR -- PVDL */
- | TC_FLAGS_CAL0;
- break;
- case ATM_AAL5:
- tc->flags = 0
- | TC_FLAGS_AAL5
- | TC_FLAGS_PACKET /* ??? */
- | TC_FLAGS_TYPE_CBR
- | TC_FLAGS_CAL0;
- break;
- default:
- printk ("Unknown aal: %d\n", atm_vcc->qos.aal);
- tc->flags = 0;
- }
- /* Docs are vague about this atm_hdr field. By the way, the FS
- * chip makes odd errors if lower bits are set.... -- REW */
- tc->atm_hdr = (vpi << 20) | (vci << 4);
- tmc0 = 0;
- {
- int pcr = atm_pcr_goal (txtp);
-
- fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr);
-
- /* XXX Hmm. officially we're only allowed to do this if rounding
- is round_down -- REW */
- if (IS_FS50(dev)) {
- if (pcr > 51840000/53/8) pcr = 51840000/53/8;
- } else {
- if (pcr > 155520000/53/8) pcr = 155520000/53/8;
- }
- if (!pcr) {
- /* no rate cap */
- tmc0 = IS_FS50(dev)?0x61BE:0x64c9; /* Just copied over the bits from Fujitsu -- REW */
- } else {
- int r;
- if (pcr < 0) {
- r = ROUND_DOWN;
- pcr = -pcr;
- } else {
- r = ROUND_UP;
- }
- error = make_rate (pcr, r, &tmc0, NULL);
- if (error) {
- kfree(tc);
- kfree(vcc);
- return error;
- }
- }
- fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr);
- }
-
- tc->TMC[0] = tmc0 | 0x4000;
- tc->TMC[1] = 0; /* Unused */
- tc->TMC[2] = 0; /* Unused */
- tc->TMC[3] = 0; /* Unused */
-
- tc->spec = 0; /* UTOPIA address, UDF, HEC: Unused -> 0 */
- tc->rtag[0] = 0; /* What should I do with routing tags???
- -- Not used -- AS -- Thanks -- REW*/
- tc->rtag[1] = 0;
- tc->rtag[2] = 0;
-
- if (fs_debug & FS_DEBUG_OPEN) {
- fs_dprintk (FS_DEBUG_OPEN, "TX config record:\n");
- my_hd (tc, sizeof (*tc));
- }
-
- /* We now use the "submit_command" function to submit commands to
- the firestream. There is a define up near the definition of
- that routine that switches this routine between immediate write
- to the immediate command registers and queuing the commands in
- the HPTXQ for execution. This last technique might be more
- efficient if we know we're going to submit a whole lot of
-	   commands in one go, but this driver is not set up to be able to
-	   use such a construct. So it probably doesn't matter much right
-	   now. -- REW */
-
- /* The command is IMMediate and INQueue. The parameters are out-of-line.. */
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_TX | QE_CMD_IMM_INQ | vcc->channo,
- virt_to_bus (tc), 0, 0);
-
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_EN | QE_CMD_IMM_INQ | vcc->channo,
- 0, 0, 0);
- set_bit (vcc->channo, dev->tx_inuse);
- }
-
- if (DO_DIRECTION (rxtp)) {
- dev->atm_vccs[vcc->channo] = atm_vcc;
-
- for (bfp = 0;bfp < FS_NR_FREE_POOLS; bfp++)
- if (atm_vcc->qos.rxtp.max_sdu <= dev->rx_fp[bfp].bufsize) break;
- if (bfp >= FS_NR_FREE_POOLS) {
- fs_dprintk (FS_DEBUG_OPEN, "No free pool fits sdu: %d.\n",
- atm_vcc->qos.rxtp.max_sdu);
- /* XXX Cleanup? -- Would just calling fs_close work??? -- REW */
-
- /* XXX clear tx inuse. Close TX part? */
- dev->atm_vccs[vcc->channo] = NULL;
- kfree (vcc);
- return -EINVAL;
- }
-
- switch (atm_vcc->qos.aal) {
- case ATM_AAL0:
- case ATM_AAL2:
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo,
- RC_FLAGS_TRANSP |
- RC_FLAGS_BFPS_BFP * bfp |
- RC_FLAGS_RXBM_PSB, 0, 0);
- break;
- case ATM_AAL5:
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo,
- RC_FLAGS_AAL5 |
- RC_FLAGS_BFPS_BFP * bfp |
- RC_FLAGS_RXBM_PSB, 0, 0);
- break;
- }
- if (IS_FS50 (dev)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_REG_WR | QE_CMD_IMM_INQ,
- 0x80 + vcc->channo,
- (vpi << 16) | vci, 0 ); /* XXX -- Use defines. */
- }
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_EN | QE_CMD_IMM_INQ | vcc->channo,
- 0, 0, 0);
- }
-
- /* Indicate we're done! */
- set_bit(ATM_VF_READY, &atm_vcc->flags);
-
- func_exit ();
- return 0;
-}
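The tc->atm_hdr packing in fs_open above matches the standard UNI cell-header layout (GFC:4, VPI:8, VCI:16, PTI:3, CLP:1 within one 32-bit word), which is where the shifts of 20 and 4 come from. A minimal illustrative helper, not part of the driver:

/* Illustrative sketch: pack a UNI ATM cell header word.
 * VPI lands in bits 27..20 and VCI in bits 19..4; PTI/CLP (bits 3..0)
 * stay clear, matching the "odd errors if lower bits are set" note. */
static inline u32 pack_uni_atm_hdr(u32 vpi, u32 vci)
{
	return ((vpi & 0xff) << 20) | ((vci & 0xffff) << 4);
}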
-
-
-static void fs_close(struct atm_vcc *atm_vcc)
-{
- struct fs_dev *dev = FS_DEV (atm_vcc->dev);
- struct fs_vcc *vcc = FS_VCC (atm_vcc);
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
-
- func_enter ();
-
- clear_bit(ATM_VF_READY, &atm_vcc->flags);
-
- fs_dprintk (FS_DEBUG_QSIZE, "--==**[%d]**==--", dev->ntxpckts);
- if (vcc->last_skb) {
- fs_dprintk (FS_DEBUG_QUEUE, "Waiting for skb %p to be sent.\n",
- vcc->last_skb);
-	/* We're going to wait for the last packet to get sent on this VC. It would
-	   be impolite not to send them, don't you think?
- XXX
- We don't know which packets didn't get sent. So if we get interrupted in
- this sleep_on, we'll lose any reference to these packets. Memory leak!
- On the other hand, it's awfully convenient that we can abort a "close" that
- is taking too long. Maybe just use non-interruptible sleep on? -- REW */
- wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
- }
-
- txtp = &atm_vcc->qos.txtp;
- rxtp = &atm_vcc->qos.rxtp;
-
-
- /* See App note XXX (Unpublished as of now) for the reason for the
- removal of the "CMD_IMM_INQ" part of the TX_PURGE_INH... -- REW */
-
- if (DO_DIRECTION (txtp)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_PURGE_INH | /*QE_CMD_IMM_INQ|*/ vcc->channo, 0,0,0);
- clear_bit (vcc->channo, dev->tx_inuse);
- }
-
- if (DO_DIRECTION (rxtp)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
- dev->atm_vccs [vcc->channo] = NULL;
-
- /* This means that this is configured as a receive channel */
- if (IS_FS50 (dev)) {
- /* Disable the receive filter. Is 0/0 indeed an invalid receive
- channel? -- REW. Yes it is. -- Hang. Ok. I'll use -1
- (0xfff...) -- REW */
- submit_command (dev, &dev->hp_txq,
- QE_CMD_REG_WR | QE_CMD_IMM_INQ,
- 0x80 + vcc->channo, -1, 0 );
- }
- }
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free vcc: %p\n", vcc);
- kfree (vcc);
-
- func_exit ();
-}
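The close-time comment above weighs interruptible against non-interruptible sleep; a hedged sketch of the middle ground it hints at, an uninterruptible but bounded wait (the 5-second timeout is an assumption, not from the driver):

/* Sketch: wait uninterruptibly, but give up after 5 seconds so a
 * wedged card cannot hang the close forever; the skb reference may
 * still leak on timeout, as the original comment warns. */
if (!wait_event_timeout(vcc->close_wait, !vcc->last_skb, 5 * HZ))
	printk(KERN_WARNING "fs: close timed out waiting for last skb\n");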
-
-
-static int fs_send (struct atm_vcc *atm_vcc, struct sk_buff *skb)
-{
- struct fs_dev *dev = FS_DEV (atm_vcc->dev);
- struct fs_vcc *vcc = FS_VCC (atm_vcc);
- struct FS_BPENTRY *td;
-
- func_enter ();
-
- fs_dprintk (FS_DEBUG_TXMEM, "I");
- fs_dprintk (FS_DEBUG_SEND, "Send: atm_vcc %p skb %p vcc %p dev %p\n",
- atm_vcc, skb, vcc, dev);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc t-skb: %p (atm_send)\n", skb);
-
- ATM_SKB(skb)->vcc = atm_vcc;
-
- vcc->last_skb = skb;
-
- td = kmalloc (sizeof (struct FS_BPENTRY), GFP_ATOMIC);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc transd: %p(%zd)\n", td, sizeof (struct FS_BPENTRY));
- if (!td) {
- /* Oops out of mem */
- return -ENOMEM;
- }
-
- fs_dprintk (FS_DEBUG_SEND, "first word in buffer: %x\n",
- *(int *) skb->data);
-
- td->flags = TD_EPI | TD_DATA | skb->len;
- td->next = 0;
- td->bsa = virt_to_bus (skb->data);
- td->skb = skb;
- td->dev = dev;
- dev->ntxpckts++;
-
-#ifdef DEBUG_EXTRA
- da[qd] = td;
- dq[qd].flags = td->flags;
- dq[qd].next = td->next;
- dq[qd].bsa = td->bsa;
- dq[qd].skb = td->skb;
- dq[qd].dev = td->dev;
- qd++;
- if (qd >= 60) qd = 0;
-#endif
-
- submit_queue (dev, &dev->hp_txq,
- QE_TRANSMIT_DE | vcc->channo,
- virt_to_bus (td), 0,
- virt_to_bus (td));
-
- fs_dprintk (FS_DEBUG_QUEUE, "in send: txq %d txrq %d\n",
- read_fs (dev, Q_EA (dev->hp_txq.offset)) -
- read_fs (dev, Q_SA (dev->hp_txq.offset)),
- read_fs (dev, Q_EA (dev->tx_relq.offset)) -
- read_fs (dev, Q_SA (dev->tx_relq.offset)));
-
- func_exit ();
- return 0;
-}
-
-
-/* Some function placeholders for functions we don't yet support. */
-
-#if 0
-static int fs_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- func_enter ();
- func_exit ();
- return -ENOIOCTLCMD;
-}
-
-
-static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,int optlen)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,unsigned int optlen)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static void fs_phy_put(struct atm_dev *dev,unsigned char value,
- unsigned long addr)
-{
- func_enter ();
- func_exit ();
-}
-
-
-static unsigned char fs_phy_get(struct atm_dev *dev,unsigned long addr)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static int fs_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
-{
- func_enter ();
- func_exit ();
- return 0;
-};
-
-#endif
-
-
-static const struct atmdev_ops ops = {
- .open = fs_open,
- .close = fs_close,
- .send = fs_send,
- .owner = THIS_MODULE,
- /* ioctl: fs_ioctl, */
- /* change_qos: fs_change_qos, */
-
- /* For now implement these internally here... */
- /* phy_put: fs_phy_put, */
- /* phy_get: fs_phy_get, */
-};
-
-
-static void undocumented_pci_fix(struct pci_dev *pdev)
-{
- u32 tint;
-
- /* The Windows driver says: */
- /* Switch off FireStream Retry Limit Threshold
- */
-
- /* The register at 0x28 is documented as "reserved", no further
- comments. */
-
- pci_read_config_dword (pdev, 0x28, &tint);
- if (tint != 0x80) {
- tint = 0x80;
- pci_write_config_dword (pdev, 0x28, tint);
- }
-}
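The same undocumented write could, in principle, be installed as an early PCI fixup so it runs before probe; a sketch under that assumption, reusing the device IDs from the table at the bottom of this file:

/* Hypothetical alternative: apply the 0x28 write as a PCI quirk. */
static void firestream_retry_quirk(struct pci_dev *pdev)
{
	u32 tint;

	pci_read_config_dword(pdev, 0x28, &tint);
	if (tint != 0x80)
		pci_write_config_dword(pdev, 0x28, 0x80);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FUJITSU_ME,
			PCI_DEVICE_ID_FUJITSU_FS50, firestream_retry_quirk);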
-
-
-
-/**************************************************************************
- * PHY routines *
- **************************************************************************/
-
-static void write_phy(struct fs_dev *dev, int regnum, int val)
-{
- submit_command (dev, &dev->hp_txq, QE_CMD_PRP_WR | QE_CMD_IMM_INQ,
- regnum, val, 0);
-}
-
-static int init_phy(struct fs_dev *dev, struct reginit_item *reginit)
-{
- int i;
-
- func_enter ();
- while (reginit->reg != PHY_EOF) {
- if (reginit->reg == PHY_CLEARALL) {
-			/* PHY_CLEARALL means "clear all registers". The number of registers is in "val". */
- for (i=0;i<reginit->val;i++) {
- write_phy (dev, i, 0);
- }
- } else {
- write_phy (dev, reginit->reg, reginit->val);
- }
- reginit++;
- }
- func_exit ();
- return 0;
-}
-
-static void reset_chip (struct fs_dev *dev)
-{
- int i;
-
- write_fs (dev, SARMODE0, SARMODE0_SRTS0);
-
- /* Undocumented delay */
- udelay (128);
-
-	/* The internal registers are documented to all reset to zero, but
-	   comments & code in the Windows driver indicate that the pools are
-	   NOT reset. */
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- write_fs (dev, FP_CNF (RXB_FP(i)), 0);
- write_fs (dev, FP_SA (RXB_FP(i)), 0);
- write_fs (dev, FP_EA (RXB_FP(i)), 0);
- write_fs (dev, FP_CNT (RXB_FP(i)), 0);
- write_fs (dev, FP_CTU (RXB_FP(i)), 0);
- }
-
- /* The same goes for the match channel registers, although those are
- NOT documented that way in the Windows driver. -- REW */
-	/* The Windows driver DOES write 0 to these registers somewhere in
-	   the init sequence. However, a small hardware feature will
-	   prevent reception of data on VPI/VCI = 0/0 (unless the allocated
-	   channel happens to have no disabled channels with a lower
-	   number). -- REW */
-
- /* Clear the match channel registers. */
- if (IS_FS50 (dev)) {
- for (i=0;i<FS50_NR_CHANNELS;i++) {
- write_fs (dev, 0x200 + i * 4, -1);
- }
- }
-}
-
-static void *aligned_kmalloc(int size, gfp_t flags, int alignment)
-{
- void *t;
-
- if (alignment <= 0x10) {
- t = kmalloc (size, flags);
- if ((unsigned long)t & (alignment-1)) {
- printk ("Kmalloc doesn't align things correctly! %p\n", t);
- kfree (t);
- return aligned_kmalloc (size, flags, alignment * 4);
- }
- return t;
- }
- printk (KERN_ERR "Request for > 0x10 alignment not yet implemented (hard!)\n");
- return NULL;
-}
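A more general pattern, sketched here for contrast, over-allocates and rounds the pointer up; the caller must then remember the raw pointer for kfree(), which is presumably why the simple retry above was preferred:

/* Sketch: alignment by over-allocation ("align" must be a power of
 * two). Returns the aligned pointer; *rawp receives the address that
 * must later be passed to kfree(). */
static void *aligned_kmalloc_generic(size_t size, gfp_t flags,
				     size_t align, void **rawp)
{
	void *raw = kmalloc(size + align - 1, flags);

	if (!raw)
		return NULL;
	*rawp = raw;
	return (void *)(((unsigned long)raw + align - 1) & ~(align - 1));
}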
-
-static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
- int nentries, int is_rq)
-{
- int sz = nentries * sizeof (struct FS_QENTRY);
- struct FS_QENTRY *p;
-
- func_enter ();
-
- fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n",
- queue, nentries);
-
- p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc queue: %p(%d)\n", p, sz);
-
- if (!p) return 0;
-
- write_fs (dev, Q_SA(queue), virt_to_bus(p));
- write_fs (dev, Q_EA(queue), virt_to_bus(p+nentries-1));
- write_fs (dev, Q_WP(queue), virt_to_bus(p));
- write_fs (dev, Q_RP(queue), virt_to_bus(p));
- if (is_rq) {
-		/* Configuration for the receive queue: 0 means interrupt
-		   immediately, with no pre-warning for nearly empty queues;
-		   we do our best to keep the queue filled anyway. */
- write_fs (dev, Q_CNF(queue), 0 );
- }
-
- txq->sa = p;
- txq->ea = p;
- txq->offset = queue;
-
- func_exit ();
- return 1;
-}
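The SA/EA/WP/RP registers written above describe a conventional producer/consumer ring. A generic sketch of the usual conventions, illustrative only and not verified against the FireStream documentation:

/* Generic ring-pointer sketch, assuming the common convention that
 * the queue is empty when the read and write pointers coincide. */
struct ring {
	u32 sa, ea, wp, rp;	/* start, end, write, read (bus addresses) */
};

static bool ring_empty(const struct ring *r)
{
	return r->rp == r->wp;
}

static u32 ring_advance(const struct ring *r, u32 p, u32 entry_size)
{
	p += entry_size;
	return (p > r->ea) ? r->sa : p;	/* wrap past the last entry */
}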
-
-
-static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
- int bufsize, int nr_buffers)
-{
- func_enter ();
-
- fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
-
- write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
- write_fs (dev, FP_SA(queue), 0);
- write_fs (dev, FP_EA(queue), 0);
- write_fs (dev, FP_CTU(queue), 0);
- write_fs (dev, FP_CNT(queue), 0);
-
- fp->offset = queue;
- fp->bufsize = bufsize;
- fp->nr_buffers = nr_buffers;
-
- func_exit ();
- return 1;
-}
-
-
-static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *fp)
-{
-#if 0
- /* This seems to be unreliable.... */
- return read_fs (dev, FP_CNT (fp->offset));
-#else
- return fp->n;
-#endif
-}
-
-
-/* Check if this gets going again if a pool ever runs out. -- Yes, it
- does. I've seen "receive abort: no buffers" and things started
- working again after that... -- REW */
-
-static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
- gfp_t gfp_flags)
-{
- struct FS_BPENTRY *qe, *ne;
- struct sk_buff *skb;
- int n = 0;
- u32 qe_tmp;
-
- fs_dprintk (FS_DEBUG_QUEUE, "Topping off queue at %x (%d-%d/%d)\n",
- fp->offset, read_fs (dev, FP_CNT (fp->offset)), fp->n,
- fp->nr_buffers);
- while (nr_buffers_in_freepool(dev, fp) < fp->nr_buffers) {
-
- skb = alloc_skb (fp->bufsize, gfp_flags);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-skb: %p(%d)\n", skb, fp->bufsize);
- if (!skb) break;
- ne = kmalloc (sizeof (struct FS_BPENTRY), gfp_flags);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-d: %p(%zd)\n", ne, sizeof (struct FS_BPENTRY));
- if (!ne) {
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", skb);
- dev_kfree_skb_any (skb);
- break;
- }
-
- fs_dprintk (FS_DEBUG_QUEUE, "Adding skb %p desc %p -> %p(%p) ",
- skb, ne, skb->data, skb->head);
- n++;
- ne->flags = FP_FLAGS_EPI | fp->bufsize;
- ne->next = virt_to_bus (NULL);
- ne->bsa = virt_to_bus (skb->data);
- ne->aal_bufsize = fp->bufsize;
- ne->skb = skb;
- ne->fp = fp;
-
- /*
- * FIXME: following code encodes and decodes
- * machine pointers (could be 64-bit) into a
- * 32-bit register.
- */
-
- qe_tmp = read_fs (dev, FP_EA(fp->offset));
- fs_dprintk (FS_DEBUG_QUEUE, "link at %x\n", qe_tmp);
- if (qe_tmp) {
- qe = bus_to_virt ((long) qe_tmp);
- qe->next = virt_to_bus(ne);
- qe->flags &= ~FP_FLAGS_EPI;
- } else
- write_fs (dev, FP_SA(fp->offset), virt_to_bus(ne));
-
- write_fs (dev, FP_EA(fp->offset), virt_to_bus (ne));
- fp->n++; /* XXX Atomic_inc? */
- write_fs (dev, FP_CTU(fp->offset), 1);
- }
-
- fs_dprintk (FS_DEBUG_QUEUE, "Added %d entries. \n", n);
-}
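The FIXME in top_off_fp is real: virt_to_bus() cannot represent a 64-bit virtual address in the chip's 32-bit registers. The modern route would be the DMA API; a sketch reusing the names from top_off_fp, assuming the device's addressing constraints allow it:

/* Sketch: obtain a device-visible address via the DMA API instead of
 * virt_to_bus(). "drop" is a hypothetical error label. */
dma_addr_t bsa = dma_map_single(&dev->pci_dev->dev, skb->data,
				fp->bufsize, DMA_FROM_DEVICE);
if (dma_mapping_error(&dev->pci_dev->dev, bsa))
	goto drop;		/* free the skb and descriptor, then bail */
ne->bsa = lower_32_bits(bsa);	/* the chip's BSA field is 32 bits wide */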
-
-static void free_queue(struct fs_dev *dev, struct queue *txq)
-{
- func_enter ();
-
- write_fs (dev, Q_SA(txq->offset), 0);
- write_fs (dev, Q_EA(txq->offset), 0);
- write_fs (dev, Q_RP(txq->offset), 0);
- write_fs (dev, Q_WP(txq->offset), 0);
- /* Configuration ? */
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free queue: %p\n", txq->sa);
- kfree (txq->sa);
-
- func_exit ();
-}
-
-static void free_freepool(struct fs_dev *dev, struct freepool *fp)
-{
- func_enter ();
-
- write_fs (dev, FP_CNF(fp->offset), 0);
- write_fs (dev, FP_SA (fp->offset), 0);
- write_fs (dev, FP_EA (fp->offset), 0);
- write_fs (dev, FP_CNT(fp->offset), 0);
- write_fs (dev, FP_CTU(fp->offset), 0);
-
- func_exit ();
-}
-
-
-
-static irqreturn_t fs_irq (int irq, void *dev_id)
-{
- int i;
- u32 status;
- struct fs_dev *dev = dev_id;
-
- status = read_fs (dev, ISR);
- if (!status)
- return IRQ_NONE;
-
- func_enter ();
-
-#ifdef IRQ_RATE_LIMIT
-	/* Aaargh! I'm ashamed. This costs more lines-of-code than the actual
-	   interrupt routine! (Well, it used to, when I wrote that comment.) -- REW */
- {
-		static unsigned long lastjif;
- static int nintr=0;
-
- if (lastjif == jiffies) {
- if (++nintr > IRQ_RATE_LIMIT) {
- free_irq (dev->irq, dev_id);
- printk (KERN_ERR "fs: Too many interrupts. Turning off interrupt %d.\n",
- dev->irq);
- }
- } else {
- lastjif = jiffies;
- nintr = 0;
- }
- }
-#endif
- fs_dprintk (FS_DEBUG_QUEUE, "in intr: txq %d txrq %d\n",
- read_fs (dev, Q_EA (dev->hp_txq.offset)) -
- read_fs (dev, Q_SA (dev->hp_txq.offset)),
- read_fs (dev, Q_EA (dev->tx_relq.offset)) -
- read_fs (dev, Q_SA (dev->tx_relq.offset)));
-
- /* print the bits in the ISR register. */
- if (fs_debug & FS_DEBUG_IRQ) {
- /* The FS_DEBUG things are unnecessary here. But this way it is
- clear for grep that these are debug prints. */
- fs_dprintk (FS_DEBUG_IRQ, "IRQ status:");
- for (i=0;i<27;i++)
- if (status & (1 << i))
- fs_dprintk (FS_DEBUG_IRQ, " %s", irq_bitname[i]);
- fs_dprintk (FS_DEBUG_IRQ, "\n");
- }
-
- if (status & ISR_RBRQ0_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (0)!!!!\n");
- process_incoming (dev, &dev->rx_rq[0]);
- /* items mentioned on RBRQ0 are from FP 0 or 1. */
- top_off_fp (dev, &dev->rx_fp[0], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[1], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ1_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (1)!!!!\n");
- process_incoming (dev, &dev->rx_rq[1]);
- top_off_fp (dev, &dev->rx_fp[2], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[3], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ2_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (2)!!!!\n");
- process_incoming (dev, &dev->rx_rq[2]);
- top_off_fp (dev, &dev->rx_fp[4], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[5], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ3_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (3)!!!!\n");
- process_incoming (dev, &dev->rx_rq[3]);
- top_off_fp (dev, &dev->rx_fp[6], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[7], GFP_ATOMIC);
- }
-
- if (status & ISR_CSQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Command executed ok!\n");
- process_return_queue (dev, &dev->st_q);
- }
-
- if (status & ISR_TBRQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
- process_txdone_queue (dev, &dev->tx_relq);
- }
-
- func_exit ();
- return IRQ_HANDLED;
-}
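The rate limiter above stores jiffies in a signed int; a tidier sketch of the same guard using the wrap-safe helpers from <linux/jiffies.h> (a drop-in idea, not tested on this hardware):

/* Sketch: disable the IRQ if more than IRQ_RATE_LIMIT interrupts
 * arrive within a single jiffy. */
static unsigned long last_jiffy;
static unsigned int nintr;

if (time_after(jiffies, last_jiffy)) {
	last_jiffy = jiffies;
	nintr = 0;
} else if (++nintr > IRQ_RATE_LIMIT) {
	free_irq(dev->irq, dev_id);
	printk(KERN_ERR "fs: irq storm, disabling irq %d\n", dev->irq);
}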
-
-
-#ifdef FS_POLL_FREQ
-static void fs_poll (struct timer_list *t)
-{
- struct fs_dev *dev = from_timer(dev, t, timer);
-
- fs_irq (0, dev);
- dev->timer.expires = jiffies + FS_POLL_FREQ;
- add_timer (&dev->timer);
-}
-#endif
-
-static int fs_init(struct fs_dev *dev)
-{
- struct pci_dev *pci_dev;
- int isr, to;
- int i;
-
- func_enter ();
- pci_dev = dev->pci_dev;
-
- printk (KERN_INFO "found a FireStream %d card, base %16llx, irq%d.\n",
- IS_FS50(dev)?50:155,
- (unsigned long long)pci_resource_start(pci_dev, 0),
- dev->pci_dev->irq);
-
- if (fs_debug & FS_DEBUG_INIT)
- my_hd ((unsigned char *) dev, sizeof (*dev));
-
- undocumented_pci_fix (pci_dev);
-
- dev->hw_base = pci_resource_start(pci_dev, 0);
-
- dev->base = ioremap(dev->hw_base, 0x1000);
- if (!dev->base)
- return 1;
-
- reset_chip (dev);
-
- write_fs (dev, SARMODE0, 0
- | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. */
- | (1 * SARMODE0_INTMODE_READCLEAR)
- | (1 * SARMODE0_CWRE)
- | (IS_FS50(dev) ? SARMODE0_PRPWT_FS50_5:
- SARMODE0_PRPWT_FS155_3)
- | (1 * SARMODE0_CALSUP_1)
- | (IS_FS50(dev) ? (0
- | SARMODE0_RXVCS_32
- | SARMODE0_ABRVCS_32
- | SARMODE0_TXVCS_32):
- (0
- | SARMODE0_RXVCS_1k
- | SARMODE0_ABRVCS_1k
- | SARMODE0_TXVCS_1k)));
-
- /* 10ms * 100 is 1 second. That should be enough, as AN3:9 says it takes
- 1ms. */
- to = 100;
- while (--to) {
- isr = read_fs (dev, ISR);
-
- /* This bit is documented as "RESERVED" */
- if (isr & ISR_INIT_ERR) {
- printk (KERN_ERR "Error initializing the FS... \n");
- goto unmap;
- }
- if (isr & ISR_INIT) {
- fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n");
- break;
- }
-
- /* Try again after 10ms. */
- msleep(10);
- }
-
- if (!to) {
- printk (KERN_ERR "timeout initializing the FS... \n");
- goto unmap;
- }
-
- /* XXX fix for fs155 */
- dev->channel_mask = 0x1f;
- dev->channo = 0;
-
- /* AN3: 10 */
- write_fs (dev, SARMODE1, 0
- | (fs_keystream * SARMODE1_DEFHEC) /* XXX PHY */
- | ((loopback == 1) * SARMODE1_TSTLP) /* XXX Loopback mode enable... */
- | (1 * SARMODE1_DCRM)
- | (1 * SARMODE1_DCOAM)
- | (0 * SARMODE1_OAMCRC)
- | (0 * SARMODE1_DUMPE)
- | (0 * SARMODE1_GPLEN)
- | (0 * SARMODE1_GNAM)
- | (0 * SARMODE1_GVAS)
- | (0 * SARMODE1_GPAS)
- | (1 * SARMODE1_GPRI)
- | (0 * SARMODE1_PMS)
- | (0 * SARMODE1_GFCR)
- | (1 * SARMODE1_HECM2)
- | (1 * SARMODE1_HECM1)
- | (1 * SARMODE1_HECM0)
- | (1 << 12) /* That's what hang's driver does. Program to 0 */
- | (0 * 0xff) /* XXX FS155 */);
-
-
- /* Cal prescale etc */
-
- /* AN3: 11 */
- write_fs (dev, TMCONF, 0x0000000f);
- write_fs (dev, CALPRESCALE, 0x01010101 * num);
- write_fs (dev, 0x80, 0x000F00E4);
-
- /* AN3: 12 */
- write_fs (dev, CELLOSCONF, 0
- | ( 0 * CELLOSCONF_CEN)
- | ( CELLOSCONF_SC1)
- | (0x80 * CELLOSCONF_COBS)
- | (num * CELLOSCONF_COPK) /* Changed from 0xff to 0x5a */
- | (num * CELLOSCONF_COST));/* after a hint from Hang.
- * performance jumped 50->70... */
-
- /* Magic value by Hang */
- write_fs (dev, CELLOSCONF_COST, 0x0B809191);
-
- if (IS_FS50 (dev)) {
- write_fs (dev, RAS0, RAS0_DCD_XHLT);
- dev->atm_dev->ci_range.vpi_bits = 12;
- dev->atm_dev->ci_range.vci_bits = 16;
- dev->nchannels = FS50_NR_CHANNELS;
- } else {
- write_fs (dev, RAS0, RAS0_DCD_XHLT
- | (((1 << FS155_VPI_BITS) - 1) * RAS0_VPSEL)
- | (((1 << FS155_VCI_BITS) - 1) * RAS0_VCSEL));
-		/* We can choose the split arbitrarily. We might be able to
-		   support more. Whatever. This should do for now. */
- dev->atm_dev->ci_range.vpi_bits = FS155_VPI_BITS;
- dev->atm_dev->ci_range.vci_bits = FS155_VCI_BITS;
-
- /* Address bits we can't use should be compared to 0. */
- write_fs (dev, RAC, 0);
-
- /* Manual (AN9, page 6) says ASF1=0 means compare Utopia address
- * too. I can't find ASF1 anywhere. Anyway, we AND with just the
- * other bits, then compare with 0, which is exactly what we
- * want. */
- write_fs (dev, RAM, (1 << (28 - FS155_VPI_BITS - FS155_VCI_BITS)) - 1);
- dev->nchannels = FS155_NR_CHANNELS;
- }
- dev->atm_vccs = kcalloc (dev->nchannels, sizeof (struct atm_vcc *),
- GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%zd)\n",
- dev->atm_vccs, dev->nchannels * sizeof (struct atm_vcc *));
-
- if (!dev->atm_vccs) {
- printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n");
- /* XXX Clean up..... */
- goto unmap;
- }
-
- dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
-	fs_dprintk (FS_DEBUG_ALLOC, "Alloc tx_inuse: %p(%d)\n",
-		    dev->tx_inuse, dev->nchannels / 8);
-
- if (!dev->tx_inuse) {
- printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n");
- /* XXX Clean up..... */
- goto unmap;
- }
- /* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
- /* -- RAS2 : FS50 only: Default is OK. */
-
- /* DMAMODE, default should be OK. -- REW */
- write_fs (dev, DMAMR, DMAMR_TX_MODE_FULL);
-
- init_q (dev, &dev->hp_txq, TX_PQ(TXQ_HP), TXQ_NENTRIES, 0);
- init_q (dev, &dev->lp_txq, TX_PQ(TXQ_LP), TXQ_NENTRIES, 0);
- init_q (dev, &dev->tx_relq, TXB_RQ, TXQ_NENTRIES, 1);
- init_q (dev, &dev->st_q, ST_Q, TXQ_NENTRIES, 1);
-
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- init_fp (dev, &dev->rx_fp[i], RXB_FP(i),
- rx_buf_sizes[i], rx_pool_sizes[i]);
- top_off_fp (dev, &dev->rx_fp[i], GFP_KERNEL);
- }
-
-
- for (i=0;i < FS_NR_RX_QUEUES;i++)
- init_q (dev, &dev->rx_rq[i], RXB_RQ(i), RXRQ_NENTRIES, 1);
-
- dev->irq = pci_dev->irq;
- if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) {
- printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq);
- /* XXX undo all previous stuff... */
- goto unmap;
- }
- fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev);
-
-	/* We want to be notified of most things. Only the statistics
-	   counter overflows are not interesting. */
- write_fs (dev, IMR, 0
- | ISR_RBRQ0_W
- | ISR_RBRQ1_W
- | ISR_RBRQ2_W
- | ISR_RBRQ3_W
- | ISR_TBRQ_W
- | ISR_CSQ_W);
-
- write_fs (dev, SARMODE0, 0
- | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. */
- | (1 * SARMODE0_GINT)
- | (1 * SARMODE0_INTMODE_READCLEAR)
- | (0 * SARMODE0_CWRE)
- | (IS_FS50(dev)?SARMODE0_PRPWT_FS50_5:
- SARMODE0_PRPWT_FS155_3)
- | (1 * SARMODE0_CALSUP_1)
- | (IS_FS50 (dev)?(0
- | SARMODE0_RXVCS_32
- | SARMODE0_ABRVCS_32
- | SARMODE0_TXVCS_32):
- (0
- | SARMODE0_RXVCS_1k
- | SARMODE0_ABRVCS_1k
- | SARMODE0_TXVCS_1k))
- | (1 * SARMODE0_RUN));
-
- init_phy (dev, PHY_NTC_INIT);
-
- if (loopback == 2) {
- write_phy (dev, 0x39, 0x000e);
- }
-
-#ifdef FS_POLL_FREQ
- timer_setup(&dev->timer, fs_poll, 0);
- dev->timer.expires = jiffies + FS_POLL_FREQ;
- add_timer (&dev->timer);
-#endif
-
- dev->atm_dev->dev_data = dev;
-
- func_exit ();
- return 0;
-unmap:
- iounmap(dev->base);
- return 1;
-}
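The init wait in fs_init is the classic poll-until-bit-or-timeout loop; <linux/iopoll.h> provides read_poll_timeout() for exactly this shape. A sketch of the equivalent, assuming a sleeping context as fs_init has:

#include <linux/iopoll.h>

/* Sketch: poll ISR every 10 ms, for at most 1 s, until the chip
 * reports init done (or the "reserved" error bit). */
u32 isr;
int ret = read_poll_timeout(read_fs, isr,
			    isr & (ISR_INIT | ISR_INIT_ERR),
			    10000, 1000000, false, dev, ISR);
if (ret || (isr & ISR_INIT_ERR))
	/* bail out exactly as the open-coded loop above does */;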
-
-static int firestream_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
-{
- struct atm_dev *atm_dev;
- struct fs_dev *fs_dev;
-
- if (pci_enable_device(pci_dev))
- goto err_out;
-
- fs_dev = kzalloc (sizeof (struct fs_dev), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%zd)\n",
- fs_dev, sizeof (struct fs_dev));
- if (!fs_dev)
- goto err_out;
- atm_dev = atm_dev_register("fs", &pci_dev->dev, &ops, -1, NULL);
- if (!atm_dev)
- goto err_out_free_fs_dev;
-
- fs_dev->pci_dev = pci_dev;
- fs_dev->atm_dev = atm_dev;
- fs_dev->flags = ent->driver_data;
-
- if (fs_init(fs_dev))
- goto err_out_free_atm_dev;
-
- fs_dev->next = fs_boards;
- fs_boards = fs_dev;
- return 0;
-
- err_out_free_atm_dev:
- atm_dev_deregister(atm_dev);
- err_out_free_fs_dev:
- kfree(fs_dev);
- err_out:
- return -ENODEV;
-}
-
-static void firestream_remove_one(struct pci_dev *pdev)
-{
- int i;
- struct fs_dev *dev, *nxtdev;
- struct fs_vcc *vcc;
- struct FS_BPENTRY *fp, *nxt;
-
- func_enter ();
-
-#if 0
- printk ("hptxq:\n");
- for (i=0;i<60;i++) {
- printk ("%d: %08x %08x %08x %08x \n",
- i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2);
- qp++;
- if (qp >= 60) qp = 0;
- }
-
- printk ("descriptors:\n");
- for (i=0;i<60;i++) {
- printk ("%d: %p: %08x %08x %p %p\n",
- i, da[qd], dq[qd].flags, dq[qd].bsa, dq[qd].skb, dq[qd].dev);
- qd++;
- if (qd >= 60) qd = 0;
- }
-#endif
-
- for (dev = fs_boards;dev != NULL;dev=nxtdev) {
- fs_dprintk (FS_DEBUG_CLEANUP, "Releasing resources for dev at %p.\n", dev);
-
- /* XXX Hit all the tx channels too! */
-
- for (i=0;i < dev->nchannels;i++) {
- if (dev->atm_vccs[i]) {
- vcc = FS_VCC (dev->atm_vccs[i]);
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
-
- }
- }
-
- /* XXX Wait a while for the chip to release all buffers. */
-
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- for (fp=bus_to_virt (read_fs (dev, FP_SA(dev->rx_fp[i].offset)));
- !(fp->flags & FP_FLAGS_EPI);fp = nxt) {
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
- dev_kfree_skb_any (fp->skb);
- nxt = bus_to_virt (fp->next);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp);
- kfree (fp);
- }
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
- dev_kfree_skb_any (fp->skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp);
- kfree (fp);
- }
-
-		/* Hang the chip in "reset", preventing it from clobbering memory
-		   that is no longer ours. */
- reset_chip (dev);
-
- fs_dprintk (FS_DEBUG_CLEANUP, "Freeing irq%d.\n", dev->irq);
- free_irq (dev->irq, dev);
-#ifdef FS_POLL_FREQ
-		del_timer_sync (&dev->timer);
-#endif
-
- atm_dev_deregister(dev->atm_dev);
- free_queue (dev, &dev->hp_txq);
- free_queue (dev, &dev->lp_txq);
- free_queue (dev, &dev->tx_relq);
- free_queue (dev, &dev->st_q);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free atmvccs: %p\n", dev->atm_vccs);
- kfree (dev->atm_vccs);
-
- for (i=0;i< FS_NR_FREE_POOLS;i++)
- free_freepool (dev, &dev->rx_fp[i]);
-
- for (i=0;i < FS_NR_RX_QUEUES;i++)
- free_queue (dev, &dev->rx_rq[i]);
-
- iounmap(dev->base);
- fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev);
- nxtdev = dev->next;
- kfree (dev);
- }
-
- func_exit ();
-}
-
-static const struct pci_device_id firestream_pci_tbl[] = {
- { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50},
- { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155},
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, firestream_pci_tbl);
-
-static struct pci_driver firestream_driver = {
- .name = "firestream",
- .id_table = firestream_pci_tbl,
- .probe = firestream_init_one,
- .remove = firestream_remove_one,
-};
-
-static int __init firestream_init_module (void)
-{
- int error;
-
- func_enter ();
- error = pci_register_driver(&firestream_driver);
- func_exit ();
- return error;
-}
-
-static void __exit firestream_cleanup_module(void)
-{
- pci_unregister_driver(&firestream_driver);
-}
-
-module_init(firestream_init_module);
-module_exit(firestream_cleanup_module);
-
-MODULE_LICENSE("GPL");
-
-
-
diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h
deleted file mode 100644
index 6d684160808d..000000000000
--- a/drivers/atm/firestream.h
+++ /dev/null
@@ -1,502 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* drivers/atm/firestream.h - FireStream 155 (MB86697) and
- * FireStream 50 (MB86695) device driver
- */
-
-/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl
- * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA
- * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd
- */
-
-/*
-*/
-
-
-/***********************************************************************
- * first the defines for the chip. *
- ***********************************************************************/
-
-
-/********************* General chip parameters. ************************/
-
-#define FS_NR_FREE_POOLS 8
-#define FS_NR_RX_QUEUES 4
-
-
-/********************* queues and queue access macros ******************/
-
-
-/* A queue entry. */
-struct FS_QENTRY {
- u32 cmd;
- u32 p0, p1, p2;
-};
-
-
-/* A freepool entry. */
-struct FS_BPENTRY {
- u32 flags;
- u32 next;
- u32 bsa;
- u32 aal_bufsize;
-
- /* The hardware doesn't look at this, but we need the SKB somewhere... */
- struct sk_buff *skb;
- struct freepool *fp;
- struct fs_dev *dev;
-};
-
-
-#define STATUS_CODE(qe) ((qe->cmd >> 22) & 0x3f)
-
-
-/* OFFSETS against the base of a QUEUE... */
-#define QSA 0x00
-#define QEA 0x04
-#define QRP 0x08
-#define QWP 0x0c
-#define QCNF 0x10 /* Only for Release queues! */
-/* Not for the transmit pending queue. */
-
-
-/* OFFSETS against the base of a FREE POOL... */
-#define FPCNF 0x00
-#define FPSA 0x04
-#define FPEA 0x08
-#define FPCNT 0x0c
-#define FPCTU 0x10
-
-#define Q_SA(b) (b + QSA )
-#define Q_EA(b) (b + QEA )
-#define Q_RP(b) (b + QRP )
-#define Q_WP(b) (b + QWP )
-#define Q_CNF(b) (b + QCNF)
-
-#define FP_CNF(b) (b + FPCNF)
-#define FP_SA(b) (b + FPSA)
-#define FP_EA(b) (b + FPEA)
-#define FP_CNT(b) (b + FPCNT)
-#define FP_CTU(b) (b + FPCTU)
-
-/* bits in a queue register. */
-#define Q_FULL 0x1
-#define Q_EMPTY 0x2
-#define Q_INCWRAP 0x4
-#define Q_ADDR_MASK 0xfffffff0
-
-/* bits in a FreePool config register */
-#define RBFP_RBS (0x1 << 16)
-#define RBFP_RBSVAL (0x1 << 15)
-#define RBFP_CME (0x1 << 12)
-#define RBFP_DLP (0x1 << 11)
-#define RBFP_BFPWT (0x1 << 0)
-
-
-
-
-/* FireStream commands. */
-#define QE_CMD_NULL (0x00 << 22)
-#define QE_CMD_REG_RD (0x01 << 22)
-#define QE_CMD_REG_RDM (0x02 << 22)
-#define QE_CMD_REG_WR (0x03 << 22)
-#define QE_CMD_REG_WRM (0x04 << 22)
-#define QE_CMD_CONFIG_TX (0x05 << 22)
-#define QE_CMD_CONFIG_RX (0x06 << 22)
-#define QE_CMD_PRP_RD (0x07 << 22)
-#define QE_CMD_PRP_RDM (0x2a << 22)
-#define QE_CMD_PRP_WR (0x09 << 22)
-#define QE_CMD_PRP_WRM (0x2b << 22)
-#define QE_CMD_RX_EN (0x0a << 22)
-#define QE_CMD_RX_PURGE (0x0b << 22)
-#define QE_CMD_RX_PURGE_INH (0x0c << 22)
-#define QE_CMD_TX_EN (0x0d << 22)
-#define QE_CMD_TX_PURGE (0x0e << 22)
-#define QE_CMD_TX_PURGE_INH (0x0f << 22)
-#define QE_CMD_RST_CG (0x10 << 22)
-#define QE_CMD_SET_CG (0x11 << 22)
-#define QE_CMD_RST_CLP (0x12 << 22)
-#define QE_CMD_SET_CLP (0x13 << 22)
-#define QE_CMD_OVERRIDE (0x14 << 22)
-#define QE_CMD_ADD_BFP (0x15 << 22)
-#define QE_CMD_DUMP_TX (0x16 << 22)
-#define QE_CMD_DUMP_RX (0x17 << 22)
-#define QE_CMD_LRAM_RD (0x18 << 22)
-#define QE_CMD_LRAM_RDM (0x28 << 22)
-#define QE_CMD_LRAM_WR (0x19 << 22)
-#define QE_CMD_LRAM_WRM (0x29 << 22)
-#define QE_CMD_LRAM_BSET (0x1a << 22)
-#define QE_CMD_LRAM_BCLR (0x1b << 22)
-#define QE_CMD_CONFIG_SEGM (0x1c << 22)
-#define QE_CMD_READ_SEGM (0x1d << 22)
-#define QE_CMD_CONFIG_ROUT (0x1e << 22)
-#define QE_CMD_READ_ROUT (0x1f << 22)
-#define QE_CMD_CONFIG_TM (0x20 << 22)
-#define QE_CMD_READ_TM (0x21 << 22)
-#define QE_CMD_CONFIG_TXBM (0x22 << 22)
-#define QE_CMD_READ_TXBM (0x23 << 22)
-#define QE_CMD_CONFIG_RXBM (0x24 << 22)
-#define QE_CMD_READ_RXBM (0x25 << 22)
-#define QE_CMD_CONFIG_REAS (0x26 << 22)
-#define QE_CMD_READ_REAS (0x27 << 22)
-
-#define QE_TRANSMIT_DE (0x0 << 30)
-#define QE_CMD_LINKED (0x1 << 30)
-#define QE_CMD_IMM (0x2 << 30)
-#define QE_CMD_IMM_INQ (0x3 << 30)
-
-#define TD_EPI (0x1 << 27)
-#define TD_COMMAND (0x1 << 28)
-
-#define TD_DATA (0x0 << 29)
-#define TD_RM_CELL (0x1 << 29)
-#define TD_OAM_CELL (0x2 << 29)
-#define TD_OAM_CELL_SEGMENT (0x3 << 29)
-
-#define TD_BPI (0x1 << 20)
-
-#define FP_FLAGS_EPI (0x1 << 27)
-
-
-#define TX_PQ(i) (0x00 + (i) * 0x10)
-#define TXB_RQ (0x20)
-#define ST_Q (0x48)
-#define RXB_FP(i) (0x90 + (i) * 0x14)
-#define RXB_RQ(i) (0x134 + (i) * 0x14)
-
-
-#define TXQ_HP 0
-#define TXQ_LP 1
-
-/* Phew. You don't want to know how many revisions these simple queue
- * address macros went through before I got them nice and compact as
- * they are now. -- REW
- */
-
-
-/* And now for something completely different:
- * The rest of the registers... */
-
-
-#define CMDR0 0x34
-#define CMDR1 0x38
-#define CMDR2 0x3c
-#define CMDR3 0x40
-
-
-#define SARMODE0 0x5c
-
-#define SARMODE0_TXVCS_0 (0x0 << 0)
-#define SARMODE0_TXVCS_1k (0x1 << 0)
-#define SARMODE0_TXVCS_2k (0x2 << 0)
-#define SARMODE0_TXVCS_4k (0x3 << 0)
-#define SARMODE0_TXVCS_8k (0x4 << 0)
-#define SARMODE0_TXVCS_16k (0x5 << 0)
-#define SARMODE0_TXVCS_32k (0x6 << 0)
-#define SARMODE0_TXVCS_64k (0x7 << 0)
-#define SARMODE0_TXVCS_32 (0x8 << 0)
-
-#define SARMODE0_ABRVCS_0 (0x0 << 4)
-#define SARMODE0_ABRVCS_512 (0x1 << 4)
-#define SARMODE0_ABRVCS_1k (0x2 << 4)
-#define SARMODE0_ABRVCS_2k (0x3 << 4)
-#define SARMODE0_ABRVCS_4k (0x4 << 4)
-#define SARMODE0_ABRVCS_8k (0x5 << 4)
-#define SARMODE0_ABRVCS_16k (0x6 << 4)
-#define SARMODE0_ABRVCS_32k (0x7 << 4)
-#define SARMODE0_ABRVCS_32 (0x9 << 4) /* The others are "8", this one really has to
- be 9. Tell me you don't believe me. -- REW */
-
-#define SARMODE0_RXVCS_0 (0x0 << 8)
-#define SARMODE0_RXVCS_1k (0x1 << 8)
-#define SARMODE0_RXVCS_2k (0x2 << 8)
-#define SARMODE0_RXVCS_4k (0x3 << 8)
-#define SARMODE0_RXVCS_8k (0x4 << 8)
-#define SARMODE0_RXVCS_16k (0x5 << 8)
-#define SARMODE0_RXVCS_32k (0x6 << 8)
-#define SARMODE0_RXVCS_64k (0x7 << 8)
-#define SARMODE0_RXVCS_32 (0x8 << 8)
-
-#define SARMODE0_CALSUP_1 (0x0 << 12)
-#define SARMODE0_CALSUP_2 (0x1 << 12)
-#define SARMODE0_CALSUP_3 (0x2 << 12)
-#define SARMODE0_CALSUP_4 (0x3 << 12)
-
-#define SARMODE0_PRPWT_FS50_0 (0x0 << 14)
-#define SARMODE0_PRPWT_FS50_2 (0x1 << 14)
-#define SARMODE0_PRPWT_FS50_5 (0x2 << 14)
-#define SARMODE0_PRPWT_FS50_11 (0x3 << 14)
-
-#define SARMODE0_PRPWT_FS155_0 (0x0 << 14)
-#define SARMODE0_PRPWT_FS155_1 (0x1 << 14)
-#define SARMODE0_PRPWT_FS155_2 (0x2 << 14)
-#define SARMODE0_PRPWT_FS155_3 (0x3 << 14)
-
-#define SARMODE0_SRTS0 (0x1 << 23)
-#define SARMODE0_SRTS1 (0x1 << 24)
-
-#define SARMODE0_RUN (0x1 << 25)
-
-#define SARMODE0_UNLOCK (0x1 << 26)
-#define SARMODE0_CWRE (0x1 << 27)
-
-
-#define SARMODE0_INTMODE_READCLEAR (0x0 << 28)
-#define SARMODE0_INTMODE_READNOCLEAR (0x1 << 28)
-#define SARMODE0_INTMODE_READNOCLEARINHIBIT (0x2 << 28)
-#define SARMODE0_INTMODE_READCLEARINHIBIT (0x3 << 28) /* Tell me you don't believe me. */
-
-#define SARMODE0_GINT (0x1 << 30)
-#define SARMODE0_SHADEN (0x1 << 31)
-
-
-#define SARMODE1 0x60
-
-
-#define SARMODE1_TRTL_SHIFT 0 /* Program to 0 */
-#define SARMODE1_RRTL_SHIFT 4 /* Program to 0 */
-
-#define SARMODE1_TAGM (0x1 << 8) /* Program to 0 */
-
-#define SARMODE1_HECM0 (0x1 << 9)
-#define SARMODE1_HECM1 (0x1 << 10)
-#define SARMODE1_HECM2 (0x1 << 11)
-
-#define SARMODE1_GFCE (0x1 << 14)
-#define SARMODE1_GFCR (0x1 << 15)
-#define SARMODE1_PMS (0x1 << 18)
-#define SARMODE1_GPRI (0x1 << 19)
-#define SARMODE1_GPAS (0x1 << 20)
-#define SARMODE1_GVAS (0x1 << 21)
-#define SARMODE1_GNAM (0x1 << 22)
-#define SARMODE1_GPLEN (0x1 << 23)
-#define SARMODE1_DUMPE (0x1 << 24)
-#define SARMODE1_OAMCRC (0x1 << 25)
-#define SARMODE1_DCOAM (0x1 << 26)
-#define SARMODE1_DCRM (0x1 << 27)
-#define SARMODE1_TSTLP (0x1 << 28)
-#define SARMODE1_DEFHEC (0x1 << 29)
-
-
-#define ISR 0x64
-#define IUSR 0x68
-#define IMR 0x6c
-
-#define ISR_LPCO (0x1 << 0)
-#define ISR_DPCO (0x1 << 1)
-#define ISR_RBRQ0_W (0x1 << 2)
-#define ISR_RBRQ1_W (0x1 << 3)
-#define ISR_RBRQ2_W (0x1 << 4)
-#define ISR_RBRQ3_W (0x1 << 5)
-#define ISR_RBRQ0_NF (0x1 << 6)
-#define ISR_RBRQ1_NF (0x1 << 7)
-#define ISR_RBRQ2_NF (0x1 << 8)
-#define ISR_RBRQ3_NF (0x1 << 9)
-#define ISR_BFP_SC (0x1 << 10)
-#define ISR_INIT (0x1 << 11)
-#define ISR_INIT_ERR (0x1 << 12) /* Documented as "reserved" */
-#define ISR_USCEO (0x1 << 13)
-#define ISR_UPEC0 (0x1 << 14)
-#define ISR_VPFCO (0x1 << 15)
-#define ISR_CRCCO (0x1 << 16)
-#define ISR_HECO (0x1 << 17)
-#define ISR_TBRQ_W (0x1 << 18)
-#define ISR_TBRQ_NF (0x1 << 19)
-#define ISR_CTPQ_E (0x1 << 20)
-#define ISR_GFC_C0 (0x1 << 21)
-#define ISR_PCI_FTL (0x1 << 22)
-#define ISR_CSQ_W (0x1 << 23)
-#define ISR_CSQ_NF (0x1 << 24)
-#define ISR_EXT_INT (0x1 << 25)
-#define ISR_RXDMA_S (0x1 << 26)
-
-
-#define TMCONF 0x78
-/* Bits? */
-
-
-#define CALPRESCALE 0x7c
-/* Bits? */
-
-#define CELLOSCONF 0x84
-#define CELLOSCONF_COTS (0x1 << 28)
-#define CELLOSCONF_CEN (0x1 << 27)
-#define CELLOSCONF_SC8 (0x3 << 24)
-#define CELLOSCONF_SC4 (0x2 << 24)
-#define CELLOSCONF_SC2 (0x1 << 24)
-#define CELLOSCONF_SC1 (0x0 << 24)
-
-#define CELLOSCONF_COBS (0x1 << 16)
-#define CELLOSCONF_COPK (0x1 << 8)
-#define CELLOSCONF_COST (0x1 << 0)
-/* Bits? */
-
-#define RAS0 0x1bc
-#define RAS0_DCD_XHLT (0x1 << 31)
-
-#define RAS0_VPSEL (0x1 << 16)
-#define RAS0_VCSEL (0x1 << 0)
-
-#define RAS1 0x1c0
-#define RAS1_UTREG (0x1 << 5)
-
-
-#define DMAMR 0x1cc
-#define DMAMR_TX_MODE_FULL (0x0 << 0)
-#define DMAMR_TX_MODE_PART (0x1 << 0)
-#define DMAMR_TX_MODE_NONE (0x2 << 0) /* And 3 */
-
-
-
-#define RAS2 0x280
-
-#define RAS2_NNI (0x1 << 0)
-#define RAS2_USEL (0x1 << 1)
-#define RAS2_UBS (0x1 << 2)
-
-
-
-struct fs_transmit_config {
- u32 flags;
- u32 atm_hdr;
- u32 TMC[4];
- u32 spec;
- u32 rtag[3];
-};
-
-#define TC_FLAGS_AAL5 (0x0 << 29)
-#define TC_FLAGS_TRANSPARENT_PAYLOAD (0x1 << 29)
-#define TC_FLAGS_TRANSPARENT_CELL (0x2 << 29)
-#define TC_FLAGS_STREAMING (0x1 << 28)
-#define TC_FLAGS_PACKET (0x0)
-#define TC_FLAGS_TYPE_ABR (0x0 << 22)
-#define TC_FLAGS_TYPE_CBR (0x1 << 22)
-#define TC_FLAGS_TYPE_VBR (0x2 << 22)
-#define TC_FLAGS_TYPE_UBR (0x3 << 22)
-#define TC_FLAGS_CAL0 (0x0 << 20)
-#define TC_FLAGS_CAL1 (0x1 << 20)
-#define TC_FLAGS_CAL2 (0x2 << 20)
-#define TC_FLAGS_CAL3 (0x3 << 20)
-
-
-#define RC_FLAGS_NAM (0x1 << 13)
-#define RC_FLAGS_RXBM_PSB (0x0 << 14)
-#define RC_FLAGS_RXBM_CIF (0x1 << 14)
-#define RC_FLAGS_RXBM_PMB (0x2 << 14)
-#define RC_FLAGS_RXBM_STR (0x4 << 14)
-#define RC_FLAGS_RXBM_SAF (0x6 << 14)
-#define RC_FLAGS_RXBM_POS (0x6 << 14)
-#define RC_FLAGS_BFPS (0x1 << 17)
-
-#define RC_FLAGS_BFPS_BFP (0x1 << 17)
-
-#define RC_FLAGS_BFPS_BFP0 (0x0 << 17)
-#define RC_FLAGS_BFPS_BFP1 (0x1 << 17)
-#define RC_FLAGS_BFPS_BFP2 (0x2 << 17)
-#define RC_FLAGS_BFPS_BFP3 (0x3 << 17)
-#define RC_FLAGS_BFPS_BFP4 (0x4 << 17)
-#define RC_FLAGS_BFPS_BFP5 (0x5 << 17)
-#define RC_FLAGS_BFPS_BFP6 (0x6 << 17)
-#define RC_FLAGS_BFPS_BFP7 (0x7 << 17)
-#define RC_FLAGS_BFPS_BFP01 (0x8 << 17)
-#define RC_FLAGS_BFPS_BFP23 (0x9 << 17)
-#define RC_FLAGS_BFPS_BFP45 (0xa << 17)
-#define RC_FLAGS_BFPS_BFP67 (0xb << 17)
-#define RC_FLAGS_BFPS_BFP07 (0xc << 17)
-#define RC_FLAGS_BFPS_BFP27 (0xd << 17)
-#define RC_FLAGS_BFPS_BFP47 (0xe << 17)
-
-#define RC_FLAGS_BFPP (0x1 << 21)
-#define RC_FLAGS_TEVC (0x1 << 22)
-#define RC_FLAGS_TEP (0x1 << 23)
-#define RC_FLAGS_AAL5 (0x0 << 24)
-#define RC_FLAGS_TRANSP (0x1 << 24)
-#define RC_FLAGS_TRANSC (0x2 << 24)
-#define RC_FLAGS_ML (0x1 << 27)
-#define RC_FLAGS_TRBRM (0x1 << 28)
-#define RC_FLAGS_PRI (0x1 << 29)
-#define RC_FLAGS_HOAM (0x1 << 30)
-#define RC_FLAGS_CRC10 (0x1 << 31)
-
-
-#define RAC 0x1c8
-#define RAM 0x1c4
-
-
-
-/************************************************************************
- * Then the datastructures that the DRIVER uses. *
- ************************************************************************/
-
-#define TXQ_NENTRIES 32
-#define RXRQ_NENTRIES 1024
-
-
-struct fs_vcc {
- int channo;
- wait_queue_head_t close_wait;
- struct sk_buff *last_skb;
-};
-
-
-struct queue {
- struct FS_QENTRY *sa, *ea;
- int offset;
-};
-
-struct freepool {
- int offset;
- int bufsize;
- int nr_buffers;
- int n;
-};
-
-
-struct fs_dev {
- struct fs_dev *next; /* other FS devices */
- int flags;
-
- unsigned char irq; /* IRQ */
- struct pci_dev *pci_dev; /* PCI stuff */
- struct atm_dev *atm_dev;
- struct timer_list timer;
-
- unsigned long hw_base; /* mem base address */
- void __iomem *base; /* Mapping of base address */
- int channo;
- unsigned long channel_mask;
-
- struct queue hp_txq, lp_txq, tx_relq, st_q;
- struct freepool rx_fp[FS_NR_FREE_POOLS];
- struct queue rx_rq[FS_NR_RX_QUEUES];
-
- int nchannels;
- struct atm_vcc **atm_vccs;
- void *tx_inuse;
- int ntxpckts;
-};
-
-
-
-
-/* Number of channels that the FS50 supports. */
-#define FS50_CHANNEL_BITS 5
-#define FS50_NR_CHANNELS (1 << FS50_CHANNEL_BITS)
-
-
-#define FS_DEV(atm_dev) ((struct fs_dev *) (atm_dev)->dev_data)
-#define FS_VCC(atm_vcc) ((struct fs_vcc *) (atm_vcc)->dev_data)
-
-
-#define FS_IS50 0x1
-#define FS_IS155 0x2
-
-#define IS_FS50(dev) (dev->flags & FS_IS50)
-#define IS_FS155(dev) (dev->flags & FS_IS155)
-
-/* Within limits this is user-configurable. */
-/* Note: Currently the sum (10 -> 1k channels) is hardcoded in the driver. */
-#define FS155_VPI_BITS 4
-#define FS155_VCI_BITS 6
-
-#define FS155_CHANNEL_BITS (FS155_VPI_BITS + FS155_VCI_BITS)
-#define FS155_NR_CHANNELS (1 << FS155_CHANNEL_BITS)
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
deleted file mode 100644
index d0e67ec46216..000000000000
--- a/drivers/atm/horizon.c
+++ /dev/null
@@ -1,2853 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Madge Horizon ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/*
- IMPORTANT NOTE: Madge Networks no longer makes the adapters
- supported by this driver and makes no commitment to maintain it.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <asm/string.h>
-#include <asm/byteorder.h>
-
-#include "horizon.h"
-
-#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
-#define description_string "Madge ATM Horizon [Ultra] driver"
-#define version_string "1.2.1"
-
-static inline void __init show_version (void) {
- printk ("%s version %s\n", description_string, version_string);
-}
-
-/*
-
- CREDITS
-
- Driver and documentation by:
-
- Chris Aston Madge Networks
- Giuliano Procida Madge Networks
- Simon Benham Madge Networks
- Simon Johnson Madge Networks
- Various Others Madge Networks
-
- Some inspiration taken from other drivers by:
-
- Alexandru Cucos UTBv
- Kari Mettinen University of Helsinki
- Werner Almesberger EPFL LRC
-
- Theory of Operation
-
- I Hardware, detection, initialisation and shutdown.
-
- 1. Supported Hardware
-
- This driver should handle all variants of the PCI Madge ATM adapters
- with the Horizon chipset. These are all PCI cards supporting PIO, BM
- DMA and a form of MMIO (registers only, not internal RAM).
-
- The driver is only known to work with SONET and UTP Horizon Ultra
- cards at 155Mb/s. However, code is in place to deal with both the
- original Horizon and 25Mb/s operation.
-
- There are two revisions of the Horizon ASIC: the original and the
- Ultra. Details of hardware bugs are in section III.
-
- The ASIC version can be distinguished by chip markings but is NOT
- indicated by the PCI revision (all adapters seem to have PCI rev 1).
-
- I believe that:
-
- Horizon => Collage 25 PCI Adapter (UTP and STP)
- Horizon Ultra => Collage 155 PCI Client (UTP or SONET)
- Ambassador x => Collage 155 PCI Server (completely different)
-
- Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to
- have a Madge B154 plus glue logic serializer. I have also found a
- really ancient version of this with slightly different glue. It
- comes with the revision 0 (140-025-01) ASIC.
-
- Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink
- output (UTP) or an HP HFBR 5205 output (SONET). It has either
- Madge's SAMBA framer or a SUNI-lite device (early versions). It
- comes with the revision 1 (140-027-01) ASIC.
-
- 2. Detection
-
- All Horizon-based cards present with the same PCI Vendor and Device
- IDs. The standard Linux 2.2 PCI API is used to locate any cards and
- to enable bus-mastering (with appropriate latency).
-
- ATM_LAYER_STATUS in the control register distinguishes between the
- two possible physical layers (25 and 155). It is not clear whether
- the 155 cards can also operate at 25Mbps. We rely on the fact that a
- card operates at 155 if and only if it has the newer Horizon Ultra
- ASIC.
-
- For 155 cards the two possible framers are probed for and then set
- up for loop-timing.
-
- 3. Initialisation
-
- The card is reset and then put into a known state. The physical
- layer is configured for normal operation at the appropriate speed;
- in the case of the 155 cards, the framer is initialised with
- line-based timing; the internal RAM is zeroed and the allocation of
- buffers for RX and TX is made; the Burnt In Address is read and
- copied to the ATM ESI; various policy settings for RX (VPI bits,
- unknown VCs, oam cells) are made. Ideally all policy items should be
-  configurable at module load (if not actually on demand); however,
-  only the vpi vs vci bit allocation can be specified at insmod.
-
- 4. Shutdown
-
-  This is in response to module_cleanup. No VCs are in use and the card
- should be idle; it is reset.
-
- II Driver software (as it should be)
-
- 0. Traffic Parameters
-
- The traffic classes (not an enumeration) are currently: ATM_NONE (no
- traffic), ATM_UBR, ATM_CBR, ATM_VBR and ATM_ABR, ATM_ANYCLASS
- (compatible with everything). Together with (perhaps only some of)
- the following items they make up the traffic specification.
-
- struct atm_trafprm {
- unsigned char traffic_class; traffic class (ATM_UBR, ...)
- int max_pcr; maximum PCR in cells per second
- int pcr; desired PCR in cells per second
- int min_pcr; minimum PCR in cells per second
- int max_cdv; maximum CDV in microseconds
- int max_sdu; maximum SDU in bytes
- };
-
- Note that these denote bandwidth available not bandwidth used; the
- possibilities according to ATMF are:
-
- Real Time (cdv and max CDT given)
-
- CBR(pcr) pcr bandwidth always available
- rtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
-
- Non Real Time
-
- nrtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
- UBR()
- ABR(mcr,pcr) mcr bandwidth always available, up to pcr (depending) too
-
- mbs is max burst size (bucket)
- pcr and scr have associated cdvt values
-  mcr is like scr but has no cdvt
-  cdvt may differ at each hop
-
- Some of the above items are qos items (as opposed to traffic
- parameters). We have nothing to do with qos. All except ABR can have
- their traffic parameters converted to GCRA parameters. The GCRA may
- be implemented as a (real-number) leaky bucket. The GCRA can be used
- in complicated ways by switches and in simpler ways by end-stations.
- It can be used both to filter incoming cells and shape out-going
- cells.
-
- ATM Linux actually supports:
-
- ATM_NONE() (no traffic in this direction)
- ATM_UBR(max_frame_size)
- ATM_CBR(max/min_pcr, max_cdv, max_frame_size)
-
- 0 or ATM_MAX_PCR are used to indicate maximum available PCR
-
- A traffic specification consists of the AAL type and separate
- traffic specifications for either direction. In ATM Linux it is:
-
- struct atm_qos {
- struct atm_trafprm txtp;
- struct atm_trafprm rxtp;
- unsigned char aal;
- };
-
- AAL types are:
-
- ATM_NO_AAL AAL not specified
- ATM_AAL0 "raw" ATM cells
- ATM_AAL1 AAL1 (CBR)
- ATM_AAL2 AAL2 (VBR)
- ATM_AAL34 AAL3/4 (data)
- ATM_AAL5 AAL5 (data)
- ATM_SAAL signaling AAL
-
- The Horizon has support for AAL frame types: 0, 3/4 and 5. However,
- it does not implement AAL 3/4 SAR and it has a different notion of
- "raw cell" to ATM Linux's (48 bytes vs. 52 bytes) so neither are
- supported by this driver.
-
- The Horizon has limited support for ABR (including UBR), VBR and
- CBR. Each TX channel has a bucket (containing up to 31 cell units)
- and two timers (PCR and SCR) associated with it that can be used to
- govern cell emissions and host notification (in the case of ABR this
- is presumably so that RM cells may be emitted at appropriate times).
- The timers may either be disabled or may be set to any of 240 values
- (determined by the clock crystal, a fixed (?) per-device divider, a
- configurable divider and a configurable timer preload value).
-
- At the moment only UBR and CBR are supported by the driver. VBR will
- be supported as soon as ATM for Linux supports it. ABR support is
- very unlikely as RM cell handling is completely up to the driver.
-
- 1. TX (TX channel setup and TX transfer)
-
- The TX half of the driver owns the TX Horizon registers. The TX
- component in the IRQ handler is the BM completion handler. This can
- only be entered when tx_busy is true (enforced by hardware). The
- other TX component can only be entered when tx_busy is false
- (enforced by driver). So TX is single-threaded.
-
- Apart from a minor optimisation to not re-select the last channel,
- the TX send component works as follows:
-
- Atomic test and set tx_busy until we succeed; we should implement
- some sort of timeout so that tx_busy will never be stuck at true.
-
- If no TX channel is set up for this VC we wait for an idle one (if
- necessary) and set it up.
-
- At this point we have a TX channel ready for use. We wait for enough
- buffers to become available then start a TX transmit (set the TX
- descriptor, schedule transfer, exit).
-
- The IRQ component handles TX completion (stats, free buffer, tx_busy
- unset, exit). We also re-schedule further transfers for the same
- frame if needed.
-
- TX setup in more detail:
-
- TX open is a nop, the relevant information is held in the hrz_vcc
- (vcc->dev_data) structure and is "cached" on the card.
-
- TX close gets the TX lock and clears the channel from the "cache".
-
- 2. RX (Data Available and RX transfer)
-
- The RX half of the driver owns the RX registers. There are two RX
- components in the IRQ handler: the data available handler deals with
- fresh data that has arrived on the card, the BM completion handler
- is very similar to the TX completion handler. The data available
- handler grabs the rx_lock and it is only released once the data has
- been discarded or completely transferred to the host. The BM
- completion handler only runs when the lock is held; the data
- available handler is locked out over the same period.
-
- Data available on the card triggers an interrupt. If the data is not
- suitable for our existing RX channels or we cannot allocate a buffer
- it is flushed. Otherwise an RX receive is scheduled. Multiple RX
- transfers may be scheduled for the same frame.
-
- RX setup in more detail:
-
- RX open...
- RX close...
-
- III Hardware Bugs
-
- 0. Byte vs Word addressing of adapter RAM.
-
- A design feature; see the .h file (especially the memory map).
-
- 1. Bus Master Data Transfers (original Horizon only, fixed in Ultra)
-
- The host must not start a transmit direction transfer at a
- non-four-byte boundary in host memory. Instead the host should
- perform a byte, or a two byte, or one byte followed by two byte
- transfer in order to start the rest of the transfer on a four byte
- boundary. RX is OK.
-
- Simultaneous transmit and receive direction bus master transfers are
- not allowed.
-
- The simplest solution to these two is to always do PIO (never DMA)
- in the TX direction on the original Horizon. More complicated
- solutions are likely to hurt my brain.
-
- 2. Loss of buffer on close VC
-
- When a VC is being closed, the buffer associated with it is not
- returned to the pool. The host must store the reference to this
- buffer and when opening a new VC then give it to that new VC.
-
- The host intervention currently consists of stacking such a buffer
- pointer at VC close and checking the stack at VC open.
-
- 3. Failure to close a VC
-
- If a VC is currently receiving a frame then closing the VC may fail
- and the frame continues to be received.
-
- The solution is to make sure any received frames are flushed when
- ready. This is currently done just before the solution to 2.
-
- 4. PCI bus (original Horizon only, fixed in Ultra)
-
- Reading from the data port prior to initialisation will hang the PCI
- bus. Just don't do that then! We don't.
-
- IV To Do List
-
- . Timer code may be broken.
-
- . Allow users to specify buffer allocation split for TX and RX.
-
- . Deal once and for all with buggy VC close.
-
- . Handle interrupted and/or non-blocking operations.
-
- . Change some macros to functions and move from .h to .c.
-
- . Try to limit the number of TX frames each VC may have queued, in
- order to reduce the chances of TX buffer exhaustion.
-
- . Implement VBR (bucket and timers not understood) and ABR (need to
- do RM cells manually); also no Linux support for either.
-
- . Implement QoS changes on open VCs (involves extracting parts of VC open
- and close into separate functions and using them to make changes).
-
-*/
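The GCRA mentioned in the traffic-parameter discussion above is easiest to see as the ATM Forum's virtual-scheduling formulation: each conforming cell advances a theoretical arrival time (TAT) by the increment T = 1/rate, and a cell conforms if it arrives no more than the tolerance tau early. A minimal sketch, times in arbitrary ticks (illustrative, not driver code):

/* GCRA(T, tau) by virtual scheduling: returns true if a cell arriving
 * at time "now" conforms, updating the stored TAT as a side effect. */
struct gcra {
	long tat;	/* theoretical arrival time of the next cell */
	long T;		/* increment: 1 / cell rate */
	long tau;	/* tolerance (CDVT) */
};

static bool gcra_conforming(struct gcra *g, long now)
{
	if (now < g->tat - g->tau)
		return false;		/* too early: non-conforming */
	g->tat = (now > g->tat ? now : g->tat) + g->T;
	return true;
}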
-
-/********** globals **********/
-
-static void do_housekeeping (struct timer_list *t);
-
-static unsigned short debug = 0;
-static unsigned short vpi_bits = 0;
-static int max_tx_size = 9000;
-static int max_rx_size = 9000;
-static unsigned char pci_lat = 0;
-
-/********** access functions **********/
-
-/* Read / Write Horizon registers */
-static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) {
- outl (cpu_to_le32 (data), dev->iobase + reg);
-}
-
-static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) {
- return le32_to_cpu (inl (dev->iobase + reg));
-}
-
-static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) {
- outw (cpu_to_le16 (data), dev->iobase + reg);
-}
-
-static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) {
- return le16_to_cpu (inw (dev->iobase + reg));
-}
-
-static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
- outsb (dev->iobase + reg, addr, len);
-}
-
-static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
- insb (dev->iobase + reg, addr, len);
-}
-
-/* Read / Write to a given address in Horizon buffer memory.
- Interrupts must be disabled between the address register and data
- port accesses as these must form an atomic operation. */
-static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) {
- // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr);
- wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
- wr_regl (dev, MEMORY_PORT_OFF, data);
-}
-
-static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) {
- // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr);
- wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
- return rd_regl (dev, MEMORY_PORT_OFF);
-}
-
-static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) {
- wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000);
- wr_regl (dev, MEMORY_PORT_OFF, data);
-}
-
-static inline u32 rd_framer (const hrz_dev * dev, u32 addr) {
- wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000);
- return rd_regl (dev, MEMORY_PORT_OFF);
-}
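The atomicity note above (the address register and data port must pair up) is usually met by holding a spinlock with interrupts disabled across both accesses; a hedged sketch, where "mem_lock" is a hypothetical lock and not a field of hrz_dev:

/* Sketch: make the address-register + data-port pair atomic. */
static u32 rd_mem_locked(hrz_dev *dev, HDW *addr)
{
	unsigned long flags;
	u32 data;

	spin_lock_irqsave(&mem_lock, flags);
	data = rd_mem(dev, addr);
	spin_unlock_irqrestore(&mem_lock, flags);
	return data;
}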
-
-/********** specialised access functions **********/
-
-/* RX */
-
-static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
- wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel);
- return;
-}
-
-static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
- while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
- ;
- return;
-}
-
-static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
- wr_regw (dev, RX_CHANNEL_PORT_OFF, channel);
- return;
-}
-
-static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
- while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
- ;
- return;
-}
-
-/* TX */
-
-static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) {
- wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel);
- return;
-}
-
-/* Update or query one configuration parameter of a particular channel. */
-
-static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) {
- wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
- chan * TX_CHANNEL_CONFIG_MULT | mode);
- wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value);
- return;
-}
-
-/********** dump functions **********/
-
-static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
-#ifdef DEBUG_HORIZON
- unsigned int i;
- unsigned char * data = skb->data;
- PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
- for (i=0; i<skb->len && i < 256;i++)
- PRINTDM (DBG_DATA, "%02x ", data[i]);
- PRINTDE (DBG_DATA,"");
-#else
- (void) prefix;
- (void) vc;
- (void) skb;
-#endif
- return;
-}
-
-static inline void dump_regs (hrz_dev * dev) {
-#ifdef DEBUG_HORIZON
- PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG));
- PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF));
- PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF));
- PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF));
- PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF));
- PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF));
-#else
- (void) dev;
-#endif
- return;
-}
-
-static inline void dump_framer (hrz_dev * dev) {
-#ifdef DEBUG_HORIZON
- unsigned int i;
- PRINTDB (DBG_REGS, "framer registers:");
- for (i = 0; i < 0x10; ++i)
- PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i));
- PRINTDE (DBG_REGS,"");
-#else
- (void) dev;
-#endif
- return;
-}
-
-/********** VPI/VCI <-> (RX) channel conversions **********/
-
-/* RX channels are 10-bit integers; these fns are quite paranoid */
-
-static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
- unsigned short vci_bits = 10 - vpi_bits;
- if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
- *channel = vpi<<vci_bits | vci;
- return *channel ? 0 : -EINVAL;
- }
- return -EINVAL;
-}
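-// For example, with the default vpi_bits of 0 we have vci_bits == 10,
-// so only VPI 0 is usable and the channel is simply the VCI (1..1023;
-// channel 0 is rejected, presumably because non-user cells are routed
-// to it - see the RX config in hrz_init).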
-
-/********** decode RX queue entries **********/
-
-static inline u16 rx_q_entry_to_length (u32 x) {
- return x & RX_Q_ENTRY_LENGTH_MASK;
-}
-
-static inline u16 rx_q_entry_to_rx_channel (u32 x) {
- return (x>>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK;
-}
-
-/* Cell Transmit Rate Values
- *
- * the cell transmit rate (cells per sec) can be set to a variety of
- * different values by specifying two parameters: a timer preload from
- * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of
- * an exponent from 0 to 14; the special value 15 disables the timer).
- *
- * cellrate = baserate / (preload * 2^divider)
- *
- * The maximum cell rate that can be specified is therefore just the
- * base rate. Halving the preload is equivalent to adding 1 to the
- * divider and so values 1 to 8 of the preload are redundant except
- * in the case of a maximal divider (14).
- *
- * Given a desired cell rate, an algorithm to determine the preload
- * and divider is:
- *
- * a) x = baserate / cellrate, want p * 2^d = x (as far as possible)
- * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done
- * if x <= 16 then set p = x, d = 0 (high rates), done
- * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to
- * know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until
- * we find the range (n will be between 1 and 14), set d = n
- * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n
- *
- * The algorithm used below is a minor variant of the above.
- *
- * The base rate is derived from the oscillator frequency (Hz) using a
- * fixed divider:
- *
- * baserate = freq / 32 in the case of some Unknown Card
- * baserate = freq / 8 in the case of the Horizon 25
- * baserate = freq / 8 in the case of the Horizon Ultra 155
- *
- * The Horizon cards have oscillators and base rates as follows:
- *
- * Card Oscillator Base Rate
- * Unknown Card 33 MHz 1.03125 MHz (33 MHz = PCI freq)
- * Horizon 25 32 MHz 4 MHz
- * Horizon Ultra 155 40 MHz 5 MHz
- *
- * The following defines give the base rates in Hz. These were
- * previously a factor of 100 larger, no doubt someone was using
- * cps*100.
- */
-
-#define BR_UKN 1031250l
-#define BR_HRZ 4000000l
-#define BR_ULT 5000000l
-
-// d is an exponent
-#define CR_MIND 0
-#define CR_MAXD 14
-
-// p ranges from 1 to a power of 2
-#define CR_MAXPEXP 4
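-// A worked example of the algorithm above (illustrative only): on a
-// Horizon Ultra (base rate 5000000 cps) a request for 100 cps gives
-// x = 50000; since 2^11 < 50000/16 = 3125 <= 2^12 we get d = 12, and
-// p nearest 50000/2^12 (about 12.2) is 12, so the actual rate is
-// ceil(5000000/(12*4096)) = 102 cps - within the 10 cps tolerance
-// that hrz_open asks for.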
-
-static int make_rate (const hrz_dev * dev, u32 c, rounding r,
- u16 * bits, unsigned int * actual)
-{
- // note: rounding the rate down means rounding 'p' up
- const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ;
-
- u32 div = CR_MIND;
- u32 pre;
-
- // br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in
- // the tests below. We could think harder about exact possibilities
- // of failure...
-
- unsigned long br_man = br;
- unsigned int br_exp = 0;
-
- PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c,
- r == round_up ? "up" : r == round_down ? "down" : "nearest");
-
- // avoid div by zero
- if (!c) {
- PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!");
- return -EINVAL;
- }
-
- while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) {
- br_man = br_man >> 1;
- ++br_exp;
- }
-  // (br >> br_exp) << br_exp == br and
-  // br_exp <= CR_MAXPEXP + CR_MIND
-
- if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) {
- // Equivalent to: B <= (c << (MAXPEXP+MIND))
- // take care of rounding
- switch (r) {
- case round_down:
- pre = DIV_ROUND_UP(br, c<<div);
- // but p must be non-zero
- if (!pre)
- pre = 1;
- break;
- case round_nearest:
- pre = DIV_ROUND_CLOSEST(br, c<<div);
- // but p must be non-zero
- if (!pre)
- pre = 1;
- break;
- default: /* round_up */
- pre = br/(c<<div);
- // but p must be non-zero
- if (!pre)
- return -EINVAL;
- }
- PRINTD (DBG_QOS, "A: p=%u, d=%u", pre, div);
- goto got_it;
- }
-
- // at this point we have
- // d == MIND and (c << (MAXPEXP+MIND)) < B
- while (div < CR_MAXD) {
- div++;
- if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) {
- // Equivalent to: B <= (c << (MAXPEXP+d))
- // c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d)
- // 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP
-      // MAXP/2 < B/(c*2^d) <= MAXP
- // take care of rounding
- switch (r) {
- case round_down:
- pre = DIV_ROUND_UP(br, c<<div);
- break;
- case round_nearest:
- pre = DIV_ROUND_CLOSEST(br, c<<div);
- break;
- default: /* round_up */
- pre = br/(c<<div);
- }
- PRINTD (DBG_QOS, "B: p=%u, d=%u", pre, div);
- goto got_it;
- }
- }
- // at this point we have
- // d == MAXD and (c << (MAXPEXP+MAXD)) < B
- // but we cannot go any higher
- // take care of rounding
- if (r == round_down)
- return -EINVAL;
- pre = 1 << CR_MAXPEXP;
- PRINTD (DBG_QOS, "C: p=%u, d=%u", pre, div);
-got_it:
- // paranoia
- if (div > CR_MAXD || (!pre) || pre > 1<<CR_MAXPEXP) {
- PRINTD (DBG_QOS, "set_cr internal failure: d=%u p=%u",
- div, pre);
- return -EINVAL;
- } else {
- if (bits)
- *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
- if (actual) {
- *actual = DIV_ROUND_UP(br, pre<<div);
- PRINTD (DBG_QOS, "actual rate: %u", *actual);
- }
- return 0;
- }
-}
-
-static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding r, unsigned int tol,
- u16 * bit_pattern, unsigned int * actual) {
- unsigned int my_actual;
-
- PRINTD (DBG_QOS|DBG_FLOW, "make_rate_with_tolerance c=%u, %s, tol=%u",
- c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol);
-
- if (!actual)
- // actual rate is not returned
- actual = &my_actual;
-
- if (make_rate (dev, c, round_nearest, bit_pattern, actual))
- // should never happen as round_nearest always succeeds
- return -1;
-
- if (c - tol <= *actual && *actual <= c + tol)
- // within tolerance
- return 0;
- else
- // intolerant, try rounding instead
- return make_rate (dev, c, r, bit_pattern, actual);
-}
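-/* A minimal usage sketch, mirroring the CBR path in hrz_open below:
-
-     u16 pcr_bits;
-     unsigned int actual;
-     error = make_rate_with_tolerance (dev, pcr, round_up, 10,
-                                       &pcr_bits, &actual);
-     if (error)
-       return error; // no representable rate within tolerance
-*/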
-
-/********** Listen on a VC **********/
-
-static int hrz_open_rx (hrz_dev * dev, u16 channel) {
-  // is there any guarantee that we don't get two simultaneous
-  // identical calls of this function from different processes? yes:
- // rate_lock
- unsigned long flags;
- u32 channel_type; // u16?
-
- u16 buf_ptr = RX_CHANNEL_IDLE;
-
- rx_ch_desc * rx_desc = &memmap->rx_descs[channel];
-
- PRINTD (DBG_FLOW, "hrz_open_rx %x", channel);
-
- spin_lock_irqsave (&dev->mem_lock, flags);
- channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- // very serious error, should never occur
- if (channel_type != RX_CHANNEL_DISABLED) {
- PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open");
- return -EBUSY; // clean up?
- }
-
-  // Give a spare buffer back to the card if we have one
- if (dev->noof_spare_buffers) {
- buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers];
- PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr);
- // should never occur
- if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) {
- // but easy to recover from
- PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE");
- buf_ptr = RX_CHANNEL_IDLE;
- }
- } else {
- PRINTD (DBG_VCC, "using IDLE buffer pointer");
- }
-
- // Channel is currently disabled so change its status to idle
-
- // do we really need to save the flags again?
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- wr_mem (dev, &rx_desc->wr_buf_type,
- buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME);
- if (buf_ptr != RX_CHANNEL_IDLE)
- wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr);
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- // rxer->rate = make_rate (qos->peak_cells);
-
- PRINTD (DBG_FLOW, "hrz_open_rx ok");
-
- return 0;
-}
-
-#if 0
-/********** change vc rate for a given vc **********/
-
-static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
- rxer->rate = make_rate (qos->peak_cells);
-}
-#endif
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-static void hrz_kfree_skb (struct sk_buff * skb) {
- if (ATM_SKB(skb)->vcc->pop) {
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- } else {
- dev_kfree_skb_any (skb);
- }
-}
-
-/********** cancel listen on a VC **********/
-
-static void hrz_close_rx (hrz_dev * dev, u16 vc) {
- unsigned long flags;
-
- u32 value;
-
- u32 r1, r2;
-
- rx_ch_desc * rx_desc = &memmap->rx_descs[vc];
-
- int was_idle = 0;
-
- spin_lock_irqsave (&dev->mem_lock, flags);
- value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- if (value == RX_CHANNEL_DISABLED) {
- // I suppose this could happen once we deal with _NONE traffic properly
- PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc);
- return;
- }
- if (value == RX_CHANNEL_IDLE)
- was_idle = 1;
-
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- for (;;) {
- wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED);
-
- if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED)
- break;
-
- was_idle = 0;
- }
-
- if (was_idle) {
- spin_unlock_irqrestore (&dev->mem_lock, flags);
- return;
- }
-
- WAIT_FLUSH_RX_COMPLETE(dev);
-
- // XXX Is this all really necessary? We can rely on the rx_data_av
- // handler to discard frames that remain queued for delivery. If the
- // worry is that immediately reopening the channel (perhaps by a
- // different process) may cause some data to be mis-delivered then
- // there may still be a simpler solution (such as busy-waiting on
- // rx_busy once the channel is disabled or before a new one is
- // opened - does this leave any holes?). Arguably setting up and
- // tearing down the TX and RX halves of each virtual circuit could
- // most safely be done within ?x_busy protected regions.
-
- // OK, current changes are that Simon's marker is disabled and we DO
-  // look for NULL rxer elsewhere. The code here seems to flush frames
- // and then remember the last dead cell belonging to the channel
- // just disabled - the cell gets relinked at the next vc_open.
- // However, when all VCs are closed or only a few opened there are a
- // handful of buffers that are unusable.
-
- // Does anyone feel like documenting spare_buffers properly?
- // Does anyone feel like fixing this in a nicer way?
-
- // Flush any data which is left in the channel
- for (;;) {
- // Change the rx channel port to something different to the RX
- // channel we are trying to close to force Horizon to flush the rx
- // channel read and write pointers.
-
- u16 other = vc^(RX_CHANS/2);
-
- SELECT_RX_CHANNEL (dev, other);
- WAIT_UPDATE_COMPLETE (dev);
-
- r1 = rd_mem (dev, &rx_desc->rd_buf_type);
-
- // Select this RX channel. Flush doesn't seem to work unless we
- // select an RX channel before hand
-
- SELECT_RX_CHANNEL (dev, vc);
- WAIT_UPDATE_COMPLETE (dev);
-
- // Attempt to flush a frame on this RX channel
-
- FLUSH_RX_CHANNEL (dev, vc);
- WAIT_FLUSH_RX_COMPLETE (dev);
-
- // Force Horizon to flush rx channel read and write pointers as before
-
- SELECT_RX_CHANNEL (dev, other);
- WAIT_UPDATE_COMPLETE (dev);
-
- r2 = rd_mem (dev, &rx_desc->rd_buf_type);
-
- PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);
-
- if (r1 == r2) {
- dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
- break;
- }
- }
-
-#if 0
- {
- rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)];
- rx_q_entry * rd_ptr = dev->rx_q_entry;
-
- PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %u, wr_ptr = %u", rd_ptr, wr_ptr);
-
- while (rd_ptr != wr_ptr) {
- u32 x = rd_mem (dev, (HDW *) rd_ptr);
-
- if (vc == rx_q_entry_to_rx_channel (x)) {
- x |= SIMONS_DODGEY_MARKER;
-
- PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey");
-
- wr_mem (dev, (HDW *) rd_ptr, x);
- }
-
- if (rd_ptr == dev->rx_q_wrap)
- rd_ptr = dev->rx_q_reset;
- else
- rd_ptr++;
- }
- }
-#endif
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- return;
-}
-
-/********** schedule RX transfers **********/
-
-// Note on tail recursion: a GCC developer said that it is not likely
-// to be fixed soon, so do not define TAILRECURSIONWORKS unless you
-// are sure tail-call optimisation really happens, as you may
-// otherwise overflow the kernel stack.
-
-// giving this fn a return value would help GCC, allegedly
-
-static void rx_schedule (hrz_dev * dev, int irq) {
- unsigned int rx_bytes;
-
- int pio_instead = 0;
-#ifndef TAILRECURSIONWORKS
- pio_instead = 1;
- while (pio_instead) {
-#endif
- // bytes waiting for RX transfer
- rx_bytes = dev->rx_bytes;
-
-#if 0
- spin_count = 0;
- while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) {
- PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!");
- if (++spin_count > 10) {
- PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion");
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- clear_bit (rx_busy, &dev->flags);
- hrz_kfree_skb (dev->rx_skb);
- return;
- }
- }
-#endif
-
- // this code follows the TX code but (at the moment) there is only
- // one region - the skb itself. I don't know if this will change,
- // but it doesn't hurt to have the code here, disabled.
-
- if (rx_bytes) {
- // start next transfer within same region
- if (rx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (rx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
- dev->rx_bytes = 0;
- } else {
- PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
- dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
- rx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // rx_bytes == 0 -- we're between regions
- // regions remaining to transfer
-#if 0
- unsigned int rx_regions = dev->rx_regions;
-#else
- unsigned int rx_regions = 0;
-#endif
-
- if (rx_regions) {
-#if 0
- // start a new region
- dev->rx_addr = dev->rx_iovec->iov_base;
- rx_bytes = dev->rx_iovec->iov_len;
- ++dev->rx_iovec;
- dev->rx_regions = rx_regions - 1;
-
- if (rx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (rx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(full region)");
- dev->rx_bytes = 0;
- } else {
- PRINTD (DBG_RX|DBG_BUS, "(start multi region)");
- dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
- rx_bytes = MAX_TRANSFER_COUNT;
- }
-#endif
- } else {
- // rx_regions == 0
- // that's all folks - end of frame
- struct sk_buff * skb = dev->rx_skb;
- // dev->rx_iovec = 0;
-
- FLUSH_RX_CHANNEL (dev, dev->rx_channel);
-
- dump_skb ("<<<", dev->rx_channel, skb);
-
- PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);
-
- {
- struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
- // VC layer stats
- atomic_inc(&vcc->stats->rx);
- __net_timestamp(skb);
- // end of our responsibility
- vcc->push (vcc, skb);
- }
- }
- }
-
- // note: writing RX_COUNT clears any interrupt condition
- if (rx_bytes) {
- if (pio_instead) {
- if (irq)
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
- } else {
- wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
- }
- dev->rx_addr += rx_bytes;
- } else {
- if (irq)
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- // allow another RX thread to start
- YELLOW_LED_ON(dev);
- clear_bit (rx_busy, &dev->flags);
- PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
- }
-
-#ifdef TAILRECURSIONWORKS
- // and we all bless optimised tail calls
- if (pio_instead)
- return rx_schedule (dev, 0);
- return;
-#else
- // grrrrrrr!
- irq = 0;
- }
- return;
-#endif
-}
-
-/********** handle RX bus master complete events **********/
-
-static void rx_bus_master_complete_handler (hrz_dev * dev) {
- if (test_bit (rx_busy, &dev->flags)) {
- rx_schedule (dev, 1);
- } else {
- PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
- // clear interrupt condition on adapter
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- }
- return;
-}
-
-/********** (queue to) become the next TX thread **********/
-
-static int tx_hold (hrz_dev * dev) {
- PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
- wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
- PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
- if (signal_pending (current))
- return -1;
- PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
- return 0;
-}
-
-/********** allow another TX thread to start **********/
-
-static inline void tx_release (hrz_dev * dev) {
- clear_bit (tx_busy, &dev->flags);
- PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
- wake_up_interruptible (&dev->tx_queue);
-}
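-/* The usual pattern, as hrz_send below does it:
-
-     if (tx_hold (dev))
-       return -ERESTARTSYS;  // interrupted while waiting for the lock
-     ...set up dev->tx_addr, dev->tx_bytes etc...
-     tx_schedule (dev, 0);   // tx_release is called at end of frame
-*/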
-
-/********** schedule TX transfers **********/
-
-static void tx_schedule (hrz_dev * const dev, int irq) {
- unsigned int tx_bytes;
-
- int append_desc = 0;
-
- int pio_instead = 0;
-#ifndef TAILRECURSIONWORKS
- pio_instead = 1;
- while (pio_instead) {
-#endif
- // bytes in current region waiting for TX transfer
- tx_bytes = dev->tx_bytes;
-
-#if 0
- spin_count = 0;
- while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) {
- PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!");
- if (++spin_count > 10) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion");
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- tx_release (dev);
- hrz_kfree_skb (dev->tx_skb);
- return;
- }
- }
-#endif
-
- if (tx_bytes) {
- // start next transfer within same region
- if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (tx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
- if (!dev->tx_iovec) {
- // end of last region
- append_desc = 1;
- }
- dev->tx_bytes = 0;
- } else {
- PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
- dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
- tx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // tx_bytes == 0 -- we're between regions
- // regions remaining to transfer
- unsigned int tx_regions = dev->tx_regions;
-
- if (tx_regions) {
- // start a new region
- dev->tx_addr = dev->tx_iovec->iov_base;
- tx_bytes = dev->tx_iovec->iov_len;
- ++dev->tx_iovec;
- dev->tx_regions = tx_regions - 1;
-
- if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (tx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(full region)");
- dev->tx_bytes = 0;
- } else {
- PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
- dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
- tx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // tx_regions == 0
- // that's all folks - end of frame
- struct sk_buff * skb = dev->tx_skb;
- dev->tx_iovec = NULL;
-
- // VC layer stats
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- // free the skb
- hrz_kfree_skb (skb);
- }
- }
-
- // note: writing TX_COUNT clears any interrupt condition
- if (tx_bytes) {
- if (pio_instead) {
- if (irq)
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
- if (append_desc)
- wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
- } else {
- wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
- if (append_desc)
- wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
- append_desc
- ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
- : tx_bytes);
- }
- dev->tx_addr += tx_bytes;
- } else {
- if (irq)
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- YELLOW_LED_ON(dev);
- tx_release (dev);
- }
-
-#ifdef TAILRECURSIONWORKS
- // and we all bless optimised tail calls
- if (pio_instead)
- return tx_schedule (dev, 0);
- return;
-#else
- // grrrrrrr!
- irq = 0;
- }
- return;
-#endif
-}
-
-/********** handle TX bus master complete events **********/
-
-static void tx_bus_master_complete_handler (hrz_dev * dev) {
- if (test_bit (tx_busy, &dev->flags)) {
- tx_schedule (dev, 1);
- } else {
- PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion");
- // clear interrupt condition on adapter
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- }
- return;
-}
-
-/********** move RX Q pointer to next item in circular buffer **********/
-
-// called only from IRQ sub-handler
-static u32 rx_queue_entry_next (hrz_dev * dev) {
- u32 rx_queue_entry;
- spin_lock (&dev->mem_lock);
- rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
- if (dev->rx_q_entry == dev->rx_q_wrap)
- dev->rx_q_entry = dev->rx_q_reset;
- else
- dev->rx_q_entry++;
- wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset);
- spin_unlock (&dev->mem_lock);
- return rx_queue_entry;
-}
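-// (the RX_QUEUE_RD_PTR register holds an index, hence the
-// rx_q_entry - rx_q_reset pointer subtraction above)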
-
-/********** handle RX data received by device **********/
-
-// called from IRQ handler
-static void rx_data_av_handler (hrz_dev * dev) {
- u32 rx_queue_entry;
- u32 rx_queue_entry_flags;
- u16 rx_len;
- u16 rx_channel;
-
- PRINTD (DBG_FLOW, "hrz_data_av_handler");
-
- // try to grab rx lock (not possible during RX bus mastering)
- if (test_and_set_bit (rx_busy, &dev->flags)) {
- PRINTD (DBG_RX, "locked out of rx lock");
- return;
- }
- PRINTD (DBG_RX, "set rx_busy for dev %p", dev);
- // lock is cleared if we fail now, o/w after bus master completion
-
- YELLOW_LED_OFF(dev);
-
- rx_queue_entry = rx_queue_entry_next (dev);
-
- rx_len = rx_q_entry_to_length (rx_queue_entry);
- rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry);
-
- WAIT_FLUSH_RX_COMPLETE (dev);
-
- SELECT_RX_CHANNEL (dev, rx_channel);
-
- PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry);
- rx_queue_entry_flags = rx_queue_entry & (RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER);
-
- if (!rx_len) {
-    // (at least) bus-mastering breaks if we try to handle a
-    // zero-length frame; besides, AAL5 does not support them
- PRINTK (KERN_ERR, "zero-length frame!");
- rx_queue_entry_flags &= ~RX_COMPLETE_FRAME;
- }
-
- if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) {
- PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!");
- }
- if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) {
- struct atm_vcc * atm_vcc;
-
- PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len);
-
- atm_vcc = dev->rxer[rx_channel];
- // if no vcc is assigned to this channel, we should drop the frame
- // (is this what SIMONS etc. was trying to achieve?)
-
- if (atm_vcc) {
-
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
-
- if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
-
- struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
- if (skb) {
- // remember this so we can push it later
- dev->rx_skb = skb;
- // remember this so we can flush it later
- dev->rx_channel = rx_channel;
-
- // prepare socket buffer
- skb_put (skb, rx_len);
- ATM_SKB(skb)->vcc = atm_vcc;
-
- // simple transfer
- // dev->rx_regions = 0;
- // dev->rx_iovec = 0;
- dev->rx_bytes = rx_len;
- dev->rx_addr = skb->data;
- PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)",
- skb->data, rx_len);
-
- // do the business
- rx_schedule (dev, 0);
- return;
-
- } else {
- PRINTD (DBG_SKB|DBG_WARN, "failed to get skb");
- }
-
- } else {
- PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel);
- // do we count this?
- }
-
- } else {
- PRINTK (KERN_WARNING, "dropped over-size frame");
- // do we count this?
- }
-
- } else {
- PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)");
- // do we count this?
- }
-
- } else {
- // Wait update complete ? SPONG
- }
-
- // RX was aborted
- YELLOW_LED_ON(dev);
-
- FLUSH_RX_CHANNEL (dev,rx_channel);
- clear_bit (rx_busy, &dev->flags);
-
- return;
-}
-
-/********** interrupt handler **********/
-
-static irqreturn_t interrupt_handler(int irq, void *dev_id)
-{
- hrz_dev *dev = dev_id;
- u32 int_source;
- unsigned int irq_ok;
-
- PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id);
-
- // definitely for us
- irq_ok = 0;
- while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
- & INTERESTING_INTERRUPTS)) {
- // In the interests of fairness, the handlers below are
- // called in sequence and without immediate return to the head of
- // the while loop. This is only of issue for slow hosts (or when
- // debugging messages are on). Really slow hosts may find a fast
- // sender keeps them permanently in the IRQ handler. :(
-
- // (only an issue for slow hosts) RX completion goes before
- // rx_data_av as the former implies rx_busy and so the latter
- // would just abort. If it reschedules another transfer
- // (continuing the same frame) then it will not clear rx_busy.
-
- // (only an issue for slow hosts) TX completion goes before RX
- // data available as it is a much shorter routine - there is the
- // chance that any further transfers it schedules will be complete
- // by the time of the return to the head of the while loop
-
- if (int_source & RX_BUS_MASTER_COMPLETE) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted");
- rx_bus_master_complete_handler (dev);
- }
- if (int_source & TX_BUS_MASTER_COMPLETE) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted");
- tx_bus_master_complete_handler (dev);
- }
- if (int_source & RX_DATA_AV) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted");
- rx_data_av_handler (dev);
- }
- }
- if (irq_ok) {
- PRINTD (DBG_IRQ, "work done: %u", irq_ok);
- } else {
- PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source);
- }
-
- PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
- if (irq_ok)
- return IRQ_HANDLED;
- return IRQ_NONE;
-}
-
-/********** housekeeping **********/
-
-static void do_housekeeping (struct timer_list *t) {
- // just stats at the moment
- hrz_dev * dev = from_timer(dev, t, housekeeping);
-
- // collect device-specific (not driver/atm-linux) stats here
- dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
- dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF);
- dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF);
- dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF);
-
- mod_timer (&dev->housekeeping, jiffies + HZ/10);
-
- return;
-}
-
-/********** find an idle channel for TX and set it up **********/
-
-// called with tx_busy set
-static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
- unsigned short idle_channels;
- short tx_channel = -1;
- unsigned int spin_count;
- PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev);
-
- // better would be to fail immediately, the caller can then decide whether
- // to wait or drop (depending on whether this is UBR etc.)
- spin_count = 0;
- while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) {
- PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel");
- // delay a bit here
- if (++spin_count > 100) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel");
- return -EBUSY;
- }
- }
-
- // got an idle channel
- {
- // tx_idle ensures we look for idle channels in RR order
- int chan = dev->tx_idle;
-
- int keep_going = 1;
- while (keep_going) {
- if (idle_channels & (1<<chan)) {
- tx_channel = chan;
- keep_going = 0;
- }
- ++chan;
- if (chan == TX_CHANS)
- chan = 0;
- }
-
- dev->tx_idle = chan;
- }
-
- // set up the channel we found
- {
- // Initialise the cell header in the transmit channel descriptor
- // a.k.a. prepare the channel and remember that we have done so.
-
- tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
- u32 rd_ptr;
- u32 wr_ptr;
- u16 channel = vcc->channel;
-
- unsigned long flags;
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- // Update the transmit channel record.
- dev->tx_channel_record[tx_channel] = channel;
-
- // xBR channel
- update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS,
- vcc->tx_xbr_bits);
-
- // Update the PCR counter preload value etc.
- update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS,
- vcc->tx_pcr_bits);
-
-#if 0
- if (vcc->tx_xbr_bits == VBR_RATE_TYPE) {
- // SCR timer
- update_tx_channel_config (dev, tx_channel, SCR_TIMER_ACCESS,
- vcc->tx_scr_bits);
-
- // Bucket size...
- update_tx_channel_config (dev, tx_channel, BUCKET_CAPACITY_ACCESS,
- vcc->tx_bucket_bits);
-
- // ... and fullness
- update_tx_channel_config (dev, tx_channel, BUCKET_FULLNESS_ACCESS,
- vcc->tx_bucket_bits);
- }
-#endif
-
- // Initialise the read and write buffer pointers
- rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK;
- wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK;
-
- // idle TX channels should have identical pointers
- if (rd_ptr != wr_ptr) {
- PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!");
- // spin_unlock... return -E...
- // I wonder if gcc would get rid of one of the pointer aliases
- }
- PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.",
- rd_ptr, wr_ptr);
-
- switch (vcc->aal) {
- case aal0:
- PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0");
- rd_ptr |= CHANNEL_TYPE_RAW_CELLS;
- wr_ptr |= CHANNEL_TYPE_RAW_CELLS;
- break;
- case aal34:
- PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34");
- rd_ptr |= CHANNEL_TYPE_AAL3_4;
- wr_ptr |= CHANNEL_TYPE_AAL3_4;
- break;
- case aal5:
- rd_ptr |= CHANNEL_TYPE_AAL5;
- wr_ptr |= CHANNEL_TYPE_AAL5;
- // Initialise the CRC
- wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC);
- break;
- }
-
- wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr);
- wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr);
-
- // Write the Cell Header
- // Payload Type, CLP and GFC would go here if non-zero
- wr_mem (dev, &tx_desc->cell_header, channel);
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
- }
-
- return tx_channel;
-}
-
-/********** send a frame **********/
-
-static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- unsigned int spin_count;
- int free_buffers;
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc * vcc = HRZ_VCC(atm_vcc);
- u16 channel = vcc->channel;
-
- u32 buffers_required;
-
- /* signed for error return */
- short tx_channel;
-
- PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u",
- channel, skb->data, skb->len);
-
- dump_skb (">>>", channel, skb);
-
- if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) {
- PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel);
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
- // don't understand this
- ATM_SKB(skb)->vcc = atm_vcc;
-
- if (skb->len > atm_vcc->qos.txtp.max_sdu) {
- PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
- if (!channel) {
- PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel");
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
-#if 0
- {
- // where would be a better place for this? housekeeping?
- u16 status;
- pci_read_config_word (dev->pci_dev, PCI_STATUS, &status);
- if (status & PCI_STATUS_REC_MASTER_ABORT) {
- PRINTD (DBG_BUS|DBG_ERR, "Clearing PCI Master Abort (and cleaning up)");
- status &= ~PCI_STATUS_REC_MASTER_ABORT;
- pci_write_config_word (dev->pci_dev, PCI_STATUS, status);
- if (test_bit (tx_busy, &dev->flags)) {
- hrz_kfree_skb (dev->tx_skb);
- tx_release (dev);
- }
- }
- }
-#endif
-
-#ifdef DEBUG_HORIZON
- /* wey-hey! */
- if (channel == 1023) {
- unsigned int i;
- unsigned short d = 0;
- char * s = skb->data;
- if (*s++ == 'D') {
- for (i = 0; i < 4; ++i)
- d = (d << 4) | hex_to_bin(*s++);
- PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
- }
- }
-#endif
-
- // wait until TX is free and grab lock
- if (tx_hold (dev)) {
- hrz_kfree_skb (skb);
- return -ERESTARTSYS;
- }
-
- // Wait for enough space to be available in transmit buffer memory.
-
- // should be number of cells needed + 2 (according to hardware docs)
- // = ((framelen+8)+47) / 48 + 2
- // = (framelen+7) / 48 + 3, hmm... faster to put addition inside XXX
- buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3;
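-  // e.g. a 1000 byte frame needs (1000+7)/48 + 3 = 23 buffers,
-  // i.e. ceil((1000+8)/48) = 21 cells plus the 2 from the docs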
-
- // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry)
- spin_count = 0;
- while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) {
- PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d",
- free_buffers, buffers_required);
- // what is the appropriate delay? implement a timeout? (depending on line speed?)
- // mdelay (1);
- // what happens if we kill (current_pid, SIGKILL) ?
- schedule();
- if (++spin_count > 1000) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d",
- free_buffers, buffers_required);
- tx_release (dev);
- hrz_kfree_skb (skb);
- return -ERESTARTSYS;
- }
- }
-
- // Select a channel to transmit the frame on.
- if (channel == dev->last_vc) {
- PRINTD (DBG_TX, "last vc hack: hit");
- tx_channel = dev->tx_last;
- } else {
- PRINTD (DBG_TX, "last vc hack: miss");
- // Are we currently transmitting this VC on one of the channels?
- for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel)
- if (dev->tx_channel_record[tx_channel] == channel) {
- PRINTD (DBG_TX, "vc already on channel: hit");
- break;
- }
- if (tx_channel == TX_CHANS) {
- PRINTD (DBG_TX, "vc already on channel: miss");
- // Find and set up an idle channel.
- tx_channel = setup_idle_tx_channel (dev, vcc);
- if (tx_channel < 0) {
- PRINTD (DBG_TX|DBG_ERR, "failed to get channel");
- tx_release (dev);
- return tx_channel;
- }
- }
-
- PRINTD (DBG_TX, "got channel");
- SELECT_TX_CHANNEL(dev, tx_channel);
-
- dev->last_vc = channel;
- dev->tx_last = tx_channel;
- }
-
- PRINTD (DBG_TX, "using channel %u", tx_channel);
-
- YELLOW_LED_OFF(dev);
-
- // TX start transfer
-
- {
- unsigned int tx_len = skb->len;
- unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags;
- // remember this so we can free it later
- dev->tx_skb = skb;
-
- if (tx_iovcnt) {
- // scatter gather transfer
- dev->tx_regions = tx_iovcnt;
-      dev->tx_iovec = NULL; /* @@@ needs rewriting */
- dev->tx_bytes = 0;
- PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)",
- skb->data, tx_len);
- tx_release (dev);
- hrz_kfree_skb (skb);
- return -EIO;
- } else {
- // simple transfer
- dev->tx_regions = 0;
- dev->tx_iovec = NULL;
- dev->tx_bytes = tx_len;
- dev->tx_addr = skb->data;
- PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)",
- skb->data, tx_len);
- }
-
- // and do the business
- tx_schedule (dev, 0);
-
- }
-
- return 0;
-}
-
-/********** reset a card **********/
-
-static void hrz_reset (const hrz_dev * dev) {
- u32 control_0_reg = rd_regl (dev, CONTROL_0_REG);
-
- // why not set RESET_HORIZON to one and wait for the card to
- // reassert that bit as zero? Like so:
- control_0_reg = control_0_reg & RESET_HORIZON;
- wr_regl (dev, CONTROL_0_REG, control_0_reg);
- while (control_0_reg & RESET_HORIZON)
- control_0_reg = rd_regl (dev, CONTROL_0_REG);
-
- // old reset code retained:
- wr_regl (dev, CONTROL_0_REG, control_0_reg |
- RESET_ATM | RESET_RX | RESET_TX | RESET_HOST);
- // just guessing here
- udelay (1000);
-
- wr_regl (dev, CONTROL_0_REG, control_0_reg);
-}
-
-/********** read the burnt in address **********/
-
-static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
-{
- wr_regl (dev, CONTROL_0_REG, ctrl);
- udelay (5);
-}
-
-static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
-{
- // DI must be valid around rising SK edge
- WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);
- WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
-}
-
-static u16 read_bia(const hrz_dev *dev, u16 addr)
-{
- u32 ctrl = rd_regl (dev, CONTROL_0_REG);
-
- const unsigned int addr_bits = 6;
- const unsigned int data_bits = 16;
-
- unsigned int i;
-
- u16 res;
-
- ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI);
- WRITE_IT_WAIT(dev, ctrl);
-
- // wake Serial EEPROM and send 110 (READ) command
- ctrl |= (SEEPROM_CS | SEEPROM_DI);
- CLOCK_IT(dev, ctrl);
-
- ctrl |= SEEPROM_DI;
- CLOCK_IT(dev, ctrl);
-
- ctrl &= ~SEEPROM_DI;
- CLOCK_IT(dev, ctrl);
-
- for (i=0; i<addr_bits; i++) {
- if (addr & (1 << (addr_bits-1)))
- ctrl |= SEEPROM_DI;
- else
- ctrl &= ~SEEPROM_DI;
-
- CLOCK_IT(dev, ctrl);
-
- addr = addr << 1;
- }
-
- // we could check that we have DO = 0 here
- ctrl &= ~SEEPROM_DI;
-
- res = 0;
- for (i=0;i<data_bits;i++) {
- res = res >> 1;
-
- CLOCK_IT(dev, ctrl);
-
- if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO)
- res |= (1 << (data_bits-1));
- }
-
- ctrl &= ~(SEEPROM_SK | SEEPROM_CS);
- WRITE_IT_WAIT(dev, ctrl);
-
- return res;
-}
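-// As the ESI loop in hrz_init assumes, the 6-byte address sits in
-// EEPROM words 2..4, low byte first; a sketch of reading the first
-// word:
-//
-//   u16 w = read_bia (dev, 2);
-//   esi[0] = w & 0xff;
-//   esi[1] = w >> 8;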
-
-/********** initialise a card **********/
-
-static int hrz_init(hrz_dev *dev)
-{
- int onefivefive;
-
- u16 chan;
-
- int buff_count;
-
- HDW * mem;
-
- cell_buf * tx_desc;
- cell_buf * rx_desc;
-
- u32 ctrl;
-
- ctrl = rd_regl (dev, CONTROL_0_REG);
- PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl);
- onefivefive = ctrl & ATM_LAYER_STATUS;
-
-  if (onefivefive)
-    printk (DEV_LABEL ": Horizon Ultra (at 155.52 Mbps)");
-  else
-    printk (DEV_LABEL ": Horizon (at 25 Mbps)");
-
- printk (":");
- // Reset the card to get everything in a known state
-
- printk (" reset");
- hrz_reset (dev);
-
- // Clear all the buffer memory
-
- printk (" clearing memory");
-
- for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem)
- wr_mem (dev, mem, 0);
-
- printk (" tx channels");
-
-  // All eight transmit channels are set up as AAL5 ABR channels with
- // a 16us cell spacing. Why?
-
- // Channel 0 gets the free buffer at 100h, channel 1 gets the free
- // buffer at 110h etc.
-
- for (chan = 0; chan < TX_CHANS; ++chan) {
- tx_ch_desc * tx_desc = &memmap->tx_descs[chan];
- cell_buf * buf = &memmap->inittxbufs[chan];
-
- // initialise the read and write buffer pointers
- wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf));
- wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf));
-
- // set the status of the initial buffers to empty
- wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY);
- }
-
- // Use space bufn3 at the moment for tx buffers
-
- printk (" tx buffers");
-
- tx_desc = memmap->bufn3;
-
- wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY);
-
- for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) {
- wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY);
- tx_desc++;
- }
-
- wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY);
-
- // Initialise the transmit free buffer count
- wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE);
-
- printk (" rx channels");
-
- // Initialise all of the receive channels to be AAL5 disabled with
- // an interrupt threshold of 0
-
- for (chan = 0; chan < RX_CHANS; ++chan) {
- rx_ch_desc * rx_desc = &memmap->rx_descs[chan];
-
- wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED);
- }
-
- printk (" rx buffers");
-
- // Use space bufn4 at the moment for rx buffers
-
- rx_desc = memmap->bufn4;
-
- wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY);
-
- for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) {
- wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY);
-
- rx_desc++;
- }
-
- wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY);
-
- // Initialise the receive free buffer count
- wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE);
-
-  // Initialise the Horizon's registers
-
- // TX config
- wr_regw (dev, TX_CONFIG_OFF,
- ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE);
-
-  // RX config. Use 10-x VC bits, x VP bits, and put non-user cells in channel 0.
- wr_regw (dev, RX_CONFIG_OFF,
- DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits);
-
- // RX line config
- wr_regw (dev, RX_LINE_CONFIG_OFF,
- LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4);
-
- // Set the max AAL5 cell count to be just enough to contain the
- // largest AAL5 frame that the user wants to receive
- wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
- DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD));
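-  // (with the default max_rx_size of 9000 that is
-  // ceil((9000+8)/48) = 188 cells)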
-
- // Enable receive
- wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
-
- printk (" control");
-
- // Drive the OE of the LEDs then turn the green LED on
- ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED;
- wr_regl (dev, CONTROL_0_REG, ctrl);
-
- // Test for a 155-capable card
-
- if (onefivefive) {
- // Select 155 mode... make this a choice (or: how do we detect
- // external line speed and switch?)
- ctrl |= ATM_LAYER_SELECT;
- wr_regl (dev, CONTROL_0_REG, ctrl);
-
- // test SUNI-lite vs SAMBA
-
- // Register 0x00 in the SUNI will have some of bits 3-7 set, and
- // they will always be zero for the SAMBA. Ha! Bloody hardware
- // engineers. It'll never work.
-
- if (rd_framer (dev, 0) & 0x00f0) {
- // SUNI
- printk (" SUNI");
-
- // Reset, just in case
- wr_framer (dev, 0x00, 0x0080);
- wr_framer (dev, 0x00, 0x0000);
-
- // Configure transmit FIFO
- wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002);
-
- // Set line timed mode
- wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001);
- } else {
- // SAMBA
- printk (" SAMBA");
-
- // Reset, just in case
- wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001);
- wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001);
-
- // Turn off diagnostic loopback and enable line-timed mode
- wr_framer (dev, 0, 0x0002);
-
- // Turn on transmit outputs
- wr_framer (dev, 2, 0x0B80);
- }
- } else {
- // Select 25 mode
- ctrl &= ~ATM_LAYER_SELECT;
-
- // Madge B154 setup
- // none required?
- }
-
- printk (" LEDs");
-
- GREEN_LED_ON(dev);
- YELLOW_LED_ON(dev);
-
- printk (" ESI=");
-
- {
- u16 b = 0;
- int i;
- u8 * esi = dev->atm_dev->esi;
-
- // in the card I have, EEPROM
- // addresses 0, 1, 2 contain 0
-    // addresses 5, 6 etc. contain ffff
- // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order)
- // the read_bia routine gets the BIA in Ethernet bit order
-
- for (i=0; i < ESI_LEN; ++i) {
- if (i % 2 == 0)
- b = read_bia (dev, i/2 + 2);
- else
- b = b >> 8;
- esi[i] = b & 0xFF;
- printk ("%02x", esi[i]);
- }
- }
-
-  // Enable only the RX data available and RX/TX bus-master complete interrupts
- wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS);
- printk (" IRQ on");
-
- printk (".\n");
-
- return onefivefive;
-}
-
-/********** check max_sdu **********/
-
-static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) {
- PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu");
-
- switch (aal) {
- case aal0:
- if (!(tp->max_sdu)) {
- PRINTD (DBG_QOS, "defaulting max_sdu");
- tp->max_sdu = ATM_AAL0_SDU;
- } else if (tp->max_sdu != ATM_AAL0_SDU) {
- PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu");
- return -EINVAL;
- }
- break;
- case aal34:
- if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) {
- PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
- tp->max_sdu = ATM_MAX_AAL34_PDU;
- }
- break;
- case aal5:
- if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) {
- PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
- tp->max_sdu = max_frame_size;
- }
- break;
- }
- return 0;
-}
-
-/********** check pcr **********/
-
-// something like this should be part of ATM Linux
-static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) {
- // we are assuming non-UBR, and non-special values of pcr
- if (tp->min_pcr == ATM_MAX_PCR)
- PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR");
- else if (tp->min_pcr < 0)
- PRINTD (DBG_QOS, "luser gave negative min_pcr");
- else if (tp->min_pcr && tp->min_pcr > pcr)
- PRINTD (DBG_QOS, "pcr less than min_pcr");
- else
- // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1)
- // easier to #define ATM_MAX_PCR 0 and have all rates unsigned?
- // [this would get rid of next two conditionals]
- if ((0) && tp->max_pcr == ATM_MAX_PCR)
- PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR");
- else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0)
- PRINTD (DBG_QOS, "luser gave negative max_pcr");
- else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr)
- PRINTD (DBG_QOS, "pcr greater than max_pcr");
- else {
- // each limit unspecified or not violated
- PRINTD (DBG_QOS, "xBR(pcr) OK");
- return 0;
- }
- PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d",
- pcr, tp->min_pcr, tp->pcr, tp->max_pcr);
- return -EINVAL;
-}
-
-/********** open VC **********/
-
-static int hrz_open (struct atm_vcc *atm_vcc)
-{
- int error;
- u16 channel;
-
- struct atm_qos * qos;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
-
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc vcc;
- hrz_vcc * vccp; // allocated late
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
- PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci);
-
-#ifdef ATM_VPI_UNSPEC
- // UNSPEC is deprecated, remove this code eventually
- if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
- PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
- return -EINVAL;
- }
-#endif
-
- error = vpivci_to_channel (&channel, vpi, vci);
- if (error) {
- PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
- return error;
- }
-
- vcc.channel = channel;
- // max speed for the moment
- vcc.tx_rate = 0x0;
-
- qos = &atm_vcc->qos;
-
- // check AAL and remember it
- switch (qos->aal) {
- case ATM_AAL0:
- // we would if it were 48 bytes and not 52!
- PRINTD (DBG_QOS|DBG_VCC, "AAL0");
- vcc.aal = aal0;
- break;
- case ATM_AAL34:
-    // we would if I knew how to do the SAR!
- PRINTD (DBG_QOS|DBG_VCC, "AAL3/4");
- vcc.aal = aal34;
- break;
- case ATM_AAL5:
- PRINTD (DBG_QOS|DBG_VCC, "AAL5");
- vcc.aal = aal5;
- break;
- default:
- PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
- return -EINVAL;
- }
-
- // TX traffic parameters
-
- // there are two, interrelated problems here: 1. the reservation of
- // PCR is not a binary choice, we are given bounds and/or a
- // desirable value; 2. the device is only capable of certain values,
- // most of which are not integers. It is almost certainly acceptable
- // to be off by a maximum of 1 to 10 cps.
-
- // Pragmatic choice: always store an integral PCR as that which has
- // been allocated, even if we allocate a little (or a lot) less,
- // after rounding. The actual allocation depends on what we can
- // manage with our rate selection algorithm. The rate selection
- // algorithm is given an integral PCR and a tolerance and told
- // whether it should round the value up or down if the tolerance is
- // exceeded; it returns: a) the actual rate selected (rounded up to
- // the nearest integer), b) a bit pattern to feed to the timer
- // register, and c) a failure value if no applicable rate exists.
-
- // Part of the job is done by atm_pcr_goal which gives us a PCR
- // specification which says: EITHER grab the maximum available PCR
- // (and perhaps a lower bound which we must not pass), OR grab this
- // amount, rounding down if you have to (and perhaps a lower bound
- // which we must not pass) OR grab this amount, rounding up if you
- // have to (and perhaps an upper bound which we must not pass). If any
- // bounds ARE passed we fail. Note that rounding is only rounding to
- // match device limitations, we do not round down to satisfy
- // bandwidth availability even if this would not violate any given
- // lower bound.
-
- // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s
- // (say) so this is not even a binary fixpoint cell rate (but this
- // device can do it). To avoid this sort of hassle we use a
- // tolerance parameter (currently fixed at 10 cps).
-
- PRINTD (DBG_QOS, "TX:");
-
- txtp = &qos->txtp;
-
- // set up defaults for no traffic
- vcc.tx_rate = 0;
- // who knows what would actually happen if you try and send on this?
- vcc.tx_xbr_bits = IDLE_RATE_TYPE;
- vcc.tx_pcr_bits = CLOCK_DISABLE;
-#if 0
- vcc.tx_scr_bits = CLOCK_DISABLE;
- vcc.tx_bucket_bits = 0;
-#endif
-
- if (txtp->traffic_class != ATM_NONE) {
- error = check_max_sdu (vcc.aal, txtp, max_tx_size);
- if (error) {
- PRINTD (DBG_QOS, "TX max_sdu check failed");
- return error;
- }
-
- switch (txtp->traffic_class) {
- case ATM_UBR: {
- // we take "the PCR" as a rate-cap
- // not reserved
- vcc.tx_rate = 0;
- make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL);
- vcc.tx_xbr_bits = ABR_RATE_TYPE;
- break;
- }
-#if 0
- case ATM_ABR: {
- // reserve min, allow up to max
- vcc.tx_rate = 0; // ?
- make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, 0);
- vcc.tx_xbr_bits = ABR_RATE_TYPE;
- break;
- }
-#endif
- case ATM_CBR: {
- int pcr = atm_pcr_goal (txtp);
- rounding r;
- if (!pcr) {
- // down vs. up, remaining bandwidth vs. unlimited bandwidth!!
- // should really have: once someone gets unlimited bandwidth
- // that no more non-UBR channels can be opened until the
- // unlimited one closes?? For the moment, round_down means
- // greedy people actually get something and not nothing
- r = round_down;
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
- pcr = dev->tx_avail;
- } else if (pcr < 0) {
- r = round_down;
- pcr = -pcr;
- } else {
- r = round_up;
- }
- error = make_rate_with_tolerance (dev, pcr, r, 10,
- &vcc.tx_pcr_bits, &vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "could not make rate from TX PCR");
- return error;
- }
- // not really clear what further checking is needed
- error = atm_pcr_check (txtp, vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "TX PCR failed consistency check");
- return error;
- }
- vcc.tx_xbr_bits = CBR_RATE_TYPE;
- break;
- }
-#if 0
- case ATM_VBR: {
- int pcr = atm_pcr_goal (txtp);
- // int scr = atm_scr_goal (txtp);
- int scr = pcr/2; // just for fun
- unsigned int mbs = 60; // just for fun
- rounding pr;
- rounding sr;
- unsigned int bucket;
- if (!pcr) {
- pr = round_nearest;
- pcr = 1<<30;
- } else if (pcr < 0) {
- pr = round_down;
- pcr = -pcr;
- } else {
- pr = round_up;
- }
- error = make_rate_with_tolerance (dev, pcr, pr, 10,
- &vcc.tx_pcr_bits, 0);
- if (!scr) {
- // see comments for PCR with CBR above
- sr = round_down;
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
- scr = dev->tx_avail;
- } else if (scr < 0) {
- sr = round_down;
- scr = -scr;
- } else {
- sr = round_up;
- }
- error = make_rate_with_tolerance (dev, scr, sr, 10,
- &vcc.tx_scr_bits, &vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "could not make rate from TX SCR");
- return error;
- }
- // not really clear what further checking is needed
- // error = atm_scr_check (txtp, vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "TX SCR failed consistency check");
- return error;
- }
- // bucket calculations (from a piece of paper...) cell bucket
- // capacity must be largest integer smaller than m(p-s)/p + 1
- // where m = max burst size, p = pcr, s = scr
- bucket = mbs*(pcr-scr)/pcr;
- if (bucket*pcr != mbs*(pcr-scr))
- bucket += 1;
- if (bucket > BUCKET_MAX_SIZE) {
- PRINTD (DBG_QOS, "shrinking bucket from %u to %u",
- bucket, BUCKET_MAX_SIZE);
- bucket = BUCKET_MAX_SIZE;
- }
- vcc.tx_xbr_bits = VBR_RATE_TYPE;
- vcc.tx_bucket_bits = bucket;
- break;
- }
-#endif
- default: {
- PRINTD (DBG_QOS, "unsupported TX traffic class");
- return -EINVAL;
- }
- }
- }
-
- // RX traffic parameters
-
- PRINTD (DBG_QOS, "RX:");
-
- rxtp = &qos->rxtp;
-
- // set up defaults for no traffic
- vcc.rx_rate = 0;
-
- if (rxtp->traffic_class != ATM_NONE) {
- error = check_max_sdu (vcc.aal, rxtp, max_rx_size);
- if (error) {
- PRINTD (DBG_QOS, "RX max_sdu check failed");
- return error;
- }
- switch (rxtp->traffic_class) {
- case ATM_UBR: {
- // not reserved
- break;
- }
-#if 0
- case ATM_ABR: {
- // reserve min
- vcc.rx_rate = 0; // ?
- break;
- }
-#endif
- case ATM_CBR: {
- int pcr = atm_pcr_goal (rxtp);
- if (!pcr) {
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
- pcr = dev->rx_avail;
- } else if (pcr < 0) {
- pcr = -pcr;
- }
- vcc.rx_rate = pcr;
- // not really clear what further checking is needed
- error = atm_pcr_check (rxtp, vcc.rx_rate);
- if (error) {
- PRINTD (DBG_QOS, "RX PCR failed consistency check");
- return error;
- }
- break;
- }
-#if 0
- case ATM_VBR: {
- // int scr = atm_scr_goal (rxtp);
- int scr = 1<<16; // just for fun
- if (!scr) {
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
- scr = dev->rx_avail;
- } else if (scr < 0) {
- scr = -scr;
- }
- vcc.rx_rate = scr;
- // not really clear what further checking is needed
- // error = atm_scr_check (rxtp, vcc.rx_rate);
- if (error) {
- PRINTD (DBG_QOS, "RX SCR failed consistency check");
- return error;
- }
- break;
- }
-#endif
- default: {
- PRINTD (DBG_QOS, "unsupported RX traffic class");
- return -EINVAL;
- }
- }
- }
-
-
- // late abort useful for diagnostics
- if (vcc.aal != aal5) {
- PRINTD (DBG_QOS, "AAL not supported");
- return -EINVAL;
- }
-
- // get space for our vcc stuff and copy parameters into it
- vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL);
- if (!vccp) {
- PRINTK (KERN_ERR, "out of memory!");
- return -ENOMEM;
- }
- *vccp = vcc;
-
- // clear error and grab cell rate resource lock
- error = 0;
- spin_lock (&dev->rate_lock);
-
- if (vcc.tx_rate > dev->tx_avail) {
- PRINTD (DBG_QOS, "not enough TX PCR left");
- error = -EAGAIN;
- }
-
- if (vcc.rx_rate > dev->rx_avail) {
- PRINTD (DBG_QOS, "not enough RX PCR left");
- error = -EAGAIN;
- }
-
- if (!error) {
- // really consume cell rates
- dev->tx_avail -= vcc.tx_rate;
- dev->rx_avail -= vcc.rx_rate;
- PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR",
- vcc.tx_rate, vcc.rx_rate);
- }
-
- // release lock and exit on error
- spin_unlock (&dev->rate_lock);
- if (error) {
- PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources");
- kfree (vccp);
- return error;
- }
-
- // this is "immediately before allocating the connection identifier
- // in hardware" - so long as the next call does not fail :)
- set_bit(ATM_VF_ADDR,&atm_vcc->flags);
-
- // any errors here are very serious and should never occur
-
- if (rxtp->traffic_class != ATM_NONE) {
- if (dev->rxer[channel]) {
- PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX");
- error = -EBUSY;
- }
- if (!error)
- error = hrz_open_rx (dev, channel);
- if (error) {
- kfree (vccp);
- return error;
- }
- // this link allows RX frames through
- dev->rxer[channel] = atm_vcc;
- }
-
- // success, set elements of atm_vcc
- atm_vcc->dev_data = (void *) vccp;
-
- // indicate readiness
- set_bit(ATM_VF_READY,&atm_vcc->flags);
-
- return 0;
-}
-
-/********** close VC **********/
-
-static void hrz_close (struct atm_vcc * atm_vcc) {
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc * vcc = HRZ_VCC(atm_vcc);
- u16 channel = vcc->channel;
- PRINTD (DBG_VCC|DBG_FLOW, "hrz_close");
-
- // indicate unreadiness
- clear_bit(ATM_VF_READY,&atm_vcc->flags);
-
- if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
- unsigned int i;
-
- // let any TX on this channel that has started complete
- // no restart, just keep trying
- while (tx_hold (dev))
- ;
- // remove record of any tx_channel having been setup for this channel
- for (i = 0; i < TX_CHANS; ++i)
- if (dev->tx_channel_record[i] == channel) {
- dev->tx_channel_record[i] = -1;
- break;
- }
- if (dev->last_vc == channel)
- dev->tx_last = -1;
- tx_release (dev);
- }
-
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
- // disable RXing - it tries quite hard
- hrz_close_rx (dev, channel);
- // forget the vcc - no more skbs will be pushed
- if (atm_vcc != dev->rxer[channel])
- PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p",
- "arghhh! we're going to die!",
- atm_vcc, dev->rxer[channel]);
- dev->rxer[channel] = NULL;
- }
-
- // atomically release our rate reservation
- spin_lock (&dev->rate_lock);
- PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR",
- vcc->tx_rate, vcc->rx_rate);
- dev->tx_avail += vcc->tx_rate;
- dev->rx_avail += vcc->rx_rate;
- spin_unlock (&dev->rate_lock);
-
- // free our structure
- kfree (vcc);
- // say the VPI/VCI is free again
- clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
-}
-
-#if 0
-static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_ioctl");
- return -1;
-}
-
-unsigned char hrz_phy_get (struct atm_dev * atm_dev, unsigned long addr) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_phy_get");
- return 0;
-}
-
-static void hrz_phy_put (struct atm_dev * atm_dev, unsigned char value,
- unsigned long addr) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_phy_put");
-}
-
-static int hrz_change_qos (struct atm_vcc * atm_vcc, struct atm_qos *qos, int flgs) {
- hrz_dev * dev = HRZ_DEV(vcc->dev);
- PRINTD (DBG_FLOW, "hrz_change_qos");
- return -1;
-}
-#endif
-
-/********** proc file contents **********/
-
-static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- int left = *pos;
- PRINTD (DBG_FLOW, "hrz_proc_read");
-
- /* more diagnostics here? */
-
-#if 0
- if (!left--) {
- unsigned int count = sprintf (page, "vbr buckets:");
- unsigned int i;
- for (i = 0; i < TX_CHANS; ++i)
-	count += sprintf (page+count, " %u/%u",
- query_tx_channel_config (dev, i, BUCKET_FULLNESS_ACCESS),
- query_tx_channel_config (dev, i, BUCKET_CAPACITY_ACCESS));
- count += sprintf (page+count, ".\n");
- return count;
- }
-#endif
-
- if (!left--)
- return sprintf (page,
- "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n",
- dev->tx_cell_count, dev->rx_cell_count,
- dev->hec_error_count, dev->unassigned_cell_count);
-
- if (!left--)
- return sprintf (page,
- "free cell buffers: TX %hu, RX %hu+%hu.\n",
- rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF),
- rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF),
- dev->noof_spare_buffers);
-
- if (!left--)
- return sprintf (page,
- "cps remaining: TX %u, RX %u\n",
- dev->tx_avail, dev->rx_avail);
-
- return 0;
-}
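/*
 * Note on the *pos / left-- idiom above: each call emits exactly one
 * formatted line -- position 0 the cell counters, 1 the free buffer
 * counts, 2 the remaining cell rates -- and any later position returns
 * 0, which the ATM proc layer treats as end of file.
 */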
-
-static const struct atmdev_ops hrz_ops = {
- .open = hrz_open,
- .close = hrz_close,
- .send = hrz_send,
- .proc_read = hrz_proc_read,
- .owner = THIS_MODULE,
-};
-
-static int hrz_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_ent)
-{
- hrz_dev * dev;
- int err = 0;
-
- // adapter slot free, read resources from PCI configuration space
- u32 iobase = pci_resource_start (pci_dev, 0);
- u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
- unsigned int irq;
- unsigned char lat;
-
- PRINTD (DBG_FLOW, "hrz_probe");
-
- if (pci_enable_device(pci_dev))
- return -EINVAL;
-
- /* XXX DEV_LABEL is a guess */
- if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
- err = -EINVAL;
- goto out_disable;
- }
-
- dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
- if (!dev) {
- // perhaps we should be nice: deregister all adapters and abort?
- PRINTD(DBG_ERR, "out of memory");
- err = -ENOMEM;
- goto out_release;
- }
-
- pci_set_drvdata(pci_dev, dev);
-
- // grab IRQ and install handler - move this someplace more sensible
- irq = pci_dev->irq;
- if (request_irq(irq,
- interrupt_handler,
- IRQF_SHARED, /* irqflags guess */
- DEV_LABEL, /* name guess */
- dev)) {
- PRINTD(DBG_WARN, "request IRQ failed!");
- err = -EINVAL;
- goto out_free;
- }
-
- PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
- iobase, irq, membase);
-
- dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1,
- NULL);
- if (!(dev->atm_dev)) {
- PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
- err = -EINVAL;
- goto out_free_irq;
- }
-
- PRINTD(DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
- dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
- dev->pci_dev = pci_dev;
-
- // enable bus master accesses
- pci_set_master(pci_dev);
-
- // frobnicate latency (upwards, usually)
- pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat);
- if (pci_lat) {
- PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu",
- "changing", lat, pci_lat);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
- } else if (lat < MIN_PCI_LATENCY) {
- PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu",
- "increasing", lat, MIN_PCI_LATENCY);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
- }
-
- dev->iobase = iobase;
- dev->irq = irq;
- dev->membase = membase;
-
- dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0];
- dev->rx_q_wrap = &memmap->rx_q_entries[RX_CHANS-1];
-
- // these next three are performance hacks
- dev->last_vc = -1;
- dev->tx_last = -1;
- dev->tx_idle = 0;
-
- dev->tx_regions = 0;
- dev->tx_bytes = 0;
- dev->tx_skb = NULL;
- dev->tx_iovec = NULL;
-
- dev->tx_cell_count = 0;
- dev->rx_cell_count = 0;
- dev->hec_error_count = 0;
- dev->unassigned_cell_count = 0;
-
- dev->noof_spare_buffers = 0;
-
- {
- unsigned int i;
- for (i = 0; i < TX_CHANS; ++i)
- dev->tx_channel_record[i] = -1;
- }
-
- dev->flags = 0;
-
- // Allocate cell rates and remember ASIC version
-  // Fibre: ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
-  // Copper: (WRONG) we want 6 into the above, close to 25Mb/s
-  // Copper: (plagiarise!) 25600000/8/270*260/53 - n/53
-
- if (hrz_init(dev)) {
- // to be really pedantic, this should be ATM_OC3c_PCR
- dev->tx_avail = ATM_OC3_PCR;
- dev->rx_avail = ATM_OC3_PCR;
- set_bit(ultra, &dev->flags); // NOT "|= ultra" !
- } else {
- dev->tx_avail = ((25600000/8)*26)/(27*53);
- dev->rx_avail = ((25600000/8)*26)/(27*53);
- PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering.");
- }
-
- // rate changes spinlock
- spin_lock_init(&dev->rate_lock);
-
- // on-board memory access spinlock; we want atomic reads and
- // writes to adapter memory (handles IRQ and SMP)
- spin_lock_init(&dev->mem_lock);
-
- init_waitqueue_head(&dev->tx_queue);
-
-  // vpi_bits in 0..4, so vci_bits in 6..10
- dev->atm_dev->ci_range.vpi_bits = vpi_bits;
- dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
-
- timer_setup(&dev->housekeeping, do_housekeeping, 0);
- mod_timer(&dev->housekeeping, jiffies);
-
-out:
- return err;
-
-out_free_irq:
- free_irq(irq, dev);
-out_free:
- kfree(dev);
-out_release:
- release_region(iobase, HRZ_IO_EXTENT);
-out_disable:
- pci_disable_device(pci_dev);
- goto out;
-}
-
-static void hrz_remove_one(struct pci_dev *pci_dev)
-{
- hrz_dev *dev;
-
- dev = pci_get_drvdata(pci_dev);
-
- PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
- del_timer_sync(&dev->housekeeping);
- hrz_reset(dev);
- atm_dev_deregister(dev->atm_dev);
- free_irq(dev->irq, dev);
- release_region(dev->iobase, HRZ_IO_EXTENT);
- kfree(dev);
-
- pci_disable_device(pci_dev);
-}
-
-static void __init hrz_check_args (void) {
-#ifdef DEBUG_HORIZON
- PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
-#else
- if (debug)
- PRINTK (KERN_NOTICE, "no debug support in this image");
-#endif
-
- if (vpi_bits > HRZ_MAX_VPI)
- PRINTK (KERN_ERR, "vpi_bits has been limited to %hu",
- vpi_bits = HRZ_MAX_VPI);
-
- if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT)
- PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu",
- max_tx_size = TX_AAL5_LIMIT);
-
- if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT)
- PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu",
- max_rx_size = RX_AAL5_LIMIT);
-
- return;
-}
-
-MODULE_AUTHOR(maintainer_string);
-MODULE_DESCRIPTION(description_string);
-MODULE_LICENSE("GPL");
-module_param(debug, ushort, 0644);
-module_param(vpi_bits, ushort, 0);
-module_param(max_tx_size, int, 0);
-module_param(max_rx_size, int, 0);
-module_param(pci_lat, byte, 0);
-MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
-MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
-MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
-MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
-MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-
-static const struct pci_device_id hrz_pci_tbl[] = {
- { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
-
-static struct pci_driver hrz_driver = {
- .name = "horizon",
- .probe = hrz_probe,
- .remove = hrz_remove_one,
- .id_table = hrz_pci_tbl,
-};
-
-/********** module entry **********/
-
-static int __init hrz_module_init (void) {
- BUILD_BUG_ON(sizeof(struct MEMMAP) != 128*1024/4);
-
- show_version();
-
- // check arguments
- hrz_check_args();
-
- // get the juice
- return pci_register_driver(&hrz_driver);
-}
-
-/********** module exit **********/
-
-static void __exit hrz_module_exit (void) {
- PRINTD (DBG_FLOW, "cleanup_module");
-
- pci_unregister_driver(&hrz_driver);
-}
-
-module_init(hrz_module_init);
-module_exit(hrz_module_exit);
diff --git a/drivers/atm/horizon.h b/drivers/atm/horizon.h
deleted file mode 100644
index 7523eba19bad..000000000000
--- a/drivers/atm/horizon.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- Madge Horizon ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/*
- IMPORTANT NOTE: Madge Networks no longer makes the adapters
- supported by this driver and makes no commitment to maintain it.
-*/
-
-/* too many macros - change to inline functions */
-
-#ifndef DRIVER_ATM_HORIZON_H
-#define DRIVER_ATM_HORIZON_H
-
-
-#ifdef CONFIG_ATM_HORIZON_DEBUG
-#define DEBUG_HORIZON
-#endif
-
-#define DEV_LABEL "hrz"
-
-#ifndef PCI_VENDOR_ID_MADGE
-#define PCI_VENDOR_ID_MADGE 0x10B6
-#endif
-#ifndef PCI_DEVICE_ID_MADGE_HORIZON
-#define PCI_DEVICE_ID_MADGE_HORIZON 0x1000
-#endif
-
-// diagnostic output
-
-#define PRINTK(severity,format,args...) \
- printk(severity DEV_LABEL ": " format "\n" , ## args)
-
-#ifdef DEBUG_HORIZON
-
-#define DBG_ERR 0x0001
-#define DBG_WARN 0x0002
-#define DBG_INFO 0x0004
-#define DBG_VCC 0x0008
-#define DBG_QOS 0x0010
-#define DBG_TX 0x0020
-#define DBG_RX 0x0040
-#define DBG_SKB 0x0080
-#define DBG_IRQ 0x0100
-#define DBG_FLOW 0x0200
-#define DBG_BUS 0x0400
-#define DBG_REGS 0x0800
-#define DBG_DATA 0x1000
-#define DBG_MASK 0x1fff
-
-/* the ## prevents the annoying double expansion of the macro arguments */
-/* KERN_INFO is used since KERN_DEBUG often does not make it to the console */
-#define PRINTDB(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format , ## args) : 1 )
-#define PRINTDM(bits,format,args...) \
- ( (debug & (bits)) ? printk (format , ## args) : 1 )
-#define PRINTDE(bits,format,args...) \
- ( (debug & (bits)) ? printk (format "\n" , ## args) : 1 )
-#define PRINTD(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format "\n" , ## args) : 1 )
-
-#else
-
-#define PRINTD(bits,format,args...)
-#define PRINTDB(bits,format,args...)
-#define PRINTDM(bits,format,args...)
-#define PRINTDE(bits,format,args...)
-
-#endif
-
-#define PRINTDD(sec,fmt,args...)
-#define PRINTDDB(sec,fmt,args...)
-#define PRINTDDM(sec,fmt,args...)
-#define PRINTDDE(sec,fmt,args...)
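/*
 * Usage of the bitmapped debug macros (a hypothetical call site, not
 * from this driver): a message is emitted only when one of its
 * category bits is set in the `debug` module parameter.
 *
 *	// with debug = DBG_QOS | DBG_VCC ...
 *	PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR", tx_rate);  // printed
 *	PRINTD (DBG_RX, "slow RX channel %u", channel);            // suppressed
 *
 * PRINTDB/PRINTDM/PRINTDE assemble one logical line from several calls
 * (begin/middle/end); the PRINTDD family is permanently compiled out.
 */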
-
-// fixed constants
-
-#define SPARE_BUFFER_POOL_SIZE MAX_VCS
-#define HRZ_MAX_VPI 4
-#define MIN_PCI_LATENCY 48 // 24 IS TOO SMALL
-
-/* Horizon specific bits */
-/* Register offsets */
-
-#define HRZ_IO_EXTENT 0x80
-
-#define DATA_PORT_OFF 0x00
-#define TX_CHANNEL_PORT_OFF 0x04
-#define TX_DESCRIPTOR_PORT_OFF 0x08
-#define MEMORY_PORT_OFF 0x0C
-#define MEM_WR_ADDR_REG_OFF 0x14
-#define MEM_RD_ADDR_REG_OFF 0x18
-#define CONTROL_0_REG 0x1C
-#define INT_SOURCE_REG_OFF 0x20
-#define INT_ENABLE_REG_OFF 0x24
-#define MASTER_RX_ADDR_REG_OFF 0x28
-#define MASTER_RX_COUNT_REG_OFF 0x2C
-#define MASTER_TX_ADDR_REG_OFF 0x30
-#define MASTER_TX_COUNT_REG_OFF 0x34
-#define TX_DESCRIPTOR_REG_OFF 0x38
-#define TX_CHANNEL_CONFIG_COMMAND_OFF 0x40
-#define TX_CHANNEL_CONFIG_DATA_OFF 0x44
-#define TX_FREE_BUFFER_COUNT_OFF 0x48
-#define RX_FREE_BUFFER_COUNT_OFF 0x4C
-#define TX_CONFIG_OFF 0x50
-#define TX_STATUS_OFF 0x54
-#define RX_CONFIG_OFF 0x58
-#define RX_LINE_CONFIG_OFF 0x5C
-#define RX_QUEUE_RD_PTR_OFF 0x60
-#define RX_QUEUE_WR_PTR_OFF 0x64
-#define MAX_AAL5_CELL_COUNT_OFF 0x68
-#define RX_CHANNEL_PORT_OFF 0x6C
-#define TX_CELL_COUNT_OFF 0x70
-#define RX_CELL_COUNT_OFF 0x74
-#define HEC_ERROR_COUNT_OFF 0x78
-#define UNASSIGNED_CELL_COUNT_OFF 0x7C
-
-/* Register bit definitions */
-
-/* Control 0 register */
-
-#define SEEPROM_DO 0x00000001
-#define SEEPROM_DI 0x00000002
-#define SEEPROM_SK 0x00000004
-#define SEEPROM_CS 0x00000008
-#define DEBUG_BIT_0 0x00000010
-#define DEBUG_BIT_1 0x00000020
-#define DEBUG_BIT_2 0x00000040
-// RESERVED 0x00000080
-#define DEBUG_BIT_0_OE 0x00000100
-#define DEBUG_BIT_1_OE 0x00000200
-#define DEBUG_BIT_2_OE 0x00000400
-// RESERVED 0x00000800
-#define DEBUG_BIT_0_STATE 0x00001000
-#define DEBUG_BIT_1_STATE 0x00002000
-#define DEBUG_BIT_2_STATE 0x00004000
-// RESERVED 0x00008000
-#define GENERAL_BIT_0 0x00010000
-#define GENERAL_BIT_1 0x00020000
-#define GENERAL_BIT_2 0x00040000
-#define GENERAL_BIT_3 0x00080000
-#define RESET_HORIZON 0x00100000
-#define RESET_ATM 0x00200000
-#define RESET_RX 0x00400000
-#define RESET_TX 0x00800000
-#define RESET_HOST 0x01000000
-// RESERVED 0x02000000
-#define TARGET_RETRY_DISABLE 0x04000000
-#define ATM_LAYER_SELECT 0x08000000
-#define ATM_LAYER_STATUS 0x10000000
-// RESERVED 0xE0000000
-
-/* Interrupt source and enable registers */
-
-#define RX_DATA_AV 0x00000001
-#define RX_DISABLED 0x00000002
-#define TIMING_MARKER 0x00000004
-#define FORCED 0x00000008
-#define RX_BUS_MASTER_COMPLETE 0x00000010
-#define TX_BUS_MASTER_COMPLETE 0x00000020
-#define ABR_TX_CELL_COUNT_INT 0x00000040
-#define DEBUG_INT 0x00000080
-// RESERVED 0xFFFFFF00
-
-/* PIO and Bus Mastering */
-
-#define MAX_PIO_COUNT 0x000000ff // 255 - make tunable?
-// 8188 is a hard limit for bus mastering
-#define MAX_TRANSFER_COUNT 0x00001ffc // 8188
-#define MASTER_TX_AUTO_APPEND_DESC 0x80000000
-
-/* TX channel config command port */
-
-#define PCR_TIMER_ACCESS 0x0000
-#define SCR_TIMER_ACCESS 0x0001
-#define BUCKET_CAPACITY_ACCESS 0x0002
-#define BUCKET_FULLNESS_ACCESS 0x0003
-#define RATE_TYPE_ACCESS 0x0004
-// UNUSED 0x00F8
-#define TX_CHANNEL_CONFIG_MULT 0x0100
-// UNUSED 0xF800
-#define BUCKET_MAX_SIZE 0x003f
-
-/* TX channel config data port */
-
-#define CLOCK_SELECT_SHIFT 4
-#define CLOCK_DISABLE 0x00ff
-
-#define IDLE_RATE_TYPE 0x0
-#define ABR_RATE_TYPE 0x1
-#define VBR_RATE_TYPE 0x2
-#define CBR_RATE_TYPE 0x3
-
-/* TX config register */
-
-#define DRVR_DRVRBAR_ENABLE 0x0001
-#define TXCLK_MUX_SELECT_RCLK 0x0002
-#define TRANSMIT_TIMING_MARKER 0x0004
-#define LOOPBACK_TIMING_MARKER 0x0008
-#define TX_TEST_MODE_16MHz 0x0000
-#define TX_TEST_MODE_8MHz 0x0010
-#define TX_TEST_MODE_5_33MHz 0x0020
-#define TX_TEST_MODE_4MHz 0x0030
-#define TX_TEST_MODE_3_2MHz 0x0040
-#define TX_TEST_MODE_2_66MHz 0x0050
-#define TX_TEST_MODE_2_29MHz 0x0060
-#define TX_NORMAL_OPERATION 0x0070
-#define ABR_ROUND_ROBIN 0x0080
-
-/* TX status register */
-
-#define IDLE_CHANNELS_MASK 0x00FF
-#define ABR_CELL_COUNT_REACHED_MULT 0x0100
-#define ABR_CELL_COUNT_REACHED_MASK 0xFF
-
-/* RX config register */
-
-#define NON_USER_CELLS_IN_ONE_CHANNEL 0x0008
-#define RX_ENABLE 0x0010
-#define IGNORE_UNUSED_VPI_VCI_BITS_SET 0x0000
-#define NON_USER_UNUSED_VPI_VCI_BITS_SET 0x0020
-#define DISCARD_UNUSED_VPI_VCI_BITS_SET 0x0040
-
-/* RX line config register */
-
-#define SIGNAL_LOSS 0x0001
-#define FREQUENCY_DETECT_ERROR 0x0002
-#define LOCK_DETECT_ERROR 0x0004
-#define SELECT_INTERNAL_LOOPBACK 0x0008
-#define LOCK_DETECT_ENABLE 0x0010
-#define FREQUENCY_DETECT_ENABLE 0x0020
-#define USER_FRAQ 0x0040
-#define GXTALOUT_SELECT_DIV4 0x0080
-#define GXTALOUT_SELECT_NO_GATING 0x0100
-#define TIMING_MARKER_RECEIVED 0x0200
-
-/* RX channel port */
-
-#define RX_CHANNEL_MASK 0x03FF
-// UNUSED 0x3C00
-#define FLUSH_CHANNEL 0x4000
-#define RX_CHANNEL_UPDATE_IN_PROGRESS 0x8000
-
-/* Receive queue entry */
-
-#define RX_Q_ENTRY_LENGTH_MASK 0x0000FFFF
-#define RX_Q_ENTRY_CHANNEL_SHIFT 16
-#define SIMONS_DODGEY_MARKER 0x08000000
-#define RX_CONGESTION_EXPERIENCED 0x10000000
-#define RX_CRC_10_OK 0x20000000
-#define RX_CRC_32_OK 0x40000000
-#define RX_COMPLETE_FRAME 0x80000000
-
-/* Offsets and constants for use with the buffer memory */
-
-/* Buffer pointers and channel types */
-
-#define BUFFER_PTR_MASK 0x0000FFFF
-#define RX_INT_THRESHOLD_MULT 0x00010000
-#define RX_INT_THRESHOLD_MASK 0x07FF
-#define INT_EVERY_N_CELLS 0x08000000
-#define CONGESTION_EXPERIENCED 0x10000000
-#define FIRST_CELL_OF_AAL5_FRAME 0x20000000
-#define CHANNEL_TYPE_AAL5 0x00000000
-#define CHANNEL_TYPE_RAW_CELLS 0x40000000
-#define CHANNEL_TYPE_AAL3_4 0x80000000
-
-/* Buffer status stuff */
-
-#define BUFF_STATUS_MASK 0x00030000
-#define BUFF_STATUS_EMPTY 0x00000000
-#define BUFF_STATUS_CELL_AV 0x00010000
-#define BUFF_STATUS_LAST_CELL_AV 0x00020000
-
-/* Transmit channel stuff */
-
-/* Receive channel stuff */
-
-#define RX_CHANNEL_DISABLED 0x00000000
-#define RX_CHANNEL_IDLE 0x00000001
-
-/* General things */
-
-#define INITIAL_CRC 0xFFFFFFFF
-
-// A Horizon u32, a byte! Really nasty. Horizon pointers are (32 bit)
-// word addresses and so standard C pointer operations break (as they
-// assume byte addresses); so we pretend that Horizon words (and word
-// pointers) are bytes (and byte pointers) for the purposes of having
-// a memory map that works.
-
-typedef u8 HDW;
-
-typedef struct cell_buf {
- HDW payload[12];
- HDW next;
- HDW cell_count; // AAL5 rx bufs
- HDW res;
- union {
- HDW partial_crc; // AAL5 rx bufs
- HDW cell_header; // RAW bufs
- } u;
-} cell_buf;
-
-typedef struct tx_ch_desc {
- HDW rd_buf_type;
- HDW wr_buf_type;
- HDW partial_crc;
- HDW cell_header;
-} tx_ch_desc;
-
-typedef struct rx_ch_desc {
- HDW wr_buf_type;
- HDW rd_buf_type;
-} rx_ch_desc;
-
-typedef struct rx_q_entry {
- HDW entry;
-} rx_q_entry;
-
-#define TX_CHANS 8
-#define RX_CHANS 1024
-#define RX_QS 1024
-#define MAX_VCS RX_CHANS
-
-/* Horizon buffer memory map */
-
-// TX Channel Descriptors 2
-// TX Initial Buffers 8 // TX_CHANS
-#define BUFN1_SIZE 118 // (126 - TX_CHANS)
-// RX/TX Start/End Buffers 4
-#define BUFN2_SIZE 124
-// RX Queue Entries 64
-#define BUFN3_SIZE 192
-// RX Channel Descriptors 128
-#define BUFN4_SIZE 1408
-// TOTAL cell_buff chunks 2048
-
-// cell_buf bufs[2048];
-// HDW dws[32768];
-
-typedef struct MEMMAP {
- tx_ch_desc tx_descs[TX_CHANS]; // 8 * 4 = 32 , 0x0020
- cell_buf inittxbufs[TX_CHANS]; // these are really
- cell_buf bufn1[BUFN1_SIZE]; // part of this pool
- cell_buf txfreebufstart;
- cell_buf txfreebufend;
- cell_buf rxfreebufstart;
- cell_buf rxfreebufend; // 8+118+1+1+1+1+124 = 254
- cell_buf bufn2[BUFN2_SIZE]; // 16 * 254 = 4064 , 0x1000
- rx_q_entry rx_q_entries[RX_QS]; // 1 * 1024 = 1024 , 0x1400
- cell_buf bufn3[BUFN3_SIZE]; // 16 * 192 = 3072 , 0x2000
- rx_ch_desc rx_descs[MAX_VCS]; // 2 * 1024 = 2048 , 0x2800
- cell_buf bufn4[BUFN4_SIZE]; // 16 * 1408 = 22528 , 0x8000
-} MEMMAP;
-
-#define memmap ((MEMMAP *)0)
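/*
 * Since memmap is the null pointer cast to MEMMAP *, &memmap->member
 * evaluates to the member's offset within the struct, which -- because
 * HDW is one byte wide -- is exactly the adapter's word address. For
 * example, &memmap->rx_q_entries[0] is word address 0x1000 (32 + 128 +
 * 1888 + 64 + 1984 bytes of preceding members), and hrz_probe above
 * stores it directly as dev->rx_q_reset; it is never dereferenced as a
 * host pointer.
 */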
-
-/* end horizon specific bits */
-
-typedef enum {
- aal0,
- aal34,
- aal5
-} hrz_aal;
-
-typedef enum {
- tx_busy,
- rx_busy,
- ultra
-} hrz_flags;
-
-// a single struct pointed to by atm_vcc->dev_data
-
-typedef struct {
- unsigned int tx_rate;
- unsigned int rx_rate;
- u16 channel;
- u16 tx_xbr_bits;
- u16 tx_pcr_bits;
-#if 0
- u16 tx_scr_bits;
- u16 tx_bucket_bits;
-#endif
- hrz_aal aal;
-} hrz_vcc;
-
-struct hrz_dev {
-
- u32 iobase;
- u32 * membase;
-
- struct sk_buff * rx_skb; // skb being RXed
- unsigned int rx_bytes; // bytes remaining to RX within region
- void * rx_addr; // addr to send bytes to (for PIO)
- unsigned int rx_channel; // channel that the skb is going out on
-
- struct sk_buff * tx_skb; // skb being TXed
- unsigned int tx_bytes; // bytes remaining to TX within region
- void * tx_addr; // addr to send bytes from (for PIO)
- struct iovec * tx_iovec; // remaining regions
- unsigned int tx_regions; // number of remaining regions
-
- spinlock_t mem_lock;
- wait_queue_head_t tx_queue;
-
- u8 irq;
- unsigned long flags;
- u8 tx_last;
- u8 tx_idle;
-
- rx_q_entry * rx_q_reset;
- rx_q_entry * rx_q_entry;
- rx_q_entry * rx_q_wrap;
-
- struct atm_dev * atm_dev;
-
- u32 last_vc;
-
- int noof_spare_buffers;
- u16 spare_buffers[SPARE_BUFFER_POOL_SIZE];
-
- u16 tx_channel_record[TX_CHANS];
-
- // this is what we follow when we get incoming data
- u32 txer[MAX_VCS/32];
- struct atm_vcc * rxer[MAX_VCS];
-
- // cell rate allocation
- spinlock_t rate_lock;
- unsigned int rx_avail;
- unsigned int tx_avail;
-
- // dev stats
- unsigned long tx_cell_count;
- unsigned long rx_cell_count;
- unsigned long hec_error_count;
- unsigned long unassigned_cell_count;
-
- struct pci_dev * pci_dev;
- struct timer_list housekeeping;
-};
-
-typedef struct hrz_dev hrz_dev;
-
-/* macros for use later */
-
-#define BUF_PTR(cbptr) ((cbptr) - (cell_buf *) 0)
-
-#define INTERESTING_INTERRUPTS \
- (RX_DATA_AV | RX_DISABLED | TX_BUS_MASTER_COMPLETE | RX_BUS_MASTER_COMPLETE)
-
-// 190 cells by default (192 TX buffers - 2 elbow room, see docs)
-#define TX_AAL5_LIMIT (190*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER) // 9112
-
-// Have enough RX buffers (unless we allow other buffer splits)
-#define RX_AAL5_LIMIT ATM_MAX_AAL5_PDU
-
-/* multi-statement macro protector */
-#define DW(x) do{ x } while(0)
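/*
 * Why the do { ... } while(0) wrapper matters (stmt_a/stmt_b are
 * hypothetical): it makes a multi-statement macro behave as a single
 * statement, so it survives a braceless if/else.
 *
 *	if (cond)
 *		DW (stmt_a (); stmt_b (););
 *	else
 *		something_else ();
 *
 * Without the wrapper, the expansion would end the if body at the
 * first semicolon and orphan the else.
 */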
-
-#define HRZ_DEV(atm_dev) ((hrz_dev *) (atm_dev)->dev_data)
-#define HRZ_VCC(atm_vcc) ((hrz_vcc *) (atm_vcc)->dev_data)
-
-/* Turn the LEDs on and off */
-// The LEDs bits are upside down in that setting the bit in the debug
-// register will turn the appropriate LED off.
-
-#define YELLOW_LED DEBUG_BIT_0
-#define GREEN_LED DEBUG_BIT_1
-#define YELLOW_LED_OE DEBUG_BIT_0_OE
-#define GREEN_LED_OE DEBUG_BIT_1_OE
-
-#define GREEN_LED_OFF(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | GREEN_LED)
-#define GREEN_LED_ON(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ GREEN_LED)
-#define YELLOW_LED_OFF(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | YELLOW_LED)
-#define YELLOW_LED_ON(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ YELLOW_LED)
-
-typedef enum {
- round_up,
- round_down,
- round_nearest
-} rounding;
-
-#endif /* DRIVER_ATM_HORIZON_H */
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index e0dda9062e6b..791f69a07ddf 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -14,11 +14,6 @@ typedef void __iomem *virt_addr_t;
#define CYCLE_DELAY 5
-/*
- This was the original definition
-#define osp_MicroDelay(microsec) \
- do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
-*/
#define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \
udelay((useconds));}
/*
diff --git a/drivers/atm/uPD98401.h b/drivers/atm/uPD98401.h
deleted file mode 100644
index f766a5ef0c5d..000000000000
--- a/drivers/atm/uPD98401.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/uPD98401.h - NEC uPD98401 (SAR) declarations */
-
-/* Written 1995 by Werner Almesberger, EPFL LRC */
-
-
-#ifndef DRIVERS_ATM_uPD98401_H
-#define DRIVERS_ATM_uPD98401_H
-
-
-#define MAX_CRAM_SIZE (1 << 18) /* 2^18 words */
-#define RAM_INCREMENT 1024 /* check in 4 kB increments */
-
-#define uPD98401_PORTS 0x24 /* probably more ? */
-
-
-/*
- * Commands
- */
-
-#define uPD98401_OPEN_CHAN 0x20000000 /* open channel */
-#define uPD98401_CHAN_ADDR 0x0003fff8 /* channel address */
-#define uPD98401_CHAN_ADDR_SHIFT 3
-#define uPD98401_CLOSE_CHAN 0x24000000 /* close channel */
-#define uPD98401_CHAN_RT 0x02000000 /* RX/TX (0 TX, 1 RX) */
-#define uPD98401_DEACT_CHAN 0x28000000 /* deactivate channel */
-#define uPD98401_TX_READY 0x30000000 /* TX ready */
-#define uPD98401_ADD_BAT 0x34000000 /* add batches */
-#define uPD98401_POOL 0x000f0000 /* pool number */
-#define uPD98401_POOL_SHIFT 16
-#define uPD98401_POOL_NUMBAT 0x0000ffff /* number of batches */
-#define uPD98401_NOP 0x3f000000 /* NOP */
-#define uPD98401_IND_ACC 0x00000000 /* Indirect Access */
-#define uPD98401_IA_RW 0x10000000 /* Read/Write (0 W, 1 R) */
-#define uPD98401_IA_B3 0x08000000 /* Byte select, 1 enable */
-#define uPD98401_IA_B2 0x04000000
-#define uPD98401_IA_B1 0x02000000
-#define uPD98401_IA_B0 0x01000000
-#define uPD98401_IA_BALL 0x0f000000 /* whole longword */
-#define uPD98401_IA_TGT 0x000c0000 /* Target */
-#define uPD98401_IA_TGT_SHIFT 18
-#define uPD98401_IA_TGT_CM 0 /* - Control Memory */
-#define uPD98401_IA_TGT_SAR 1 /* - uPD98401 registers */
-#define uPD98401_IA_TGT_PHY 3 /* - PHY device */
-#define uPD98401_IA_ADDR 0x0003ffff
-
-/*
- * Command Register Status
- */
-
-#define uPD98401_BUSY 0x80000000 /* SAR is busy */
-#define uPD98401_LOCKED 0x40000000 /* SAR is locked by other CPU */
-
-/*
- * Indications
- */
-
-/* Normal (AAL5) Receive Indication */
-#define uPD98401_AAL5_UINFO 0xffff0000 /* user-supplied information */
-#define uPD98401_AAL5_UINFO_SHIFT 16
-#define uPD98401_AAL5_SIZE 0x0000ffff /* PDU size (in _CELLS_ !!) */
-#define uPD98401_AAL5_CHAN 0x7fff0000 /* Channel number */
-#define uPD98401_AAL5_CHAN_SHIFT 16
-#define uPD98401_AAL5_ERR 0x00008000 /* Error indication */
-#define uPD98401_AAL5_CI 0x00004000 /* Congestion Indication */
-#define uPD98401_AAL5_CLP 0x00002000 /* CLP (>= 1 cell had CLP=1) */
-#define uPD98401_AAL5_ES 0x00000f00 /* Error Status */
-#define uPD98401_AAL5_ES_SHIFT 8
-#define uPD98401_AAL5_ES_NONE 0 /* No error */
-#define uPD98401_AAL5_ES_FREE 1 /* Receiver free buf underflow */
-#define uPD98401_AAL5_ES_FIFO 2 /* Receiver FIFO overrun */
-#define uPD98401_AAL5_ES_TOOBIG 3 /* Maximum length violation */
-#define uPD98401_AAL5_ES_CRC 4 /* CRC error */
-#define uPD98401_AAL5_ES_ABORT 5 /* User abort */
-#define uPD98401_AAL5_ES_LENGTH 6 /* Length violation */
-#define uPD98401_AAL5_ES_T1 7 /* T1 error (timeout) */
-#define uPD98401_AAL5_ES_DEACT 8 /* Deactivated with DEACT_CHAN */
-#define uPD98401_AAL5_POOL 0x0000001f /* Free buffer pool number */
-
-/* Raw Cell Indication */
-#define uPD98401_RAW_UINFO uPD98401_AAL5_UINFO
-#define uPD98401_RAW_UINFO_SHIFT uPD98401_AAL5_UINFO_SHIFT
-#define uPD98401_RAW_HEC 0x000000ff /* HEC */
-#define uPD98401_RAW_CHAN uPD98401_AAL5_CHAN
-#define uPD98401_RAW_CHAN_SHIFT uPD98401_AAL5_CHAN_SHIFT
-
-/* Transmit Indication */
-#define uPD98401_TXI_CONN 0x7fff0000 /* Connection Number */
-#define uPD98401_TXI_CONN_SHIFT 16
-#define uPD98401_TXI_ACTIVE 0x00008000 /* Channel remains active */
-#define uPD98401_TXI_PQP 0x00007fff /* Packet Queue Pointer */
-
-/*
- * Directly Addressable Registers
- */
-
-#define uPD98401_GMR 0x00 /* General Mode Register */
-#define uPD98401_GSR 0x01 /* General Status Register */
-#define uPD98401_IMR 0x02 /* Interrupt Mask Register */
-#define uPD98401_RQU 0x03 /* Receive Queue Underrun */
-#define uPD98401_RQA 0x04 /* Receive Queue Alert */
-#define uPD98401_ADDR 0x05 /* Last Burst Address */
-#define uPD98401_VER 0x06 /* Version Number */
-#define uPD98401_SWR 0x07 /* Software Reset */
-#define uPD98401_CMR 0x08 /* Command Register */
-#define uPD98401_CMR_L 0x09 /* Command Register and Lock/Unlock */
-#define uPD98401_CER 0x0a /* Command Extension Register */
-#define uPD98401_CER_L 0x0b /* Command Ext Reg and Lock/Unlock */
-
-#define uPD98401_MSH(n) (0x10+(n)) /* Mailbox n Start Address High */
-#define uPD98401_MSL(n) (0x14+(n)) /* Mailbox n Start Address Low */
-#define uPD98401_MBA(n) (0x18+(n)) /* Mailbox n Bottom Address */
-#define uPD98401_MTA(n) (0x1c+(n)) /* Mailbox n Tail Address */
-#define uPD98401_MWA(n) (0x20+(n)) /* Mailbox n Write Address */
-
-/* GMR is at 0x00 */
-#define uPD98401_GMR_ONE 0x80000000 /* Must be set to one */
-#define uPD98401_GMR_SLM 0x40000000 /* Address mode (0 word, 1 byte) */
-#define uPD98401_GMR_CPE 0x00008000 /* Control Memory Parity Enable */
-#define uPD98401_GMR_LP 0x00004000 /* Loopback */
-#define uPD98401_GMR_WA 0x00002000 /* Early Bus Write Abort/RDY */
-#define uPD98401_GMR_RA 0x00001000 /* Early Read Abort/RDY */
-#define uPD98401_GMR_SZ 0x00000f00 /* Burst Size Enable */
-#define uPD98401_BURST16 0x00000800 /* 16-word burst */
-#define uPD98401_BURST8 0x00000400 /* 8-word burst */
-#define uPD98401_BURST4 0x00000200 /* 4-word burst */
-#define uPD98401_BURST2 0x00000100 /* 2-word burst */
-#define uPD98401_GMR_AD 0x00000080 /* Address (burst resolution) Disable */
-#define uPD98401_GMR_BO 0x00000040 /* Byte Order (0 little, 1 big) */
-#define uPD98401_GMR_PM 0x00000020 /* Bus Parity Mode (0 byte, 1 word)*/
-#define uPD98401_GMR_PC 0x00000010 /* Bus Parity Control (0even,1odd) */
-#define uPD98401_GMR_BPE 0x00000008 /* Bus Parity Enable */
-#define uPD98401_GMR_DR 0x00000004 /* Receive Drop Mode (0drop,1don't)*/
-#define uPD98401_GMR_SE 0x00000002 /* Shapers Enable */
-#define uPD98401_GMR_RE 0x00000001 /* Receiver Enable */
-
-/* GSR is at 0x01, IMR is at 0x02 */
-#define uPD98401_INT_PI 0x80000000 /* PHY interrupt */
-#define uPD98401_INT_RQA 0x40000000 /* Receive Queue Alert */
-#define uPD98401_INT_RQU 0x20000000 /* Receive Queue Underrun */
-#define uPD98401_INT_RD 0x10000000 /* Receiver Deactivated */
-#define uPD98401_INT_SPE 0x08000000 /* System Parity Error */
-#define uPD98401_INT_CPE 0x04000000 /* Control Memory Parity Error */
-#define uPD98401_INT_SBE 0x02000000 /* System Bus Error */
-#define uPD98401_INT_IND 0x01000000 /* Initialization Done */
-#define uPD98401_INT_RCR 0x0000ff00 /* Raw Cell Received */
-#define uPD98401_INT_RCR_SHIFT 8
-#define uPD98401_INT_MF 0x000000f0 /* Mailbox Full */
-#define uPD98401_INT_MF_SHIFT 4
-#define uPD98401_INT_MM 0x0000000f /* Mailbox Modified */
-
-/* VER is at 0x06 */
-#define uPD98401_MAJOR 0x0000ff00 /* Major revision */
-#define uPD98401_MAJOR_SHIFT 8
-#define uPD98401_MINOR 0x000000ff /* Minor revision */
-
-/*
- * Indirectly Addressable Registers
- */
-
-#define uPD98401_IM(n) (0x40000+(n)) /* Scheduler n I and M */
-#define uPD98401_X(n) (0x40010+(n)) /* Scheduler n X */
-#define uPD98401_Y(n) (0x40020+(n)) /* Scheduler n Y */
-#define uPD98401_PC(n) (0x40030+(n)) /* Scheduler n P, C, p and c */
-#define uPD98401_PS(n) (0x40040+(n)) /* Scheduler n priority and status */
-
-/* IM contents */
-#define uPD98401_IM_I 0xff000000 /* I */
-#define uPD98401_IM_I_SHIFT 24
-#define uPD98401_IM_M 0x00ffffff /* M */
-
-/* PC contents */
-#define uPD98401_PC_P 0xff000000 /* P */
-#define uPD98401_PC_P_SHIFT 24
-#define uPD98401_PC_C 0x00ff0000 /* C */
-#define uPD98401_PC_C_SHIFT 16
-#define uPD98401_PC_p 0x0000ff00 /* p */
-#define uPD98401_PC_p_SHIFT 8
-#define uPD98401_PC_c 0x000000ff /* c */
-
-/* PS contents */
-#define uPD98401_PS_PRIO 0xf0 /* Priority level (0 high, 15 low) */
-#define uPD98401_PS_PRIO_SHIFT 4
-#define uPD98401_PS_S 0x08 /* Scan - must be 0 (internal) */
-#define uPD98401_PS_R 0x04 /* Round Robin (internal) */
-#define uPD98401_PS_A 0x02 /* Active (internal) */
-#define uPD98401_PS_E 0x01 /* Enabled */
-
-#define uPD98401_TOS 0x40100 /* Top of Stack Control Memory Address */
-#define uPD98401_SMA 0x40200 /* Shapers Control Memory Start Address */
-#define uPD98401_PMA 0x40201 /* Receive Pool Control Memory Start Address */
-#define uPD98401_T1R 0x40300 /* T1 Register */
-#define uPD98401_VRR 0x40301 /* VPI/VCI Reduction Register/Recv. Shutdown */
-#define uPD98401_TSR 0x40302 /* Time-Stamp Register */
-
-/* VRR is at 0x40301 */
-#define uPD98401_VRR_SDM 0x80000000 /* Shutdown Mode */
-#define uPD98401_VRR_SHIFT 0x000f0000 /* VPI/VCI Shift */
-#define uPD98401_VRR_SHIFT_SHIFT 16
-#define uPD98401_VRR_MASK 0x0000ffff /* VPI/VCI mask */
-
-/*
- * TX packet descriptor
- */
-
-#define uPD98401_TXPD_SIZE 16 /* descriptor size (in bytes) */
-
-#define uPD98401_TXPD_V 0x80000000 /* Valid bit */
-#define uPD98401_TXPD_DP 0x40000000 /* Descriptor (1) or Pointer (0) */
-#define uPD98401_TXPD_SM 0x20000000 /* Single (1) or Multiple (0) */
-#define uPD98401_TXPD_CLPM 0x18000000 /* CLP mode */
-#define uPD98401_CLPM_0 0 /* 00 CLP = 0 */
-#define uPD98401_CLPM_1 3 /* 11 CLP = 1 */
-#define uPD98401_CLPM_LAST 1 /* 01 CLP unless last cell */
-#define uPD98401_TXPD_CLPM_SHIFT 27
-#define uPD98401_TXPD_PTI 0x07000000 /* PTI pattern */
-#define uPD98401_TXPD_PTI_SHIFT 24
-#define uPD98401_TXPD_GFC 0x00f00000 /* GFC pattern */
-#define uPD98401_TXPD_GFC_SHIFT 20
-#define uPD98401_TXPD_C10 0x00040000 /* insert CRC-10 */
-#define uPD98401_TXPD_AAL5 0x00020000 /* AAL5 processing */
-#define uPD98401_TXPD_MB 0x00010000 /* TX mailbox number */
-#define uPD98401_TXPD_UU 0x0000ff00 /* CPCS-UU */
-#define uPD98401_TXPD_UU_SHIFT 8
-#define uPD98401_TXPD_CPI 0x000000ff /* CPI */
-
-/*
- * TX buffer descriptor
- */
-
-#define uPD98401_TXBD_SIZE 8 /* descriptor size (in bytes) */
-
-#define uPD98401_TXBD_LAST 0x80000000 /* last buffer in packet */
-
-/*
- * TX VC table
- */
-
-/* 1st word has the same structure as in a TX packet descriptor */
-#define uPD98401_TXVC_L 0x80000000 /* last buffer */
-#define uPD98401_TXVC_SHP 0x0f000000 /* shaper number */
-#define uPD98401_TXVC_SHP_SHIFT 24
-#define uPD98401_TXVC_VPI 0x00ff0000 /* VPI */
-#define uPD98401_TXVC_VPI_SHIFT 16
-#define uPD98401_TXVC_VCI 0x0000ffff /* VCI */
-#define uPD98401_TXVC_QRP 6 /* Queue Read Pointer is in word 6 */
-
-/*
- * RX free buffer pools descriptor
- */
-
-#define uPD98401_RXFP_ALERT 0x70000000 /* low water mark */
-#define uPD98401_RXFP_ALERT_SHIFT 28
-#define uPD98401_RXFP_BFSZ 0x0f000000 /* buffer size, 64*2^n */
-#define uPD98401_RXFP_BFSZ_SHIFT 24
-#define uPD98401_RXFP_BTSZ 0x00ff0000 /* batch size, n+1 */
-#define uPD98401_RXFP_BTSZ_SHIFT 16
-#define uPD98401_RXFP_REMAIN 0x0000ffff /* remaining batches in pool */
-
-/*
- * RX VC table
- */
-
-#define uPD98401_RXVC_BTSZ 0xff000000 /* remaining free buffers in batch */
-#define uPD98401_RXVC_BTSZ_SHIFT 24
-#define uPD98401_RXVC_MB 0x00200000 /* RX mailbox number */
-#define uPD98401_RXVC_POOL 0x001f0000 /* free buffer pool number */
-#define uPD98401_RXVC_POOL_SHIFT 16
-#define uPD98401_RXVC_UINFO 0x0000ffff /* user-supplied information */
-#define uPD98401_RXVC_T1 0xffff0000 /* T1 timestamp */
-#define uPD98401_RXVC_T1_SHIFT 16
-#define uPD98401_RXVC_PR 0x00008000 /* Packet Reception, 1 if busy */
-#define uPD98401_RXVC_DR 0x00004000 /* FIFO Drop */
-#define uPD98401_RXVC_OD 0x00001000 /* Drop OAM cells */
-#define uPD98401_RXVC_AR 0x00000800 /* AAL5 or raw cell; 1 if AAL5 */
-#define uPD98401_RXVC_MAXSEG 0x000007ff /* max number of segments per PDU */
-#define uPD98401_RXVC_REM 0xfffe0000 /* remaining words in curr buffer */
-#define uPD98401_RXVC_REM_SHIFT 17
-#define uPD98401_RXVC_CLP 0x00010000 /* CLP received */
-#define uPD98401_RXVC_BFA 0x00008000 /* Buffer Assigned */
-#define uPD98401_RXVC_BTA 0x00004000 /* Batch Assigned */
-#define uPD98401_RXVC_CI 0x00002000 /* Congestion Indication */
-#define uPD98401_RXVC_DD 0x00001000 /* Dropping incoming cells */
-#define uPD98401_RXVC_DP 0x00000800 /* like PR ? */
-#define uPD98401_RXVC_CURSEG 0x000007ff /* Current Segment count */
-
-/*
- * RX lookup table
- */
-
-#define uPD98401_RXLT_ENBL 0x8000 /* Enable */
-
-#endif
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
deleted file mode 100644
index 239852d85558..000000000000
--- a/drivers/atm/uPD98402.c
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/atm/uPD98402.c - NEC uPD98402 (PHY) device driver */
-
-/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/atomic.h>
-
-#include "uPD98402.h"
-
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-
-struct uPD98402_priv {
- struct k_sonet_stats sonet_stats;/* link diagnostics */
- unsigned char framing; /* SONET/SDH framing */
- int loop_mode; /* loopback mode */
- spinlock_t lock;
-};
-
-
-#define PRIV(dev) ((struct uPD98402_priv *) dev->phy_data)
-
-#define PUT(val,reg) dev->ops->phy_put(dev,val,uPD98402_##reg)
-#define GET(reg) dev->ops->phy_get(dev,uPD98402_##reg)
-
-
-static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int zero)
-{
- struct sonet_stats tmp;
- int error = 0;
-
- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
- sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
- if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
- if (zero && !error) {
- /* unused fields are reported as -1, but we must not "adjust"
- them */
- tmp.corr_hcs = tmp.tx_cells = tmp.rx_cells = 0;
- sonet_subtract_stats(&PRIV(dev)->sonet_stats,&tmp);
- }
- return error ? -EFAULT : 0;
-}
-
-
-static int set_framing(struct atm_dev *dev,unsigned char framing)
-{
- static const unsigned char sonet[] = { 1,2,3,0 };
- static const unsigned char sdh[] = { 1,0,0,2 };
- const char *set;
- unsigned long flags;
-
- switch (framing) {
- case SONET_FRAME_SONET:
- set = sonet;
- break;
- case SONET_FRAME_SDH:
- set = sdh;
- break;
- default:
- return -EINVAL;
- }
- spin_lock_irqsave(&PRIV(dev)->lock, flags);
- PUT(set[0],C11T);
- PUT(set[1],C12T);
- PUT(set[2],C13T);
- PUT((GET(MDR) & ~uPD98402_MDR_SS_MASK) | (set[3] <<
- uPD98402_MDR_SS_SHIFT),MDR);
- spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
- return 0;
-}
-
-
-static int get_sense(struct atm_dev *dev,u8 __user *arg)
-{
- unsigned long flags;
- unsigned char s[3];
-
- spin_lock_irqsave(&PRIV(dev)->lock, flags);
- s[0] = GET(C11R);
- s[1] = GET(C12R);
- s[2] = GET(C13R);
- spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
- return (put_user(s[0], arg) || put_user(s[1], arg+1) ||
- put_user(s[2], arg+2) || put_user(0xff, arg+3) ||
- put_user(0xff, arg+4) || put_user(0xff, arg+5)) ? -EFAULT : 0;
-}
-
-
-static int set_loopback(struct atm_dev *dev,int mode)
-{
- unsigned char mode_reg;
-
- mode_reg = GET(MDR) & ~(uPD98402_MDR_TPLP | uPD98402_MDR_ALP |
- uPD98402_MDR_RPLP);
- switch (__ATM_LM_XTLOC(mode)) {
- case __ATM_LM_NONE:
- break;
- case __ATM_LM_PHY:
- mode_reg |= uPD98402_MDR_TPLP;
- break;
- case __ATM_LM_ATM:
- mode_reg |= uPD98402_MDR_ALP;
- break;
- default:
- return -EINVAL;
- }
- switch (__ATM_LM_XTRMT(mode)) {
- case __ATM_LM_NONE:
- break;
- case __ATM_LM_PHY:
- mode_reg |= uPD98402_MDR_RPLP;
- break;
- default:
- return -EINVAL;
- }
- PUT(mode_reg,MDR);
- PRIV(dev)->loop_mode = mode;
- return 0;
-}
-
-
-static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- switch (cmd) {
-
- case SONET_GETSTATZ:
- case SONET_GETSTAT:
- return fetch_stats(dev,arg, cmd == SONET_GETSTATZ);
- case SONET_SETFRAMING:
- return set_framing(dev, (int)(unsigned long)arg);
- case SONET_GETFRAMING:
- return put_user(PRIV(dev)->framing,(int __user *)arg) ?
- -EFAULT : 0;
- case SONET_GETFRSENSE:
- return get_sense(dev,arg);
- case ATM_SETLOOP:
- return set_loopback(dev, (int)(unsigned long)arg);
- case ATM_GETLOOP:
- return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ?
- -EFAULT : 0;
- case ATM_QUERYLOOP:
- return put_user(ATM_LM_LOC_PHY | ATM_LM_LOC_ATM |
- ATM_LM_RMT_PHY,(int __user *)arg) ? -EFAULT : 0;
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-
-#define ADD_LIMITED(s,v) \
- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
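/*
 * An equivalent helper form of the macro above -- a sketch with a
 * hypothetical name, not from this driver. It adds a hardware event
 * count into an atomic kernel counter and saturates at INT_MAX instead
 * of wrapping negative:
 *
 *	static inline void add_limited(atomic_t *stat, int delta)
 *	{
 *		atomic_add(delta, stat);
 *		if (atomic_read(stat) < 0)	// wrapped past INT_MAX
 *			atomic_set(stat, INT_MAX);
 *	}
 *
 * The macro form is used so the statistics field name can be pasted in
 * alongside the matching hardware register.
 */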
-
-
-static void stat_event(struct atm_dev *dev)
-{
- unsigned char events;
-
- events = GET(PCR);
- if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB);
- if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT);
- if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT);
- if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT);
- if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT);
-}
-
-
-#undef ADD_LIMITED
-
-
-static void uPD98402_int(struct atm_dev *dev)
-{
- static unsigned long silence = 0;
- unsigned char reason;
-
- while ((reason = GET(PICR))) {
- if (reason & uPD98402_INT_LOS)
- printk(KERN_NOTICE "%s(itf %d): signal lost\n",
- dev->type,dev->number);
- if (reason & uPD98402_INT_PFM) stat_event(dev);
- if (reason & uPD98402_INT_PCO) {
- (void) GET(PCOCR); /* clear interrupt cause */
- atomic_add(GET(HECCT),
- &PRIV(dev)->sonet_stats.uncorr_hcs);
- }
- if ((reason & uPD98402_INT_RFO) &&
- (time_after(jiffies, silence) || silence == 0)) {
- printk(KERN_WARNING "%s(itf %d): uPD98402 receive "
- "FIFO overflow\n",dev->type,dev->number);
- silence = (jiffies+HZ/2)|1;
- }
- }
-}
-
-
-static int uPD98402_start(struct atm_dev *dev)
-{
- DPRINTK("phy_start\n");
- if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
- return -ENOMEM;
- spin_lock_init(&PRIV(dev)->lock);
- memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
- (void) GET(PCR); /* clear performance events */
- PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */
- (void) GET(PCOCR); /* clear overflows */
- PUT(~uPD98402_PCO_HECC,PCOMR);
- (void) GET(PICR); /* clear interrupts */
- PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
- uPD98402_INT_LOS),PIMR); /* enable them */
- (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
- return 0;
-}
-
-
-static int uPD98402_stop(struct atm_dev *dev)
-{
- /* let SAR driver worry about stopping interrupts */
- kfree(PRIV(dev));
- return 0;
-}
-
-
-static const struct atmphy_ops uPD98402_ops = {
- .start = uPD98402_start,
- .ioctl = uPD98402_ioctl,
- .interrupt = uPD98402_int,
- .stop = uPD98402_stop,
-};
-
-
-int uPD98402_init(struct atm_dev *dev)
-{
-	DPRINTK("phy_init\n");
- dev->phy = &uPD98402_ops;
- return 0;
-}
-
-
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(uPD98402_init);
-
-static __init int uPD98402_module_init(void)
-{
- return 0;
-}
-module_init(uPD98402_module_init);
-/* module_exit not defined so not unloadable */
diff --git a/drivers/atm/uPD98402.h b/drivers/atm/uPD98402.h
deleted file mode 100644
index 437cfaa20c96..000000000000
--- a/drivers/atm/uPD98402.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/uPD98402.h - NEC uPD98402 (PHY) declarations */
-
-/* Written 1995 by Werner Almesberger, EPFL LRC */
-
-
-#ifndef DRIVERS_ATM_uPD98402_H
-#define DRIVERS_ATM_uPD98402_H
-
-/*
- * Registers
- */
-
-#define uPD98402_CMR 0x00 /* Command Register */
-#define uPD98402_MDR 0x01 /* Mode Register */
-#define uPD98402_PICR 0x02 /* PHY Interrupt Cause Register */
-#define uPD98402_PIMR 0x03 /* PHY Interrupt Mask Register */
-#define uPD98402_ACR 0x04 /* Alarm Cause Register */
-#define uPD98402_ACMR 0x05 /* Alarm Cause Mask Register */
-#define uPD98402_PCR 0x06 /* Performance Cause Register */
-#define uPD98402_PCMR 0x07 /* Performance Cause Mask Register */
-#define uPD98402_IACM 0x08 /* Internal Alarm Cause Mask Register */
-#define uPD98402_B1ECT 0x09 /* B1 Error Count Register */
-#define uPD98402_B2ECT 0x0a /* B2 Error Count Register */
-#define uPD98402_B3ECT 0x0b /* B3 Error Count Register */
-#define uPD98402_PFECB 0x0c /* Path FEBE Count Register */
-#define uPD98402_LECCT 0x0d /* Line FEBE Count Register */
-#define uPD98402_HECCT 0x0e /* HEC Error Count Register */
-#define uPD98402_FJCT 0x0f /* Frequency Justification Count Reg */
-#define uPD98402_PCOCR 0x10 /* Perf. Counter Overflow Cause Reg */
-#define uPD98402_PCOMR 0x11 /* Perf. Counter Overflow Mask Reg */
-#define uPD98402_C11T 0x20 /* C11T Data Register */
-#define uPD98402_C12T 0x21 /* C12T Data Register */
-#define uPD98402_C13T 0x22 /* C13T Data Register */
-#define uPD98402_F1T 0x23 /* F1T Data Register */
-#define uPD98402_K2T 0x25 /* K2T Data Register */
-#define uPD98402_C2T 0x26 /* C2T Data Register */
-#define uPD98402_F2T 0x27 /* F2T Data Register */
-#define uPD98402_C11R 0x30 /* C11R Data Register */
-#define uPD98402_C12R 0x31 /* C12R Data Register */
-#define uPD98402_C13R 0x32 /* C13R Data Register */
-#define uPD98402_F1R 0x33 /* F1R Data Register */
-#define uPD98402_K2R 0x35 /* K2R Data Register */
-#define uPD98402_C2R 0x36 /* C2R Data Register */
-#define uPD98402_F2R 0x37 /* F2R Data Register */
-
-/* CMR is at 0x00 */
-#define uPD98402_CMR_PFRF 0x01 /* Send path FERF */
-#define uPD98402_CMR_LFRF 0x02 /* Send line FERF */
-#define uPD98402_CMR_PAIS 0x04 /* Send path AIS */
-#define uPD98402_CMR_LAIS 0x08 /* Send line AIS */
-
-/* MDR is at 0x01 */
-#define uPD98402_MDR_ALP 0x01 /* ATM layer loopback */
-#define uPD98402_MDR_TPLP 0x02 /* PMD loopback, to host */
-#define uPD98402_MDR_RPLP 0x04 /* PMD loopback, to network */
-#define uPD98402_MDR_SS0 0x08 /* SS0 */
-#define uPD98402_MDR_SS1 0x10 /* SS1 */
-#define uPD98402_MDR_SS_MASK 0x18 /* mask */
-#define uPD98402_MDR_SS_SHIFT 3 /* shift */
-#define uPD98402_MDR_HEC 0x20 /* disable HEC inbound processing */
-#define uPD98402_MDR_FSR 0x40 /* disable frame scrambler */
-#define uPD98402_MDR_CSR 0x80 /* disable cell scrambler */
-
-/* PICR is at 0x02, PIMR is at 0x03 */
-#define uPD98402_INT_PFM 0x01 /* performance counter has changed */
-#define uPD98402_INT_ALM 0x02 /* line fault */
-#define uPD98402_INT_RFO 0x04 /* receive FIFO overflow */
-#define uPD98402_INT_PCO 0x08 /* performance counter overflow */
-#define uPD98402_INT_OTD 0x20 /* OTD has occurred */
-#define uPD98402_INT_LOS 0x40 /* Loss Of Signal */
-#define uPD98402_INT_LOF 0x80 /* Loss Of Frame */
-
-/* ACR is as 0x04, ACMR is at 0x05 */
-#define uPD98402_ALM_PFRF 0x01 /* path FERF */
-#define uPD98402_ALM_LFRF 0x02 /* line FERF */
-#define uPD98402_ALM_PAIS 0x04 /* path AIS */
-#define uPD98402_ALM_LAIS 0x08 /* line AIS */
-#define uPD98402_ALM_LOD 0x10 /* loss of delineation */
-#define uPD98402_ALM_LOP 0x20 /* loss of pointer */
-#define uPD98402_ALM_OOF 0x40 /* out of frame */
-
-/* PCR is at 0x06, PCMR is at 0x07 */
-#define uPD98402_PFM_PFEB 0x01 /* path FEBE */
-#define uPD98402_PFM_LFEB 0x02 /* line FEBE */
-#define uPD98402_PFM_B3E 0x04 /* B3 error */
-#define uPD98402_PFM_B2E 0x08 /* B2 error */
-#define uPD98402_PFM_B1E 0x10 /* B1 error */
-#define uPD98402_PFM_FJ 0x20 /* frequency justification */
-
-/* IACM is at 0x08 */
-#define uPD98402_IACM_PFRF 0x01 /* don't generate path FERF */
-#define uPD98402_IACM_LFRF 0x02 /* don't generate line FERF */
-
-/* PCOCR is at 0x010, PCOMR is at 0x11 */
-#define uPD98402_PCO_B1EC 0x01 /* B1ECT overflow */
-#define uPD98402_PCO_B2EC 0x02 /* B2ECT overflow */
-#define uPD98402_PCO_B3EC 0x04 /* B3ECT overflow */
-#define uPD98402_PCO_PFBC 0x08 /* PFECB overflow */
-#define uPD98402_PCO_LFBC 0x10 /* LECCT overflow */
-#define uPD98402_PCO_HECC 0x20 /* HECCT overflow */
-#define uPD98402_PCO_FJC 0x40 /* FJCT overflow */
-
-
-int uPD98402_init(struct atm_dev *dev);
-
-#endif
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
deleted file mode 100644
index cf5fffcf98a1..000000000000
--- a/drivers/atm/zatm.c
+++ /dev/null
@@ -1,1652 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */
-
-/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/atm_zatm.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/nospec.h>
-
-#include "uPD98401.h"
-#include "uPD98402.h"
-#include "zeprom.h"
-#include "zatm.h"
-
-
-/*
- * TODO:
- *
- * Minor features
- * - support 64 kB SDUs (will have to use multibuffer batches then :-( )
- * - proper use of CDV, credit = max(1,CDVT*PCR)
- * - AAL0
- * - better receive timestamps
- * - OAM
- */
-
-#define ZATM_COPPER 1
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-#ifndef CONFIG_ATM_ZATM_DEBUG
-
-
-#define NULLCHECK(x)
-
-#define EVENT(s,a,b)
-
-
-static void event_dump(void)
-{
-}
-
-
-#else
-
-
-/*
- * NULL pointer checking
- */
-
-#define NULLCHECK(x) \
- if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x))
-
-/*
- * Very extensive activity logging. Greatly improves bug detection speed but
- * costs a few Mbps if enabled.
- */
-
-#define EV 64
-
-static const char *ev[EV];
-static unsigned long ev_a[EV],ev_b[EV];
-static int ec = 0;
-
-
-static void EVENT(const char *s,unsigned long a,unsigned long b)
-{
- ev[ec] = s;
- ev_a[ec] = a;
- ev_b[ec] = b;
- ec = (ec+1) % EV;
-}
-
-
-static void event_dump(void)
-{
- int n,i;
-
- printk(KERN_NOTICE "----- event dump follows -----\n");
- for (n = 0; n < EV; n++) {
- i = (ec+n) % EV;
- printk(KERN_NOTICE);
- printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]);
- }
- printk(KERN_NOTICE "----- event dump ends here -----\n");
-}
-
-
-#endif /* CONFIG_ATM_ZATM_DEBUG */
-
-
-#define RING_BUSY 1 /* indication from do_tx that PDU has to be
- backlogged */
-
-static struct atm_dev *zatm_boards = NULL;
-static unsigned long dummy[2] = {0,0};
-
-
-#define zin_n(r) inl(zatm_dev->base+r*4)
-#define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
-#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
-#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
-
-/* RX0, RX1, TX0, TX1 */
-static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
-static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
-
-#define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i])
-
-
-/*-------------------------------- utilities --------------------------------*/
-
-
-static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
-{
- zwait();
- zout(value,CER);
- zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
- (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-}
-
-
-static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
-{
- zwait();
- zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
- (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait();
- return zin(CER);
-}
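/*
 * A usage sketch (ADDR and SOME_BIT are hypothetical): zpokel/zpeekl
 * implement the uPD98401 indirect-access protocol by busy-waiting
 * until the command register is free and then issuing an IND_ACC
 * command naming a control-memory word, so a read-modify-write is
 * simply
 *
 *	u32 word = zpeekl(zatm_dev, ADDR);	 // wait, issue read, wait, fetch CER
 *	zpokel(zatm_dev, word | SOME_BIT, ADDR); // wait, load CER, issue write
 *
 * zpokel does not wait for completion; the zwait() at the start of the
 * next access covers that.
 */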
-
-
-/*------------------------------- free lists --------------------------------*/
-
-
-/*
- * Free buffer head structure:
- * [0] pointer to buffer (for SAR)
- * [1] buffer descr link pointer (for SAR)
- * [2] back pointer to skb (for poll_rx)
- * [3] data
- * ...
- */
-
-struct rx_buffer_head {
- u32 buffer; /* pointer to buffer (for SAR) */
- u32 link; /* buffer descriptor link pointer (for SAR) */
- struct sk_buff *skb; /* back pointer to skb (for poll_rx) */
-};
-
-
-static void refill_pool(struct atm_dev *dev,int pool)
-{
- struct zatm_dev *zatm_dev;
- struct sk_buff *skb;
- struct rx_buffer_head *first;
- unsigned long flags;
- int align,offset,free,count,size;
-
- EVENT("refill_pool\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :
- pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);
- if (size < PAGE_SIZE) {
- align = 32; /* for 32 byte alignment */
- offset = sizeof(struct rx_buffer_head);
- }
- else {
- align = 4096;
- offset = zatm_dev->pool_info[pool].offset+
- sizeof(struct rx_buffer_head);
- }
- size += align;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &
- uPD98401_RXFP_REMAIN;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- if (free >= zatm_dev->pool_info[pool].low_water) return;
- EVENT("starting ... POOL: 0x%x, 0x%x\n",
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
- EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- count = 0;
- first = NULL;
- while (free < zatm_dev->pool_info[pool].high_water) {
- struct rx_buffer_head *head;
-
- skb = alloc_skb(size,GFP_ATOMIC);
- if (!skb) {
- printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new "
- "skb (%d) with %d free\n",dev->number,size,free);
- break;
- }
- skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+
- align+offset-1) & ~(unsigned long) (align-1))-offset)-
- skb->data);
- head = (struct rx_buffer_head *) skb->data;
- skb_reserve(skb,sizeof(struct rx_buffer_head));
- if (!first) first = head;
- count++;
- head->buffer = virt_to_bus(skb->data);
- head->link = 0;
- head->skb = skb;
- EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb,
- (unsigned long) head);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- if (zatm_dev->last_free[pool])
- ((struct rx_buffer_head *) (zatm_dev->last_free[pool]->
- data))[-1].link = virt_to_bus(head);
- zatm_dev->last_free[pool] = skb;
- skb_queue_tail(&zatm_dev->pool[pool],skb);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- free++;
- }
- if (first) {
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(virt_to_bus(first),CER);
- zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
- CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- EVENT ("POOL: 0x%x, 0x%x\n",
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
- EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- }
-}
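/*
 * A worked instance of the skb_reserve() arithmetic above, assuming
 * align = 32 and a 12-byte rx_buffer_head (so offset = 12): if
 * skb->data initially sits at 0x1004, then 0x1004 + 32 + 12 - 1 =
 * 0x102f rounds down to 0x1020 and subtracting offset gives 0x1014;
 * reserving 0x1014 - 0x1004 = 16 bytes places the head at 0x1014 and,
 * after the second skb_reserve(sizeof(struct rx_buffer_head)), the
 * payload at the 32-byte-aligned address 0x1020.
 */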
-
-
-static void drain_free(struct atm_dev *dev,int pool)
-{
- skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
-}
-
-
-static int pool_index(int max_pdu)
-{
- int i;
-
- if (max_pdu % ATM_CELL_PAYLOAD)
- printk(KERN_ERR DEV_LABEL ": driver error in pool_index: "
- "max_pdu is %d\n",max_pdu);
- if (max_pdu > 65536) return -1;
- for (i = 0; (64 << i) < max_pdu; i++);
- return i+ZATM_AAL5_POOL_BASE;
-}
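/*
 * Worked example: for max_pdu = 1536 (32 cells of payload) the loop
 * stops at i = 5, since 64 << 5 = 2048 is the first power-of-two
 * bucket that fits, so the VCC draws from pool ZATM_AAL5_POOL_BASE + 5.
 */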
-
-
-/* use_pool isn't reentrant */
-
-
-static void use_pool(struct atm_dev *dev,int pool)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
- int size;
-
- zatm_dev = ZATM_DEV(dev);
- if (!(zatm_dev->pool_info[pool].ref_count++)) {
- skb_queue_head_init(&zatm_dev->pool[pool]);
- size = pool-ZATM_AAL5_POOL_BASE;
- if (size < 0) size = 0; /* 64B... */
- else if (size > 10) size = 10; /* ... 64kB */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
- uPD98401_RXFP_ALERT_SHIFT) |
- (1 << uPD98401_RXFP_BTSZ_SHIFT) |
- (size << uPD98401_RXFP_BFSZ_SHIFT),
- zatm_dev->pool_base+pool*2);
- zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+
- pool*2+1);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->last_free[pool] = NULL;
- refill_pool(dev,pool);
- }
- DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
-}
-
-
-static void unuse_pool(struct atm_dev *dev,int pool)
-{
- if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count))
- drain_free(dev,pool);
-}
-
-/*----------------------------------- RX ------------------------------------*/
-
-
-#if 0
-static void exception(struct atm_vcc *vcc)
-{
- static int count = 0;
- struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev);
- struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc);
- unsigned long *qrp;
- int i;
-
- if (count++ > 2) return;
- for (i = 0; i < 8; i++)
- printk("TX%d: 0x%08lx\n",i,
- zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i));
- for (i = 0; i < 5; i++)
- printk("SH%d: 0x%08lx\n",i,
- zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i));
- qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
- uPD98401_TXVC_QRP);
- printk("qrp=0x%08lx\n",(unsigned long) qrp);
- for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]);
-}
-#endif
-
-
-static const char *err_txt[] = {
- "No error",
- "RX buf underflow",
- "RX FIFO overrun",
- "Maximum len violation",
- "CRC error",
- "User abort",
- "Length violation",
- "T1 error",
- "Deactivated",
- "???",
- "???",
- "???",
- "???",
- "???",
- "???",
- "???"
-};
-
-
-static void poll_rx(struct atm_dev *dev,int mbx)
-{
- struct zatm_dev *zatm_dev;
- unsigned long pos;
- u32 x;
- int error;
-
- EVENT("poll_rx\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
- while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
- u32 *here;
- struct sk_buff *skb;
- struct atm_vcc *vcc;
- int cells,size,chan;
-
- EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
- here = (u32 *) pos;
- if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
- pos = zatm_dev->mbx_start[mbx];
- cells = here[0] & uPD98401_AAL5_SIZE;
-#if 0
-printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
-{
-unsigned long *x;
- printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
- zatm_dev->pool_base),
- zpeekl(zatm_dev,zatm_dev->pool_base+1));
- x = (unsigned long *) here[2];
- printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
- x[0],x[1],x[2],x[3]);
-}
-#endif
- error = 0;
- if (here[3] & uPD98401_AAL5_ERR) {
- error = (here[3] & uPD98401_AAL5_ES) >>
- uPD98401_AAL5_ES_SHIFT;
- if (error == uPD98401_AAL5_ES_DEACT ||
- error == uPD98401_AAL5_ES_FREE) continue;
- }
-EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
- uPD98401_AAL5_ES_SHIFT,error);
- skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
- __net_timestamp(skb);
-#if 0
-printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
- ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
- ((unsigned *) skb->data)[0]);
-#endif
- EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
- (unsigned long) here);
-#if 0
-printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
-#endif
- size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
- ATM_CELL_PAYLOAD/sizeof(u16)-3]);
- EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
- chan = (here[3] & uPD98401_AAL5_CHAN) >>
- uPD98401_AAL5_CHAN_SHIFT;
- if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
- int pos;
- vcc = zatm_dev->rx_map[chan];
- pos = ZATM_VCC(vcc)->pool;
- if (skb == zatm_dev->last_free[pos])
- zatm_dev->last_free[pos] = NULL;
- skb_unlink(skb, zatm_dev->pool + pos);
- }
- else {
- printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
- "for non-existing channel\n",dev->number);
- size = 0;
- vcc = NULL;
- event_dump();
- }
- if (error) {
- static unsigned long silence = 0;
- static int last_error = 0;
-
- if (error != last_error ||
- time_after(jiffies, silence) || silence == 0){
- printk(KERN_WARNING DEV_LABEL "(itf %d): "
- "chan %d error %s\n",dev->number,chan,
- err_txt[error]);
- last_error = error;
- silence = (jiffies+2*HZ)|1;
- }
- size = 0;
- }
- if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
- size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
- printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
- "cells\n",dev->number,size,cells);
- size = 0;
- event_dump();
- }
- if (size > ATM_MAX_AAL5_PDU) {
- printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
- "(%d)\n",dev->number,size);
- size = 0;
- event_dump();
- }
- if (!size) {
- dev_kfree_skb_irq(skb);
- if (vcc) atomic_inc(&vcc->stats->rx_err);
- continue;
- }
- if (!atm_charge(vcc,skb->truesize)) {
- dev_kfree_skb_irq(skb);
- continue;
- }
- skb->len = size;
- ATM_SKB(skb)->vcc = vcc;
- vcc->push(vcc,skb);
- atomic_inc(&vcc->stats->rx);
- }
- zout(pos & 0xffff,MTA(mbx));
-#if 0 /* probably a stupid idea */
- refill_pool(dev,zatm_vcc->pool);
- /* maybe this saves us a few interrupts */
-#endif
-}
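
poll_rx() drains an RX mailbox by chasing the adapter's write offset (MWA) with its own read offset (MTA), stepping over 16-byte indications and wrapping at the end of the ring. A simplified userspace model of that walk; ring size and contents are invented:

    #include <stdio.h>
    #include <string.h>

    #define ENTRY_SIZE 16                 /* one RX indication, 4 x u32 */
    #define RING_BYTES (8 * ENTRY_SIZE)

    static unsigned char ring[RING_BYTES];

    /* consume everything between our tail and the producer's head */
    static void consume(unsigned *tail, unsigned head)
    {
        while (*tail != head) {
            unsigned char *here = ring + *tail;

            printf("entry at %u, first byte 0x%02x\n", *tail, here[0]);
            *tail += ENTRY_SIZE;
            if (*tail == RING_BYTES)      /* wrap, as poll_rx does */
                *tail = 0;
        }
    }

    int main(void)
    {
        unsigned tail = 0;

        memset(ring, 0xab, sizeof(ring));
        consume(&tail, 3 * ENTRY_SIZE);   /* producer wrote 3 entries */
        printf("tail now %u\n", tail);
        return 0;
    }
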
-
-
-static int open_rx_first(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- unsigned short chan;
- int cells;
-
- DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053));
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- zatm_vcc->rx_chan = 0;
- if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
- if (vcc->qos.aal == ATM_AAL5) {
- if (vcc->qos.rxtp.max_sdu > 65464)
- vcc->qos.rxtp.max_sdu = 65464;
- /* fix this - we may want to receive 64kB SDUs
- later */
- cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
- ATM_CELL_PAYLOAD);
- zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
- }
- else {
- cells = 1;
- zatm_vcc->pool = ZATM_AAL0_POOL;
- }
- if (zatm_vcc->pool < 0) return -EMSGSIZE;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_OPEN_CHAN,CMR);
- zwait();
- DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
- chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- DPRINTK("chan is %d\n",chan);
- if (!chan) return -EAGAIN;
- use_pool(vcc->dev,zatm_vcc->pool);
- DPRINTK("pool %d\n",zatm_vcc->pool);
- /* set up VC descriptor */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT,
- chan*VC_SIZE/4);
- zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ?
- uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1);
- zpokel(zatm_dev,0,chan*VC_SIZE/4+2);
- zatm_vcc->rx_chan = chan;
- zatm_dev->rx_map[chan] = vcc;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
-}
-
-
-static int open_rx_second(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int pos,shift;
-
- DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053));
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- if (!zatm_vcc->rx_chan) return 0;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- /* should also handle VPI @@@ */
- pos = vcc->vci >> 1;
- shift = (1-(vcc->vci & 1)) << 4;
- zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) |
- ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
-}
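
open_rx_second() enables reception by patching the VCI lookup table, which packs two 16-bit entries into each 32-bit control-memory word, even VCIs in the upper half. A userspace model of the packing; RXLT_ENBL is a stand-in value, the real bit lives in uPD98401.h:

    #include <stdint.h>
    #include <stdio.h>

    #define RXLT_ENBL 0x8000u             /* stand-in for uPD98401_RXLT_ENBL */

    static uint32_t table[128];           /* lookup table, 2 VCIs per word */

    static void set_rx_chan(int vci, uint16_t chan)
    {
        int pos = vci >> 1;
        int shift = (1 - (vci & 1)) << 4; /* 16 for even VCI, 0 for odd */

        table[pos] = (table[pos] & ~(0xffffu << shift)) |
                     ((uint32_t)(chan | RXLT_ENBL) << shift);
    }

    int main(void)
    {
        set_rx_chan(42, 7);               /* high half of word 21 */
        set_rx_chan(43, 9);               /* low half of the same word */
        printf("word 21 = 0x%08x\n", (unsigned)table[21]); /* 0x80078009 */
        return 0;
    }
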
-
-
-static void close_rx(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int pos,shift;
-
- zatm_vcc = ZATM_VCC(vcc);
- zatm_dev = ZATM_DEV(vcc->dev);
- if (!zatm_vcc->rx_chan) return;
- DPRINTK("close_rx\n");
- /* disable receiver */
- if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) {
- spin_lock_irqsave(&zatm_dev->lock, flags);
- pos = vcc->vci >> 1;
- shift = (1-(vcc->vci & 1)) << 4;
- zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
- zwait();
- zout(uPD98401_NOP,CMR);
- zwait();
- zout(uPD98401_NOP,CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- udelay(10); /* why oh why ... ? */
- zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- if (!(zin(CMR) & uPD98401_CHAN_ADDR))
- printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
- "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL;
- zatm_vcc->rx_chan = 0;
- unuse_pool(vcc->dev,zatm_vcc->pool);
-}
-
-
-static int start_rx(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- int i;
-
- DPRINTK("start_rx\n");
- zatm_dev = ZATM_DEV(dev);
- zatm_dev->rx_map = kcalloc(zatm_dev->chans,
- sizeof(*zatm_dev->rx_map),
- GFP_KERNEL);
- if (!zatm_dev->rx_map) return -ENOMEM;
- /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
- zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
- /* prepare free buffer pools */
- for (i = 0; i <= ZATM_LAST_POOL; i++) {
- zatm_dev->pool_info[i].ref_count = 0;
- zatm_dev->pool_info[i].rqa_count = 0;
- zatm_dev->pool_info[i].rqu_count = 0;
- zatm_dev->pool_info[i].low_water = LOW_MARK;
- zatm_dev->pool_info[i].high_water = HIGH_MARK;
- zatm_dev->pool_info[i].offset = 0;
- zatm_dev->pool_info[i].next_off = 0;
- zatm_dev->pool_info[i].next_cnt = 0;
- zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES;
- }
- return 0;
-}
-
-
-/*----------------------------------- TX ------------------------------------*/
-
-
-static int do_tx(struct sk_buff *skb)
-{
- struct atm_vcc *vcc;
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- u32 *dsc;
- unsigned long flags;
-
- EVENT("do_tx\n",0,0);
- DPRINTK("sending skb %p\n",skb);
- vcc = ATM_SKB(skb)->vcc;
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- if (!skb_shinfo(skb)->nr_frags) {
- if (zatm_vcc->txing == RING_ENTRIES-1) {
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return RING_BUSY;
- }
- zatm_vcc->txing++;
- dsc = zatm_vcc->ring+zatm_vcc->ring_curr;
- zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) &
- (RING_ENTRIES*RING_WORDS-1);
- dsc[1] = 0;
- dsc[2] = skb->len;
- dsc[3] = virt_to_bus(skb->data);
- mb();
- dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM
- | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
- (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
- uPD98401_CLPM_1 : uPD98401_CLPM_0));
- EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0);
- }
- else {
-printk("NONONONOO!!!!\n");
- dsc = NULL;
-#if 0
- u32 *put;
- int i;
-
- dsc = kmalloc(uPD98401_TXPD_SIZE * 2 +
- uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
- if (!dsc) {
- if (vcc->pop)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_irq(skb);
- return -EAGAIN;
- }
- /* @@@ should check alignment */
- put = dsc+8;
- dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP |
- (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
- (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
- uPD98401_CLPM_1 : uPD98401_CLPM_0));
- dsc[1] = 0;
- dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
- dsc[3] = virt_to_bus(put);
- for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
- *put++ = ((struct iovec *) skb->data)[i].iov_len;
- *put++ = virt_to_bus(((struct iovec *)
- skb->data)[i].iov_base);
- }
- put[-2] |= uPD98401_TXBD_LAST;
-#endif
- }
- ZATM_PRV_DSC(skb) = dsc;
- skb_queue_tail(&zatm_vcc->tx_queue,skb);
- DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
- uPD98401_TXVC_QRP));
- zwait();
- zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- EVENT("done\n",0,0);
- return 0;
-}
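
The mb() in do_tx() orders the descriptor publish: the body words (dsc[1..3]) must be globally visible before the word carrying the valid bit, so the adapter can never fetch a half-initialized descriptor. A minimal model of the pattern using a C11 release store (the kernel's mb() is a full barrier; release ordering is the minimum this publish needs); the field layout and bit position are invented, not the uPD98401's:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct txpd {
        _Atomic uint32_t flags;           /* valid bit, written last */
        uint32_t resv;
        uint32_t len;
        uint32_t buf;
    };

    #define TXPD_VALID (1u << 31)         /* invented bit position */

    static void post(struct txpd *d, uint32_t len, uint32_t buf)
    {
        d->resv = 0;
        d->len = len;
        d->buf = buf;
        /* release store: the body is visible before the flag */
        atomic_store_explicit(&d->flags, TXPD_VALID, memory_order_release);
    }

    int main(void)
    {
        struct txpd d = { 0 };

        post(&d, 1500, 0x1000);
        printf("flags=0x%08x len=%u\n",
               (unsigned)atomic_load(&d.flags), (unsigned)d.len);
        return 0;
    }
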
-
-
-static inline void dequeue_tx(struct atm_vcc *vcc)
-{
- struct zatm_vcc *zatm_vcc;
- struct sk_buff *skb;
-
- EVENT("dequeue_tx\n",0,0);
- zatm_vcc = ZATM_VCC(vcc);
- skb = skb_dequeue(&zatm_vcc->tx_queue);
- if (!skb) {
- printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not "
- "txing\n",vcc->dev->number);
- return;
- }
-#if 0 /* @@@ would fail on CLP */
-if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
- uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n",
- *ZATM_PRV_DSC(skb));
-#endif
- *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */
- zatm_vcc->txing--;
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb_irq(skb);
- while ((skb = skb_dequeue(&zatm_vcc->backlog)))
- if (do_tx(skb) == RING_BUSY) {
- skb_queue_head(&zatm_vcc->backlog,skb);
- break;
- }
- atomic_inc(&vcc->stats->tx);
- wake_up(&zatm_vcc->tx_wait);
-}
-
-
-static void poll_tx(struct atm_dev *dev,int mbx)
-{
- struct zatm_dev *zatm_dev;
- unsigned long pos;
- u32 x;
-
- EVENT("poll_tx\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
- while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
- int chan;
-
-#if 1
- u32 data,*addr;
-
- EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
- addr = (u32 *) pos;
- data = *addr;
- chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT;
- EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr,
- data);
- EVENT("chan = %d\n",chan,0);
-#else
-NO !
- chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN)
- >> uPD98401_TXI_CONN_SHIFT;
-#endif
- if (chan < zatm_dev->chans && zatm_dev->tx_map[chan])
- dequeue_tx(zatm_dev->tx_map[chan]);
- else {
- printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication "
- "for non-existing channel %d\n",dev->number,chan);
- event_dump();
- }
- if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx])
- pos = zatm_dev->mbx_start[mbx];
- }
- zout(pos & 0xffff,MTA(mbx));
-}
-
-
-/*
- * BUG BUG BUG: Doesn't handle "new-style" rate specification yet.
- */
-
-static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
- unsigned long i,m,c;
- int shaper;
-
- DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max);
- zatm_dev = ZATM_DEV(dev);
- if (!zatm_dev->free_shapers) return -EAGAIN;
- for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++);
- zatm_dev->free_shapers &= ~1 << shaper;
- if (ubr) {
- c = 5;
- i = m = 1;
- zatm_dev->ubr_ref_cnt++;
- zatm_dev->ubr = shaper;
- *pcr = 0;
- }
- else {
- if (min) {
- if (min <= 255) {
- i = min;
- m = ATM_OC3_PCR;
- }
- else {
- i = 255;
- m = ATM_OC3_PCR*255/min;
- }
- }
- else {
- if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw;
- if (max <= 255) {
- i = max;
- m = ATM_OC3_PCR;
- }
- else {
- i = 255;
- m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
- }
- }
- if (i > m) {
- printk(KERN_CRIT DEV_LABEL "shaper algorithm botched "
- "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m);
- m = i;
- }
- *pcr = i*ATM_OC3_PCR/m;
- c = 20; /* @@@ should use max_cdv ! */
- if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL;
- if (zatm_dev->tx_bw < *pcr) return -EAGAIN;
- zatm_dev->tx_bw -= *pcr;
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr);
- zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper));
- zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper));
- zpokel(zatm_dev,0,uPD98401_X(shaper));
- zpokel(zatm_dev,0,uPD98401_Y(shaper));
- zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper));
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return shaper;
-}
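
alloc_shaper() encodes a rate as an i/m pair with i capped at 255, granting PCR = i*ATM_OC3_PCR/m. A userspace model of the max-rate branch; rounding m up keeps the grant at or below the request:

    #include <stdio.h>

    #define ATM_OC3_PCR 353207            /* cells/s on OC-3 after framing */

    static int pick_rate(int max, int *i, int *m)
    {
        if (max <= 255) {
            *i = max;
            *m = ATM_OC3_PCR;             /* grant is then exactly max */
        } else {
            *i = 255;
            *m = (ATM_OC3_PCR * 255 + max - 1) / max;  /* DIV_ROUND_UP */
        }
        return *i * ATM_OC3_PCR / *m;     /* granted PCR in cells/s */
    }

    int main(void)
    {
        int i, m, pcr;

        pcr = pick_rate(100000, &i, &m);
        printf("request 100000 -> i=%d m=%d PCR=%d\n", i, m, pcr);
        pcr = pick_rate(200, &i, &m);
        printf("request 200    -> i=%d m=%d PCR=%d\n", i, m, pcr);
        return 0;
    }
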
-
-
-static void dealloc_shaper(struct atm_dev *dev,int shaper)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
-
- zatm_dev = ZATM_DEV(dev);
- if (shaper == zatm_dev->ubr) {
- if (--zatm_dev->ubr_ref_cnt) return;
- zatm_dev->ubr = -1;
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E,
- uPD98401_PS(shaper));
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->free_shapers |= 1 << shaper;
-}
-
-
-static void close_tx(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int chan;
-
- zatm_vcc = ZATM_VCC(vcc);
- zatm_dev = ZATM_DEV(vcc->dev);
- chan = zatm_vcc->tx_chan;
- if (!chan) return;
- DPRINTK("close_tx\n");
- if (skb_peek(&zatm_vcc->backlog)) {
- printk("waiting for backlog to drain ...\n");
- event_dump();
- wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));
- }
- if (skb_peek(&zatm_vcc->tx_queue)) {
- printk("waiting for TX queue to drain ...\n");
- event_dump();
- wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue));
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
-#if 0
- zwait();
- zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
-#endif
- zwait();
- zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- if (!(zin(CMR) & uPD98401_CHAN_ADDR))
- printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
- "%d\n",vcc->dev->number,chan);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_vcc->tx_chan = 0;
- zatm_dev->tx_map[chan] = NULL;
- if (zatm_vcc->shaper != zatm_dev->ubr) {
- zatm_dev->tx_bw += vcc->qos.txtp.min_pcr;
- dealloc_shaper(vcc->dev,zatm_vcc->shaper);
- }
- kfree(zatm_vcc->ring);
-}
-
-
-static int open_tx_first(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- u32 *loop;
- unsigned short chan;
- int unlimited;
-
- DPRINTK("open_tx_first\n");
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- zatm_vcc->tx_chan = 0;
- if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_OPEN_CHAN,CMR);
- zwait();
- DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
- chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- DPRINTK("chan is %d\n",chan);
- if (!chan) return -EAGAIN;
- unlimited = vcc->qos.txtp.traffic_class == ATM_UBR &&
- (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR ||
- vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
- if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
- else {
- int pcr;
-
- if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
- if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
- vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
- < 0) {
- close_tx(vcc);
- return zatm_vcc->shaper;
- }
- if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR;
- vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr;
- }
- zatm_vcc->tx_chan = chan;
- skb_queue_head_init(&zatm_vcc->tx_queue);
- init_waitqueue_head(&zatm_vcc->tx_wait);
- /* initialize ring */
- zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
- if (!zatm_vcc->ring) return -ENOMEM;
- loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
- loop[0] = uPD98401_TXPD_V;
- loop[1] = loop[2] = 0;
- loop[3] = virt_to_bus(zatm_vcc->ring);
- zatm_vcc->ring_curr = 0;
- zatm_vcc->txing = 0;
- skb_queue_head_init(&zatm_vcc->backlog);
- zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring),
- chan*VC_SIZE/4+uPD98401_TXVC_QRP);
- return 0;
-}
-
-
-static int open_tx_second(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
-
- DPRINTK("open_tx_second\n");
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- if (!zatm_vcc->tx_chan) return 0;
- /* set up VC descriptor */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4);
- zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper <<
- uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) |
- vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1);
- zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc;
- return 0;
-}
-
-
-static int start_tx(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- int i;
-
- DPRINTK("start_tx\n");
- zatm_dev = ZATM_DEV(dev);
- zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
- sizeof(*zatm_dev->tx_map),
- GFP_KERNEL);
- if (!zatm_dev->tx_map) return -ENOMEM;
- zatm_dev->tx_bw = ATM_OC3_PCR;
- zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
- zatm_dev->ubr = -1;
- zatm_dev->ubr_ref_cnt = 0;
- /* initialize shapers */
- for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i));
- return 0;
-}
-
-
-/*------------------------------- interrupts --------------------------------*/
-
-
-static irqreturn_t zatm_int(int irq,void *dev_id)
-{
- struct atm_dev *dev;
- struct zatm_dev *zatm_dev;
- u32 reason;
- int handled = 0;
-
- dev = dev_id;
- zatm_dev = ZATM_DEV(dev);
- while ((reason = zin(GSR))) {
- handled = 1;
- EVENT("reason 0x%x\n",reason,0);
- if (reason & uPD98401_INT_PI) {
- EVENT("PHY int\n",0,0);
- dev->phy->interrupt(dev);
- }
- if (reason & uPD98401_INT_RQA) {
- unsigned long pools;
- int i;
-
- pools = zin(RQA);
- EVENT("RQA (0x%08x)\n",pools,0);
- for (i = 0; pools; i++) {
- if (pools & 1) {
- refill_pool(dev,i);
- zatm_dev->pool_info[i].rqa_count++;
- }
- pools >>= 1;
- }
- }
- if (reason & uPD98401_INT_RQU) {
- unsigned long pools;
- int i;
- pools = zin(RQU);
- printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n",
- dev->number,pools);
- event_dump();
- for (i = 0; pools; i++) {
- if (pools & 1) {
- refill_pool(dev,i);
- zatm_dev->pool_info[i].rqu_count++;
- }
- pools >>= 1;
- }
- }
- /* don't handle RD */
- if (reason & uPD98401_INT_SPE)
- printk(KERN_ALERT DEV_LABEL "(itf %d): system parity "
- "error at 0x%08x\n",dev->number,zin(ADDR));
- if (reason & uPD98401_INT_CPE)
- printk(KERN_ALERT DEV_LABEL "(itf %d): control memory "
- "parity error at 0x%08x\n",dev->number,zin(ADDR));
- if (reason & uPD98401_INT_SBE) {
- printk(KERN_ALERT DEV_LABEL "(itf %d): system bus "
- "error at 0x%08x\n",dev->number,zin(ADDR));
- event_dump();
- }
- /* don't handle IND */
- if (reason & uPD98401_INT_MF) {
- printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full "
- "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF)
- >> uPD98401_INT_MF_SHIFT);
- event_dump();
- /* @@@ should try to recover */
- }
- if (reason & uPD98401_INT_MM) {
- if (reason & 1) poll_rx(dev,0);
- if (reason & 2) poll_rx(dev,1);
- if (reason & 4) poll_tx(dev,2);
- if (reason & 8) poll_tx(dev,3);
- }
- /* @@@ handle RCRn */
- }
- return IRQ_RETVAL(handled);
-}
-
-
-/*----------------------------- (E)EPROM access -----------------------------*/
-
-
-static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
- unsigned short cmd)
-{
- int error;
-
- if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value)))
- printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n",
- error);
-}
-
-
-static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
-{
- unsigned int value;
- int error;
-
- if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value)))
- printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n",
- error);
- return value;
-}
-
-
-static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
- int bits, unsigned short cmd)
-{
- unsigned long value;
- int i;
-
- for (i = bits-1; i >= 0; i--) {
- value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0);
- eprom_set(zatm_dev,value,cmd);
- eprom_set(zatm_dev,value | ZEPROM_SK,cmd);
- eprom_set(zatm_dev,value,cmd);
- }
-}
-
-
-static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
- unsigned short cmd)
-{
- int i;
-
- *byte = 0;
- for (i = 8; i; i--) {
- eprom_set(zatm_dev,ZEPROM_CS,cmd);
- eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd);
- *byte <<= 1;
- if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1;
- eprom_set(zatm_dev,ZEPROM_CS,cmd);
- }
-}
-
-
-static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
- int swap)
-{
- unsigned char buf[ZEPROM_SIZE];
- struct zatm_dev *zatm_dev;
- int i;
-
- zatm_dev = ZATM_DEV(dev);
- for (i = 0; i < ZEPROM_SIZE; i += 2) {
- eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */
- eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd);
- eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd);
- eprom_get_byte(zatm_dev,buf+i+swap,cmd);
- eprom_get_byte(zatm_dev,buf+i+1-swap,cmd);
- eprom_set(zatm_dev,0,cmd); /* deselect EPROM */
- }
- memcpy(dev->esi,buf+offset,ESI_LEN);
- return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */
-}
-
-
-static void eprom_get_esi(struct atm_dev *dev)
-{
- if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
- (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
-}
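
The eprom_* helpers bit-bang a 93Cxx-style serial EEPROM through control bits exposed in PCI configuration space: each command bit is presented on DI and latched on a rising SK edge with CS held, then data is clocked out one bit per SK pulse on DO. A sketch of the waveform; set_pins()/get_do() are stand-ins for the pci_write/read_config_dword() accesses, and the bit assignments and opcode are illustrative:

    #include <stdio.h>

    enum { CS = 1, SK = 2, DI = 4 };      /* invented bit assignments */

    static void set_pins(unsigned v) { (void)v; /* config-space write */ }
    static int get_do(void) { return 1;  /* config-space read of DO */ }

    static void put_bits(unsigned long data, int bits)
    {
        for (int i = bits - 1; i >= 0; i--) {
            unsigned v = CS | (((data >> i) & 1) ? DI : 0);

            set_pins(v);                  /* data valid ... */
            set_pins(v | SK);             /* ... latched on rising edge */
            set_pins(v);
        }
    }

    static unsigned char get_byte(void)
    {
        unsigned char byte = 0;

        for (int i = 8; i; i--) {
            set_pins(CS);
            set_pins(CS | SK);
            byte = (byte << 1) | (get_do() & 1);
            set_pins(CS);
        }
        return byte;
    }

    int main(void)
    {
        put_bits(0x6, 3);                 /* illustrative read opcode */
        printf("byte = 0x%02x\n", get_byte());
        return 0;
    }
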
-
-
-/*--------------------------------- entries ---------------------------------*/
-
-
-static int zatm_init(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- struct pci_dev *pci_dev;
- unsigned short command;
- int error,i,last;
- unsigned long t0,t1,t2;
-
- DPRINTK(">zatm_init\n");
- zatm_dev = ZATM_DEV(dev);
- spin_lock_init(&zatm_dev->lock);
- pci_dev = zatm_dev->pci_dev;
- zatm_dev->base = pci_resource_start(pci_dev, 0);
- zatm_dev->irq = pci_dev->irq;
- if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
- printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
- dev->number,error);
- return -EINVAL;
- }
- if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
- command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) {
- printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)"
- "\n",dev->number,error);
- return -EIO;
- }
- eprom_get_esi(dev);
- printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
- dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
- /* reset uPD98401 */
- zout(0,SWR);
- while (!(zin(GSR) & uPD98401_INT_IND));
- zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR);
- last = MAX_CRAM_SIZE;
- for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) {
- zpokel(zatm_dev,0x55555555,i);
- if (zpeekl(zatm_dev,i) != 0x55555555) last = i;
- else {
- zpokel(zatm_dev,0xAAAAAAAA,i);
- if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i;
- else zpokel(zatm_dev,i,i);
- }
- }
- for (i = 0; i < last; i += RAM_INCREMENT)
- if (zpeekl(zatm_dev,i) != i) break;
- zatm_dev->mem = i << 2;
- while (i) zpokel(zatm_dev,0,--i);
- /* reset again to rebuild memory pointers */
- zout(0,SWR);
- while (!(zin(GSR) & uPD98401_INT_IND));
- zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 |
- uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR);
- /* TODO: should shrink allocation now */
- printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" :
- "MMF");
- for (i = 0; i < ESI_LEN; i++)
- printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? ")\n" : "-");
- do {
- unsigned long flags;
-
- spin_lock_irqsave(&zatm_dev->lock, flags);
- t0 = zpeekl(zatm_dev,uPD98401_TSR);
- udelay(10);
- t1 = zpeekl(zatm_dev,uPD98401_TSR);
- udelay(1010);
- t2 = zpeekl(zatm_dev,uPD98401_TSR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- }
- while (t0 > t1 || t1 > t2); /* loop if wrapping ... */
- zatm_dev->khz = t2-2*t1+t0;
- printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d "
- "MHz\n",dev->number,
- (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT,
- zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000);
- return uPD98402_init(dev);
-}
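
The timing loop at the end of zatm_init() derives the chip clock from three reads of the free-running timer: t1 - t0 spans 10 us plus one read, t2 - t1 spans 1010 us plus one read, so t2 - 2*t1 + t0 = (t2 - t1) - (t1 - t0) counts ticks in exactly 1000 us, i.e. the clock in kHz, with the per-read overhead cancelled. A toy check with invented numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long hz = 33000000;      /* pretend a 33 MHz timer */
        unsigned long overhead = 7;       /* ticks consumed per read */
        unsigned long t0 = 1000;
        unsigned long t1 = t0 + hz / 100000 + overhead;        /* +10 us */
        unsigned long t2 = t1 + hz * 101 / 100000 + overhead;  /* +1010 us */

        printf("khz = %lu\n", t2 - 2 * t1 + t0);  /* prints 33000 */
        return 0;
    }
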
-
-
-static int zatm_start(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev = ZATM_DEV(dev);
- struct pci_dev *pdev = zatm_dev->pci_dev;
- unsigned long curr;
- int pools,vccs,rx;
- int error, i, ld;
-
- DPRINTK("zatm_start\n");
- zatm_dev->rx_map = zatm_dev->tx_map = NULL;
- for (i = 0; i < NR_MBX; i++)
- zatm_dev->mbx_start[i] = 0;
- error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev);
- if (error < 0) {
- printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
- dev->number,zatm_dev->irq);
- goto done;
- }
- /* define memory regions */
- pools = NR_POOLS;
- if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
- pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
- vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
- (2*VC_SIZE+RX_SIZE);
- ld = -1;
- for (rx = 1; rx < vccs; rx <<= 1) ld++;
- dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */
- dev->ci_range.vci_bits = ld;
- dev->link_rate = ATM_OC3_PCR;
- zatm_dev->chans = vccs; /* ??? */
- curr = rx*RX_SIZE/4;
- DPRINTK("RX pool 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
- zatm_dev->pool_base = curr;
- curr += pools*POOL_SIZE/4;
- DPRINTK("Shapers 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
- curr += NR_SHAPERS*SHAPER_SIZE/4;
- DPRINTK("Free 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
- printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, "
- "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
- (zatm_dev->mem-curr*4)/VC_SIZE);
- /* create mailboxes */
- for (i = 0; i < NR_MBX; i++) {
- void *mbx;
- dma_addr_t mbx_dma;
-
- if (!mbx_entries[i])
- continue;
- mbx = dma_alloc_coherent(&pdev->dev,
- 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
- if (!mbx) {
- error = -ENOMEM;
- goto out;
- }
- /*
- * Alignment provided by dma_alloc_coherent() isn't enough
- * for this device.
- */
- if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
- printk(KERN_ERR DEV_LABEL "(itf %d): system "
- "bus incompatible with driver\n", dev->number);
- dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
- error = -ENODEV;
- goto out;
- }
- DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
- zatm_dev->mbx_start[i] = (unsigned long)mbx;
- zatm_dev->mbx_dma[i] = mbx_dma;
- zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
- 0xffff;
- zout(mbx_dma >> 16, MSH(i));
- zout(mbx_dma, MSL(i));
- zout(zatm_dev->mbx_end[i], MBA(i));
- zout((unsigned long)mbx & 0xffff, MTA(i));
- zout((unsigned long)mbx & 0xffff, MWA(i));
- }
- error = start_tx(dev);
- if (error)
- goto out;
- error = start_rx(dev);
- if (error)
- goto out_tx;
- error = dev->phy->start(dev);
- if (error)
- goto out_rx;
- zout(0xffffffff,IMR); /* enable interrupts */
- /* enable TX & RX */
- zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
-done:
- return error;
-
-out_rx:
- kfree(zatm_dev->rx_map);
-out_tx:
- kfree(zatm_dev->tx_map);
-out:
- while (i-- > 0) {
- dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
- (void *)zatm_dev->mbx_start[i],
- zatm_dev->mbx_dma[i]);
- }
- free_irq(zatm_dev->irq, dev);
- goto done;
-}
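
The alignment test in zatm_start() XORs the kernel virtual address with the DMA address and checks the low 16 bits: the adapter is programmed with the bus address (MSH/MSL) while the driver tracks progress using the low 16 bits of the virtual address (MTA/MWA), so both must agree modulo 64 KiB. A small model with invented addresses:

    #include <stdint.h>
    #include <stdio.h>

    static int mbx_usable(uint64_t virt, uint64_t dma)
    {
        return ((virt ^ dma) & 0xffff) == 0;
    }

    int main(void)
    {
        /* same low 16 bits -> usable */
        printf("%d\n", mbx_usable(0xffff888010020000ull, 0x10020000ull));
        /* offsets disagree within the 64 KiB window -> rejected */
        printf("%d\n", mbx_usable(0xffff888010020000ull, 0x10028000ull));
        return 0;
    }
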
-
-
-static void zatm_close(struct atm_vcc *vcc)
-{
- DPRINTK(">zatm_close\n");
- if (!ZATM_VCC(vcc)) return;
- clear_bit(ATM_VF_READY,&vcc->flags);
- close_rx(vcc);
- EVENT("close_tx\n",0,0);
- close_tx(vcc);
- DPRINTK("zatm_close: done waiting\n");
- /* deallocate memory */
- kfree(ZATM_VCC(vcc));
- vcc->dev_data = NULL;
- clear_bit(ATM_VF_ADDR,&vcc->flags);
-}
-
-
-static int zatm_open(struct atm_vcc *vcc)
-{
- struct zatm_vcc *zatm_vcc;
- short vpi = vcc->vpi;
- int vci = vcc->vci;
- int error;
-
- DPRINTK(">zatm_open\n");
- if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
- vcc->dev_data = NULL;
- if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
- set_bit(ATM_VF_ADDR,&vcc->flags);
- if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */
- DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
- vcc->vci);
- if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
- zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
- if (!zatm_vcc) {
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -ENOMEM;
- }
- vcc->dev_data = zatm_vcc;
- ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */
- if ((error = open_rx_first(vcc))) {
- zatm_close(vcc);
- return error;
- }
- if ((error = open_tx_first(vcc))) {
- zatm_close(vcc);
- return error;
- }
- }
- if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0;
- if ((error = open_rx_second(vcc))) {
- zatm_close(vcc);
- return error;
- }
- if ((error = open_tx_second(vcc))) {
- zatm_close(vcc);
- return error;
- }
- set_bit(ATM_VF_READY,&vcc->flags);
- return 0;
-}
-
-
-static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
-{
- printk("Not yet implemented\n");
- return -ENOSYS;
- /* @@@ */
-}
-
-
-static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
-
- zatm_dev = ZATM_DEV(dev);
- switch (cmd) {
- case ZATM_GETPOOLZ:
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- fallthrough;
- case ZATM_GETPOOL:
- {
- struct zatm_pool_info info;
- int pool;
-
- if (get_user(pool,
- &((struct zatm_pool_req __user *) arg)->pool_num))
- return -EFAULT;
- if (pool < 0 || pool > ZATM_LAST_POOL)
- return -EINVAL;
- pool = array_index_nospec(pool,
- ZATM_LAST_POOL + 1);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- info = zatm_dev->pool_info[pool];
- if (cmd == ZATM_GETPOOLZ) {
- zatm_dev->pool_info[pool].rqa_count = 0;
- zatm_dev->pool_info[pool].rqu_count = 0;
- }
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return copy_to_user(
- &((struct zatm_pool_req __user *) arg)->info,
- &info,sizeof(info)) ? -EFAULT : 0;
- }
- case ZATM_SETPOOL:
- {
- struct zatm_pool_info info;
- int pool;
-
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (get_user(pool,
- &((struct zatm_pool_req __user *) arg)->pool_num))
- return -EFAULT;
- if (pool < 0 || pool > ZATM_LAST_POOL)
- return -EINVAL;
- pool = array_index_nospec(pool,
- ZATM_LAST_POOL + 1);
- if (copy_from_user(&info,
- &((struct zatm_pool_req __user *) arg)->info,
- sizeof(info))) return -EFAULT;
- if (!info.low_water)
- info.low_water = zatm_dev->
- pool_info[pool].low_water;
- if (!info.high_water)
- info.high_water = zatm_dev->
- pool_info[pool].high_water;
- if (!info.next_thres)
- info.next_thres = zatm_dev->
- pool_info[pool].next_thres;
- if (info.low_water >= info.high_water ||
- info.low_water < 0)
- return -EINVAL;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zatm_dev->pool_info[pool].low_water =
- info.low_water;
- zatm_dev->pool_info[pool].high_water =
- info.high_water;
- zatm_dev->pool_info[pool].next_thres =
- info.next_thres;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
- }
- default:
- if (!dev->phy->ioctl) return -ENOIOCTLCMD;
- return dev->phy->ioctl(dev,cmd,arg);
- }
-}
-
-static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
-{
- int error;
-
- EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
- if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) {
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb(skb);
- return -EINVAL;
- }
- if (!skb) {
- printk(KERN_CRIT "!skb in zatm_send ?\n");
- if (vcc->pop) vcc->pop(vcc,skb);
- return -EINVAL;
- }
- ATM_SKB(skb)->vcc = vcc;
- error = do_tx(skb);
- if (error != RING_BUSY) return error;
- skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
- return 0;
-}
-
-
-static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
- unsigned long addr)
-{
- struct zatm_dev *zatm_dev;
-
- zatm_dev = ZATM_DEV(dev);
- zwait();
- zout(value,CER);
- zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
- (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-}
-
-
-static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
-{
- struct zatm_dev *zatm_dev;
-
- zatm_dev = ZATM_DEV(dev);
- zwait();
- zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
- (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait();
- return zin(CER) & 0xff;
-}
-
-
-static const struct atmdev_ops ops = {
- .open = zatm_open,
- .close = zatm_close,
- .ioctl = zatm_ioctl,
- .send = zatm_send,
- .phy_put = zatm_phy_put,
- .phy_get = zatm_phy_get,
- .change_qos = zatm_change_qos,
-};
-
-static int zatm_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
-{
- struct atm_dev *dev;
- struct zatm_dev *zatm_dev;
- int ret = -ENOMEM;
-
- zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
- if (!zatm_dev) {
- printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
- goto out;
- }
-
- dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
- if (!dev)
- goto out_free;
-
- ret = pci_enable_device(pci_dev);
- if (ret < 0)
- goto out_deregister;
-
- ret = pci_request_regions(pci_dev, DEV_LABEL);
- if (ret < 0)
- goto out_disable;
-
- ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
- if (ret < 0)
- goto out_release;
-
- zatm_dev->pci_dev = pci_dev;
- dev->dev_data = zatm_dev;
- zatm_dev->copper = (int)ent->driver_data;
- if ((ret = zatm_init(dev)) || (ret = zatm_start(dev)))
- goto out_release;
-
- pci_set_drvdata(pci_dev, dev);
- zatm_dev->more = zatm_boards;
- zatm_boards = dev;
- ret = 0;
-out:
- return ret;
-
-out_release:
- pci_release_regions(pci_dev);
-out_disable:
- pci_disable_device(pci_dev);
-out_deregister:
- atm_dev_deregister(dev);
-out_free:
- kfree(zatm_dev);
- goto out;
-}
-
-
-MODULE_LICENSE("GPL");
-
-static const struct pci_device_id zatm_pci_tbl[] = {
- { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
- { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, zatm_pci_tbl);
-
-static struct pci_driver zatm_driver = {
- .name = DEV_LABEL,
- .id_table = zatm_pci_tbl,
- .probe = zatm_init_one,
-};
-
-static int __init zatm_init_module(void)
-{
- return pci_register_driver(&zatm_driver);
-}
-
-module_init(zatm_init_module);
-/* module_exit not defined so not unloadable */
diff --git a/drivers/atm/zatm.h b/drivers/atm/zatm.h
deleted file mode 100644
index 8204369fe825..000000000000
--- a/drivers/atm/zatm.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/zatm.h - ZeitNet ZN122x device driver declarations */
-
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#ifndef DRIVER_ATM_ZATM_H
-#define DRIVER_ATM_ZATM_H
-
-#include <linux/skbuff.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/pci.h>
-
-
-#define DEV_LABEL "zatm"
-
-#define MAX_AAL5_PDU 10240 /* allocate for AAL5 PDUs of this size */
-#define MAX_RX_SIZE_LD 14 /* ceil(log2((MAX_AAL5_PDU+47)/48)) */
-
-#define LOW_MARK 12 /* start adding new buffers if less than 12 */
-#define HIGH_MARK 30 /* stop adding buffers after reaching 30 */
-#define OFF_CNG_THRES 5 /* threshold for offset changes */
-
-#define RX_SIZE 2 /* RX lookup entry size (in bytes) */
-#define NR_POOLS 32 /* number of free buffer pointers */
-#define POOL_SIZE 8 /* buffer entry size (in bytes) */
-#define NR_SHAPERS 16 /* number of shapers */
-#define SHAPER_SIZE 4 /* shaper entry size (in bytes) */
-#define VC_SIZE 32 /* VC dsc (TX or RX) size (in bytes) */
-
-#define RING_ENTRIES 32 /* ring entries (without back pointer) */
-#define RING_WORDS 4 /* ring element size */
-#define RING_SIZE (sizeof(unsigned long)*(RING_ENTRIES+1)*RING_WORDS)
-
-#define NR_MBX 4 /* four mailboxes */
-#define MBX_RX_0 0 /* mailbox indices */
-#define MBX_RX_1 1
-#define MBX_TX_0 2
-#define MBX_TX_1 3
-
-struct zatm_vcc {
- /*-------------------------------- RX part */
- int rx_chan; /* RX channel, 0 if none */
- int pool; /* free buffer pool */
- /*-------------------------------- TX part */
- int tx_chan; /* TX channel, 0 if none */
- int shaper; /* shaper, <0 if none */
- struct sk_buff_head tx_queue; /* list of buffers in transit */
- wait_queue_head_t tx_wait; /* for close */
- u32 *ring; /* transmit ring */
- int ring_curr; /* current write position */
- int txing; /* number of transmits in progress */
- struct sk_buff_head backlog; /* list of buffers waiting for ring */
-};
-
-struct zatm_dev {
- /*-------------------------------- TX part */
- int tx_bw; /* remaining bandwidth */
- u32 free_shapers; /* bit set */
- int ubr; /* UBR shaper; -1 if none */
- int ubr_ref_cnt; /* number of VCs using UBR shaper */
- /*-------------------------------- RX part */
- int pool_ref[NR_POOLS]; /* free buffer pool usage counters */
- volatile struct sk_buff *last_free[NR_POOLS];
- /* last entry in respective pool */
- struct sk_buff_head pool[NR_POOLS];/* free buffer pools */
- struct zatm_pool_info pool_info[NR_POOLS]; /* pool information */
- /*-------------------------------- maps */
- struct atm_vcc **tx_map; /* TX VCCs */
- struct atm_vcc **rx_map; /* RX VCCs */
- int chans; /* map size, must be 2^n */
- /*-------------------------------- mailboxes */
- unsigned long mbx_start[NR_MBX];/* start addresses */
- dma_addr_t mbx_dma[NR_MBX];
- u16 mbx_end[NR_MBX]; /* end offset (in bytes) */
- /*-------------------------------- other pointers */
- u32 pool_base; /* Free buffer pool dsc (word addr) */
- /*-------------------------------- ZATM links */
- struct atm_dev *more; /* other ZATM devices */
- /*-------------------------------- general information */
- int mem; /* RAM on board (in bytes) */
- int khz; /* timer clock */
- int copper; /* PHY type */
- unsigned char irq; /* IRQ */
- unsigned int base; /* IO base address */
- struct pci_dev *pci_dev; /* PCI stuff */
- spinlock_t lock;
-};
-
-
-#define ZATM_DEV(d) ((struct zatm_dev *) (d)->dev_data)
-#define ZATM_VCC(d) ((struct zatm_vcc *) (d)->dev_data)
-
-
-struct zatm_skb_prv {
- struct atm_skb_data _; /* reserved */
- u32 *dsc; /* pointer to skb's descriptor */
-};
-
-#define ZATM_PRV_DSC(skb) (((struct zatm_skb_prv *) (skb)->cb)->dsc)
-
-#endif
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index af6bea56f4e2..3fc3b5940bb3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
return -EPROBE_DEFER;
}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 4b55e864a0a3..4d3efaa20b7b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1638,22 +1638,22 @@ struct sib_info {
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
-extern void notify_resource_state(struct sk_buff *,
+extern int notify_resource_state(struct sk_buff *,
unsigned int,
struct drbd_resource *,
struct resource_info *,
enum drbd_notification_type);
-extern void notify_device_state(struct sk_buff *,
+extern int notify_device_state(struct sk_buff *,
unsigned int,
struct drbd_device *,
struct device_info *,
enum drbd_notification_type);
-extern void notify_connection_state(struct sk_buff *,
+extern int notify_connection_state(struct sk_buff *,
unsigned int,
struct drbd_connection *,
struct connection_info *,
enum drbd_notification_type);
-extern void notify_peer_device_state(struct sk_buff *,
+extern int notify_peer_device_state(struct sk_buff *,
unsigned int,
struct drbd_peer_device *,
struct peer_device_info *,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9676a1d214bc..4b0b25cc916e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2719,6 +2719,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
blk_queue_write_cache(disk->queue, true, true);
 /* Setting max_hw_sectors to an odd value of 8 KiB here triggers
 a max_bio_size message upon first attach or connect. */
@@ -2773,12 +2774,12 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
if (init_submitter(device)) {
err = ERR_NOMEM;
- goto out_idr_remove_vol;
+ goto out_idr_remove_from_resource;
}
err = add_disk(disk);
if (err)
- goto out_idr_remove_vol;
+ goto out_idr_remove_from_resource;
/* inherit the connection state */
device->state.conn = first_connection(resource)->cstate;
@@ -2792,8 +2793,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_debugfs_device_add(device);
return NO_ERROR;
-out_idr_remove_vol:
- idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
for_each_connection(connection, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 02030c9c4d3b..b7216c186ba4 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -4549,7 +4549,7 @@ static int nla_put_notification_header(struct sk_buff *msg,
return drbd_notification_header_to_skb(msg, &nh, true);
}
-void notify_resource_state(struct sk_buff *skb,
+int notify_resource_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource *resource,
struct resource_info *resource_info,
@@ -4591,16 +4591,17 @@ void notify_resource_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_device_state(struct sk_buff *skb,
+int notify_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_device *device,
struct device_info *device_info,
@@ -4640,16 +4641,17 @@ void notify_device_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_connection_state(struct sk_buff *skb,
+int notify_connection_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection *connection,
struct connection_info *connection_info,
@@ -4689,16 +4691,17 @@ void notify_connection_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
-void notify_peer_device_state(struct sk_buff *skb,
+int notify_peer_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device *peer_device,
struct peer_device_info *peer_device_info,
@@ -4739,13 +4742,14 @@ void notify_peer_device_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
+ return err;
}
void notify_helper(enum drbd_notification_type type,
@@ -4796,7 +4800,7 @@ fail:
err, seq);
}
-static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
struct drbd_genlmsghdr *dh;
int err;
@@ -4810,11 +4814,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
if (nla_put_notification_header(skb, NOTIFY_EXISTS))
goto nla_put_failure;
genlmsg_end(skb, dh);
- return;
+ return 0;
nla_put_failure:
nlmsg_free(skb);
pr_err("Error %d sending event. Event seq:%u\n", err, seq);
+ return err;
}
static void free_state_changes(struct list_head *list)
@@ -4841,6 +4846,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int seq = cb->args[2];
unsigned int n;
enum drbd_notification_type flags = 0;
+ int err = 0;
/* There is no need for taking notification_mutex here: it doesn't
 matter if the initial state events mix with later state change
@@ -4849,32 +4855,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[5]--;
if (cb->args[5] == 1) {
- notify_initial_state_done(skb, seq);
+ err = notify_initial_state_done(skb, seq);
goto out;
}
n = cb->args[4]++;
if (cb->args[4] < cb->args[3])
flags |= NOTIFY_CONTINUES;
if (n < 1) {
- notify_resource_state_change(skb, seq, state_change->resource,
+ err = notify_resource_state_change(skb, seq, state_change->resource,
NOTIFY_EXISTS | flags);
goto next;
}
n--;
if (n < state_change->n_connections) {
- notify_connection_state_change(skb, seq, &state_change->connections[n],
+ err = notify_connection_state_change(skb, seq, &state_change->connections[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_connections;
if (n < state_change->n_devices) {
- notify_device_state_change(skb, seq, &state_change->devices[n],
+ err = notify_device_state_change(skb, seq, &state_change->devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_devices;
if (n < state_change->n_devices * state_change->n_connections) {
- notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+ err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
@@ -4889,7 +4895,10 @@ next:
cb->args[4] = 0;
}
out:
- return skb->len;
+ if (err)
+ return err;
+ else
+ return skb->len;
}
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index b8a27818ab3f..4ee11aef6672 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
return rv;
}
-void notify_resource_state_change(struct sk_buff *skb,
+int notify_resource_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource_state_change *resource_state_change,
enum drbd_notification_type type)
@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
.res_susp_fen = resource_state_change->susp_fen[NEW],
};
- notify_resource_state(skb, seq, resource, &resource_info, type);
+ return notify_resource_state(skb, seq, resource, &resource_info, type);
}
-void notify_connection_state_change(struct sk_buff *skb,
+int notify_connection_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection_state_change *connection_state_change,
enum drbd_notification_type type)
@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
.conn_role = connection_state_change->peer_role[NEW],
};
- notify_connection_state(skb, seq, connection, &connection_info, type);
+ return notify_connection_state(skb, seq, connection, &connection_info, type);
}
-void notify_device_state_change(struct sk_buff *skb,
+int notify_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_device_state_change *device_state_change,
enum drbd_notification_type type)
@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
.dev_disk_state = device_state_change->disk_state[NEW],
};
- notify_device_state(skb, seq, device, &device_info, type);
+ return notify_device_state(skb, seq, device, &device_info, type);
}
-void notify_peer_device_state_change(struct sk_buff *skb,
+int notify_peer_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device_state_change *p,
enum drbd_notification_type type)
@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
};
- notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+ return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
}
static void broadcast_state_change(struct drbd_state_change *state_change)
@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
bool resource_state_has_changed;
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
- void (*last_func)(struct sk_buff *, unsigned int, void *,
+ int (*last_func)(struct sk_buff *, unsigned int, void *,
enum drbd_notification_type) = NULL;
void *last_arg = NULL;
diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
index ba80f612d6ab..d5b0479bc9a6 100644
--- a/drivers/block/drbd/drbd_state_change.h
+++ b/drivers/block/drbd/drbd_state_change.h
@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
extern void copy_old_to_new_state_change(struct drbd_state_change *);
extern void forget_state_change(struct drbd_state_change *);
-extern void notify_resource_state_change(struct sk_buff *,
+extern int notify_resource_state_change(struct sk_buff *,
unsigned int,
struct drbd_resource_state_change *,
enum drbd_notification_type type);
-extern void notify_connection_state_change(struct sk_buff *,
+extern int notify_connection_state_change(struct sk_buff *,
unsigned int,
struct drbd_connection_state_change *,
enum drbd_notification_type type);
-extern void notify_device_state_change(struct sk_buff *,
+extern int notify_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_device_state_change *,
enum drbd_notification_type type);
-extern void notify_peer_device_state_change(struct sk_buff *,
+extern int notify_peer_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_peer_device_state_change *,
enum drbd_notification_type type);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 05b1120e6623..c441a4972064 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1600,7 +1600,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
* Only fake timeouts need to execute blk_mq_complete_request() here.
*/
cmd->error = BLK_STS_TIMEOUT;
- if (cmd->fake_timeout)
+ if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 7bd10d63ddbe..2dc9da683a13 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1365,7 +1365,6 @@ out_free:
*/
int cdrom_number_of_slots(struct cdrom_device_info *cdi)
{
- int status;
int nslots = 1;
struct cdrom_changer_info *info;
@@ -1377,7 +1376,7 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi)
if (!info)
return -ENOMEM;
- if ((status = cdrom_read_mech_status(cdi, info)) == 0)
+ if (cdrom_read_mech_status(cdi, info) == 0)
nslots = info->hdr.nslots;
kfree(info);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e15063d61460..3a293f919af9 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -333,7 +333,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
chacha20_block(chacha_state, first_block);
memcpy(key, first_block, CHACHA_KEY_SIZE);
- memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+ memmove(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
memzero_explicit(first_block, sizeof(first_block));
}
@@ -523,8 +523,7 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
- ssize_t ret = 0;
- size_t len;
+ size_t len, left, ret = 0;
u32 chacha_state[CHACHA_STATE_WORDS];
u8 output[CHACHA_BLOCK_SIZE];
@@ -543,37 +542,40 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
* the user directly.
*/
if (nbytes <= CHACHA_KEY_SIZE) {
- ret = copy_to_user(buf, &chacha_state[4], nbytes) ? -EFAULT : nbytes;
+ ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
goto out_zero_chacha;
}
- do {
+ for (;;) {
chacha20_block(chacha_state, output);
if (unlikely(chacha_state[12] == 0))
++chacha_state[13];
len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, output, len)) {
- ret = -EFAULT;
+ left = copy_to_user(buf, output, len);
+ if (left) {
+ ret += len - left;
break;
}
- nbytes -= len;
buf += len;
ret += len;
+ nbytes -= len;
+ if (!nbytes)
+ break;
BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
- if (!(ret % PAGE_SIZE) && nbytes) {
+ if (ret % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
cond_resched();
}
- } while (nbytes);
+ }
memzero_explicit(output, sizeof(output));
out_zero_chacha:
memzero_explicit(chacha_state, sizeof(chacha_state));
- return ret;
+ return ret ? ret : -EFAULT;
}
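
The reworked get_random_bytes_user() now accounts for partial progress: it returns the number of bytes actually copied out and reserves -EFAULT for the case where nothing at all reached userspace. A userspace model of that accounting; fake_copy() stands in for copy_to_user() and, like it, returns the number of bytes NOT copied:

    #include <stdio.h>
    #include <string.h>

    static size_t fake_copy(char *dst, const char *src, size_t len,
                            size_t fail_at)
    {
        size_t ok = fail_at < len ? fail_at : len;  /* simulate a fault */

        memcpy(dst, src, ok);
        return len - ok;
    }

    static long fill(char *dst, size_t nbytes, size_t fail_at)
    {
        const char block[64] = { 0 };     /* stands in for a ChaCha block */
        size_t ret = 0;

        while (nbytes) {
            size_t len = nbytes < sizeof(block) ? nbytes : sizeof(block);
            size_t left = fake_copy(dst, block, len, fail_at);

            if (left) {
                ret += len - left;        /* count the partial copy */
                break;
            }
            dst += len;
            ret += len;
            nbytes -= len;
            fail_at -= len;
        }
        return ret ? (long)ret : -14;     /* -EFAULT only if nothing copied */
    }

    int main(void)
    {
        char buf[256];

        printf("full: %ld\n", fill(buf, 200, 10000));  /* 200 */
        printf("partial: %ld\n", fill(buf, 200, 100)); /* 100 */
        printf("none: %ld\n", fill(buf, 200, 0));      /* -14 */
        return 0;
    }
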
/*
@@ -1016,7 +1018,7 @@ int __init rand_initialize(void)
*/
void add_device_randomness(const void *buf, size_t size)
{
- cycles_t cycles = random_get_entropy();
+ unsigned long cycles = random_get_entropy();
unsigned long flags, now = jiffies;
if (crng_init == 0 && size)
@@ -1047,8 +1049,7 @@ struct timer_rand_state {
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- cycles_t cycles = random_get_entropy();
- unsigned long flags, now = jiffies;
+ unsigned long cycles = random_get_entropy(), now = jiffies, flags;
long delta, delta2, delta3;
spin_lock_irqsave(&input_pool.lock, flags);
@@ -1337,8 +1338,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
void add_interrupt_randomness(int irq)
{
enum { MIX_INFLIGHT = 1U << 31 };
- cycles_t cycles = random_get_entropy();
- unsigned long now = jiffies;
+ unsigned long cycles = random_get_entropy(), now = jiffies;
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned int new_count;
@@ -1351,16 +1351,12 @@ void add_interrupt_randomness(int irq)
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
- if (sizeof(cycles) == 8)
+ if (sizeof(unsigned long) == 8) {
irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
- else {
+ irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+ } else {
irq_data.u32[0] = cycles ^ irq;
irq_data.u32[1] = now;
- }
-
- if (sizeof(unsigned long) == 8)
- irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
- else {
irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
irq_data.u32[3] = get_reg(fast_pool, regs);
}
@@ -1407,7 +1403,7 @@ static void entropy_timer(struct timer_list *t)
static void try_to_generate_entropy(void)
{
struct {
- cycles_t cycles;
+ unsigned long cycles;
struct timer_list timer;
} stack;
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 8a7267d116b7..3f2182d66829 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -436,7 +436,6 @@ static int wait_for_media_ready(struct cxl_dev_state *cxlds)
for (i = mbox_ready_timeout; i; i--) {
u32 temp;
- int rc;
rc = pci_read_config_dword(
pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 511805dbeb75..4c9eb53ba3f8 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -12,6 +12,7 @@ dmabuf_selftests-y := \
selftest.o \
st-dma-fence.o \
st-dma-fence-chain.o \
+ st-dma-fence-unwrap.o \
st-dma-resv.o
obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index cb1bacb5a42b..5c8a7084577b 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -159,6 +159,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence_array *array;
size_t size = sizeof(*array);
+ WARN_ON(!num_fences || !fences);
+
/* Allocate the callback structures behind the array. */
size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
@@ -219,3 +221,33 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context)
return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
+
+struct dma_fence *dma_fence_array_first(struct dma_fence *head)
+{
+ struct dma_fence_array *array;
+
+ if (!head)
+ return NULL;
+
+ array = to_dma_fence_array(head);
+ if (!array)
+ return head;
+
+ if (!array->num_fences)
+ return NULL;
+
+ return array->fences[0];
+}
+EXPORT_SYMBOL(dma_fence_array_first);
+
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index)
+{
+ struct dma_fence_array *array = to_dma_fence_array(head);
+
+ if (!array || index >= array->num_fences)
+ return NULL;
+
+ return array->fences[index];
+}
+EXPORT_SYMBOL(dma_fence_array_next);
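
dma_fence_array_first() and dma_fence_array_next() give callers one loop shape for both a plain fence and a fence array, which is how the new dma-fence-unwrap iterator consumes them. A userspace model of the iteration contract; the types are invented, and the explicit array argument stands in for the kernel's to_dma_fence_array() downcast:

    #include <stddef.h>
    #include <stdio.h>

    struct fence { int seqno; };
    struct fence_array { struct fence base; int num; struct fence **fences; };

    static struct fence *first(struct fence *head, struct fence_array *arr)
    {
        if (!head)
            return NULL;
        if (!arr)
            return head;                  /* plain fence: yields itself */
        return arr->num ? arr->fences[0] : NULL;
    }

    static struct fence *next(struct fence_array *arr, unsigned idx)
    {
        if (!arr || idx >= (unsigned)arr->num)
            return NULL;
        return arr->fences[idx];
    }

    int main(void)
    {
        struct fence a = { 1 }, b = { 2 }, *items[] = { &a, &b };
        struct fence_array arr = { { 0 }, 2, items };
        unsigned i = 0;

        for (struct fence *f = first(&arr.base, &arr); f;
             f = next(&arr, ++i))
            printf("fence %d\n", f->seqno);   /* 1 then 2 */
        return 0;
    }
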
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index 97d73aaa31da..851965867d9c 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -12,4 +12,5 @@
selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
selftest(dma_fence, dma_fence)
selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_fence_unwrap, dma_fence_unwrap)
selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
new file mode 100644
index 000000000000..039f016b57be
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/dma-fence-unwrap.h>
+#if 0
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#endif
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static inline struct mock_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static struct dma_fence *mock_array(unsigned int num_fences, ...)
+{
+ struct dma_fence_array *array;
+ struct dma_fence **fences;
+ va_list valist;
+ int i;
+
+ fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return NULL;
+
+ va_start(valist, num_fences);
+ for (i = 0; i < num_fences; ++i)
+ fences[i] = va_arg(valist, typeof(*fences));
+ va_end(valist);
+
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!array)
+ goto cleanup;
+ return &array->base;
+
+cleanup:
+ for (i = 0; i < num_fences; ++i)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return NULL;
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+ struct dma_fence *fence)
+{
+ struct dma_fence_chain *f;
+
+ f = dma_fence_chain_alloc();
+ if (!f) {
+ dma_fence_put(prev);
+ dma_fence_put(fence);
+ return NULL;
+ }
+
+ dma_fence_chain_init(f, prev, fence, 1);
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f, *chain, *array;
+ int err = 0;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ array = mock_array(1, f);
+ if (!array)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, array);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(chain);
+ return err;
+}
+
+static int unwrap_array(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *array;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ array = mock_array(2, f1, f2);
+ if (!array)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, array) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_signal(f1);
+ dma_fence_signal(f2);
+ dma_fence_put(array);
+ return err;
+}
+
+static int unwrap_chain(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *chain;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ chain = mock_chain(f1, f2);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, chain) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_signal(f1);
+ dma_fence_signal(f2);
+ dma_fence_put(chain);
+ return err;
+}
+
+static int unwrap_chain_array(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *array, *chain;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ array = mock_array(2, f1, f2);
+ if (!array)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, array);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, chain) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_signal(f1);
+ dma_fence_signal(f2);
+ dma_fence_put(chain);
+ return err;
+}
+
+int dma_fence_unwrap(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(unwrap_array),
+ SUBTEST(unwrap_chain),
+ SUBTEST(unwrap_chain_array),
+ };
+
+ return subtests(tests, NULL);
+}
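
The three unwrap subtests each cover one nesting: a bare array, a chain, and a chain whose link carries an array. The iterator under test flattens any of these containers down to the leaf fences; a hedged consumer-side sketch, assuming only the dma_fence_unwrap_for_each() macro from the new header:

	struct dma_fence_unwrap iter;
	struct dma_fence *f;

	dma_fence_unwrap_for_each(f, &iter, containers)
		pr_info("leaf fence %llu#%llu\n", f->context, f->seqno);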
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 394e6e1e9686..514d213261df 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -5,6 +5,7 @@
* Copyright (C) 2012 Google, Inc.
*/
+#include <linux/dma-fence-unwrap.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
@@ -172,20 +173,6 @@ static int sync_file_set_fence(struct sync_file *sync_file,
return 0;
}
-static struct dma_fence **get_fences(struct sync_file *sync_file,
- int *num_fences)
-{
- if (dma_fence_is_array(sync_file->fence)) {
- struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
- *num_fences = array->num_fences;
- return array->fences;
- }
-
- *num_fences = 1;
- return &sync_file->fence;
-}
-
static void add_fence(struct dma_fence **fences,
int *i, struct dma_fence *fence)
{
@@ -210,86 +197,97 @@ static void add_fence(struct dma_fence **fences,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
+ struct dma_fence *a_fence, *b_fence, **fences;
+ struct dma_fence_unwrap a_iter, b_iter;
+ unsigned int index, num_fences;
struct sync_file *sync_file;
- struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
- int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- a_fences = get_fences(a, &a_num_fences);
- b_fences = get_fences(b, &b_num_fences);
- if (a_num_fences > INT_MAX - b_num_fences)
- goto err;
+ num_fences = 0;
+ dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
+ ++num_fences;
+ dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
+ ++num_fences;
- num_fences = a_num_fences + b_num_fences;
+ if (num_fences > INT_MAX)
+ goto err_free_sync_file;
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences)
- goto err;
+ goto err_free_sync_file;
/*
- * Assume sync_file a and b are both ordered and have no
- * duplicates with the same context.
+ * We can't guarantee that fences in both a and b are ordered, but it is
+ * still quite likely.
*
- * If a sync_file can only be created with sync_file_merge
- * and sync_file_create, this is a reasonable assumption.
+ * So attempt to order the fences as we pass over them and merge fences
+ * with the same context.
*/
- for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
- struct dma_fence *pt_a = a_fences[i_a];
- struct dma_fence *pt_b = b_fences[i_b];
- if (pt_a->context < pt_b->context) {
- add_fence(fences, &i, pt_a);
+ index = 0;
+ for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
+ b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
+ a_fence || b_fence; ) {
+
+ if (!b_fence) {
+ add_fence(fences, &index, a_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
+
+ } else if (!a_fence) {
+ add_fence(fences, &index, b_fence);
+ b_fence = dma_fence_unwrap_next(&b_iter);
+
+ } else if (a_fence->context < b_fence->context) {
+ add_fence(fences, &index, a_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
- i_a++;
- } else if (pt_a->context > pt_b->context) {
- add_fence(fences, &i, pt_b);
+ } else if (b_fence->context < a_fence->context) {
+ add_fence(fences, &index, b_fence);
+ b_fence = dma_fence_unwrap_next(&b_iter);
+
+ } else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
+ a_fence->ops)) {
+ add_fence(fences, &index, a_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
+ b_fence = dma_fence_unwrap_next(&b_iter);
- i_b++;
} else {
- if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
- pt_a->ops))
- add_fence(fences, &i, pt_a);
- else
- add_fence(fences, &i, pt_b);
-
- i_a++;
- i_b++;
+ add_fence(fences, &index, b_fence);
+ a_fence = dma_fence_unwrap_next(&a_iter);
+ b_fence = dma_fence_unwrap_next(&b_iter);
}
}
- for (; i_a < a_num_fences; i_a++)
- add_fence(fences, &i, a_fences[i_a]);
-
- for (; i_b < b_num_fences; i_b++)
- add_fence(fences, &i, b_fences[i_b]);
-
- if (i == 0)
- fences[i++] = dma_fence_get(a_fences[0]);
+ if (index == 0)
+ fences[index++] = dma_fence_get_stub();
- if (num_fences > i) {
- nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL);
- if (!nfences)
- goto err;
+ if (num_fences > index) {
+ struct dma_fence **tmp;
- fences = nfences;
+ /* Keep going even when reducing the size failed */
+ tmp = krealloc_array(fences, index, sizeof(*fences),
+ GFP_KERNEL);
+ if (tmp)
+ fences = tmp;
}
- if (sync_file_set_fence(sync_file, fences, i) < 0)
- goto err;
+ if (sync_file_set_fence(sync_file, fences, index) < 0)
+ goto err_put_fences;
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
-err:
- while (i)
- dma_fence_put(fences[--i]);
+err_put_fences:
+ while (index)
+ dma_fence_put(fences[--index]);
kfree(fences);
+
+err_free_sync_file:
fput(sync_file->file);
return NULL;
-
}
static int sync_file_release(struct inode *inode, struct file *file)
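
The rewritten loop is a two-stream merge keyed on fence context, with matching contexts collapsing to the fence carrying the later seqno. A hedged user-space sketch of the same idea (types and helpers are illustrative, not the sync_file code):

struct item { unsigned long long context, seqno; };

static unsigned int merge(struct item *out,
			  const struct item *a, unsigned int na,
			  const struct item *b, unsigned int nb)
{
	unsigned int i = 0, j = 0, k = 0;

	while (i < na || j < nb) {
		if (j == nb || (i < na && a[i].context < b[j].context)) {
			out[k++] = a[i++];	/* a only, or a sorts first */
		} else if (i == na || b[j].context < a[i].context) {
			out[k++] = b[j++];	/* b only, or b sorts first */
		} else {			/* same context: later seqno wins */
			out[k++] = a[i].seqno > b[j].seqno ? a[i] : b[j];
			i++, j++;
		}
	}
	return k;	/* may be smaller than na + nb */
}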
@@ -398,11 +396,13 @@ static int sync_fill_fence_info(struct dma_fence *fence,
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
unsigned long arg)
{
- struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
- struct dma_fence **fences;
+ struct dma_fence_unwrap iter;
+ struct sync_file_info info;
+ unsigned int num_fences;
+ struct dma_fence *fence;
+ int ret;
__u32 size;
- int num_fences, ret, i;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
@@ -410,7 +410,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (info.flags || info.pad)
return -EINVAL;
- fences = get_fences(sync_file, &num_fences);
+ num_fences = 0;
+ dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
+ ++num_fences;
/*
* Passing num_fences = 0 means that userspace doesn't want to
@@ -433,8 +435,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < num_fences; i++) {
- int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+ num_fences = 0;
+ dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
+ int status;
+
+ status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
info.status = info.status <= 0 ? info.status : status;
}
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index cf6fed6dec77..45600acc0f45 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -49,7 +49,7 @@ struct scmi_msg_resp_clock_describe_rates {
struct {
__le32 value_low;
__le32 value_high;
- } rate[0];
+ } rate[];
#define RATE_TO_U64(X) \
({ \
typeof(X) x = (X); \
@@ -210,7 +210,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
- sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+ sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+ rate_cmp_func, NULL);
}
clk->rate_discrete = rate_discrete;
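
Two fixes land here: the rate[0] placeholder becomes a C99 flexible array member, and the sort now runs over clk->list.rates instead of the rate cursor, which is advanced as entries are filled in and so no longer points at the start of the list. A hedged sketch of the flexible-array idiom (struct and names are illustrative):

	struct msg {
		unsigned int num_rates;
		struct { unsigned int lo, hi; } rate[];	/* was rate[0] */
	};

	unsigned int n = 8;	/* number of rate entries expected */
	struct msg *m = kmalloc(struct_size(m, rate, n), GFP_KERNEL);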
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 46118300a4d1..e17c6568344d 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -679,7 +679,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
- scmi_clear_channel(info, cinfo);
+ if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
+ scmi_clear_channel(info, cinfo);
return;
}
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 734f1eeee161..8302a2b4aeeb 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -405,8 +405,8 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
return 0;
}
-static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
- struct scmi_xfer *xfer)
+static struct scmi_shared_mem __iomem *
+get_channel_shm(struct scmi_optee_channel *chan, struct scmi_xfer *xfer)
{
if (!chan)
return NULL;
@@ -419,7 +419,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
int ret;
mutex_lock(&channel->mu);
@@ -436,7 +436,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
shmem_fetch_response(shmem, xfer);
}
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 8e5d87984a48..41c31b10ae84 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- bitmap_copy(bits, chip->value_map, gc->ngpio);
+ bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
mutex_unlock(&chip->lock);
return 0;
@@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- bitmap_copy(chip->value_map, bits, gc->ngpio);
+ bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
mutex_unlock(&chip->lock);
}
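
bitmap_copy() clobbered every simulated line, including ones the caller never addressed; bitmap_replace() folds new bits in only where the mask is set, i.e. dst = (old & ~mask) | (new & mask). A minimal sketch of the difference (values are illustrative):

	unsigned long map[1]  = { 0x0f };	/* current line values   */
	unsigned long bits[1] = { 0xa0 };	/* requested new values  */
	unsigned long mask[1] = { 0xf0 };	/* lines being addressed */

	bitmap_replace(map, map, bits, mask, 8);
	/* map[0] == 0xaf: unmasked low nibble kept, masked high nibble replaced */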
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index a5495ad31c9c..c2523ac26fac 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
* controller does not have GPIO chip registered at the moment. This is to
* support probe deferral.
*/
-static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
{
struct gpio_chip *chip;
acpi_handle handle;
@@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
* as it is intended for use outside of the GPIO layer (in a similar fashion to
* gpiod_get_index() for example) it also holds a reference to the GPIO device.
*/
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label)
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label)
{
struct gpio_desc *gpio;
int ret;
@@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
{
const char *controller, *pin_str;
- int len, pin;
+ unsigned int pin;
char *endp;
+ int len;
controller = ignore_wake;
while (controller) {
@@ -354,13 +355,13 @@ err:
static bool acpi_gpio_irq_is_wake(struct device *parent,
struct acpi_resource_gpio *agpio)
{
- int pin = agpio->pin_table[0];
+ unsigned int pin = agpio->pin_table[0];
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
- dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+ dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
struct acpi_gpio_event *event;
irq_handler_t handler = NULL;
struct gpio_desc *desc;
- int ret, pin, irq;
+ unsigned int pin;
+ int ret, irq;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -387,8 +389,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
pin = agpio->pin_table[0];
if (pin <= 255) {
- char ev_name[5];
- sprintf(ev_name, "_%c%02hhX",
+ char ev_name[8];
+ sprintf(ev_name, "_%c%02X",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
@@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
length = min_t(u16, agpio->pin_table_length, pin_index + bits);
for (i = pin_index; i < length; ++i) {
- int pin = agpio->pin_table[i];
+ unsigned int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
bool found;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e59884cc12a7..085348e08986 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1404,6 +1404,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+ /*
+ * Avoid race condition with other code, which tries to lookup
+ * an IRQ before the irqchip has been properly registered,
+ * i.e. while gpiochip is still being brought up.
+ */
+ if (!gc->irq.initialized)
+ return -EPROBE_DEFER;
+#endif
+
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -1593,6 +1603,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
acpi_gpiochip_request_interrupts(gc);
+ /*
+ * Use barrier() to keep the compiler from reordering the store to
+ * gc->irq.initialized before the GPIO chip irq members above have
+ * been initialized.
+ */
+ barrier();
+
+ gc->irq.initialized = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 5b393622f592..a0f0a17e224f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -119,6 +119,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_USBC 0x17
/* deleted */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3987ecb24ef4..49f734137f15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5733,7 +5733,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU)
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
return;
#endif
if (adev->gmc.xgmi.connected_to_cpu)
@@ -5749,7 +5749,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU)
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
return;
#endif
if (adev->gmc.xgmi.connected_to_cpu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index bb1c025d9001..29e9419a914b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy,
* Maximum number of processes that HWS can schedule concurrently. The maximum is the
* number of VMIDs assigned to the HWS, which is also the default.
*/
-int hws_max_conc_proc = 8;
+int hws_max_conc_proc = -1;
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
@@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else
adev->in_s3 = true;
- r = amdgpu_device_suspend(drm_dev, true);
- if (r)
- return r;
+ return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
if (!adev->in_s0ix)
- r = amdgpu_asic_reset(adev);
- return r;
+ return amdgpu_asic_reset(adev);
+
+ return 0;
}
static int amdgpu_pmops_resume(struct device *dev)
@@ -2575,6 +2580,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.prepare = amdgpu_pmops_prepare,
.complete = amdgpu_pmops_complete,
.suspend = amdgpu_pmops_suspend,
+ .suspend_noirq = amdgpu_pmops_suspend_noirq,
.resume = amdgpu_pmops_resume,
.freeze = amdgpu_pmops_freeze,
.thaw = amdgpu_pmops_thaw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 8fe939976224..28a736c507bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
- while (queue_bit-- >= 0) {
+ while (--queue_bit >= 0) {
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
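
With the post-decrement form the loop body runs one final time after queue_bit has reached zero, testing bit -1 of the queue bitmap. A minimal sketch of the off-by-one (illustrative, not the amdgpu code):

#include <stdio.h>

int main(void)
{
	int i = 3;

	while (i-- >= 0)
		printf("post: i=%d\n", i);	/* 2, 1, 0, -1 <- one pass too many */

	i = 3;
	while (--i >= 0)
		printf("pre:  i=%d\n", i);	/* 2, 1, 0 */
	return 0;
}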
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index ca2cfb65f976..a66a0881a934 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -561,9 +561,15 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
/*
* noretry = 0 will cause kfd page fault tests fail
* for some ASICs, so set default to 1 for these ASICs.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25731719c627..940752488330 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1284,6 +1284,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
*/
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct dma_fence *fence = NULL;
struct amdgpu_bo *abo;
int r;
@@ -1303,7 +1304,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
if (bo->resource->mem_type != TTM_PL_VRAM ||
- !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+ !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+ adev->in_suspend || adev->shutdown)
return;
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 5320bb0883d8..317d80209e95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -300,8 +300,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned int ring_size, struct amdgpu_irq_src *irq_src,
- unsigned int irq_type, unsigned int prio,
+ unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int hw_prio,
atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f99093f2ebc7..a0ee828a4a97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -52,7 +52,7 @@
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index e2fde88aaf5e..f06fb7f882e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -159,6 +159,7 @@
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
+#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -279,6 +280,11 @@ struct amdgpu_fw_shared_fw_logging {
uint32_t size;
};
+struct amdgpu_fw_shared_smu_interface_info {
+ uint8_t smu_interface_type;
+ uint8_t padding[3];
+};
+
struct amdgpu_fw_shared {
uint32_t present_flag_0;
uint8_t pad[44];
@@ -287,6 +293,7 @@ struct amdgpu_fw_shared {
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
struct amdgpu_fw_shared_fw_logging fw_log;
+ struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};
struct amdgpu_vcn_fwlog {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f4c6accd3226..9426e252d8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3293,7 +3293,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3429,7 +3429,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000042),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3454,7 +3454,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_7[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000041),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -7689,6 +7689,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
preempt_disable();
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 46d4bf27ebbb..b8cfcc6b1125 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+ /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 3c1d440824a7..5228421b0f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -814,7 +814,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 344d819b4c1b..979da6f510e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU &&
- adev->gmc.real_vram_size > adev->gmc.aper_size) {
+ if ((adev->flags & AMD_IS_APU) &&
+ adev->gmc.real_vram_size > adev->gmc.aper_size &&
+ !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ca9841d5669f..1932a3e4af7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 431742eb7811..6009fbfdcc19 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1456,7 +1456,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
*/
/* check whether both host-gpu and gpu-gpu xgmi links exist */
- if ((adev->flags & AMD_IS_APU) ||
+ if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
(adev->gmc.xgmi.supported &&
adev->gmc.xgmi.connected_to_cpu)) {
adev->gmc.aper_base =
@@ -1721,7 +1721,7 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
- amdgpu_bo_unref(&adev->gmc.pdb0_bo);
+ amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
amdgpu_bo_fini(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index dff54190b96c..f0fbcda76f5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
+#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
@@ -1900,6 +1901,75 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.set_powergating_state = vcn_v1_0_set_powergating_state,
};
+/*
+ * Due to a hardware issue, VCN cannot handle a GTT TMZ buffer on
+ * CHIP_RAVEN series ASICs. As a workaround, move such a buffer to
+ * the VRAM domain before command submission.
+ */
+static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ uint64_t addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ int r;
+
+ addr &= AMDGPU_GMC_HOLE_MASK;
+ if (addr & 0x7) {
+ DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
+ if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+ return -EINVAL;
+
+ bo = mapping->bo_va->base.bo;
+ if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
+ return 0;
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ uint32_t msg_lo = 0, msg_hi = 0;
+ int i, r;
+
+ if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_ib_get_value(ib, i);
+ uint32_t val = amdgpu_ib_get_value(ib, i + 1);
+
+ if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ msg_lo = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ msg_hi = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+ r = vcn_v1_0_validate_bo(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
@@ -1910,6 +1980,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,
.set_wptr = vcn_v1_0_dec_ring_set_wptr,
+ .patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
.emit_frame_size =
6 + 6 + /* hdp invalidate / flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index c87263ed20ec..cb5f0a12333f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -219,6 +219,11 @@ static int vcn_v3_0_sw_init(void *handle)
cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
+ fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
+ fw_shared->smu_interface_info.smu_interface_type = 2;
+ else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
+ fw_shared->smu_interface_info.smu_interface_type = 1;
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -575,8 +580,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
- WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
- UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
@@ -1480,8 +1485,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
+ vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
+
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 339e12c94cff..62aa6c9d5123 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -483,15 +483,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
}
/* Verify module parameters regarding mapped process number*/
- if ((hws_max_conc_proc < 0)
- || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
- dev_err(kfd_device,
- "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
- hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
- kfd->vm_info.vmid_num_kfd);
+ if (hws_max_conc_proc >= 0)
+ kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+ else
kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
- } else
- kfd->max_proc_per_quantum = hws_max_conc_proc;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
@@ -536,7 +531,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_doorbell_error;
}
- kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
+ if (amdgpu_use_xgmi_p2p)
+ kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
kfd->noretry = kfd->adev->gmc.noretry;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index deecccebe5b6..64f4a51cc880 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -749,6 +749,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
event_waiters = kmalloc_array(num_events,
sizeof(struct kfd_event_waiter),
GFP_KERNEL);
+ if (!event_waiters)
+ return NULL;
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
init_wait(&event_waiters[i].wait);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index e4beebb1c80a..f2e1d506ba21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -247,15 +247,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
return ret;
}
- ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
- O_RDWR);
- if (ret < 0) {
- kfifo_free(&client->fifo);
- kfree(client);
- return ret;
- }
- *fd = ret;
-
init_waitqueue_head(&client->wait_queue);
spin_lock_init(&client->lock);
client->events = 0;
@@ -265,5 +256,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
list_add_rcu(&client->list, &dev->smi_clients);
spin_unlock(&dev->smi_lock);
+ ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
+ O_RDWR);
+ if (ret < 0) {
+ spin_lock(&dev->smi_lock);
+ list_del_rcu(&client->list);
+ spin_unlock(&dev->smi_lock);
+
+ synchronize_rcu();
+
+ kfifo_free(&client->fifo);
+ kfree(client);
+ return ret;
+ }
+ *fd = ret;
+
return 0;
}
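
The reorder follows the usual rule for anon_inode_getfd(): once the fd exists, userspace can invoke the fops immediately, so the fd must be the last failure point and every field those fops touch must already be live. A hedged outline of the pattern (helper names are hypothetical):

	init_client_state(client);	/* kfifo, waitqueue, spinlock, ... */
	publish_client(client);		/* list_add_rcu() under the lock */

	fd = anon_inode_getfd(name, &fops, client, O_RDWR);
	if (fd < 0) {
		unpublish_client(client);	/* list_del_rcu() + synchronize_rcu() */
		free_client(client);
		return fd;
	}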
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b30656959fd8..62139ff35476 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2714,7 +2714,8 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->mst_port)
+ if (aconnector->dc_link &&
+ aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -3972,7 +3973,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
max - min);
}
-static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
@@ -4003,7 +4004,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
- return rc ? 0 : 1;
+ if (rc)
+ dm->actual_brightness[bl_idx] = user_brightness;
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
@@ -9947,7 +9949,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* restore the backlight level */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
- (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
+ (dm->actual_brightness[i] != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6a908d736d6a..7e44b0429448 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -540,6 +540,12 @@ struct amdgpu_display_manager {
* cached backlight values.
*/
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
+ /**
+ * @actual_brightness:
+ *
+ * last successfully applied backlight values.
+ */
+ u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
};
enum dsc_clock_force_state {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index dfba6138f538..26feefbb8990 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
- if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+ if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
clk_mgr_dce->dprefclk_ss_percentage = 0;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index edda572dc570..8be4c1970628 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -436,57 +436,84 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
struct integrated_info *bios_info,
const DpmClocks_315_t *clock_table)
{
- int i, j;
+ int i;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
- uint32_t max_dispclk = 0, max_dppclk = 0;
-
- j = -1;
-
- ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
- /* Find lowest DPM, FCLK is filled in reverse order*/
-
- for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
- if (clock_table->DfPstateTable[i].FClk != 0) {
- j = i;
- break;
+ uint32_t max_dispclk, max_dppclk, max_pstate = 0, max_socclk, max_fclk = 0, min_pstate = 0;
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ max_socclk = find_max_clk_value(clock_table->SocClocks, clock_table->NumSocClkLevelsEnabled);
+
+ /* Find highest fclk pstate */
+ for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+ if (clock_table->DfPstateTable[i].FClk > max_fclk) {
+ max_fclk = clock_table->DfPstateTable[i].FClk;
+ max_pstate = i;
}
}
- if (j == -1) {
- /* clock table is all 0s, just use our own hardcode */
- ASSERT(0);
- return;
- }
-
- bw_params->clk_table.num_entries = j + 1;
-
- /* dispclk and dppclk can be max at any voltage, same number of levels for both */
- if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
- clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
- max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
- max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
- } else {
- ASSERT(0);
- }
+ /* For 315 we want to base the clock table on dcfclk; at least one entry is needed regardless of the PMFW table */
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ int j;
+ uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
- for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
- int temp;
+ for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+ if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]
+ && clock_table->DfPstateTable[j].FClk < min_fclk) {
+ min_fclk = clock_table->DfPstateTable[j].FClk;
+ min_pstate = j;
+ }
+ }
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
- bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
+ bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = 1;
- temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
- if (temp)
- bw_params->clk_table.entries[i].dcfclk_mhz = temp;
- temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
- if (temp)
- bw_params->clk_table.entries[i].socclk_mhz = temp;
+ }
+
+ /* Make sure to include at least one entry and the highest pstate */
+ if (max_pstate != min_pstate) {
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(
+ clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[max_pstate].Voltage);
+ bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(
+ clock_table, clock_table->SocClocks, clock_table->DfPstateTable[max_pstate].Voltage);
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = 1;
+ i++;
}
+ bw_params->clk_table.num_entries = i;
+
+ /* Include highest socclk */
+ if (bw_params->clk_table.entries[i-1].socclk_mhz < max_socclk)
+ bw_params->clk_table.entries[i-1].socclk_mhz = max_socclk;
+ /* Set any zero clocks to the max default setting. Not an issue
+ * for power, since we aren't doing any switching in that case anyway.
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ }
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 880ffea2afc6..2600313fea57 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -80,8 +80,8 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ
-#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency
-#define VBIOSSMC_MSG_GetFclkFrequency 0x0A ///< Get FCLK frequency, return frequemcy in MHZ
+#define VBIOSSMC_MSG_GetDtbclkFreq 0x09 ///< Get display DTB clock frequency in MHZ
+#define VBIOSSMC_MSG_SetDtbClk 0x0A ///< Set (enable/disable) the DTB clock, returns frequency in MHZ
#define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW turn off TMDP 48MHz refclk during display off to save power
#define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event
@@ -324,15 +324,26 @@ int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
return (dprefclk_get_mhz * 1000);
}
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
{
int fclk_get_mhz = -1;
if (clk_mgr->smu_present) {
fclk_get_mhz = dcn315_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_GetFclkFrequency,
+ VBIOSSMC_MSG_GetDtbclkFreq,
0);
}
return (fclk_get_mhz * 1000);
}
+
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn315_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDtbClk,
+ enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
index 66fa42f8dd18..5aa3275ac7d8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
@@ -37,6 +37,7 @@
#define NUM_SOC_VOLTAGE_LEVELS 4
#define NUM_DF_PSTATE_LEVELS 4
+
typedef struct {
uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
@@ -124,5 +125,6 @@ void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr);
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr);
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
#endif /* DAL_DC_315_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 702d00ce7da4..3121dd2d2a91 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
- clk_mgr->base.dccg->ref_dtbclk_khz =
- dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+ /*clk_mgr->base.dccg->ref_dtbclk_khz =
+ dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
clk_mgr->base.base.bw_params = &dcn316_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f6e19efea756..c436db416708 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2389,6 +2389,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
+ if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
+ su_flags->bits.crtc_timing_adjust = 1;
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2650,6 +2652,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_infopacket)
stream->vrr_infopacket = *update->vrr_infopacket;
+ if (update->crtc_timing_adjust)
+ stream->adjust = *update->crtc_timing_adjust;
+
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -4051,3 +4056,17 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
+/*
+ * dc_extended_blank_supported: Decide whether extended blank is supported
+ *
+ * Extended blank is a freesync optimization feature to be enabled in the future.
+ * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
+ *
+ * @param [in] dc: Current DC state
+ * @return: Indicate whether extended blank is supported (true or false)
+ */
+bool dc_extended_blank_supported(struct dc *dc)
+{
+ return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
+ && dc->caps.zstate_support && dc->caps.is_apu;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index cb87dd643180..bbaa5abdf888 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -983,8 +983,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
destrictive = false;
}
}
- } else if (dc_is_hdmi_signal(link->local_sink->sink_signal))
- destrictive = true;
+ }
return destrictive;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 351081f574cb..22dabe596dfc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -5216,6 +5216,62 @@ static void retrieve_cable_id(struct dc_link *link)
&link->dpcd_caps.cable_id, &usbc_cable_id);
}
+/* DPRX may take some time to respond to AUX messages after HPD is asserted.
+ * If an AUX read fails, try to wake the unresponsive DPRX by toggling DPCD SET_POWER (0x600).
+ */
+static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint8_t dpcd_data = 0;
+ uint64_t start_ts = 0;
+ uint64_t current_ts = 0;
+ uint64_t time_taken_ms = 0;
+ enum dc_connection_type type = dc_connection_none;
+
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ if (status != DC_OK) {
+ DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
+ __func__,
+ timeout_ms);
+ start_ts = dm_get_timestamp(link->ctx);
+
+ do {
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none)
+ break;
+
+ dpcd_data = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ dpcd_data = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_data,
+ sizeof(dpcd_data));
+
+ current_ts = dm_get_timestamp(link->ctx);
+ time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000);
+ } while (status != DC_OK && time_taken_ms < timeout_ms);
+
+ DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s",
+ __func__,
+ (status == DC_OK) ? "succeeded" : "failed",
+ time_taken_ms,
+ (type == dc_connection_none) ? ". Unplugged." : ".");
+ }
+
+ return status;
+}
+
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -5251,6 +5307,15 @@ static bool retrieve_link_cap(struct dc_link *link)
dc_link_aux_try_to_configure_timeout(link->ddc,
LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ /* Try to ensure the AUX channel is active before proceeding. */
+ if (link->dc->debug.aux_wake_wa.bits.enable_wa) {
+ uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms;
+
+ if (link->dc->debug.aux_wake_wa.bits.use_default_timeout)
+ timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS;
+ status = wa_try_to_wake_dprx(link, timeout_ms);
+ }
+
is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7af153434e9e..d251c3f3a714 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1685,8 +1685,8 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
- // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
- if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+ /* Compare audio info */
+ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4ffab7bb1098..9e79f60e6129 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -188,6 +188,7 @@ struct dc_caps {
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
bool dmcub_support;
+ bool zstate_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -339,6 +340,7 @@ struct dc_config {
bool is_asymmetric_memory;
bool is_single_rank_dimm;
bool use_pipe_ctx_sync_logic;
+ bool ignore_dpref_ss;
};
enum visual_confirm {
@@ -525,6 +527,22 @@ union dpia_debug_options {
uint32_t raw;
};
+/* AUX wake workaround options
+ * bit 0: enable/disable the workaround
+ * bit 1: use the default timeout LINK_AUX_WAKE_TIMEOUT_MS
+ * bits 15-2: reserved
+ * bits 31-16: timeout in ms
+ */
+union aux_wake_wa_options {
+ struct {
+ uint32_t enable_wa : 1;
+ uint32_t use_default_timeout : 1;
+ uint32_t rsvd: 14;
+ uint32_t timeout_ms : 16;
+ } bits;
+ uint32_t raw;
+};
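As a quick illustration of the layout documented above, a sketch (not part of the patch) of encoding a custom timeout through the bits/raw union; the raw value assumes the usual LSB-first bitfield allocation on little-endian builds:

    union aux_wake_wa_options wa = { .raw = 0 };

    wa.bits.enable_wa = 1;           /* bit 0: enable the workaround */
    wa.bits.use_default_timeout = 0; /* bit 1: honor the encoded timeout */
    wa.bits.timeout_ms = 25;         /* bits 31-16: 25 ms */
    /* wa.raw == 0x00190001 under LSB-first bitfield allocation */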
+
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
@@ -703,14 +721,15 @@ struct dc_debug_options {
bool enable_driver_sequence_debug;
enum det_size crb_alloc_policy;
int crb_alloc_policy_min_disp_count;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
bool enable_z9_disable_interface;
bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
#endif
bool apply_vendor_specific_lttpr_wa;
- bool ignore_dpref_ss;
+ bool extended_blank_optimization;
+ union aux_wake_wa_options aux_wake_wa;
uint8_t psr_power_use_phy_fsm;
};
@@ -1369,6 +1388,8 @@ struct dc_sink_init_data {
bool converter_disable_audio;
};
+bool dc_extended_blank_supported(struct dc *dc);
+
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
/* Newer interfaces */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 99a750f561f8..c4168c11257c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -131,6 +131,7 @@ union stream_update_flags {
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
+ uint32_t crtc_timing_adjust : 1;
} bits;
uint32_t raw;
@@ -289,6 +290,7 @@ struct dc_stream_update {
struct dc_3dlut *lut3d_func;
struct test_pattern *pending_test_pattern;
+ struct dc_crtc_timing_adjust *crtc_timing_adjust;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c3e141c19a77..83fbea2df410 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1497,16 +1497,12 @@ void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
- /* Power gate DSCs */
- if (!is_optimized_init_done) {
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
- }
-
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -1559,8 +1555,6 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -2056,7 +2050,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
{
struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
- struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
+ struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
uint64_t modulo[MAX_PIPES];
unsigned int pclk;
@@ -2067,6 +2061,10 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
uint32_t dp_ref_clk_100hz =
dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
+ hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
+ if (!hw_crtc_timing)
+ return master;
+
if (dc->config.vblank_alignment_dto_params &&
dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
embedded_h_total =
@@ -2130,6 +2128,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
}
}
+
+ kfree(hw_crtc_timing);
return master;
}
@@ -2522,14 +2522,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ab910deed481..b627c41713cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1857,6 +1857,7 @@ void dcn20_optimize_bandwidth(
struct dc_state *context)
{
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int i;
/* program dchubbub watermarks */
hubbub->funcs->program_watermarks(hubbub,
@@ -1873,6 +1874,17 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
+ if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
+ && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+ && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+ pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+ pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+ }
+ }
/* increase compbuf size */
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
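The three-part condition above only programs extended blank when FreeSync has pinned the stream to a fixed, stretched frame: v_total_min == v_total_max means the refresh rate is locked, and a v_total_max above the nominal timing.v_total means the locked frame carries extra blank lines. A sketch of the predicate in isolation (adjust and timing stand in for pipe_ctx->stream->adjust and pipe_ctx->stream->timing):

    bool fixed_refresh   = adjust->v_total_min == adjust->v_total_max;
    bool stretched_frame = adjust->v_total_max > timing->v_total;

    /* e.g. nominal v_total 1125 pinned to 2250: half the frame is
     * guaranteed extra blank time usable for z-states. */
    bool can_extend_blank = fixed_refresh && stretched_frame;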
@@ -2332,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d473708d5399..7802d603f796 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1976,7 +1976,6 @@ int dcn20_validate_apply_pipe_split_flags(
/* If a split is needed for ODM but a 4-way split exists already */
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
- ASSERT(0); /* NOT expected yet */
merge[i] = true; /* 4 -> 2 ODM */
} else if (split[i] == 0 && pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 612732656772..3fe4bfbb98a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -644,7 +644,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index ed0a0e5fd805..f61ec8763844 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -547,6 +547,9 @@ void dcn30_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -624,8 +627,6 @@ void dcn30_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 3e6d6ebd199e..51c5f3685470 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -1042,5 +1042,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31,
hubbub31->detile_buf_size = det_size_kb * 1024;
hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
+
+ hubbub31->debug_test_index_pstate = 0x6;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
index 53b792b997b7..8ae6117953ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -54,6 +54,13 @@ void hubp31_soft_reset(struct hubp *hubp, bool reset)
REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset);
}
+void hubp31_program_extended_blank(struct hubp *hubp, unsigned int min_dst_y_next_start_optimized)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_SET(BLANK_OFFSET_1, 0, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized);
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -80,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
.hubp_in_blank = hubp1_in_blank,
+ .program_extended_blank = hubp31_program_extended_blank,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 4be228680909..631d8ac63aa4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -199,6 +199,9 @@ void dcn31_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -248,8 +251,6 @@ void dcn31_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -338,20 +339,20 @@ void dcn31_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
force_on = true; /* disable power gating */
if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
@@ -359,11 +360,11 @@ void dcn31_enable_power_gating_plane(
/* DCS0/1/2/3/4/5 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 8afe2130d7c5..e05527a3a8ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -124,7 +124,6 @@ static bool optc31_enable_crtc(struct timing_generator *optc)
static bool optc31_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
/* disable otg request until end of the first line
* in the vertical blank region
*/
@@ -138,6 +137,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ optc1_clear_optc_underflow(optc);
return true;
}
@@ -158,6 +158,9 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
OTG_BUSY, 0,
1, 100000);
+ /* clear any false underflow status */
+ optc1_clear_optc_underflow(optc);
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 89b7b6b7254a..63934ecf6be8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2032,7 +2032,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ DC_FP_END();
// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
@@ -2232,6 +2234,7 @@ static bool dcn31_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
+ dc->caps.zstate_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 2f6122153bdb..f93af45aeab4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -722,8 +722,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
+ unsigned int optimized_min_dst_y_next_start_us;
plane_count = 0;
+ optimized_min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@@ -744,11 +746,22 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
+ if (dc_extended_blank_supported(dc)) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
+ && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
+ && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
+ optimized_min_dst_y_next_start_us =
+ context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
+ break;
+ }
+ }
+ }
/* zstate is only supported on PWRSEQ0 and with fewer than 2 planes */
if (link->link_index != 0 || stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -786,8 +799,6 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
-
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
@@ -843,6 +854,7 @@ void dcn20_calculate_dlg_params(
&pipes[pipe_idx].pipe);
pipe_idx++;
}
+ context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
}
static void swizzle_to_dml_params(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index e0fecf127bd5..53d760e169e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -1055,6 +1055,7 @@ static void dml_rq_dlg_get_dlg_params(
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
+ int blank_lines;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1080,6 +1081,18 @@ static void dml_rq_dlg_get_dlg_params(
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+ blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
+ if (blank_lines < 0)
+ blank_lines = 0;
+ if (blank_lines != 0) {
+ disp_dlg_regs->optimized_min_dst_y_next_start_us =
+ ((unsigned int) blank_lines * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+ disp_dlg_regs->optimized_min_dst_y_next_start =
+ (unsigned int)(((double) (dlg_vblank_start + blank_lines)) * dml_pow(2, 2));
+ } else {
+ // use unoptimized value
+ disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+ }
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
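A worked instance of the arithmetic above (illustrative numbers only): pixel_rate_mhz is pixels per microsecond, so blank lines times pixels per line, divided by that rate, yields microseconds of extra blank time:

    /* 1500 blank lines * 2200 px/line = 3,300,000 px;
     * at 594 MHz (594 px/us) that is 3,300,000 / 594 ~= 5555 us,
     * enough to clear the 5000 us z-state threshold used in the
     * dcn20_fpu.c hunk of this diff. */
    unsigned int blank_lines = 1500, hactive = 2200, rate_mhz = 594;
    unsigned int blank_us = (blank_lines * hactive) / rate_mhz; /* 5555 */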
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 59f0a61c33cf..2df660cd8801 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -446,6 +446,8 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
+ unsigned int optimized_min_dst_y_next_start;
+ unsigned int optimized_min_dst_y_next_start_us;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index efc2339f1fa0..4385d19bc489 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -864,11 +864,11 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
+ is_dsc_possible = (min_slices_h <= max_slices_h);
+
if (pic_width % min_slices_h != 0)
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
if (min_slices_h == 0 && max_slices_h == 0)
is_dsc_possible = false;
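The reordering matters because the divisibility check can zero min_slices_h before it is compared. A trace with hypothetical values shows the old ordering passing a degenerate slice count:

    /* Old order, illustrative values:
     *   min_slices_h = 4, max_slices_h = 8, pic_width = 1366
     *   1366 % 4 != 0              -> min_slices_h = 0
     *   is_dsc_possible = (0 <= 8) -> true, with no usable slice count
     * New order compares (4 <= 8) before min_slices_h can be zeroed.
     */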
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index ab9939db8cea..44f167d2584f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,6 +33,7 @@
#define MAX_MTP_SLOT_COUNT 64
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#define TRAINING_AUX_RD_INTERVAL 100 //us
+#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
struct dc_link;
struct dc_stream_state;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index e45b7993c5c5..ad69d78c4ac3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -195,6 +195,9 @@ struct hubp_funcs {
void (*hubp_set_flip_int)(struct hubp *hubp);
+ void (*program_extended_blank)(struct hubp *hubp,
+ unsigned int min_dst_y_next_start_optimized);
+
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
};
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index b691aa45e84f..79bc207415bc 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -100,7 +100,8 @@ enum vsc_packet_revision {
//PB7 = MD0
#define MASK_VTEM_MD0__VRR_EN 0x01
#define MASK_VTEM_MD0__M_CONST 0x02
-#define MASK_VTEM_MD0__RESERVED2 0x0C
+#define MASK_VTEM_MD0__QMS_EN 0x04
+#define MASK_VTEM_MD0__RESERVED2 0x08
#define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
//MD1
@@ -109,7 +110,7 @@ enum vsc_packet_revision {
//MD2
#define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
#define MASK_VTEM_MD2__RB 0x04
-#define MASK_VTEM_MD2__RESERVED3 0xF8
+#define MASK_VTEM_MD2__NEXT_TFR 0xF8
//MD3
#define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 89fbee568be4..5504d81c77b7 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -173,6 +173,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
+ /* Don't use BACO for reset in S3.
+ * This is a workaround for some platforms
+ * where entering BACO during suspend
+ * seems to cause reboots or hangs.
+ * This might be related to the fact that BACO controls
+ * power to the whole GPU, including devices like audio and USB.
+ * Powering everything down/up may adversely affect these other
+ * devices. Needs more investigation.
+ */
+ if (adev->in_s3)
+ return false;
mutex_lock(&adev->pm.mutex);
@@ -500,6 +511,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_pages_num(smu, size);
mutex_unlock(&adev->pm.mutex);
@@ -512,6 +526,9 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_channel_flag(smu, size);
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 9ddd8491ff00..ede71de2343d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
- data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
+ (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
+ data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
+ data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
+ data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
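The new divide-by-100 reads as a unit fix: the vdd_dep_on_* table entries appear to be stored in 10 kHz units while the PPSMC_MSG_Set*ByFreq messages expect MHz. That reading is an assumption inferred from the hunk, matching the pre-existing min_mclk path. In those units:

    /* Assumed conversion: 10 kHz units -> MHz */
    uint32_t clk_10khz = 60000;           /* 600 MHz expressed in 10 kHz */
    uint32_t clk_mhz   = clk_10khz / 100; /* 600, what the SMC expects */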
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 7bfac029e513..b81711c4ff33 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -991,7 +991,7 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
return -EINVAL;
}
- if (sclk_min && sclk_max) {
+ if (sclk_min && sclk_max && smu_v13_0_5_clk_dpm_is_enabled(smu, SMU_SCLK)) {
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_SCLK,
sclk_min,
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 026e4e29a0f3..f4df344509a8 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -214,6 +214,29 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
}
EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
+static int find_panel_or_bridge(struct device_node *node,
+ struct drm_panel **panel,
+ struct drm_bridge **bridge)
+{
+ if (panel) {
+ *panel = of_drm_find_panel(node);
+ if (!IS_ERR(*panel))
+ return 0;
+
+ /* Clear the panel pointer in case of error. */
+ *panel = NULL;
+ }
+
+ /* No panel found yet, check for a bridge next. */
+ if (bridge) {
+ *bridge = of_drm_find_bridge(node);
+ if (*bridge)
+ return 0;
+ }
+
+ return -EPROBE_DEFER;
+}
+
/**
* drm_of_find_panel_or_bridge - return connected panel or bridge device
* @np: device tree node containing encoder output ports
@@ -236,66 +259,44 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_panel **panel,
struct drm_bridge **bridge)
{
- int ret = -EPROBE_DEFER;
- struct device_node *remote;
+ struct device_node *node;
+ int ret;
if (!panel && !bridge)
return -EINVAL;
+
if (panel)
*panel = NULL;
-
- /**
- * Devices can also be child nodes when we also control that device
- * through the upstream device (ie, MIPI-DCS for a MIPI-DSI device).
- *
- * Lookup for a child node of the given parent that isn't either port
- * or ports.
- */
- for_each_available_child_of_node(np, remote) {
- if (of_node_name_eq(remote, "port") ||
- of_node_name_eq(remote, "ports"))
- continue;
-
- goto of_find_panel_or_bridge;
+ if (bridge)
+ *bridge = NULL;
+
+ /* Check for a graph on the device node first. */
+ if (of_graph_is_present(np)) {
+ node = of_graph_get_remote_node(np, port, endpoint);
+ if (node) {
+ ret = find_panel_or_bridge(node, panel, bridge);
+ of_node_put(node);
+
+ if (!ret)
+ return 0;
+ }
}
- /*
- * of_graph_get_remote_node() produces a noisy error message if port
- * node isn't found and the absence of the port is a legit case here,
- * so at first we silently check whether graph presents in the
- * device-tree node.
- */
- if (!of_graph_is_present(np))
- return -ENODEV;
-
- remote = of_graph_get_remote_node(np, port, endpoint);
-
-of_find_panel_or_bridge:
- if (!remote)
- return -ENODEV;
+ /* Otherwise check for any child node other than port/ports. */
+ for_each_available_child_of_node(np, node) {
+ if (of_node_name_eq(node, "port") ||
+ of_node_name_eq(node, "ports"))
+ continue;
- if (panel) {
- *panel = of_drm_find_panel(remote);
- if (!IS_ERR(*panel))
- ret = 0;
- else
- *panel = NULL;
- }
-
- /* No panel found yet, check for a bridge next. */
- if (bridge) {
- if (ret) {
- *bridge = of_drm_find_bridge(remote);
- if (*bridge)
- ret = 0;
- } else {
- *bridge = NULL;
- }
+ ret = find_panel_or_bridge(node, panel, bridge);
+ of_node_put(node);
+ /* Stop at the first found occurrence. */
+ if (!ret)
+ return 0;
}
- of_node_put(remote);
- return ret;
+ return -EPROBE_DEFER;
}
EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
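For reference, a minimal caller sketch against the rewritten helper (dev and the surrounding probe function are hypothetical). The contract is unchanged: exactly one of *panel/*bridge is set on success, and -EPROBE_DEFER is returned until a provider binds:

    struct drm_panel *panel;
    struct drm_bridge *bridge;
    int ret;

    /* Look up whatever is connected on port 0, endpoint 0. */
    ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, &bridge);
    if (ret)
        return ret; /* typically -EPROBE_DEFER: retry once the provider binds */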
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3ea243d414d..0c5c43852e24 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* mmap ioctl is disallowed for all discrete platforms,
* and for all platforms with GRAPHICS_VER > 12.
*/
- if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
return -EOPNOTSUPP;
if (args->flags & ~(I915_MMAP_WC))
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 87428fb23d9f..a2277a0d6d06 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
struct imx_hdmi *hdmi;
+ int ret;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
@@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
hdmi->bridge = of_drm_find_bridge(np);
if (!hdmi->bridge) {
dev_err(hdmi->dev, "Unable to find bridge\n");
+ dw_hdmi_remove(hdmi->hdmi);
return -ENODEV;
}
- return component_add(&pdev->dev, &dw_hdmi_imx_ops);
+ ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
+ if (ret)
+ dw_hdmi_remove(hdmi->hdmi);
+
+ return ret;
}
static int dw_hdmi_imx_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index e5078d03020d..fb0e951248f6 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev,
edidp = of_get_property(child, "edid", &edid_len);
if (edidp) {
channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
+ if (!channel->edid)
+ return -ENOMEM;
} else if (!channel->panel) {
/* fallback to display-timings node */
ret = of_get_drm_display_mode(child,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 06cb1a59b9bc..63ba2ad84679 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
- if (ret)
+ if (ret) {
+ drm_mode_destroy(connector->dev, mode);
return ret;
+ }
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 83c31b2ad865..ccc4fcf7a630 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL, 0x1ffffffffULL);
+ "gpu", 0x100000000ULL, SZ_4G);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
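On the constant change above: SZ_4G is 0x100000000, while the replaced 0x1ffffffffULL equals start + 4 GiB - 1, an inclusive end address. Assuming the final parameter of msm_gem_address_space_create() is a size (which the one-line swap suggests), the old value conflated end with size:

    /* Sketch: size vs. inclusive end address for a 4 GiB span at 4 GiB. */
    uint64_t start = 0x100000000ULL;
    uint64_t size  = 0x100000000ULL;   /* SZ_4G */
    uint64_t end   = start + size - 1; /* 0x1ffffffffULL, the old value */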
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 89cfd84760d7..8706bcdd1472 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
{}
};
-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_resume(gpu);
}
-static int active_submits(struct msm_gpu *gpu)
+static int adreno_runtime_suspend(struct device *dev)
{
- int active_submits;
- mutex_lock(&gpu->active_lock);
- active_submits = gpu->active_submits;
- mutex_unlock(&gpu->active_lock);
- return active_submits;
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ /*
+ * We should be holding a runpm ref, which will prevent
+ * runtime suspend. In the system suspend path, we've
+ * already waited for active jobs to complete.
+ */
+ WARN_ON_ONCE(gpu->active_submits);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ /*
+ * Shut down the scheduler before we force suspend, so that
+ * suspend isn't racing with scheduler kthread feeding us
+ * more work.
+ *
+ * Note, we just want to park the thread, and let any jobs
+ * that are already on the hw queue complete normally, as
+ * opposed to the drm_sched_stop() path used for handling
+ * faulting/timed-out jobs. We can't really cancel any jobs
+ * already on the hw queue without racing with the GPU.
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_park(sched->thread);
+ }
}
-static int adreno_suspend(struct device *dev)
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_unpark(sched->thread);
+ }
+}
+
+static int adreno_system_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
- int remaining;
+ int remaining, ret;
+
+ suspend_scheduler(gpu);
remaining = wait_event_timeout(gpu->retire_event,
- active_submits(gpu) == 0,
+ gpu->active_submits == 0,
msecs_to_jiffies(1000));
if (remaining == 0) {
dev_err(dev, "Timeout waiting for GPU to suspend\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
- return gpu->funcs->pm_suspend(gpu);
+ ret = pm_runtime_force_suspend(dev);
+out:
+ if (ret)
+ resume_scheduler(gpu);
+
+ return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+ resume_scheduler(dev_to_gpu(dev));
+ return pm_runtime_force_resume(dev);
}
-#endif
static const struct dev_pm_ops adreno_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+ RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};
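Dropping the #ifdef CONFIG_PM block pairs with the macro switch: unlike the SET_*_PM_OPS variants, SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() reference their callbacks unconditionally, letting the compiler discard them when power management is disabled instead of requiring preprocessor guards or __maybe_unused. The generic shape (example_* callbacks are placeholders):

    static const struct dev_pm_ops example_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
        RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
    };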
static struct platform_driver adreno_driver = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c515b7cf922c..c61b5b283f08 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
* When making changes be sure to sync with dpu_hw_intr_reg
*/
static const struct dpu_intr_reg dpu_intr_set[] = {
- {
+ [MDP_SSPP_TOP0_INTR] = {
MDP_SSPP_TOP0_OFF+INTR_CLEAR,
MDP_SSPP_TOP0_OFF+INTR_EN,
MDP_SSPP_TOP0_OFF+INTR_STATUS
},
- {
+ [MDP_SSPP_TOP0_INTR2] = {
MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
MDP_SSPP_TOP0_OFF+INTR2_EN,
MDP_SSPP_TOP0_OFF+INTR2_STATUS
},
- {
+ [MDP_SSPP_TOP0_HIST_INTR] = {
MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
},
- {
+ [MDP_INTF0_INTR] = {
MDP_INTF_0_OFF+INTF_INTR_CLEAR,
MDP_INTF_0_OFF+INTF_INTR_EN,
MDP_INTF_0_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF1_INTR] = {
MDP_INTF_1_OFF+INTF_INTR_CLEAR,
MDP_INTF_1_OFF+INTF_INTR_EN,
MDP_INTF_1_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF2_INTR] = {
MDP_INTF_2_OFF+INTF_INTR_CLEAR,
MDP_INTF_2_OFF+INTF_INTR_EN,
MDP_INTF_2_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF3_INTR] = {
MDP_INTF_3_OFF+INTF_INTR_CLEAR,
MDP_INTF_3_OFF+INTF_INTR_EN,
MDP_INTF_3_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF4_INTR] = {
MDP_INTF_4_OFF+INTF_INTR_CLEAR,
MDP_INTF_4_OFF+INTF_INTR_EN,
MDP_INTF_4_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF5_INTR] = {
MDP_INTF_5_OFF+INTF_INTR_CLEAR,
MDP_INTF_5_OFF+INTF_INTR_EN,
MDP_INTF_5_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_AD4_0_INTR] = {
MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
},
- {
+ [MDP_AD4_1_INTR] = {
MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
},
- {
+ [MDP_INTF0_7xxx_INTR] = {
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF1_7xxx_INTR] = {
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF2_7xxx_INTR] = {
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF3_7xxx_INTR] = {
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF4_7xxx_INTR] = {
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF5_7xxx_INTR] = {
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 1ee824600995..c478d25f7825 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 5d2ff6791058..acfe1b31e079 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
va_list va;
new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+ if (!new_blk)
+ return;
va_start(va, fmt);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 178b774a5fbd..a42732b67349 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
+ /*
+ * Add the fail-safe mode outside the event_mutex scope
+ * to avoid a potential circular lock with the DRM thread.
+ */
+ dp_panel_add_fail_safe_mode(dp->dp_display.connector);
+
/* uevent will complete connection part */
return 0;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index f1418722c549..26c3653c99ec 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
return rc;
}
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
+{
+ /* fail-safe EDID */
+ mutex_lock(&connector->dev->mode_config.mutex);
+ if (drm_add_modes_noedid(connector, 640, 480))
+ drm_set_preferred_mode(connector, 640, 480);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
goto end;
}
- /* fail safe edid */
- mutex_lock(&connector->dev->mode_config.mutex);
- if (drm_add_modes_noedid(connector, 640, 480))
- drm_set_preferred_mode(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
- } else {
- /* always add fail-safe mode as backup mode */
- mutex_lock(&connector->dev->mode_config.mutex);
- drm_add_modes_noedid(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ dp_panel_add_fail_safe_mode(connector);
}
if (panel->aux_cfg_update_done) {
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 9023e5bb4b8b..99739ea679a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
int dp_panel_deinit(struct dp_panel *dp_panel);
int dp_panel_timing_cfg(struct dp_panel *dp_panel);
void dp_panel_dump_regs(struct dp_panel *dp_panel);
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0c1b7dde377c..9f6af0f0fe00 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
return connector;
fail:
- connector->funcs->destroy(msm_dsi->connector);
+ connector->funcs->destroy(connector);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index affa95eb05fc..9c36b505daab 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
/* a2xx comes with its own MMU */
- return priv->is_a2xx || iommu_present(&platform_bus_type);
+ return priv->is_a2xx || device_iommu_mapped(dev->dev);
}
static int msm_init_vram(struct drm_device *dev)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 02b9ae65a96a..a4f61972667b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
+ put_task_struct(task);
} else {
comm = NULL;
}
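The one-line fix balances the reference get_pid_task() takes on the returned task; without put_task_struct(), each describe call leaked a task reference. The pattern in general:

    struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

    if (task) {
        /* ... read task->comm or other fields ... */
        put_task_struct(task); /* drop the reference from get_pid_task() */
    }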
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index e1772211b0a4..612310d5d481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -216,6 +216,7 @@ gm20b_pmu = {
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
+ .reset = gf100_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 6bf7fc1bd1e3..1a6f9c3af5ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -23,7 +23,7 @@
*/
#include "priv.h"
-static void
+void
gp102_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index ba1583bb618b..94cfb1791af6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -83,6 +83,7 @@ gp10b_pmu = {
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
+ .reset = gp102_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index bcaade758ff7..21abf31f4442 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
bool gf100_pmu_enabled(struct nvkm_pmu *);
void gf100_pmu_reset(struct nvkm_pmu *);
+void gp102_pmu_reset(struct nvkm_pmu *pmu);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index a07ef26234e5..6826f4d4826a 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
int ret;
vcc = devm_regulator_get_optional(dev, "vcc");
- if (IS_ERR(vcc))
+ if (IS_ERR(vcc)) {
dev_err(dev, "get optional vcc failed\n");
+ vcc = NULL;
+ }
dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
struct mipi_dbi_dev, drm);
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 666223c6bec4..0a34e0ab4fe6 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
error = rate / (sig->mode.pixelclock / 1000);
- dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
- rate, div, (signed)(error - 1000) / 10, error % 10);
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
+ rate, div, error < 1000 ? '-' : '+',
+ abs(error - 1000) / 10, abs(error - 1000) % 10);
/* Allow a 1% error */
if (error < 1010 && error >= 990) {
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 27f969b3dc07..e9e2db68b9fb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -179,6 +179,12 @@ struct imx_i2c_hwdata {
unsigned int ndivs;
unsigned int i2sr_clr_opcode;
unsigned int i2cr_ien_opcode;
+ /*
+ * Errata ERR007805 or e7805:
+ * I2C: When the I2C clock speed is configured for 400 kHz,
+ * the SCL low period violates the I2C spec minimum of 1.3 us.
+ */
+ bool has_err007805;
};
struct imx_i2c_dma {
@@ -240,6 +246,16 @@ static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
};
+static const struct imx_i2c_hwdata imx6_i2c_hwdata = {
+ .devtype = IMX21_I2C,
+ .regshift = IMX_I2C_REGSHIFT,
+ .clk_div = imx_i2c_clk_div,
+ .ndivs = ARRAY_SIZE(imx_i2c_clk_div),
+ .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C,
+ .i2cr_ien_opcode = I2CR_IEN_OPCODE_1,
+ .has_err007805 = true,
+};
+
static struct imx_i2c_hwdata vf610_i2c_hwdata = {
.devtype = VF610_I2C,
.regshift = VF610_I2C_REGSHIFT,
@@ -266,6 +282,16 @@ MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+ { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
{ /* sentinel */ }
};
@@ -551,6 +577,13 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
unsigned int div;
int i;
+ if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) {
+ dev_dbg(&i2c_imx->adapter.dev,
+ "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n",
+ i2c_imx->bitrate);
+ i2c_imx->bitrate = 384000;
+ }
+
/* Divider value calculation */
if (i2c_imx->cur_clk == i2c_clk_rate)
return;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f4820fd3dc13..c0364314877e 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -145,8 +145,8 @@
#define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */
#define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */
#define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */
-#define ISMT_SPGT_SPD_400K (0x2 << 30) /* 400 kHz */
-#define ISMT_SPGT_SPD_1M (0x3 << 30) /* 1 MHz */
+#define ISMT_SPGT_SPD_400K (0x2U << 30) /* 400 kHz */
+#define ISMT_SPGT_SPD_1M (0x3U << 30) /* 1 MHz */
/* MSI Control Register (MSICTL) bit definitions */
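The added U suffixes avoid undefined behaviour: 0x3 is a signed int, and shifting a set bit into bit 31 of a signed int overflows. A minimal illustration:

    unsigned int ok = 0x3U << 30; /* 0xC0000000, well-defined */
    /* int bad = 0x3 << 30;         undefined: signed left shift
     *                              overflows into the sign bit */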
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index 7728c8460dc0..9028ffb58cc0 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
TXFIFO_WR(smbus, msg->buf[msg->len-1] |
(stop ? MTXFIFO_STOP : 0));
+
+ if (stop) {
+ err = pasemi_smb_waitready(smbus);
+ if (err)
+ goto reset_out;
+ }
}
return 0;
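Waiting only when a STOP was queued is the key detail: before this change a write transfer could return while the controller was still shifting bytes out, so a back-to-back transfer could collide with the tail of the previous one (a reading of the hunk; the original rationale is not quoted here):

    /* Assumed hazard without the wait, as a timeline:
     *   xfer A: fill TXFIFO, queue STOP, return 0   <- bus still busy
     *   xfer B: starts filling TXFIFO               <- overlaps A's tail
     * pasemi_smb_waitready() after STOP serializes A's completion.
     */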
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index fc1dcc19f2a1..5b920f0fc7dd 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -843,10 +843,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
- if (ret) {
- dev_err(dev, "Failed to setup GPI DMA mode:%d ret\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
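dev_err_probe() collapses the log-and-return idiom and, for -EPROBE_DEFER, records the reason without spamming the log, which is why the explicit dev_err() could go. Generic usage (do_setup() is a placeholder):

    ret = do_setup(dev);
    if (ret)
        return dev_err_probe(dev, ret, "setup failed\n");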
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index cf5d049342ea..ab0adaa130da 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -557,7 +557,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
.addr = umsg.addr,
.flags = umsg.flags,
.len = umsg.len,
- .buf = compat_ptr(umsg.buf)
+ .buf = (__force __u8 *)compat_ptr(umsg.buf),
};
}
@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
i2c_dev->dev.class = i2c_dev_class;
i2c_dev->dev.parent = &adap->dev;
i2c_dev->dev.release = i2cdev_dev_release;
- dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+ res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+ if (res)
+ goto err_put_i2c_dev;
res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
- if (res) {
- put_i2c_dev(i2c_dev, false);
- return res;
- }
+ if (res)
+ goto err_put_i2c_dev;
pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
return 0;
+
+err_put_i2c_dev:
+ put_i2c_dev(i2c_dev, false);
+ return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 35f0d5e7533d..1c107d6d03b9 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT:
+ case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->msg);
break;
- case IB_CM_MRA_REP_RCVD:
- break;
case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
[CM_DREQ_COUNTER]);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a311df07b1bd..4deb60a3b43f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2613,7 +2613,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_counters);
SET_DEVICE_OP(dev_ops, create_cq);
SET_DEVICE_OP(dev_ops, create_flow);
- SET_DEVICE_OP(dev_ops, create_flow_action_esp);
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
@@ -2676,7 +2675,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
- SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
SET_DEVICE_OP(dev_ops, modify_hw_stat);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index d42ed7ff223e..0ddcf6da66c4 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -46,385 +46,6 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
return action->device->ops.destroy_flow_action(action);
}
-static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
- u32 flags, bool is_modify)
-{
- u64 verbs_flags = flags;
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN))
- verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED;
-
- if (is_modify && uverbs_attr_is_valid(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS))
- verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS;
-
- return verbs_flags;
-};
-
-static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat)
-{
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm =
- &keymat->keymat.aes_gcm;
-
- if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
- return -EOPNOTSUPP;
-
- if (aes_gcm->key_len != 32 &&
- aes_gcm->key_len != 24 &&
- aes_gcm->key_len != 16)
- return -EINVAL;
-
- if (aes_gcm->icv_len != 16 &&
- aes_gcm->icv_len != 8 &&
- aes_gcm->icv_len != 12)
- return -EINVAL;
-
- return 0;
-}
-
-static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = {
- [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm,
-};
-
-static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify)
-{
- /* This is used in order to modify an esp flow action with an enabled
- * replay protection to a disabled one. This is only supported via
- * modify, as in create verb we can simply drop the REPLAY attribute and
- * achieve the same thing.
- */
- return is_modify ? 0 : -EINVAL;
-}
-
-static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify)
-{
- /* Some replay protections could always be enabled without validating
- * anything.
- */
- return 0;
-}
-
-static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify) = {
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none,
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok,
-};
-
-static int parse_esp_ip(enum ib_flow_spec_type proto,
- const void __user *val_ptr,
- size_t len, union ib_flow_spec *out)
-{
- int ret;
- const struct ib_uverbs_flow_ipv4_filter ipv4 = {
- .src_ip = cpu_to_be32(0xffffffffUL),
- .dst_ip = cpu_to_be32(0xffffffffUL),
- .proto = 0xff,
- .tos = 0xff,
- .ttl = 0xff,
- .flags = 0xff,
- };
- const struct ib_uverbs_flow_ipv6_filter ipv6 = {
- .src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .flow_label = cpu_to_be32(0xffffffffUL),
- .next_hdr = 0xff,
- .traffic_class = 0xff,
- .hop_limit = 0xff,
- };
- union {
- struct ib_uverbs_flow_ipv4_filter ipv4;
- struct ib_uverbs_flow_ipv6_filter ipv6;
- } user_val = {};
- const void *user_pmask;
- size_t val_len;
-
- /* If the flow IPv4/IPv6 flow specifications are extended, the mask
- * should be changed as well.
- */
- BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) +
- sizeof(ipv4.flags) != sizeof(ipv4));
- BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) +
- sizeof(ipv6.reserved) != sizeof(ipv6));
-
- switch (proto) {
- case IB_FLOW_SPEC_IPV4:
- if (len > sizeof(user_val.ipv4) &&
- !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4),
- len - sizeof(user_val.ipv4)))
- return -EOPNOTSUPP;
-
- val_len = min_t(size_t, len, sizeof(user_val.ipv4));
- ret = copy_from_user(&user_val.ipv4, val_ptr,
- val_len);
- if (ret)
- return -EFAULT;
-
- user_pmask = &ipv4;
- break;
- case IB_FLOW_SPEC_IPV6:
- if (len > sizeof(user_val.ipv6) &&
- !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6),
- len - sizeof(user_val.ipv6)))
- return -EOPNOTSUPP;
-
- val_len = min_t(size_t, len, sizeof(user_val.ipv6));
- ret = copy_from_user(&user_val.ipv6, val_ptr,
- val_len);
- if (ret)
- return -EFAULT;
-
- user_pmask = &ipv6;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask,
- &user_val,
- val_len, out);
-}
-
-static int flow_action_esp_get_encap(struct ib_flow_spec_list *out,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uverbs_flow_action_esp_encap uverbs_encap;
- int ret;
-
- ret = uverbs_copy_from(&uverbs_encap, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP);
- if (ret)
- return ret;
-
- /* We currently support only one encap */
- if (uverbs_encap.next_ptr)
- return -EOPNOTSUPP;
-
- if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 &&
- uverbs_encap.type != IB_FLOW_SPEC_IPV6)
- return -EOPNOTSUPP;
-
- return parse_esp_ip(uverbs_encap.type,
- u64_to_user_ptr(uverbs_encap.val_ptr),
- uverbs_encap.len,
- &out->spec);
-}
-
-struct ib_flow_action_esp_attr {
- struct ib_flow_action_attrs_esp hdr;
- struct ib_flow_action_attrs_esp_keymats keymat;
- struct ib_flow_action_attrs_esp_replays replay;
- /* We currently support only one spec */
- struct ib_flow_spec_list encap;
-};
-
-#define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW
-static int parse_flow_action_esp(struct ib_device *ib_dev,
- struct uverbs_attr_bundle *attrs,
- struct ib_flow_action_esp_attr *esp_attr,
- bool is_modify)
-{
- struct ib_uverbs_flow_action_esp uverbs_esp = {};
- int ret;
-
- /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
- ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ESN);
- if (IS_UVERBS_COPY_ERR(ret))
- return ret;
-
- /* This can be called from FLOW_ACTION_ESP_MODIFY where
- * UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional
- */
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) {
- ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS);
- if (ret)
- return ret;
-
- if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1))
- return -EOPNOTSUPP;
-
- esp_attr->hdr.spi = uverbs_esp.spi;
- esp_attr->hdr.seq = uverbs_esp.seq;
- esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad;
- esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts;
- }
- esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags,
- is_modify);
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) {
- esp_attr->keymat.protocol =
- uverbs_attr_get_enum_id(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
- ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat,
- attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
- if (ret)
- return ret;
-
- ret = flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat);
- if (ret)
- return ret;
-
- esp_attr->hdr.keymat = &esp_attr->keymat;
- }
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) {
- esp_attr->replay.protocol =
- uverbs_attr_get_enum_id(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
-
- ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay,
- attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
- if (ret)
- return ret;
-
- ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay,
- is_modify);
- if (ret)
- return ret;
-
- esp_attr->hdr.replay = &esp_attr->replay;
- }
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) {
- ret = flow_action_esp_get_encap(&esp_attr->encap, attrs);
- if (ret)
- return ret;
-
- esp_attr->hdr.encap = &esp_attr->encap;
- }
-
- return 0;
-}
-
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj = uverbs_attr_get_uobject(
- attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
- struct ib_device *ib_dev = attrs->context->device;
- int ret;
- struct ib_flow_action *action;
- struct ib_flow_action_esp_attr esp_attr = {};
-
- if (!ib_dev->ops.create_flow_action_esp)
- return -EOPNOTSUPP;
-
- ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
- if (ret)
- return ret;
-
- /* No need to check as this attribute is marked as MANDATORY */
- action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
- attrs);
- if (IS_ERR(action))
- return PTR_ERR(action);
-
- uverbs_flow_action_fill_action(action, uobj, ib_dev,
- IB_FLOW_ACTION_ESP);
-
- return 0;
-}
-
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj = uverbs_attr_get_uobject(
- attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE);
- struct ib_flow_action *action = uobj->object;
- int ret;
- struct ib_flow_action_esp_attr esp_attr = {};
-
- if (!action->device->ops.modify_flow_action_esp)
- return -EOPNOTSUPP;
-
- ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
- if (ret)
- return ret;
-
- if (action->type != IB_FLOW_ACTION_ESP)
- return -EINVAL;
-
- return action->device->ops.modify_flow_action_esp(action,
- &esp_attr.hdr,
- attrs);
-}
-
-static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
- [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm,
- aes_key),
- },
-};
-
-static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_NO_DATA(),
- },
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp,
- size),
- },
-};
-
-DECLARE_UVERBS_NAMED_METHOD(
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_NEW,
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
- hard_limit_pkts),
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
- UVERBS_ATTR_TYPE(__u32),
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_MANDATORY),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay,
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
- UA_OPTIONAL));
-
-DECLARE_UVERBS_NAMED_METHOD(
- UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
- UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_WRITE,
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
- hard_limit_pkts),
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
- UVERBS_ATTR_TYPE(__u32),
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay,
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
- UA_OPTIONAL));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_FLOW_ACTION_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
@@ -435,9 +56,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY));
const struct uapi_definition uverbs_def_obj_flow_action[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 876cc78a22cc..7333646021bb 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
unsigned long flags;
struct list_head del_list;
+ /* Prevent freeing of mm until we are completely finished. */
+ mmgrab(handler->mn.mm);
+
/* Unregister first so we don't get any more notifications. */
mmu_notifier_unregister(&handler->mn, handler->mn.mm);
@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
do_remove(handler, &del_list);
+ /* Now the mm may be freed. */
+ mmdrop(handler->mn.mm);
+
kfree(handler);
}
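For context: the hfi1 hunk pins the mm with mmgrab() before unregistering the notifier and drops it with mmdrop() only after the removal list is processed, so the mm cannot be freed mid-teardown. A rough userspace analogue of that pin-during-teardown pattern, sketched with C11 atomics and hypothetical names:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;
    };

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->refcount, 1);
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
            puts("last reference dropped, freeing");
            free(o);
        }
    }

    static void teardown(struct obj *o)
    {
        obj_get(o);    /* keep o alive for the whole teardown */
        /* ... unregister notifiers, drain work that still uses o ... */
        obj_put(o);    /* from here on, o may really be freed */
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        if (!o)
            return EXIT_FAILURE;
        atomic_init(&o->refcount, 1);
        teardown(o);
        obj_put(o);    /* drop the initial reference */
        return EXIT_SUCCESS;
    }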
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 661ed2b44508..9c2886bc72cb 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -15,7 +15,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
-#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
#include <net/inet_ecn.h>
#include "mlx5_ib.h"
@@ -148,16 +147,6 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
{
switch (maction->ib_action.type) {
- case IB_FLOW_ACTION_ESP:
- if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
- return -EINVAL;
- /* Currently only AES_GCM keymat is supported by the driver */
- action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
- action->action |= is_egress ?
- MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
- return 0;
case IB_FLOW_ACTION_UNSPECIFIED:
if (maction->flow_action_raw.sub_type ==
MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
@@ -368,14 +357,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev,
ib_spec->type & IB_FLOW_SPEC_INNER);
break;
case IB_FLOW_SPEC_ESP:
- if (ib_spec->esp.mask.seq)
- return -EOPNOTSUPP;
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
- ntohl(ib_spec->esp.mask.spi));
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
- ntohl(ib_spec->esp.val.spi));
- break;
+ return -EOPNOTSUPP;
case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD))
@@ -587,47 +569,6 @@ static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
return false;
}
-enum valid_spec {
- VALID_SPEC_INVALID,
- VALID_SPEC_VALID,
- VALID_SPEC_NA,
-};
-
-static enum valid_spec
-is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
- const struct mlx5_flow_spec *spec,
- const struct mlx5_flow_act *flow_act,
- bool egress)
-{
- const u32 *match_c = spec->match_criteria;
- bool is_crypto =
- (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
- bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
- bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
-
- /*
- * Currently only crypto is supported in egress, when regular egress
- * rules would be supported, always return VALID_SPEC_NA.
- */
- if (!is_crypto)
- return VALID_SPEC_NA;
-
- return is_crypto && is_ipsec &&
- (!egress || (!is_drop &&
- !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
- VALID_SPEC_VALID : VALID_SPEC_INVALID;
-}
-
-static bool is_valid_spec(struct mlx5_core_dev *mdev,
- const struct mlx5_flow_spec *spec,
- const struct mlx5_flow_act *flow_act,
- bool egress)
-{
- /* We curretly only support ipsec egress flow */
- return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
-}
-
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
const struct ib_flow_attr *flow_attr,
bool check_inner)
@@ -1154,8 +1095,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
- if (is_egress &&
- !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
+ if (is_egress) {
err = -EINVAL;
goto free;
}
@@ -1740,149 +1680,6 @@ unlock:
return ERR_PTR(err);
}
-static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
-{
- u32 flags = 0;
-
- if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
- flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
-
- return flags;
-}
-
-#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \
- MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
-static struct ib_flow_action *
-mlx5_ib_create_flow_action_esp(struct ib_device *device,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_dev *mdev = to_mdev(device);
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
- struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
- struct mlx5_ib_flow_action *action;
- u64 action_flags;
- u64 flags;
- int err = 0;
-
- err = uverbs_get_flags64(
- &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
- if (err)
- return ERR_PTR(err);
-
- flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
-
- /* We current only support a subset of the standard features. Only a
- * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
- * (with overlap). Full offload mode isn't supported.
- */
- if (!attr->keymat || attr->replay || attr->encap ||
- attr->spi || attr->seq || attr->tfc_pad ||
- attr->hard_limit_pkts ||
- (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
- return ERR_PTR(-EOPNOTSUPP);
-
- if (attr->keymat->protocol !=
- IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
- return ERR_PTR(-EOPNOTSUPP);
-
- aes_gcm = &attr->keymat->keymat.aes_gcm;
-
- if (aes_gcm->icv_len != 16 ||
- aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
- return ERR_PTR(-EOPNOTSUPP);
-
- action = kmalloc(sizeof(*action), GFP_KERNEL);
- if (!action)
- return ERR_PTR(-ENOMEM);
-
- action->esp_aes_gcm.ib_flags = attr->flags;
- memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
- sizeof(accel_attrs.keymat.aes_gcm.aes_key));
- accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
- memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
- sizeof(accel_attrs.keymat.aes_gcm.salt));
- memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
- sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
- accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
- accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
- accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
-
- accel_attrs.esn = attr->esn;
- if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
-
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
- accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
-
- action->esp_aes_gcm.ctx =
- mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
- if (IS_ERR(action->esp_aes_gcm.ctx)) {
- err = PTR_ERR(action->esp_aes_gcm.ctx);
- goto err_parse;
- }
-
- action->esp_aes_gcm.ib_flags = attr->flags;
-
- return &action->ib_action;
-
-err_parse:
- kfree(action);
- return ERR_PTR(err);
-}
-
-static int
-mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_flow_action *maction = to_mflow_act(action);
- struct mlx5_accel_esp_xfrm_attrs accel_attrs;
- int err = 0;
-
- if (attr->keymat || attr->replay || attr->encap ||
- attr->spi || attr->seq || attr->tfc_pad ||
- attr->hard_limit_pkts ||
- (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
- return -EOPNOTSUPP;
-
- /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
- * be modified.
- */
- if (!(maction->esp_aes_gcm.ib_flags &
- IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
- attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
- return -EINVAL;
-
- memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
- sizeof(accel_attrs));
-
- accel_attrs.esn = attr->esn;
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
- else
- accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
-
- err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
- &accel_attrs);
- if (err)
- return err;
-
- maction->esp_aes_gcm.ib_flags &=
- ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
- maction->esp_aes_gcm.ib_flags |=
- attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
-
- return 0;
-}
-
static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
switch (maction->flow_action_raw.sub_type) {
@@ -1906,13 +1703,6 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
struct mlx5_ib_flow_action *maction = to_mflow_act(action);
switch (action->type) {
- case IB_FLOW_ACTION_ESP:
- /*
- * We only support aes_gcm by now, so we implicitly know this is
- * the underline crypto.
- */
- mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
- break;
case IB_FLOW_ACTION_UNSPECIFIED:
destroy_flow_action_raw(maction);
break;
@@ -2709,11 +2499,6 @@ static const struct ib_device_ops flow_ops = {
.destroy_flow_action = mlx5_ib_destroy_flow_action,
};
-static const struct ib_device_ops flow_ipsec_ops = {
- .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
- .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
-};
-
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
@@ -2724,9 +2509,5 @@ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops);
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
- ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 32a0ea820573..61aa196d6484 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -41,7 +41,6 @@
#include "wr.h"
#include "restrack.h"
#include "counters.h"
-#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
@@ -906,10 +905,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_UDP |
MLX5_RX_HASH_DST_PORT_UDP |
MLX5_RX_HASH_INNER;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
- resp.rss_caps.rx_hash_fields_mask |=
- MLX5_RX_HASH_IPSEC_SPI;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -1791,23 +1786,6 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
MLX5_CAP_GEN(dev->mdev,
num_of_uars_per_page) : 1;
-
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE) {
- if (mlx5_get_flow_namespace(dev->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS))
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
- if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
- /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
- }
-
resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
resp->num_ports = dev->num_ports;
@@ -3605,13 +3583,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
- mlx5_ib_flow_action,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- enum mlx5_ib_uapi_flow_action_flags));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_query_context,
UVERBS_OBJECT_DEVICE,
UVERBS_METHOD_QUERY_CONTEXT,
@@ -3628,8 +3599,6 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
- UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
- &mlx5_ib_flow_action),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 956f8e875daa..32ef67e9a6a7 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
spin_lock_irq(&ent->lock);
if (ent->disabled)
goto out;
- if (need_delay)
+ if (need_delay) {
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+ goto out;
+ }
remove_cache_mr_locked(ent);
queue_adjust_cache_locked(ent);
}
@@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
+ WRITE_ONCE(dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->available_mrs++;
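The first mr.c hunk adds the early exit that was missing when a shrink has to be delayed: re-arm the delayed work and leave, instead of falling through and removing a cache entry anyway. The control flow, reduced to a standalone C sketch with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    static void requeue_shrink(void)
    {
        puts("re-armed, try again later");
    }

    static void remove_one_entry(void)
    {
        puts("removed one entry");
    }

    static void shrink_step(bool need_delay)
    {
        if (need_delay) {
            requeue_shrink();
            return;    /* the early exit the fix restores */
        }
        remove_one_entry();
    }

    int main(void)
    {
        shrink_step(true);
        shrink_step(false);
        return 0;
    }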
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index ae50b56e8913..8ef112f883a7 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -3190,7 +3190,11 @@ serr_no_r_lock:
spin_lock_irqsave(&sqp->s_lock, flags);
rvt_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ int lastwqe;
+
+ spin_lock(&sqp->r_lock);
+ lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&sqp->r_lock);
sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
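The rdmavt hunk wraps rvt_error_qp() in the QP's r_lock while s_lock is already held, so the two locks are always taken in the same fixed order. A compact pthreads sketch of that nesting, with hypothetical names (mutexes standing in for spinlocks):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t s_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t r_lock = PTHREAD_MUTEX_INITIALIZER;

    /* caller must hold both s_lock and r_lock */
    static int error_qp_locked(void)
    {
        return 1;
    }

    int main(void)
    {
        int lastwqe;

        pthread_mutex_lock(&s_lock);
        pthread_mutex_lock(&r_lock);    /* fixed order: s_lock, then r_lock */
        lastwqe = error_qp_locked();
        pthread_mutex_unlock(&r_lock);
        pthread_mutex_unlock(&s_lock);
        printf("lastwqe=%d\n", lastwqe);
        return 0;
    }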
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 4aab631ef517..d9cf2820c02e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
sizeof(phandle));
if (num_iommus < 0)
- return 0;
+ return ERR_PTR(-ENODEV);
arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
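The omap-iommu fix matters because probe_device callers expect either a valid pointer or an ERR_PTR-encoded errno; returning 0 (NULL) there conflated "no IOMMU" with success. For readers unfamiliar with the convention, a minimal userspace re-implementation (the kernel's real helpers live in <linux/err.h>; the same convention is behind the irq-qcom-mpm fix further down):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    static void *probe_device(int present)
    {
        static int dummy;

        if (!present)
            return ERR_PTR(-ENODEV);    /* an error, not a NULL "success" */
        return &dummy;
    }

    int main(void)
    {
        void *dev = probe_device(0);

        if (IS_ERR(dev))
            printf("probe failed: %ld\n", PTR_ERR(dev));
        return 0;
    }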
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 680d2fcf2686..15edb9a6fcae 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -433,6 +433,7 @@ config QCOM_PDC
config QCOM_MPM
tristate "QCOM MPM"
depends on ARCH_QCOM
+ depends on MAILBOX
select IRQ_DOMAIN_HIERARCHY
help
MSM Power Manager driver to manage and configure wakeup
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cd772973114a..a0fc764ec9dc 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3011,18 +3011,12 @@ static int __init allocate_lpi_tables(void)
return 0;
}
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
{
u32 count = 1000000; /* 1s! */
bool clean;
u64 val;
- val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
- val &= ~GICR_VPENDBASER_Valid;
- val &= ~clr;
- val |= set;
- gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
do {
val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
clean = !(val & GICR_VPENDBASER_Dirty);
@@ -3033,10 +3027,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
}
} while (!clean && count);
- if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+ if (unlikely(!clean))
pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+
+ return val;
+}
+
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+{
+ u64 val;
+
+ /* Make sure we wait until the RD is done with the initial scan */
+ val = read_vpend_dirty_clear(vlpi_base);
+ val &= ~GICR_VPENDBASER_Valid;
+ val &= ~clr;
+ val |= set;
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ val = read_vpend_dirty_clear(vlpi_base);
+ if (unlikely(val & GICR_VPENDBASER_Dirty))
val |= GICR_VPENDBASER_PendingLast;
- }
return val;
}
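The refactor above waits for GICR_VPENDBASER.Dirty to clear both before and after reprogramming the register, reusing one bounded polling helper. The shape of that helper, as a standalone C sketch with a fake register read (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    #define DIRTY_BIT 0x1u

    static int fake_reads = 3;

    /* stands in for the hardware read; reports Dirty a few times */
    static unsigned int read_status(void)
    {
        return fake_reads-- > 0 ? DIRTY_BIT : 0;
    }

    static bool wait_until_clean(void)
    {
        unsigned int count = 1000000;    /* retry budget, as in the driver */

        while (read_status() & DIRTY_BIT) {
            if (!--count) {
                fprintf(stderr, "still dirty, giving up\n");
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        printf("clean: %d\n", wait_until_clean());
        return 0;
    }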
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 0efe1a9a9f3b..b252d5534547 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
}
}
-static void gic_do_wait_for_rwp(void __iomem *base)
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
u32 count = 1000000; /* 1s! */
- while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+ while (readl_relaxed(base + GICD_CTLR) & bit) {
count--;
if (!count) {
pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base)
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
- gic_do_wait_for_rwp(gic_data.dist_base);
+ gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
- gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}
#ifdef CONFIG_ARM64
@@ -1466,6 +1466,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
 if (fwspec->param_count != 2)
return -EINVAL;
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 58ba835bee1f..09c710ecc387 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1123,6 +1123,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
 if (fwspec->param_count != 2)
return -EINVAL;
+ if (fwspec->param[0] < 16) {
+ pr_err(FW_BUG "Illegal GSI%d translation request\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index eea5a753618c..d30614661eea 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -375,7 +375,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
raw_spin_lock_init(&priv->lock);
priv->base = devm_platform_ioremap_resource(pdev, 0);
- if (!priv->base)
+ if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
for (i = 0; i < priv->reg_stride; i++) {
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ad2d5faa2ebb..36ae30b73a6e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4399,6 +4399,7 @@ try_smaller_buffer:
}
if (ic->internal_hash) {
+ size_t recalc_tags_size;
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@@ -4412,8 +4413,10 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
- ic->tag_size, GFP_KERNEL);
+ recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
+ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+ ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
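The dm-integrity hunk sizes the buffer as (number of recalculated blocks) * tag_size, plus slack so the final tag slot can absorb a full hash digest when the digest is wider than the tag. The arithmetic, worked through with made-up values:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned recalc_sectors = 2048;          /* hypothetical RECALC_SECTORS */
        unsigned log2_sectors_per_block = 3;     /* 8 sectors per block */
        unsigned tag_size = 4;
        unsigned digest_size = 20;               /* e.g. a 160-bit digest */

        size_t n_tags = recalc_sectors >> log2_sectors_per_block;
        size_t size = n_tags * (size_t)tag_size;

        if (digest_size > tag_size)
            size += digest_size - tag_size;      /* room for the last digest */

        printf("%zu tags, %zu bytes\n", n_tags, size);    /* 256 tags, 1040 bytes */
        return 0;
    }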
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 875bca30a0dd..82f2a06153dc 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -27,7 +27,6 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/sched/clock.h>
#define DM_MSG_PREFIX "multipath historical-service-time"
@@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
- u64 time_now = sched_clock();
+ u64 time_now = ktime_get_ns();
struct dm_path *ret = NULL;
unsigned long flags;
@@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
static u64 path_service_time(struct path_info *pi, u64 start_time)
{
- u64 sched_now = ktime_get_ns();
+ u64 now = ktime_get_ns();
/* if a previous disk request has finished after this IO was
* sent to the hardware, pretend the submission happened
@@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
if (time_after64(pi->last_finish, start_time))
start_time = pi->last_finish;
- pi->last_finish = sched_now;
- if (time_before64(sched_now, start_time))
+ pi->last_finish = now;
+ if (time_before64(now, start_time))
return 0;
- return sched_now - start_time;
+ return now - start_time;
}
static int hst_end_io(struct path_selector *ps, struct dm_path *path,
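After the switch to ktime_get_ns(), path_service_time() measures everything on one monotonic clock: push the start time forward if a previous completion landed later, and clamp to zero if the times still race. A self-contained sketch of that logic with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t last_finish;

    static uint64_t path_service_time(uint64_t now, uint64_t start_time)
    {
        if (last_finish > start_time)    /* overlapped a prior request */
            start_time = last_finish;

        last_finish = now;
        if (now < start_time)            /* raced; report 0, not a huge value */
            return 0;

        return now - start_time;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)path_service_time(1000, 400));  /* 600 */
        printf("%llu\n", (unsigned long long)path_service_time(1200, 900));  /* 200 */
        return 0;
    }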
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index c1ca9be4b79e..57daa86c19cf 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
return 0;
}
+struct orig_bio_details {
+ unsigned int op;
+ unsigned int nr_sectors;
+};
+
/*
* First phase of BIO mapping for targets with zone append emulation:
 * check all BIOs that change a zone write pointer and change zone
* append operations into regular write operations.
*/
static bool dm_zone_map_bio_begin(struct mapped_device *md,
- struct bio *orig_bio, struct bio *clone)
+ unsigned int zno, struct bio *clone)
{
sector_t zsectors = blk_queue_zone_sectors(md->queue);
- unsigned int zno = bio_zone_no(orig_bio);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/*
@@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
}
- switch (bio_op(orig_bio)) {
+ switch (bio_op(clone)) {
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_FINISH:
return true;
@@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
* target zone.
*/
clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
- (orig_bio->bi_opf & (~REQ_OP_MASK));
- clone->bi_iter.bi_sector =
- orig_bio->bi_iter.bi_sector + zwp_offset;
+ (clone->bi_opf & (~REQ_OP_MASK));
+ clone->bi_iter.bi_sector += zwp_offset;
break;
default:
DMWARN_LIMIT("Invalid BIO operation");
@@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
* data written to a zone. Note that at this point, the remapped clone BIO
* may already have completed, so we do not touch it.
*/
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
- struct bio *orig_bio,
+static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
+ struct orig_bio_details *orig_bio_details,
unsigned int nr_sectors)
{
- unsigned int zno = bio_zone_no(orig_bio);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/* The clone BIO may already have been completed and failed */
@@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
return BLK_STS_IOERR;
/* Update the zone wp offset */
- switch (bio_op(orig_bio)) {
+ switch (orig_bio_details->op) {
case REQ_OP_ZONE_RESET:
WRITE_ONCE(md->zwp_offset[zno], 0);
return BLK_STS_OK;
@@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
* Check that the target did not truncate the write operation
* emulating a zone append.
*/
- if (nr_sectors != bio_sectors(orig_bio)) {
+ if (nr_sectors != orig_bio_details->nr_sectors) {
DMWARN_LIMIT("Truncated write for zone append");
return BLK_STS_IOERR;
}
@@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q,
bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
}
-static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
+static bool dm_need_zone_wp_tracking(struct bio *bio)
{
/*
* Special processing is not needed for operations that do not need the
@@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
* zones and all operations that do not modify directly a sequential
* zone write pointer.
*/
- if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio))
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
- switch (bio_op(orig_bio)) {
+ switch (bio_op(bio)) {
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_FINISH:
case REQ_OP_ZONE_APPEND:
- return bio_zone_is_seq(orig_bio);
+ return bio_zone_is_seq(bio);
default:
return false;
}
@@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
struct dm_target *ti = tio->ti;
struct mapped_device *md = io->md;
struct request_queue *q = md->queue;
- struct bio *orig_bio = io->orig_bio;
struct bio *clone = &tio->clone;
+ struct orig_bio_details orig_bio_details;
unsigned int zno;
blk_status_t sts;
int r;
@@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* IOs that do not change a zone write pointer do not need
* any additional special processing.
*/
- if (!dm_need_zone_wp_tracking(orig_bio))
+ if (!dm_need_zone_wp_tracking(clone))
return ti->type->map(ti, clone);
/* Lock the target zone */
- zno = bio_zone_no(orig_bio);
+ zno = bio_zone_no(clone);
dm_zone_lock(q, zno, clone);
+ orig_bio_details.nr_sectors = bio_sectors(clone);
+ orig_bio_details.op = bio_op(clone);
+
/*
* Check that the bio and the target zone write pointer offset are
* both valid, and if the bio is a zone append, remap it to a write.
*/
- if (!dm_zone_map_bio_begin(md, orig_bio, clone)) {
+ if (!dm_zone_map_bio_begin(md, zno, clone)) {
dm_zone_unlock(q, zno, clone);
return DM_MAPIO_KILL;
}
@@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* The target submitted the clone BIO. The target zone will
* be unlocked on completion of the clone.
*/
- sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+ sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+ *tio->len_ptr);
break;
case DM_MAPIO_REMAPPED:
/*
@@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* unlock the target zone here as the clone will not be
* submitted.
*/
- sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+ sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+ *tio->len_ptr);
if (sts != BLK_STS_OK)
dm_zone_unlock(q, zno, clone);
break;
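The dm-zone rework drops the io->orig_bio dependency by snapshotting the two fields needed for end-of-IO accounting (operation and sector count) before the clone is handed to ->map(), since the clone may be remapped or even completed by then. The pattern in miniature, with hypothetical types:

    #include <stdio.h>

    struct req { unsigned op, nr_sectors; };
    struct details { unsigned op, nr_sectors; };

    static void map_and_maybe_mutate(struct req *r)
    {
        r->op = 99;          /* e.g. zone append remapped to a write */
        r->nr_sectors = 0;   /* may even complete before we look again */
    }

    int main(void)
    {
        struct req r = { .op = 1, .nr_sectors = 8 };
        struct details d = { .op = r.op, .nr_sectors = r.nr_sectors };

        map_and_maybe_mutate(&r);

        /* post-processing consults the snapshot, not the live request */
        printf("orig op=%u sectors=%u\n", d.op, d.nr_sectors);
        return 0;
    }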
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c5fad7c4ee6..82957bd460e8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1323,8 +1323,7 @@ static void __map_bio(struct bio *clone)
}
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios,
- unsigned *len)
+ struct dm_target *ti, unsigned num_bios)
{
struct bio *bio;
int try;
@@ -1335,7 +1334,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
if (try)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
- bio = alloc_tio(ci, ti, bio_nr, len,
+ bio = alloc_tio(ci, ti, bio_nr, NULL,
try ? GFP_NOIO : GFP_NOWAIT);
if (!bio)
break;
@@ -1363,11 +1362,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
break;
case 1:
clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
- dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
__map_bio(clone);
break;
default:
- alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+ alloc_multiple_bios(&blist, ci, ti, num_bios);
while ((clone = bio_list_pop(&blist))) {
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
__map_bio(clone);
@@ -1392,6 +1391,7 @@ static void __send_empty_flush(struct clone_info *ci)
ci->bio = &flush_bio;
ci->sector_count = 0;
+ ci->io->tio.clone.bi_iter.bi_size = 0;
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1407,14 +1407,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
- /*
- * dm_accept_partial_bio cannot be used with duplicate bios,
- * so update clone_info cursor before __send_duplicate_bios().
- */
+ __send_duplicate_bios(ci, ti, num_bios, &len);
+
ci->sector += len;
ci->sector_count -= len;
-
- __send_duplicate_bios(ci, ti, num_bios, &len);
}
static bool is_abnormal_io(struct bio *bio)
diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig
index 28f2bafc14d2..5afa373e534f 100644
--- a/drivers/media/platform/nxp/Kconfig
+++ b/drivers/media/platform/nxp/Kconfig
@@ -7,6 +7,7 @@ comment "NXP media platform drivers"
config VIDEO_IMX_MIPI_CSIS
tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
depends on ARCH_MXC || COMPILE_TEST
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 4de5e8d2b261..3d3d1062e212 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev)
}
rga->dst_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
- if (rga->dst_mmu_pages) {
+ if (!rga->dst_mmu_pages) {
ret = -ENOMEM;
goto free_src_pages;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 47029746b89e..0de587b412d4 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -77,16 +77,16 @@ err_mutex_unlock:
}
static const struct si2157_tuner_info si2157_tuners[] = {
- { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
- { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
- { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
- { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
- { SI2148, true, 0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2148, true, 0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
- { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ { SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL },
+ { SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL },
+ { SI2148, 0x32, true, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2148, 0x33, true, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ { SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
};
static int si2157_load_firmware(struct dvb_frontend *fe,
@@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
}
}
- if (!fw_name && !fw_alt_name) {
+ if (required && !fw_name && !fw_alt_name) {
dev_err(&client->dev,
"unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
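The si2157 table change swaps two columns so the positional initializers match the struct's field order again. Designated initializers sidestep this whole class of bug, as this standalone sketch (hypothetical struct and firmware name) shows:

    #include <stdbool.h>
    #include <stdio.h>

    struct tuner_info {
        int part_id;
        unsigned char rom_id;    /* order matters for positional init */
        bool required;
        const char *fw_name;
    };

    static const struct tuner_info tuners[] = {
        /* designated: stays correct even if fields are reordered */
        { .part_id = 2141, .rom_id = 0x60, .required = false,
          .fw_name = "hypothetical-si2141-60.fw" },
    };

    int main(void)
    {
        printf("Si%d rom 0x%02x\n", tuners[0].part_id, tuners[0].rom_id);
        return 0;
    }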
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index c267283b01fd..e749dcb3ddea 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
ebi->smc.regmap = syscon_node_to_regmap(smc_np);
- if (IS_ERR(ebi->smc.regmap))
- return PTR_ERR(ebi->smc.regmap);
+ if (IS_ERR(ebi->smc.regmap)) {
+ ret = PTR_ERR(ebi->smc.regmap);
+ goto put_node;
+ }
ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
- if (IS_ERR(ebi->smc.layout))
- return PTR_ERR(ebi->smc.layout);
+ if (IS_ERR(ebi->smc.layout)) {
+ ret = PTR_ERR(ebi->smc.layout);
+ goto put_node;
+ }
ebi->smc.clk = of_clk_get(smc_np, 0);
if (IS_ERR(ebi->smc.clk)) {
- if (PTR_ERR(ebi->smc.clk) != -ENOENT)
- return PTR_ERR(ebi->smc.clk);
+ if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+ ret = PTR_ERR(ebi->smc.clk);
+ goto put_node;
+ }
ebi->smc.clk = NULL;
}
+ of_node_put(smc_np);
ret = clk_prepare_enable(ebi->smc.clk);
if (ret)
return ret;
@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
}
return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+ of_node_put(smc_np);
+ return ret;
}
static __maybe_unused int atmel_ebi_resume(struct device *dev)
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 2f6939da21cd..e83b61c925a4 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -287,8 +287,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
}
/* legacy dts may still use "simple-bus" compatible */
- ret = of_platform_populate(dev->dev.of_node, NULL, NULL,
- &dev->dev);
+ ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev);
if (ret)
goto err_free_nandirq;
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index e4cc64f56019..2e545f473cc6 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -651,6 +651,7 @@ static int rpcif_probe(struct platform_device *pdev)
struct platform_device *vdev;
struct device_node *flash;
const char *name;
+ int ret;
flash = of_get_next_child(pdev->dev.of_node, NULL);
if (!flash) {
@@ -674,7 +675,14 @@ static int rpcif_probe(struct platform_device *pdev)
return -ENOMEM;
vdev->dev.parent = &pdev->dev;
platform_set_drvdata(pdev, vdev);
- return platform_device_add(vdev);
+
+ ret = platform_device_add(vdev);
+ if (ret) {
+ platform_device_put(vdev);
+ return ret;
+ }
+
+ return 0;
}
static int rpcif_remove(struct platform_device *pdev)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e90adfa57950..9b3ba2df71c7 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6658,13 +6658,13 @@ static int mpt_summary_proc_show(struct seq_file *m, void *v)
static int mpt_version_proc_show(struct seq_file *m, void *v)
{
u8 cb_idx;
- int scsi, fc, sas, lan, ctl, targ, dmp;
+ int scsi, fc, sas, lan, ctl, targ;
char *drvname;
seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
seq_printf(m, " Fusion MPT base driver\n");
- scsi = fc = sas = lan = ctl = targ = dmp = 0;
+ scsi = fc = sas = lan = ctl = targ = 0;
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
drvname = NULL;
if (MptCallbacks[cb_idx]) {
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index e008d82e4ba3..a13506dd8119 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -111,10 +111,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
if (contiguous) {
if (is_power_of_2(page_size))
- paddr = (u64) (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
- total_size, NULL, page_size);
+ paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
+ total_size, NULL, page_size);
else
- paddr = (u64) (uintptr_t) gen_pool_alloc(vm->dram_pg_pool, total_size);
+ paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
"failed to allocate %llu contiguous pages with total size of %llu\n",
@@ -150,12 +150,12 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
for (i = 0 ; i < num_pgs ; i++) {
if (is_power_of_2(page_size))
phys_pg_pack->pages[i] =
- (u64) gen_pool_dma_alloc_align(vm->dram_pg_pool,
- page_size, NULL,
- page_size);
+ (uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
+ page_size, NULL,
+ page_size);
else
- phys_pg_pack->pages[i] = (u64) gen_pool_alloc(vm->dram_pg_pool,
- page_size);
+ phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
+ page_size);
if (!phys_pg_pack->pages[i]) {
dev_err(hdev->dev,
"Failed to allocate device memory (out of memory)\n");
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4e67c1403cc9..506dc900f5c7 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -993,7 +993,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
return -EEXIST;
md->reset_done |= type;
- err = mmc_hw_reset(host);
+ err = mmc_hw_reset(host->card);
/* Ensure we switch back to the correct partition */
if (err) {
struct mmc_blk_data *main_md =
@@ -1880,6 +1880,31 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
+static int mmc_spi_err_check(struct mmc_card *card)
+{
+ u32 status = 0;
+ int err;
+
+ /*
+	 * SPI does not have a TRAN state we have to wait on; instead, the
+	 * card is ready again when it no longer holds the line LOW.
+	 * We still have to ensure two things here before we know the write
+	 * was successful:
+	 * 1. The card has not disconnected during busy, in which case we
+	 * would read our own pull-up and think it was still connected, so
+	 * ensure it still responds.
+	 * 2. There are no error bits set, in particular R1_SPI_IDLE, which
+	 * catches a just-reconnected card after a disconnect during busy.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err)
+ return err;
+ /* All R1 and R2 bits of SPI are errors in our case */
+ if (status)
+ return -EIO;
+ return 0;
+}
+
static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
struct mmc_blk_busy_data *data = cb_data;
@@ -1903,9 +1928,16 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
struct mmc_blk_busy_data cb_data;
int err;
- if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ if (rq_data_dir(req) == READ)
return 0;
+ if (mmc_host_is_spi(card->host)) {
+ err = mmc_spi_err_check(card);
+ if (err)
+ mqrq->brq.data.bytes_xfered = 0;
+ return err;
+ }
+
cb_data.card = card;
cb_data.status = 0;
err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
@@ -2350,6 +2382,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
+ bool cache_enabled = false;
+ bool fua_enabled = false;
devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
if (devidx < 0) {
@@ -2429,13 +2463,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->flags |= MMC_BLK_CMD23;
}
- if (mmc_card_mmc(card) &&
- md->flags & MMC_BLK_CMD23 &&
+ if (md->flags & MMC_BLK_CMD23 &&
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
- blk_queue_write_cache(md->queue.queue, true, true);
+ fua_enabled = true;
+ cache_enabled = true;
}
+ if (mmc_cache_enabled(card->host))
+ cache_enabled = true;
+
+ blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
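In the mmc_blk_alloc_req() hunk, the cache and FUA decisions are first accumulated into two booleans and the queue is configured exactly once, which lets "cache on, FUA off" be expressed for cards with a host-side cache but no reliable-write support. The decision table as a runnable sketch with hypothetical predicates:

    #include <stdbool.h>
    #include <stdio.h>

    static void queue_write_cache(bool cache, bool fua)
    {
        printf("cache=%d fua=%d\n", cache, fua);
    }

    static void setup(bool reliable_write, bool host_cache)
    {
        bool cache_enabled = false, fua_enabled = false;

        if (reliable_write) {    /* CMD23 + reliable-write support */
            fua_enabled = true;
            cache_enabled = true;
        }
        if (host_cache)          /* card/host cache is enabled */
            cache_enabled = true;

        queue_write_cache(cache_enabled, fua_enabled);
    }

    int main(void)
    {
        setup(false, true);    /* cache without FUA: the case the fix adds */
        setup(true, false);
        return 0;
    }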
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 368f10405e13..c6ae16d40766 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1995,7 +1995,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
/**
* mmc_hw_reset - reset the card in hardware
- * @host: MMC host to which the card is attached
+ * @card: card to be reset
*
* Hard reset the card. This function is only for upper layers, like the
* block layer or card drivers. You cannot use it in host drivers (struct
@@ -2003,8 +2003,9 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
*
* Return: 0 on success, -errno on failure
*/
-int mmc_hw_reset(struct mmc_host *host)
+int mmc_hw_reset(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
int ret;
ret = host->bus_ops->hw_reset(host);
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index e6a2fd2c6d5c..8d9bceeff986 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -2325,10 +2325,9 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
static int mmc_test_reset(struct mmc_test_card *test)
{
struct mmc_card *card = test->card;
- struct mmc_host *host = card->host;
int err;
- err = mmc_hw_reset(host);
+ err = mmc_hw_reset(card);
if (!err) {
/*
* Reset will re-enable the card's command queue, but tests
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 9c13f2c31365..4566d7fc9055 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
* excepted the last element which has no constraint on idmasize
*/
for_each_sg(data->sg, sg, data->sg_len - 1, i) {
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
- !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+ !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
dev_err(mmc_dev(host->mmc),
"unaligned scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
@@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
}
}
- if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
dev_err(mmc_dev(host->mmc),
"unaligned last scatterlist: ofst:%x length:%d\n",
data->sg->offset, data->sg->length);
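The mmci fix is a classic for-each bug: the loop body validated data->sg (the head of the scatterlist) on every iteration instead of the cursor sg, so misalignment in any later segment went unnoticed. Reduced to a plain C loop:

    #include <stdio.h>

    struct seg { unsigned offset, length; };

    int main(void)
    {
        struct seg segs[] = { { 0, 64 }, { 4, 64 }, { 2, 64 } };
        unsigned i, n = sizeof(segs) / sizeof(segs[0]);

        for (i = 0; i < n; i++) {
            /* checking segs[0] here would never catch segs[2] */
            if (segs[i].offset % 4) {
                printf("segment %u unaligned (ofst:%u)\n", i, segs[i].offset);
                return 1;
            }
        }
        puts("all aligned");
        return 0;
    }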
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 2797a9c0f17d..ddb5ca2f559e 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -144,9 +144,9 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
return clk_get_rate(priv->clk);
if (priv->clkh) {
+ /* HS400 with 4TAP needs different clock settings */
bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
- bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
- (host->mmc->ios.timing == MMC_TIMING_MMC_HS400);
+ bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400;
clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
ref_clk = priv->clkh;
}
@@ -396,10 +396,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
- /* Set the sampling clock selection range of HS400 mode */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
- 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+ sd_scc_read32(host, priv,
+ SH_MOBILE_SDHI_SCC_DTCNTL));
/* Avoid bad TAP */
if (bad_taps & BIT(priv->tap_set)) {
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 666cee4c7f7c..08e838400b52 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -241,16 +241,6 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{
 /* Wait for 5 ms after setting the 1.8V signal enable bit */
usleep_range(5000, 5500);
-
- /*
- * For some reason the controller's Host Control2 register reports
- * the bit representing 1.8V signaling as 0 when read after it was
- * written as 1. Subsequent read reports 1.
- *
- * Since this may cause some issues, do an empty read of the Host
- * Control2 register here to circumvent this.
- */
- sdhci_readw(host, SDHCI_HOST_CONTROL2);
}
static unsigned int xenon_get_max_clock(struct sdhci_host *host)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 15eddca7b4b6..c9e75a9de282 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4027,14 +4027,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
return true;
}
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
{
hash ^= (__force u32)flow_get_u32_dst(flow) ^
(__force u32)flow_get_u32_src(flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
+
/* discard lowest hash bit to deal with the common even ports pattern */
- return hash >> 1;
+ if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+ xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+ return hash >> 1;
+
+ return hash;
}
/* Generate hash based on xmit policy. If @skb is given it is used to linearize
@@ -4064,7 +4069,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
}
/**
@@ -5221,7 +5226,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
@@ -5259,7 +5264,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
/* L4 */
memcpy(&hash, &flow.ports.ports, sizeof(hash));
/* L3 */
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
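The bonding change threads the xmit policy into bond_ip_hash() so the lowest hash bit, which tracks the common even-ports pattern, is only discarded for the layer3+4 policies that fold ports into the hash. The resulting function shape, as a standalone sketch (simplified types, hypothetical values):

    #include <stdint.h>
    #include <stdio.h>

    enum policy { POLICY_LAYER23, POLICY_LAYER34 };

    static uint32_t ip_hash(uint32_t hash, uint32_t src, uint32_t dst,
                            enum policy p)
    {
        hash ^= dst ^ src;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        /* port-based policies: drop the port-parity bit */
        if (p == POLICY_LAYER34)
            return hash >> 1;

        return hash;
    }

    int main(void)
    {
        printf("%u\n", ip_hash(0x1234, 0x0a000001, 0x0a000002, POLICY_LAYER34));
        printf("%u\n", ip_hash(0x1234, 0x0a000001, 0x0a000002, POLICY_LAYER23));
        return 0;
    }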
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index fff259247d52..ac760fd39282 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -170,6 +170,7 @@ config PCH_CAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/ctucanfd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 1e660afcb61b..0af85983634c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -16,6 +16,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig
new file mode 100644
index 000000000000..48963efc7f19
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Kconfig
@@ -0,0 +1,34 @@
+config CAN_CTUCANFD
+ tristate "CTU CAN-FD IP core"
+ help
+ This driver adds support for the CTU CAN FD open-source IP core.
+ More documentation and the core sources can be found at the project
+ page (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core).
+ The integration of the core into a Xilinx Zynq system as a platform
+ driver is also available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top).
+ An implementation for an Intel FPGA-based PCI Express board is available
+ from the project (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd) and
+ one for an Intel SoC from (https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd).
+ A guidepost to the CTU FEE CAN bus projects is at https://canbus.pages.fel.cvut.cz/ .
+
+config CAN_CTUCANFD_PCI
+ tristate "CTU CAN-FD IP core PCI/PCIe driver"
+ depends on CAN_CTUCANFD
+ depends on PCI
+ help
+ This driver adds PCI/PCIe support for the CTU CAN-FD IP core.
+ The project providing the FPGA design for the Intel EP4CGX15 based DB4CGX15
+ PCIe board, with a transceiver riser shield designed by PiKRON.com, is
+ available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd .
+
+config CAN_CTUCANFD_PLATFORM
+ tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver"
+ depends on CAN_CTUCANFD
+ depends on OF || COMPILE_TEST
+ help
+ The core has been tested together with an OpenCores SJA1000,
+ modified to tolerate CAN FD frames, on MicroZed Zynq based
+ MZ_APO education kits designed by Petr Porazil from the PiKRON.com
+ company. The FPGA design is at https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top.
+ The kit is described at the Computer Architectures course pages,
+ https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start .
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile
new file mode 100644
index 000000000000..8078f1f2c30f
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for the CTU CAN-FD IP module drivers
+#
+
+obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o
+ctucanfd-y := ctucanfd_base.o
+
+obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o
+obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o
diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h
new file mode 100644
index 000000000000..0e9904f6a05d
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#ifndef __CTUCANFD__
+#define __CTUCANFD__
+
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+#include <linux/list.h>
+
+enum ctu_can_fd_can_registers;
+
+struct ctucan_priv {
+ struct can_priv can; /* must be first member! */
+
+ void __iomem *mem_base;
+ u32 (*read_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg);
+ void (*write_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val);
+
+ unsigned int txb_head;
+ unsigned int txb_tail;
+ u32 txb_prio;
+ unsigned int ntxbufs;
+ spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct clk *can_clk;
+
+ int irq_flags;
+ unsigned long drv_flags;
+
+ u32 rxfrm_first_word;
+
+ struct list_head peers_on_pdev;
+};
+
+/**
+ * ctucan_probe_common - Device type independent registration call
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * @dev: Handle to the generic device structure
+ * @addr: Base address of CTU CAN FD core address
+ * @irq: Interrupt number
+ * @ntxbufs: Number of implemented Tx buffers
+ * @can_clk_rate: Clock rate; if 0, the clock rate is taken from the device node
+ * @pm_enable_call: Whether pm_runtime_enable should be called
+ * @set_drvdata_fnc: Function to set network driver data for physical device
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ctucan_probe_common(struct device *dev, void __iomem *addr,
+ int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate,
+ int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev,
+ struct net_device *ndev));
+
+int ctucan_suspend(struct device *dev) __maybe_unused;
+int ctucan_resume(struct device *dev) __maybe_unused;
+
+#endif /*__CTUCANFD__*/
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
new file mode 100644
index 000000000000..7a4550f60abb
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -0,0 +1,1490 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+
+#include "ctucanfd.h"
+#include "ctucanfd_kregs.h"
+#include "ctucanfd_kframe.h"
+
+#ifdef DEBUG
+#define ctucan_netdev_dbg(ndev, args...) \
+ netdev_dbg(ndev, args)
+#else
+#define ctucan_netdev_dbg(...) do { } while (0)
+#endif
+
+#define CTUCANFD_ID 0xCAFD
+
+/* TX buffer rotation:
+ * - when a buffer transitions to the empty state, rotate order and priorities
+ * - if more buffers appear to transition at the same time, rotate by the number of buffers
+ * - it may be assumed that buffers transition to the empty state in FIFO order (because we
+ *   manage priorities that way)
+ * - when filling a frame, do not rotate anything, just increment the buffer modulo counter
+ */
+
+#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1
+
+#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \
+ [st] = #st
+
+enum ctucan_txtb_status {
+ TXT_NOT_EXIST = 0x0,
+ TXT_RDY = 0x1,
+ TXT_TRAN = 0x2,
+ TXT_ABTP = 0x3,
+ TXT_TOK = 0x4,
+ TXT_ERR = 0x6,
+ TXT_ABT = 0x7,
+ TXT_ETY = 0x8,
+};
+
+enum ctucan_txtb_command {
+ TXT_CMD_SET_EMPTY = 0x01,
+ TXT_CMD_SET_READY = 0x02,
+ TXT_CMD_SET_ABORT = 0x04
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 190,
+ .tseg2_min = 1,
+ .tseg2_max = 63,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 8,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 94,
+ .tseg2_min = 1,
+ .tseg2_max = 31,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 2,
+ .brp_inc = 1,
+};
+
+static const char * const ctucan_state_strings[CAN_STATE_MAX] = {
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING)
+};
+
+static void ctucan_write32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32(val, priv->mem_base + reg);
+}
+
+static void ctucan_write32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32be(val, priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32(priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32be(priv->mem_base + reg);
+}
+
+static inline void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg,
+ u32 val)
+{
+ priv->write_reg(priv, reg, val);
+}
+
+static inline u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg)
+{
+ return priv->read_reg(priv, reg);
+}
+
+static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base,
+ u32 offset, u32 val)
+{
+ priv->write_reg(priv, buf_base + offset, val);
+}
+
+#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS)))
+#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE)))
+
+/**
+ * ctucan_state_to_str() - Converts CAN controller state code to corresponding text
+ * @state: CAN controller state code
+ *
+ * Return: Pointer to string representation of the error state
+ */
+static const char *ctucan_state_to_str(enum can_state state)
+{
+ const char *txt = NULL;
+
+ if (state >= 0 && state < CAN_STATE_MAX)
+ txt = ctucan_state_strings[state];
+ return txt ? txt : "UNKNOWN";
+}
+
+/**
+ * ctucan_reset() - Issues software reset request to CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset
+ */
+static int ctucan_reset(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int i = 100;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST);
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+
+ do {
+ u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID,
+ ctucan_read32(priv, CTUCANFD_DEVICE_ID));
+
+ if (device_id == CTUCANFD_ID)
+ return 0;
+ if (!i--) {
+ netdev_warn(ndev, "device did not leave reset\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(100, 200);
+ } while (1);
+}
+
+/**
+ * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ * @bt: Pointer to Bit timing structure
+ * @nominal: True - Nominal bit timing, False - Data bit timing
+ *
+ * Return: 0 - OK, -%EPERM if controller is enabled
+ */
+static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int max_ph1_len = 31;
+ u32 btr = 0;
+ u32 prop_seg = bt->prop_seg;
+ u32 phase_seg1 = bt->phase_seg1;
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ if (nominal)
+ max_ph1_len = 63;
+
+ /* The timing calculation functions have constraints only on tseg1, which is prop_seg +
+ * phase_seg1 combined. tseg1 is then split in half and stored into prop_seg and phase_seg1.
+ * In CTU CAN FD, PROP is 7/6 bits wide but PH1 only 6/5, so we must re-distribute the
+ * values here.
+ */
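+ /* Illustrative example (values chosen for this note only): with nominal timing,
+ * max_ph1_len is 63, so prop_seg = 60, phase_seg1 = 70 is re-distributed below
+ * to prop_seg = 67, phase_seg1 = 63.
+ */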
+ if (phase_seg1 > max_ph1_len) {
+ prop_seg += phase_seg1 - max_ph1_len;
+ phase_seg1 = max_ph1_len;
+ bt->prop_seg = prop_seg;
+ bt->phase_seg1 = phase_seg1;
+ }
+
+ if (nominal) {
+ btr = FIELD_PREP(REG_BTR_PROP, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_BRP, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR, btr);
+ } else {
+ btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR_FD, btr);
+ }
+
+ return 0;
+}
+
+/**
+ * ctucan_set_bittiming() - CAN set nominal bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ /* Note that bt may be modified here */
+ return ctucan_set_btr(ndev, bt, true);
+}
+
+/**
+ * ctucan_set_data_bittiming() - CAN set data bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_data_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ /* Note that dbt may be modified here */
+ return ctucan_set_btr(ndev, dbt, false);
+}
+
+/**
+ * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM if controller is enabled
+ */
+static int ctucan_set_secondary_sample_point(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ int ssp_offset = 0;
+ u32 ssp_cfg = 0; /* No SSP by default */
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ /* Use SSP for bit-rates above 1 Mbits/s */
+ if (dbt->bitrate > 1000000) {
+ /* Calculate SSP in minimal time quanta */
+ ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate;
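+ /* Illustrative example (assumed values): a 100 MHz core clock, 2 Mbit/s data
+ * bitrate and sample_point = 750 (75.0%, in tenths of a percent) give
+ * ssp_offset = 100000 * 750 / 2000000 = 37 minimal time quanta.
+ */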
+
+ if (ssp_offset > 127) {
+ netdev_warn(ndev, "SSP offset saturated to 127\n");
+ ssp_offset = 127;
+ }
+
+ ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ }
+
+ ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
+
+ return 0;
+}
+
+/**
+ * ctucan_set_mode() - Sets the CTU CAN FD's mode
+ * @priv: Pointer to private data
+ * @mode: Pointer to controller modes to be set
+ */
+static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode)
+{
+ u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ?
+ (mode_reg | REG_MODE_ILBP) :
+ (mode_reg & ~REG_MODE_ILBP);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ?
+ (mode_reg | REG_MODE_BMM) :
+ (mode_reg & ~REG_MODE_BMM);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD) ?
+ (mode_reg | REG_MODE_FDE) :
+ (mode_reg & ~REG_MODE_FDE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ?
+ (mode_reg | REG_MODE_ACF) :
+ (mode_reg & ~REG_MODE_ACF);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ?
+ (mode_reg | REG_MODE_NISOFD) :
+ (mode_reg & ~REG_MODE_NISOFD);
+
+ /* One shot mode supported indirectly via Retransmit limit */
+ mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF);
+ mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ?
+ (mode_reg | REG_MODE_RTRLE) :
+ (mode_reg & ~REG_MODE_RTRLE);
+
+ /* Some bits fixed:
+ * TSTM - Off, User shall not be able to change REC/TEC by hand during operation
+ */
+ mode_reg &= ~REG_MODE_TSTM;
+
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+}
+
+/**
+ * ctucan_chip_start() - This routine starts the driver
+ * @ndev: Pointer to net_device structure
+ *
+ * The routine expects that the chip is in the reset state. It sets up initial
+ * Tx buffers for FIFO priorities, sets the bittiming, enables the interrupts,
+ * switches the core to operational mode and changes the controller
+ * state to %CAN_STATE_STOPPED.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_chip_start(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 int_ena, int_msk;
+ u32 mode_reg;
+ int err;
+ struct can_ctrlmode mode;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ priv->txb_prio = 0x01234567;
+ priv->txb_head = 0;
+ priv->txb_tail = 0;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio);
+
+ /* Configure bit-rates and ssp */
+ err = ctucan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_data_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_secondary_sample_point(ndev);
+ if (err < 0)
+ return err;
+
+ /* Configure modes */
+ mode.flags = priv->can.ctrlmode;
+ mode.mask = 0xFFFFFFFF;
+ ctucan_set_mode(priv, &mode);
+
+ /* Configure interrupts */
+ int_ena = REG_INT_STAT_RBNEI |
+ REG_INT_STAT_TXBHCI |
+ REG_INT_STAT_EWLI |
+ REG_INT_STAT_FCSI;
+
+ /* Bus error reporting -> Allow Error/Arb.lost interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ int_ena |= REG_INT_STAT_ALI |
+ REG_INT_STAT_BEI;
+ }
+
+ int_msk = ~int_ena; /* Mask all disabled interrupts */
+
+ /* It's after reset, so there is no need to clear anything */
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
+ ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);
+
+ /* Controller enters ERROR_ACTIVE on initial FCSI */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Enable the controller */
+ mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+ mode_reg |= REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+
+ return 0;
+}
+
+/**
+ * ctucan_do_set_mode() - Sets mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: Tells the mode of the driver
+ *
+ * This checks the requested mode and calls the corresponding routine to set it.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ return ret;
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ctucan_get_tx_status() - Gets status of TXT buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: Status of TXT buffer
+ */
+static inline enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf)
+{
+ u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS);
+ enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7;
+
+ return status;
+}
+
+/**
+ * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: True - the frame can be inserted into the TXT Buffer; False - if attempted,
+ * the frame will not be inserted into the TXT Buffer
+ */
+static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
+{
+ enum ctucan_txtb_status buf_status;
+
+ buf_status = ctucan_get_tx_status(priv, buf);
+ if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP)
+ return false;
+
+ return true;
+}
+
+/**
+ * ctucan_insert_frame() - Inserts frame to TXT buffer
+ * @priv: Pointer to private data
+ * @cf: Pointer to CAN frame to be inserted
+ * @buf: TXT Buffer index to which frame is inserted (0-based)
+ * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
+ *
+ * Return: True - Frame inserted successfully
+ * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
+ */
+static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
+ bool isfdf)
+{
+ u32 buf_base;
+ u32 ffw = 0;
+ u32 idw = 0;
+ unsigned int i;
+
+ if (buf >= priv->ntxbufs)
+ return false;
+
+ if (!ctucan_is_txt_buf_writable(priv, buf))
+ return false;
+
+ if (cf->len > CANFD_MAX_DLEN)
+ return false;
+
+ /* Prepare Frame format */
+ if (cf->can_id & CAN_RTR_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_IDE;
+
+ if (isfdf) {
+ ffw |= REG_FRAME_FORMAT_W_FDF;
+ if (cf->flags & CANFD_BRS)
+ ffw |= REG_FRAME_FORMAT_W_BRS;
+ }
+
+ ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len));
+
+ /* Prepare identifier */
+ if (cf->can_id & CAN_EFF_FLAG)
+ idw = cf->can_id & CAN_EFF_MASK;
+ else
+ idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK);
+
+ /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */
+ buf_base = (buf + 1) * 0x100;
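+ /* TXT buffer register windows are mapped at offsets 0x100, 0x200, ...
+ * (cf. CTUCANFD_TXTB1_DATA_1 and friends in ctucanfd_kregs.h)
+ */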
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw);
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw);
+
+ /* Write Data payload */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i += 4) {
+ u32 data = le32_to_cpu(*(__le32 *)(cf->data + i));
+
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ctucan_give_txtb_cmd() - Applies command on TXT buffer
+ * @priv: Pointer to private data
+ * @cmd: Command to give
+ * @buf: Buffer index (0-based)
+ */
+static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf)
+{
+ u32 tx_cmd = cmd;
+
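+ /* The per-buffer select bits TXB1..TXB4 start at bit 8 of TX_COMMAND
+ * (see REG_TX_COMMAND_TXB1), hence the shift by buf + 8
+ */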
+ tx_cmd |= 1 << (buf + 8);
+ ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd);
+}
+
+/**
+ * ctucan_start_xmit() - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
+ * populates its fields to start the transmission.
+ *
+ * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
+ * negative return values reserved for error cases
+ */
+static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 txtb_id;
+ bool ok;
+ unsigned long flags;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG!, no TXB free when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
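+ /* txb_head and txb_tail run free; the modulo maps them onto a HW TXT buffer */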
+ txtb_id = priv->txb_head % priv->ntxbufs;
+ ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
+ ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));
+
+ if (!ok) {
+ netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
+ kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ can_put_echo_skb(skb, ndev, txtb_id, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id);
+ priv->txb_head++;
+
+ /* Check if all TX buffers are full */
+ if (!CTU_CAN_FD_TXTNF(priv))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ctucan_read_rx_frame() - Reads frame from RX FIFO
+ * @priv: Pointer to CTU CAN FD's private data
+ * @cf: Pointer to CAN frame struct
+ * @ffw: Previously read frame format word
+ *
+ * Note: Frame format word must be read separately and provided in 'ffw'.
+ */
+static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
+{
+ u32 idw;
+ unsigned int i;
+ unsigned int wc;
+ unsigned int len;
+
+ idw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw))
+ cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (idw >> 18) & CAN_SFF_MASK;
+
+ /* BRS, ESI, RTR Flags */
+ cf->flags = 0;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
+ cf->flags |= CANFD_BRS;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw))
+ cf->flags |= CANFD_ESI;
+ } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
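+ /* RWCNT counts the words which remain to be read: one identifier word,
+ * two timestamp words and the data words, hence data words = RWCNT - 3
+ */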
+ wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3;
+
+ /* DLC */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) {
+ len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw);
+ } else {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ len = wc << 2;
+ else
+ len = 8;
+ }
+ cf->len = len;
+ if (unlikely(len > wc * 4))
+ len = wc * 4;
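+ /* The clamp bounds only how many data bytes are read below; cf->len above
+ * keeps the DLC-derived length
+ */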
+
+ /* Timestamp - Read and throw away */
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+
+ /* Data */
+ for (i = 0; i < len; i += 4) {
+ u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ *(__le32 *)(cf->data + i) = cpu_to_le32(data);
+ }
+ while (unlikely(i < wc * 4)) {
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ i += 4;
+ }
+}
+
+/**
+ * ctucan_rx() - Called from CAN ISR to complete the received frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (NAPI poll) to process Rx frames. It does minimal
+ * processing and invokes netif_receive_skb() to complete further processing.
+ * Return: 1 when a frame is passed to the network layer, 0 when the first frame word was read
+ * but the system is temporarily out of free SKBs and the allocation is left to be
+ * resolved later, -%EAGAIN when the Rx FIFO is empty.
+ */
+static int ctucan_rx(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 ffw;
+
+ if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) {
+ ffw = priv->rxfrm_first_word;
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ } else {
+ ffw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ }
+
+ if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw))
+ return -EAGAIN;
+
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ priv->rxfrm_first_word = ffw;
+ set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ return 0;
+ }
+
+ ctucan_read_rx_frame(priv, cf, ffw);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * ctucan_read_fault_state() - Reads the CTU CAN FD's fault confinement state.
+ * @priv: Pointer to private data
+ *
+ * Return: Fault confinement state of the controller
+ */
+static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv)
+{
+ u32 fs;
+ u32 rec_tec;
+ u32 ewl;
+
+ fs = ctucan_read32(priv, CTUCANFD_EWL);
+ rec_tec = ctucan_read32(priv, CTUCANFD_REC);
+ ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs);
+
+ if (FIELD_GET(REG_EWL_ERA, fs)) {
+ if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) &&
+ ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec))
+ return CAN_STATE_ERROR_ACTIVE;
+ else
+ return CAN_STATE_ERROR_WARNING;
+ } else if (FIELD_GET(REG_EWL_ERP, fs)) {
+ return CAN_STATE_ERROR_PASSIVE;
+ } else if (FIELD_GET(REG_EWL_BOF, fs)) {
+ return CAN_STATE_BUS_OFF;
+ }
+
+ WARN(true, "Invalid error state");
+ return CAN_STATE_ERROR_PASSIVE;
+}
+
+/**
+ * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller
+ * @priv: Pointer to private data
+ * @bec: Pointer to Error counter structure
+ */
+static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec)
+{
+ u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC);
+
+ bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs);
+ bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs);
+}
+
+/**
+ * ctucan_err_interrupt() - Error frame ISR
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt and it will check the type of error and forward the error
+ * frame to upper layers.
+ */
+static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state;
+ struct can_berr_counter bec;
+ u32 err_capt_alc;
+ int dologerr = net_ratelimit();
+
+ ctucan_get_rec_tec(priv, &bec);
+ state = ctucan_read_fault_state(priv);
+ err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);
+
+ if (dologerr)
+ netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n",
+ __func__, isr, bec.rxerr, bec.txerr,
+ FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc));
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ /* EWLI: error warning limit condition met
+ * FCSI: fault confinement state changed
+ * ALI: arbitration lost (just informative)
+ * BEI: bus error interrupt
+ */
+ if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) {
+ netdev_info(ndev, "state changes from %s to %s\n",
+ ctucan_state_to_str(priv->can.state),
+ ctucan_state_to_str(state));
+
+ if (priv->can.state == state)
+ netdev_warn(ndev,
+ "current and previous states are the same! (missed interrupt?)\n");
+
+ priv->can.state = state;
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ if (skb) {
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ default:
+ netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+ state, ctucan_state_to_str(state));
+ break;
+ }
+ }
+
+ /* Check for Arbitration Lost interrupt */
+ if (FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ if (dologerr)
+ netdev_info(ndev, "arbitration lost\n");
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for Bus Error interrupt */
+ if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
+ netdev_info(ndev, "bus error\n");
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/**
+ * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
+ * @napi: NAPI structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the poll routine for the rx part. It processes packets up to the maximum quota value.
+ *
+ * Return: Number of packets received
+ */
+static int ctucan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int work_done = 0;
+ u32 status;
+ u32 framecnt;
+ int res = 1;
+
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ while (framecnt && work_done < quota && res > 0) {
+ res = ctucan_rx(ndev);
+ work_done++;
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ }
+
+ /* Check for RX FIFO Overflow */
+ status = ctucan_read32(priv, CTUCANFD_STATUS);
+ if (FIELD_GET(REG_STATUS_DOR, status)) {
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_info(ndev, "rx_poll: rx fifo overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+
+ /* Clear Data Overrun */
+ ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
+ }
+
+ if (work_done)
+ can_led_event(ndev, CAN_LED_EVENT_RX);
+
+ if (!framecnt && res != 0) {
+ if (napi_complete_done(napi, work_done)) {
+ /* Clear and enable RBNEI. It is level-triggered, so
+ * there is no race condition.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI);
+ }
+ }
+
+ return work_done;
+}
+
+/**
+ * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers
+ * @ndev: net_device pointer
+ */
+static void ctucan_rotate_txb_prio(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 prio = priv->txb_prio;
+
+ prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF);
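+ /* Worked example, assuming ntxbufs == 4: the reset value 0x01234567 rotates
+ * to 0x12345674; only the low nibbles (TXT1P..TXT4P) matter to the hardware
+ */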
+ ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio);
+ priv->txb_prio = prio;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio);
+}
+
+/**
+ * ctucan_tx_interrupt() - Tx done Isr
+ * @ndev: net_device pointer
+ */
+static void ctucan_tx_interrupt(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ bool first = true;
+ bool some_buffers_processed;
+ unsigned long flags;
+ enum ctucan_txtb_status txtb_status;
+ u32 txtb_id;
+
+ /* read tx_status
+ * if txb[n].finished (bit 2)
+ * if ok -> echo
+ * if error / aborted -> ?? (find how to handle oneshot mode)
+ * txb_tail++
+ */
+ do {
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ some_buffers_processed = false;
+ while ((int)(priv->txb_head - priv->txb_tail) > 0) {
+ txtb_id = priv->txb_tail % priv->ntxbufs;
+ txtb_status = ctucan_get_tx_status(priv, txtb_id);
+
+ ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status);
+
+ switch (txtb_status) {
+ case TXT_TOK:
+ ctucan_netdev_dbg(ndev, "TXT_OK\n");
+ stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_packets++;
+ break;
+ case TXT_ERR:
+ /* This indicates that the retransmit limit has been reached. Obviously
+ * we should not echo the frame, but also not indicate any kind of
+ * error. If desired, it was already reported (possibly multiple
+ * times) on each arbitration loss.
+ */
+ netdev_warn(ndev, "TXB in Error state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ case TXT_ABT:
+ /* Same as for TXT_ERR, only with different cause. We *could*
+ * re-queue the frame, but multiqueue/abort is not supported yet
+ * anyway.
+ */
+ netdev_warn(ndev, "TXB in Aborted state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ default:
+ /* Bug only if the first buffer is not finished, otherwise it is
+ * pretty much expected.
+ */
+ if (first) {
+ netdev_err(ndev,
+ "BUG: TXB#%u not in a finished state (0x%x)!\n",
+ txtb_id, txtb_status);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ /* do not clear nor wake */
+ return;
+ }
+ goto clear;
+ }
+ priv->txb_tail++;
+ first = false;
+ some_buffers_processed = true;
+ /* Adjust priorities *before* marking the buffer as empty. */
+ ctucan_rotate_txb_prio(ndev);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id);
+ }
+clear:
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* If no buffers were processed this time, we cannot clear - that would introduce
+ * a race condition.
+ */
+ if (some_buffers_processed) {
+ /* Clear the interrupt again. We do not want to receive the interrupt again
+ * for a buffer already handled. If it is the last finished one, the stale
+ * interrupt would otherwise be logged as spurious.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI);
+ }
+ } while (some_buffers_processed);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* Check if at least one TX buffer is free */
+ if (CTU_CAN_FD_TXTNF(priv))
+ netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
+
+/**
+ * ctucan_interrupt() - CAN Isr
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * This is the CTU CAN FD ISR. It checks for the type of interrupt
+ * and invokes the corresponding ISR.
+ *
+ * Return:
+ * IRQ_NONE - if no interrupt was pending (first read of INT_STAT is zero), IRQ_HANDLED otherwise
+ */
+static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 isr, icr;
+ u32 imask;
+ int irq_loops;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ for (irq_loops = 0; irq_loops < 10000; irq_loops++) {
+ /* Get the interrupt status */
+ isr = ctucan_read32(priv, CTUCANFD_INT_STAT);
+
+ if (!isr)
+ return irq_loops ? IRQ_HANDLED : IRQ_NONE;
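+ /* IRQ_NONE on the first pass tells the core that this device did not
+ * raise the interrupt (relevant when the IRQ line is shared)
+ */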
+
+ /* Receive Buffer Not Empty Interrupt */
+ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) {
+ ctucan_netdev_dbg(ndev, "RXBNEI\n");
+ /* Mask RXBNEI the first, then clear interrupt and schedule NAPI. Even if
+ * another IRQ fires, RBNEI will always be 0 (masked).
+ */
+ icr = REG_INT_STAT_RBNEI;
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ napi_schedule(&priv->napi);
+ }
+
+ /* TXT Buffer HW Command Interrupt */
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ ctucan_netdev_dbg(ndev, "TXBHCI\n");
+ /* Cleared inside */
+ ctucan_tx_interrupt(ndev);
+ }
+
+ /* Error interrupts */
+ if (FIELD_GET(REG_INT_STAT_EWLI, isr) ||
+ FIELD_GET(REG_INT_STAT_FCSI, isr) ||
+ FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI);
+
+ ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ ctucan_err_interrupt(ndev, isr);
+ }
+ /* Ignore RI, TI, LFI, RFI, BSI */
+ }
+
+ netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr);
+
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ int i;
+
+ netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n",
+ priv->txb_head, priv->txb_tail);
+ for (i = 0; i < priv->ntxbufs; i++) {
+ u32 status = ctucan_get_tx_status(priv, i);
+
+ netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status);
+ }
+ }
+
+ imask = 0xffffffff;
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ctucan_chip_stop() - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It disables the
+ * interrupts and disables the controller.
+ */
+static void ctucan_chip_stop(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 mask = 0xffffffff;
+ u32 mode;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ /* Disable interrupts and disable CAN */
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask);
+ mode = ctucan_read32(priv, CTUCANFD_MODE);
+ mode &= ~REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * ctucan_open() - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_open(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_reset;
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret) {
+ netdev_warn(ndev, "open_candev failed!\n");
+ goto err_open;
+ }
+
+ ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err_irq;
+ }
+
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ goto err_chip_start;
+ }
+
+ netdev_info(ndev, "ctu_can_fd device registered\n");
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_chip_start:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ close_candev(ndev);
+err_open:
+err_reset:
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+/**
+ * ctucan_close() - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int ctucan_close(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ ctucan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+/**
+ * ctucan_get_berr_counter() - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ctucan_get_rec_tec(priv, bec);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+static const struct net_device_ops ctucan_netdev_ops = {
+ .ndo_open = ctucan_open,
+ .ndo_stop = ctucan_close,
+ .ndo_start_xmit = ctucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+int ctucan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_suspend);
+
+int ctucan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_resume);
+
+int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate, int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev))
+{
+ struct ctucan_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs);
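+ /* the second argument sets echo_skb_max, matching the TXT buffer count */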
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ spin_lock_init(&priv->tx_lock);
+ INIT_LIST_HEAD(&priv->peers_on_pdev);
+ priv->ntxbufs = ntxbufs;
+ priv->dev = dev;
+ priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
+ priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.do_set_mode = ctucan_do_set_mode;
+
+ /* Needed for timing adjustment to be performed as soon as possible */
+ priv->can.do_set_bittiming = ctucan_set_bittiming;
+ priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+
+ priv->can.do_get_berr_counter = ctucan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
+ | CAN_CTRLMODE_LISTENONLY
+ | CAN_CTRLMODE_FD
+ | CAN_CTRLMODE_PRESUME_ACK
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_FD_NON_ISO
+ | CAN_CTRLMODE_ONE_SHOT;
+ priv->mem_base = addr;
+
+ /* Get IRQ for the device */
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ if (set_drvdata_fnc)
+ set_drvdata_fnc(dev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &ctucan_netdev_ops;
+
+ /* Getting the can_clk info */
+ if (!can_clk_rate) {
+ priv->can_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ can_clk_rate = clk_get_rate(priv->can_clk);
+ }
+
+ priv->write_reg = ctucan_write32_le;
+ priv->read_reg = ctucan_read32_le;
+
+ if (pm_enable_call)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ goto err_pmdisable;
+ }
+
+ /* Check for big-endianness and set the corresponding I/O accessors */
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ priv->write_reg = ctucan_write32_be;
+ priv->read_reg = ctucan_read32_be;
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ netdev_err(ndev, "CTU_CAN_FD signature not found\n");
+ ret = -ENODEV;
+ goto err_deviceoff;
+ }
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_deviceoff;
+
+ priv->can.clock.freq = can_clk_rate;
+
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "fail to register failed (err=%d)\n", ret);
+ goto err_deviceoff;
+ }
+
+ devm_can_led_init(ndev);
+
+ pm_runtime_put(dev);
+
+ netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n",
+ priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs);
+
+ return 0;
+
+err_deviceoff:
+ pm_runtime_put(priv->dev);
+err_pmdisable:
+ if (pm_enable_call)
+ pm_runtime_disable(dev);
+err_free:
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ return ret;
+}
+EXPORT_SYMBOL(ctucan_probe_common);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>");
+MODULE_DESCRIPTION("CTU CAN FD interface");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
new file mode 100644
index 000000000000..3491299eaac2
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+
+#include <linux/bits.h>
+
+/* CAN_Frame_format memory map */
+enum ctu_can_fd_can_frame_format {
+ CTUCANFD_FRAME_FORMAT_W = 0x0,
+ CTUCANFD_IDENTIFIER_W = 0x4,
+ CTUCANFD_TIMESTAMP_L_W = 0x8,
+ CTUCANFD_TIMESTAMP_U_W = 0xc,
+ CTUCANFD_DATA_1_4_W = 0x10,
+ CTUCANFD_DATA_5_8_W = 0x14,
+ CTUCANFD_DATA_61_64_W = 0x4c,
+};
+
+/* CAN_FD_Frame_format memory region */
+
+/* FRAME_FORMAT_W registers */
+#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0)
+#define REG_FRAME_FORMAT_W_RTR BIT(5)
+#define REG_FRAME_FORMAT_W_IDE BIT(6)
+#define REG_FRAME_FORMAT_W_FDF BIT(7)
+#define REG_FRAME_FORMAT_W_BRS BIT(9)
+#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10)
+#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11)
+
+/* IDENTIFIER_W registers */
+#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0)
+#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18)
+
+/* TIMESTAMP_L_W registers */
+#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0)
+
+/* TIMESTAMP_U_W registers */
+#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0)
+
+/* DATA_1_4_W registers */
+#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0)
+#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8)
+#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16)
+#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24)
+
+/* DATA_5_8_W registers */
+#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0)
+#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8)
+#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16)
+#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24)
+
+/* DATA_61_64_W registers */
+#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0)
+#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8)
+#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16)
+#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
new file mode 100644
index 000000000000..edc1c1a24348
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+
+#include <linux/bits.h>
+
+/* CAN_Registers memory map */
+enum ctu_can_fd_can_registers {
+ CTUCANFD_DEVICE_ID = 0x0,
+ CTUCANFD_VERSION = 0x2,
+ CTUCANFD_MODE = 0x4,
+ CTUCANFD_SETTINGS = 0x6,
+ CTUCANFD_STATUS = 0x8,
+ CTUCANFD_COMMAND = 0xc,
+ CTUCANFD_INT_STAT = 0x10,
+ CTUCANFD_INT_ENA_SET = 0x14,
+ CTUCANFD_INT_ENA_CLR = 0x18,
+ CTUCANFD_INT_MASK_SET = 0x1c,
+ CTUCANFD_INT_MASK_CLR = 0x20,
+ CTUCANFD_BTR = 0x24,
+ CTUCANFD_BTR_FD = 0x28,
+ CTUCANFD_EWL = 0x2c,
+ CTUCANFD_ERP = 0x2d,
+ CTUCANFD_FAULT_STATE = 0x2e,
+ CTUCANFD_REC = 0x30,
+ CTUCANFD_TEC = 0x32,
+ CTUCANFD_ERR_NORM = 0x34,
+ CTUCANFD_ERR_FD = 0x36,
+ CTUCANFD_CTR_PRES = 0x38,
+ CTUCANFD_FILTER_A_MASK = 0x3c,
+ CTUCANFD_FILTER_A_VAL = 0x40,
+ CTUCANFD_FILTER_B_MASK = 0x44,
+ CTUCANFD_FILTER_B_VAL = 0x48,
+ CTUCANFD_FILTER_C_MASK = 0x4c,
+ CTUCANFD_FILTER_C_VAL = 0x50,
+ CTUCANFD_FILTER_RAN_LOW = 0x54,
+ CTUCANFD_FILTER_RAN_HIGH = 0x58,
+ CTUCANFD_FILTER_CONTROL = 0x5c,
+ CTUCANFD_FILTER_STATUS = 0x5e,
+ CTUCANFD_RX_MEM_INFO = 0x60,
+ CTUCANFD_RX_POINTERS = 0x64,
+ CTUCANFD_RX_STATUS = 0x68,
+ CTUCANFD_RX_SETTINGS = 0x6a,
+ CTUCANFD_RX_DATA = 0x6c,
+ CTUCANFD_TX_STATUS = 0x70,
+ CTUCANFD_TX_COMMAND = 0x74,
+ CTUCANFD_TX_PRIORITY = 0x78,
+ CTUCANFD_ERR_CAPT = 0x7c,
+ CTUCANFD_ALC = 0x7e,
+ CTUCANFD_TRV_DELAY = 0x80,
+ CTUCANFD_SSP_CFG = 0x82,
+ CTUCANFD_RX_FR_CTR = 0x84,
+ CTUCANFD_TX_FR_CTR = 0x88,
+ CTUCANFD_DEBUG_REGISTER = 0x8c,
+ CTUCANFD_YOLO_REG = 0x90,
+ CTUCANFD_TIMESTAMP_LOW = 0x94,
+ CTUCANFD_TIMESTAMP_HIGH = 0x98,
+ CTUCANFD_TXTB1_DATA_1 = 0x100,
+ CTUCANFD_TXTB1_DATA_2 = 0x104,
+ CTUCANFD_TXTB1_DATA_20 = 0x14c,
+ CTUCANFD_TXTB2_DATA_1 = 0x200,
+ CTUCANFD_TXTB2_DATA_2 = 0x204,
+ CTUCANFD_TXTB2_DATA_20 = 0x24c,
+ CTUCANFD_TXTB3_DATA_1 = 0x300,
+ CTUCANFD_TXTB3_DATA_2 = 0x304,
+ CTUCANFD_TXTB3_DATA_20 = 0x34c,
+ CTUCANFD_TXTB4_DATA_1 = 0x400,
+ CTUCANFD_TXTB4_DATA_2 = 0x404,
+ CTUCANFD_TXTB4_DATA_20 = 0x44c,
+};
+
+/* Control_registers memory region */
+
+/* DEVICE_ID VERSION registers */
+#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0)
+#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16)
+#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24)
+
+/* MODE SETTINGS registers */
+#define REG_MODE_RST BIT(0)
+#define REG_MODE_BMM BIT(1)
+#define REG_MODE_STM BIT(2)
+#define REG_MODE_AFM BIT(3)
+#define REG_MODE_FDE BIT(4)
+#define REG_MODE_ACF BIT(7)
+#define REG_MODE_TSTM BIT(8)
+#define REG_MODE_RTRLE BIT(16)
+#define REG_MODE_RTRTH GENMASK(20, 17)
+#define REG_MODE_ILBP BIT(21)
+#define REG_MODE_ENA BIT(22)
+#define REG_MODE_NISOFD BIT(23)
+#define REG_MODE_PEX BIT(24)
+#define REG_MODE_TBFBO BIT(25)
+#define REG_MODE_FDRF BIT(26)
+
+/* STATUS registers */
+#define REG_STATUS_RXNE BIT(0)
+#define REG_STATUS_DOR BIT(1)
+#define REG_STATUS_TXNF BIT(2)
+#define REG_STATUS_EFT BIT(3)
+#define REG_STATUS_RXS BIT(4)
+#define REG_STATUS_TXS BIT(5)
+#define REG_STATUS_EWL BIT(6)
+#define REG_STATUS_IDLE BIT(7)
+#define REG_STATUS_PEXS BIT(8)
+
+/* COMMAND registers */
+#define REG_COMMAND_RRB BIT(2)
+#define REG_COMMAND_CDO BIT(3)
+#define REG_COMMAND_ERCRST BIT(4)
+#define REG_COMMAND_RXFCRST BIT(5)
+#define REG_COMMAND_TXFCRST BIT(6)
+#define REG_COMMAND_CPEXS BIT(7)
+
+/* INT_STAT registers */
+#define REG_INT_STAT_RXI BIT(0)
+#define REG_INT_STAT_TXI BIT(1)
+#define REG_INT_STAT_EWLI BIT(2)
+#define REG_INT_STAT_DOI BIT(3)
+#define REG_INT_STAT_FCSI BIT(4)
+#define REG_INT_STAT_ALI BIT(5)
+#define REG_INT_STAT_BEI BIT(6)
+#define REG_INT_STAT_OFI BIT(7)
+#define REG_INT_STAT_RXFI BIT(8)
+#define REG_INT_STAT_BSI BIT(9)
+#define REG_INT_STAT_RBNEI BIT(10)
+#define REG_INT_STAT_TXBHCI BIT(11)
+
+/* INT_ENA_SET registers */
+#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0)
+
+/* INT_ENA_CLR registers */
+#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0)
+
+/* INT_MASK_SET registers */
+#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0)
+
+/* INT_MASK_CLR registers */
+#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0)
+
+/* BTR registers */
+#define REG_BTR_PROP GENMASK(6, 0)
+#define REG_BTR_PH1 GENMASK(12, 7)
+#define REG_BTR_PH2 GENMASK(18, 13)
+#define REG_BTR_BRP GENMASK(26, 19)
+#define REG_BTR_SJW GENMASK(31, 27)
+
+/* BTR_FD registers */
+#define REG_BTR_FD_PROP_FD GENMASK(5, 0)
+#define REG_BTR_FD_PH1_FD GENMASK(11, 7)
+#define REG_BTR_FD_PH2_FD GENMASK(17, 13)
+#define REG_BTR_FD_BRP_FD GENMASK(26, 19)
+#define REG_BTR_FD_SJW_FD GENMASK(31, 27)
+
+/* EWL ERP FAULT_STATE registers */
+#define REG_EWL_EW_LIMIT GENMASK(7, 0)
+#define REG_EWL_ERP_LIMIT GENMASK(15, 8)
+#define REG_EWL_ERA BIT(16)
+#define REG_EWL_ERP BIT(17)
+#define REG_EWL_BOF BIT(18)
+
+/* REC TEC registers */
+#define REG_REC_REC_VAL GENMASK(8, 0)
+#define REG_REC_TEC_VAL GENMASK(24, 16)
+
+/* ERR_NORM ERR_FD registers */
+#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0)
+#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16)
+
+/* CTR_PRES registers */
+#define REG_CTR_PRES_CTPV GENMASK(8, 0)
+#define REG_CTR_PRES_PTX BIT(9)
+#define REG_CTR_PRES_PRX BIT(10)
+#define REG_CTR_PRES_ENORM BIT(11)
+#define REG_CTR_PRES_EFD BIT(12)
+
+/* FILTER_A_MASK registers */
+#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0)
+
+/* FILTER_A_VAL registers */
+#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0)
+
+/* FILTER_B_MASK registers */
+#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0)
+
+/* FILTER_B_VAL registers */
+#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0)
+
+/* FILTER_C_MASK registers */
+#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0)
+
+/* FILTER_C_VAL registers */
+#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_LOW registers */
+#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_HIGH registers */
+#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0)
+
+/* FILTER_CONTROL FILTER_STATUS registers */
+#define REG_FILTER_CONTROL_FANB BIT(0)
+#define REG_FILTER_CONTROL_FANE BIT(1)
+#define REG_FILTER_CONTROL_FAFB BIT(2)
+#define REG_FILTER_CONTROL_FAFE BIT(3)
+#define REG_FILTER_CONTROL_FBNB BIT(4)
+#define REG_FILTER_CONTROL_FBNE BIT(5)
+#define REG_FILTER_CONTROL_FBFB BIT(6)
+#define REG_FILTER_CONTROL_FBFE BIT(7)
+#define REG_FILTER_CONTROL_FCNB BIT(8)
+#define REG_FILTER_CONTROL_FCNE BIT(9)
+#define REG_FILTER_CONTROL_FCFB BIT(10)
+#define REG_FILTER_CONTROL_FCFE BIT(11)
+#define REG_FILTER_CONTROL_FRNB BIT(12)
+#define REG_FILTER_CONTROL_FRNE BIT(13)
+#define REG_FILTER_CONTROL_FRFB BIT(14)
+#define REG_FILTER_CONTROL_FRFE BIT(15)
+#define REG_FILTER_CONTROL_SFA BIT(16)
+#define REG_FILTER_CONTROL_SFB BIT(17)
+#define REG_FILTER_CONTROL_SFC BIT(18)
+#define REG_FILTER_CONTROL_SFR BIT(19)
+
+/* RX_MEM_INFO registers */
+#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0)
+#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16)
+
+/* RX_POINTERS registers */
+#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0)
+#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16)
+
+/* RX_STATUS RX_SETTINGS registers */
+#define REG_RX_STATUS_RXE BIT(0)
+#define REG_RX_STATUS_RXF BIT(1)
+#define REG_RX_STATUS_RXMOF BIT(2)
+#define REG_RX_STATUS_RXFRC GENMASK(14, 4)
+#define REG_RX_STATUS_RTSOP BIT(16)
+
+/* RX_DATA registers */
+#define REG_RX_DATA_RX_DATA GENMASK(31, 0)
+
+/* TX_STATUS registers */
+#define REG_TX_STATUS_TX1S GENMASK(3, 0)
+#define REG_TX_STATUS_TX2S GENMASK(7, 4)
+#define REG_TX_STATUS_TX3S GENMASK(11, 8)
+#define REG_TX_STATUS_TX4S GENMASK(15, 12)
+
+/* TX_COMMAND registers */
+#define REG_TX_COMMAND_TXCE BIT(0)
+#define REG_TX_COMMAND_TXCR BIT(1)
+#define REG_TX_COMMAND_TXCA BIT(2)
+#define REG_TX_COMMAND_TXB1 BIT(8)
+#define REG_TX_COMMAND_TXB2 BIT(9)
+#define REG_TX_COMMAND_TXB3 BIT(10)
+#define REG_TX_COMMAND_TXB4 BIT(11)
+
+/* TX_PRIORITY registers */
+#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0)
+#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4)
+#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8)
+#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12)
+
+/* ERR_CAPT ALC registers */
+#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0)
+#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5)
+#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16)
+#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21)
+
+/* TRV_DELAY SSP_CFG registers */
+#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0)
+#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16)
+#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24)
+
+/* RX_FR_CTR registers */
+#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0)
+
+/* TX_FR_CTR registers */
+#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0)
+
+/* DEBUG_REGISTER registers */
+#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0)
+#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3)
+#define REG_DEBUG_REGISTER_PC_ARB BIT(6)
+#define REG_DEBUG_REGISTER_PC_CON BIT(7)
+#define REG_DEBUG_REGISTER_PC_DAT BIT(8)
+#define REG_DEBUG_REGISTER_PC_STC BIT(9)
+#define REG_DEBUG_REGISTER_PC_CRC BIT(10)
+#define REG_DEBUG_REGISTER_PC_CRCD BIT(11)
+#define REG_DEBUG_REGISTER_PC_ACK BIT(12)
+#define REG_DEBUG_REGISTER_PC_ACKD BIT(13)
+#define REG_DEBUG_REGISTER_PC_EOF BIT(14)
+#define REG_DEBUG_REGISTER_PC_INT BIT(15)
+#define REG_DEBUG_REGISTER_PC_SUSP BIT(16)
+#define REG_DEBUG_REGISTER_PC_OVR BIT(17)
+#define REG_DEBUG_REGISTER_PC_SOF BIT(18)
+
+/* YOLO_REG registers */
+#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0)
+
+/* TIMESTAMP_LOW registers */
+#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0)
+
+/* TIMESTAMP_HIGH registers */
+#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c
new file mode 100644
index 000000000000..c37a42480533
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ctucanfd.h"
+
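+/* Fallbacks for kernel headers that do not provide these macros/IDs */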
+#ifndef PCI_DEVICE_DATA
+#define PCI_DEVICE_DATA(vend, dev, data) \
+.vendor = PCI_VENDOR_ID_##vend, \
+.device = PCI_DEVICE_ID_##vend##_##dev, \
+.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+.driver_data = (kernel_ulong_t)(data)
+#endif
+
+#ifndef PCI_VENDOR_ID_TEDIA
+#define PCI_VENDOR_ID_TEDIA 0x1760
+#endif
+
+#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21
+#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00
+#endif
+
+#define CTUCAN_BAR0_CTUCAN_ID 0x0000
+#define CTUCAN_BAR0_CRA_BASE 0x4000
+#define CYCLONE_IV_CRA_A2P_IE (0x0050)
+
+#define CTUCAN_WITHOUT_CTUCAN_ID 0
+#define CTUCAN_WITH_CTUCAN_ID 1
+
+static bool use_msi = true;
+module_param(use_msi, bool, 0444);
+MODULE_PARM_DESC(use_msi, "Use MSI interrupts for the PCIe card. Default: 1 (yes)");
+
+static bool pci_use_second = true;
+module_param(pci_use_second, bool, 0444);
+MODULE_PARM_DESC(pci_use_second, "Use the second CAN core on the PCIe card. Default: 1 (yes)");
+
+struct ctucan_pci_board_data {
+ void __iomem *bar0_base;
+ void __iomem *cra_base;
+ void __iomem *bar1_base;
+ struct list_head ndev_list_head;
+ int use_msi;
+};
+
+static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev)
+{
+ return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev);
+}
+
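+/* Callback invoked for each successfully set up core: link its netdev
+ * into the per-board list so ctucan_pci_remove() can unwind it later.
+ */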
+static void ctucan_pci_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ list_add(&priv->peers_on_pdev, &bdata->ndev_list_head);
+ priv->irq_flags = IRQF_SHARED;
+}
+
+/**
+ * ctucan_pci_probe - PCI registration call
+ * @pdev: Handle to the pci device structure
+ * @ent: Pointer to the entry from ctucan_pci_tbl
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+static int ctucan_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long driver_data = ent->driver_data;
+ struct ctucan_pci_board_data *bdata;
+ void __iomem *addr;
+ void __iomem *cra_addr;
+ void __iomem *bar0_base;
+ u32 cra_a2p_ie;
+ u32 ctucan_id = 0;
+ int ret;
+ unsigned int ntxbufs;
+ unsigned int num_cores = 1;
+ unsigned int core_i = 0;
+ int irq;
+ int msi_ok = 0;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device FAILED\n");
+ goto err;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(dev, "pci_request_regions FAILED\n");
+ goto err_disable_device;
+ }
+
+ if (use_msi) {
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ dev_info(dev, "MSI enabled\n");
+ pci_set_master(pdev);
+ msi_ok = 1;
+ }
+ }
+
+ dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 0),
+ (long long)pci_resource_len(pdev, 0));
+
+ dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 1),
+ (long long)pci_resource_len(pdev, 1));
+
+ addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!addr) {
+ dev_err(dev, "PCI BAR 1 cannot be mapped\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Cyclone IV PCI Express Control Registers Area */
+ bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!bar0_base) {
+ dev_err(dev, "PCI BAR 0 cannot be mapped\n");
+ ret = -EIO;
+ goto err_pci_iounmap_bar1;
+ }
+
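+	/* Boards without an ID register always host two cores; otherwise
+	 * the core count is read from the low bits of the CTUCAN_ID word.
+	 */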
+ if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) {
+ cra_addr = bar0_base;
+ num_cores = 2;
+ } else {
+ cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE;
+ ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID);
+ dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id);
+ num_cores = ctucan_id & 0xf;
+ }
+
+ irq = pdev->irq;
+
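+	/* The IP core currently provides four Tx buffers */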
+ ntxbufs = 4;
+
+ bdata = kzalloc(sizeof(*bdata), GFP_KERNEL);
+ if (!bdata) {
+ ret = -ENOMEM;
+ goto err_pci_iounmap_bar0;
+ }
+
+ INIT_LIST_HEAD(&bdata->ndev_list_head);
+ bdata->bar0_base = bar0_base;
+ bdata->cra_base = cra_addr;
+ bdata->bar1_base = addr;
+ bdata->use_msi = msi_ok;
+
+ pci_set_drvdata(pdev, bdata);
+
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0)
+ goto err_free_board;
+
+ core_i++;
+
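+	/* Additional cores follow at a 0x4000 stride within BAR 1 */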
+ while (pci_use_second && (core_i < num_cores)) {
+ addr += 0x4000;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0) {
+ dev_info(dev, "CTU CAN FD core %d initialization failed\n",
+ core_i);
+ break;
+ }
+ core_i++;
+ }
+
+ /* enable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+ cra_a2p_ie |= 1;
+ iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+
+ return 0;
+
+err_free_board:
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+err_pci_iounmap_bar0:
+	pci_iounmap(pdev, bar0_base);
+err_pci_iounmap_bar1:
+ pci_iounmap(pdev, addr);
+err_release_regions:
+ if (msi_ok) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
+
+/**
+ * ctucan_pci_remove - Unregister the device and release its resources
+ * @pdev: Handle to the pci device structure
+ *
+ * This function frees all the resources allocated to the device.
+ */
+static void ctucan_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ctucan_priv *priv = NULL;
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+	dev_dbg(&pdev->dev, "ctucan_remove\n");
+
+ if (!bdata) {
+ dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
+ return;
+ }
+
+ /* disable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ if (bdata->cra_base)
+ iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);
+
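+	/* Unregister and free every CAN core attached to this board */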
+ while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv,
+ peers_on_pdev)) != NULL) {
+ ndev = priv->can.dev;
+
+ unregister_candev(ndev);
+
+ netif_napi_del(&priv->napi);
+
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ }
+
+ pci_iounmap(pdev, bdata->bar1_base);
+
+ if (bdata->use_msi) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ pci_iounmap(pdev, bdata->bar0_base);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);
+
+static const struct pci_device_id ctucan_pci_tbl[] = {
+ {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21,
+ CTUCAN_WITH_CTUCAN_ID)},
+ {},
+};
+
+static struct pci_driver ctucan_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ctucan_pci_tbl,
+ .probe = ctucan_pci_probe,
+ .remove = ctucan_pci_remove,
+ .driver.pm = &ctucan_pci_pm_ops,
+};
+
+module_pci_driver(ctucan_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
new file mode 100644
index 000000000000..5e4806068662
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+
+#define DRV_NAME "ctucanfd"
+
+static void ctucan_platform_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+
+ platform_set_drvdata(pdev, ndev);
+}
+
+/**
+ * ctucan_platform_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+static int ctucan_platform_probe(struct platform_device *pdev)
+{
+ struct resource *res; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ int ret;
+ unsigned int ntxbufs;
+ int irq;
+
+ /* Get the virtual base address for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr)) {
+ dev_err(dev, "Cannot remap address.\n");
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot find interrupt.\n");
+ ret = irq;
+ goto err;
+ }
+
+	/* The number of Tx buffers might change in future HW revisions.
+	 * If so, it will be passed as a property via the device tree.
+	 */
+ ntxbufs = 4;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
+ 1, ctucan_platform_set_drvdata);
+
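+	/* On failure, clear the drvdata set via the callback above */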
+ if (ret < 0)
+ platform_set_drvdata(pdev, NULL);
+
+err:
+ return ret;
+}
+
+/**
+ * ctucan_platform_remove - Unregister the device and release its resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int ctucan_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+	netdev_dbg(ndev, "ctucan_remove\n");
+
+ unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume);
+
+/* Match table for OF platform binding */
+static const struct of_device_id ctucan_of_match[] = {
+ { .compatible = "ctu,ctucanfd-2", },
+ { .compatible = "ctu,ctucanfd", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, ctucan_of_match);
+
+static struct platform_driver ctucanfd_driver = {
+ .probe = ctucan_platform_probe,
+ .remove = ctucan_platform_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &ctucan_platform_pm_ops,
+ .of_match_table = ctucan_of_match,
+ },
+};
+
+module_platform_driver(ctucanfd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek");
+MODULE_DESCRIPTION("CTU CAN FD for platform");
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 2103bcca9012..c1e76f0a5064 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -116,7 +116,8 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
can_update_sample_point(btc, sample_point_nominal, tseg / 2,
&tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
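+		/* on a tie, keep the first candidate found */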
+ if (sample_point_error >= best_sample_point_error)
continue;
best_sample_point_error = sample_point_error;
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 7f80d8e1e750..6d0dc18c03e7 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -221,7 +221,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
@@ -240,7 +240,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
return 0;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp,
@@ -256,7 +256,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
if (!skb)
return 0;
- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 74d7fcbfd065..fe9bda0f5ec4 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -723,11 +723,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = __flexcan_get_berr_counter(dev, bec);
@@ -845,7 +843,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -892,7 +890,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -1700,11 +1698,9 @@ static int flexcan_open(struct net_device *dev)
return -EINVAL;
}
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = open_candev(dev);
if (err)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b3b5bc1c803b..2779bba390f2 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -464,7 +464,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
struct net_device_stats *stats = &cdev->net->stats;
int err;
- err = can_rx_offload_queue_sorted(&cdev->offload, skb,
+ err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
timestamp);
if (err)
stats->rx_fifo_errors++;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index de4ddf79ba9b..65ba6697bd7d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <linux/clk.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 110071b26921..4b2f9cb17fc3 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -107,7 +107,7 @@ config CAN_TSCAN1
depends on ISA
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ https://www.embeddedts.com/products/TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 3dbba8d61afb..f3862bed3d40 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -5,10 +5,9 @@
* Copyright 2010 Andre B. Oliveira
*/
-/*
- * References:
- * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
- * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+/* References:
+ * - Getting started with TS-CAN1, Technologic Systems, Feb 2022
+ * https://docs.embeddedts.com/TS-CAN1
*/
#include <linux/init.h>
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index f9dd8fdba12b..b21252390216 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -37,6 +37,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
.model = MCP251XFD_MODEL_MCP2518FD,
};
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251863,
+};
+
/* Autodetect model, start with CRC enabled. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
@@ -75,6 +81,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251863:
+ return "MCP251863";
case MCP251XFD_MODEL_MCP251XFD:
return "MCP251xFD";
}
@@ -916,7 +924,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1021,7 +1029,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
return 0;
mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1094,7 +1102,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1259,7 +1267,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
* - for mcp2518fd: offset not 0 or 1
*/
if (chip_tx_tail != tx_tail ||
- !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) ||
+ mcp251xfd_is_251863(priv))))) {
netdev_err(priv->ndev,
"ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
@@ -1697,7 +1706,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
else
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
- if (!mcp251xfd_is_251X(priv) &&
+ if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
netdev_info(ndev,
"Detected %s, but firmware specifies a %s. Fixing up.\n",
@@ -1930,6 +1939,9 @@ static const struct of_device_id mcp251xfd_of_match[] = {
.compatible = "microchip,mcp2518fd",
.data = &mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .compatible = "microchip,mcp251863",
+ .data = &mcp251xfd_devtype_data_mcp251863,
+ }, {
.compatible = "microchip,mcp251xfd",
.data = &mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -1946,6 +1958,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = {
.name = "mcp2518fd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .name = "mcp251863",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863,
+ }, {
.name = "mcp251xfd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
}, {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index d09f7fbf2ba7..ced8d9c81f8c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -173,7 +173,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
}
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
stats->rx_fifo_errors++;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 9cb6b5ad8dda..1d43bccc29bf 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -586,7 +586,8 @@ struct mcp251xfd_regs_status {
enum mcp251xfd_model {
MCP251XFD_MODEL_MCP2517FD = 0x2517,
MCP251XFD_MODEL_MCP2518FD = 0x2518,
- MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+ MCP251XFD_MODEL_MCP251863 = 0x251863,
+ MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */
};
struct mcp251xfd_devtype_data {
@@ -659,12 +660,13 @@ struct mcp251xfd_priv {
static inline bool \
mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
{ \
- return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \
}
-MCP251XFD_IS(2517);
-MCP251XFD_IS(2518);
-MCP251XFD_IS(251X);
+MCP251XFD_IS(2517FD);
+MCP251XFD_IS(2518FD);
+MCP251XFD_IS(251863);
+MCP251XFD_IS(251XFD);
static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
{
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index ff31b993ab17..bb3f2e3b004c 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -633,7 +633,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb,
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
@@ -668,7 +668,7 @@ static void ti_hecc_change_state(struct net_device *ndev,
}
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index e562c5ab1149..43f0c6a064ba 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -239,7 +239,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -265,7 +265,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index b2752978cb09..f91deea9368e 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1027,40 +1027,7 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p;
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
-
- p = &dev->ports[port];
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
+ ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
}
static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index d74defcd86b4..4109433b6b6c 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -160,9 +160,6 @@
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
#define PORT_BACK_PRESSURE BIT(3)
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
#define REG_PORT_1_CTRL_3 0x13
#define REG_PORT_2_CTRL_3 0x23
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 8222c8a6c5ec..48c90e4cda30 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -65,100 +65,6 @@ static const struct {
{ 0x83, "tx_discards" },
};
-struct ksz9477_stats_raw {
- u64 rx_hi;
- u64 rx_undersize;
- u64 rx_fragments;
- u64 rx_oversize;
- u64 rx_jabbers;
- u64 rx_symbol_err;
- u64 rx_crc_err;
- u64 rx_align_err;
- u64 rx_mac_ctrl;
- u64 rx_pause;
- u64 rx_bcast;
- u64 rx_mcast;
- u64 rx_ucast;
- u64 rx_64_or_less;
- u64 rx_65_127;
- u64 rx_128_255;
- u64 rx_256_511;
- u64 rx_512_1023;
- u64 rx_1024_1522;
- u64 rx_1523_2000;
- u64 rx_2001;
- u64 tx_hi;
- u64 tx_late_col;
- u64 tx_pause;
- u64 tx_bcast;
- u64 tx_mcast;
- u64 tx_ucast;
- u64 tx_deferred;
- u64 tx_total_col;
- u64 tx_exc_col;
- u64 tx_single_col;
- u64 tx_mult_col;
- u64 rx_total;
- u64 tx_total;
- u64 rx_discards;
- u64 tx_discards;
-};
-
-static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port)
-{
- struct rtnl_link_stats64 *stats;
- struct ksz9477_stats_raw *raw;
- struct ksz_port_mib *mib;
-
- mib = &dev->ports[port].mib;
- stats = &mib->stats64;
- raw = (struct ksz9477_stats_raw *)mib->counters;
-
- spin_lock(&mib->stats64_lock);
-
- stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
- stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
-
- /* HW counters are counting bytes + FCS which is not acceptable
- * for rtnl_link_stats64 interface
- */
- stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
- stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
-
- stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
- raw->rx_oversize;
-
- stats->rx_crc_errors = raw->rx_crc_err;
- stats->rx_frame_errors = raw->rx_align_err;
- stats->rx_dropped = raw->rx_discards;
- stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_dropped;
-
- stats->tx_window_errors = raw->tx_late_col;
- stats->tx_fifo_errors = raw->tx_discards;
- stats->tx_aborted_errors = raw->tx_exc_col;
- stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
- stats->tx_aborted_errors;
-
- stats->multicast = raw->rx_mcast;
- stats->collisions = raw->tx_total_col;
-
- spin_unlock(&mib->stats64_lock);
-}
-
-static void ksz9477_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *s)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz_port_mib *mib;
-
- mib = &dev->ports[port].mib;
-
- spin_lock(&mib->stats64_lock);
- memcpy(s, &mib->stats64, sizeof(*s));
- spin_unlock(&mib->stats64_lock);
-}
-
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -517,38 +423,7 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
+ ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
}
static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1493,7 +1368,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
.port_mirror_del = ksz9477_port_mirror_del,
- .get_stats64 = ksz9477_get_stats64,
+ .get_stats64 = ksz_get_stats64,
.port_change_mtu = ksz9477_change_mtu,
.port_max_mtu = ksz9477_max_mtu,
};
@@ -1684,7 +1559,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
- .r_mib_stat64 = ksz9477_r_mib_stats64,
+ .r_mib_stat64 = ksz_r_mib_stats64,
.freeze_mib = ksz9477_freeze_mib,
.port_init_cnt = ksz9477_port_init_cnt,
.shutdown = ksz9477_reset_switch,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 0bd58467181f..7a2c8d4767af 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1586,10 +1586,6 @@
#define REG_PORT_LUE_MSTP_STATE 0x0B04
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
-
/* C - PTP */
#define REG_PTP_PORT_RX_DELAY__2 0x0C00
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8014b18d9391..10f127b09e58 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -20,6 +20,102 @@
#include "ksz_common.h"
+struct ksz_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+void ksz_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct rtnl_link_stats64 *stats;
+ struct ksz_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ raw = (struct ksz_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
+
+ /* HW counters are counting bytes + FCS which is not acceptable
+ * for rtnl_link_stats64 interface
+ */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ spin_unlock(&mib->stats64_lock);
+}
+EXPORT_SYMBOL_GPL(ksz_r_mib_stats64);
+
+void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+EXPORT_SYMBOL_GPL(ksz_get_stats64);
+
void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -372,6 +468,46 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL_GPL(ksz_enable_port);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state, int reg)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ u8 data;
+
+ ksz_pread8(dev, port, reg, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, reg, data);
+
+ p = &dev->ports[port];
+ p->stp_state = state;
+
+ ksz_update_port_member(dev, port);
+}
+EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);
+
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
struct dsa_switch *ds;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 485d4a948c38..28cda79b090f 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -151,6 +151,9 @@ int ksz9477_switch_register(struct ksz_device *dev);
void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
+void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s);
/* Common DSA access functions */
@@ -165,6 +168,8 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state, int reg);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
@@ -292,6 +297,11 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+/* STP State Defines */
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 19f0035d4410..e55d962fef9f 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -24,6 +24,11 @@
#include "mt7530.h"
+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mt753x_pcs, pcs);
+}
+
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0x00, "TxDrop"),
@@ -2389,35 +2394,30 @@ mt7531_setup(struct dsa_switch *ds)
return 0;
}
-static bool
-mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
-
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ config->supported_interfaces);
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
}
static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
@@ -2425,42 +2425,35 @@ static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}
-static bool
-mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port))
- return phy_interface_mode_is_rgmii(state->interface);
+ if (mt7531_is_rgmii_port(priv, port)) {
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
fallthrough;
+
case 6: /* 1st cpu port supports sgmii/8023z only */
- if (state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface))
- return false;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_2500FD;
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
-}
-
-static bool
-mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->phy_mode_supported(ds, port, state);
}
static int
@@ -2533,30 +2526,11 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
return 0;
}
-static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
- unsigned long *supported)
-{
- /* Port5 supports ethier RGMII or SGMII.
- * Port6 supports SGMII only.
- */
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
- phylink_set(supported, 2500baseX_Full);
- phylink_set(supported, 2500baseT_Full);
- }
-}
-
-static void
-mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
unsigned int val;
/* For adjusting speed and duplex of SGMII force mode. */
@@ -2582,6 +2556,9 @@ mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
/* MT7531 SGMII 1G force mode can only work in full duplex mode,
* no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
+ *
+ * The speed check is unnecessary as the MAC capabilities apply
+ * this restriction. --rmk
*/
if ((speed == SPEED_10 || speed == SPEED_100) &&
duplex != DUPLEX_FULL)
@@ -2657,9 +2634,10 @@ static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
return 0;
}
-static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+static void mt7531_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 val;
/* Only restart AN when AN is enabled */
@@ -2716,6 +2694,24 @@ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return priv->info->mac_port_config(ds, port, mode, state->interface);
}
+static struct phylink_pcs *
+mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &priv->pcs[port].pcs;
+
+ default:
+ return NULL;
+ }
+}
+
static void
mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
@@ -2723,9 +2719,6 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct mt7530_priv *priv = ds->priv;
u32 mcr_cur, mcr_new;
- if (!mt753x_phy_mode_supported(ds, port, state))
- goto unsupported;
-
switch (port) {
case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII)
@@ -2780,17 +2773,6 @@ unsupported:
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void
-mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_an_restart)
- return;
-
- priv->info->mac_pcs_an_restart(ds, port);
-}
-
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -2800,16 +2782,13 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_link_up)
- return;
-
- priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+ if (pcs->ops->pcs_link_up)
+ pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
}
static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -2822,8 +2801,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
- mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
-
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
/* MT753x MAC works in 1G full duplex mode for all up-clocked
@@ -2903,81 +2880,51 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
return ret;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
+ interface, speed, DUPLEX_FULL);
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
speed, DUPLEX_FULL, true, true);
return 0;
}
-static void
-mt7530_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
-}
-
-static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7531_sgmii_validate(priv, port, supported);
-}
-
-static void
-mt753x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mt7530_priv *priv = ds->priv;
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !mt753x_phy_mode_supported(ds, port, state)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
- !phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, Autoneg);
- }
-
- /* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
- phylink_set(mask, 1000baseT_Full);
+ /* This switch only supports full-duplex at 1Gbps */
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- priv->info->mac_port_validate(ds, port, mask);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ priv->info->mac_port_get_caps(ds, port, config);
+}
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+static int mt753x_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+	/* Autonegotiation is not supported in TRGMII or 802.3z modes */
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ phylink_clear(supported, Autoneg);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ return 0;
}
-static int
-mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 pmsr;
- if (port < 0 || port >= MT7530_NUM_PORTS)
- return -EINVAL;
-
pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
state->link = (pmsr & PMSR_LINK);
@@ -3004,8 +2951,6 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
state->pause |= MLO_PAUSE_RX;
if (pmsr & PMSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static int
@@ -3047,33 +2992,59 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
return 0;
}
-static int
-mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
if (state->interface == PHY_INTERFACE_MODE_SGMII)
- return mt7531_sgmii_pcs_get_state_an(priv, port, state);
-
- return -EOPNOTSUPP;
+ mt7531_sgmii_pcs_get_state_an(priv, port, state);
+ else
+ state->link = false;
}
-static int
-mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
- return priv->info->mac_port_get_state(ds, port, state);
+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
+{
}
+static const struct phylink_pcs_ops mt7530_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7530_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7530_pcs_an_restart,
+};
+
+static const struct phylink_pcs_ops mt7531_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7531_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7531_pcs_an_restart,
+ .pcs_link_up = mt7531_pcs_link_up,
+};
+
static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int ret = priv->info->sw_setup(ds);
+ int i, ret;
+
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+ ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3144,10 +3115,9 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
- .phylink_validate = mt753x_phylink_validate,
- .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_get_caps = mt753x_phylink_get_caps,
+ .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
.phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
.phylink_mac_link_down = mt753x_phylink_mac_link_down,
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
@@ -3157,39 +3127,34 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
static const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7530] = {
.id = ID_MT7530,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7531] = {
.id = ID_MT7531,
+ .pcs_ops = &mt7531_pcs_ops,
.sw_setup = mt7531_setup,
.phy_read = mt7531_ind_phy_read,
.phy_write = mt7531_ind_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
- .phy_mode_supported = mt7531_phy_mode_supported,
- .mac_port_validate = mt7531_mac_port_validate,
- .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
- .mac_pcs_an_restart = mt7531_sgmii_restart_an,
- .mac_pcs_link_up = mt7531_sgmii_link_up_force,
},
};
@@ -3246,9 +3211,8 @@ mt7530_probe(struct mdio_device *mdiodev)
*/
if (!priv->info->sw_setup || !priv->info->pad_setup ||
!priv->info->phy_read || !priv->info->phy_write ||
- !priv->info->phy_mode_supported ||
- !priv->info->mac_port_validate ||
- !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ !priv->info->mac_port_get_caps ||
+ !priv->info->mac_port_config)
return -EINVAL;
priv->id = priv->info->id;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 91508e2feef9..71e36b69b96d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -741,6 +741,12 @@ static const char *p5_intf_modes(unsigned int p5_interface)
struct mt7530_priv;
+struct mt753x_pcs {
+ struct phylink_pcs pcs;
+ struct mt7530_priv *priv;
+ int port;
+};
+
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
@@ -752,36 +758,27 @@ struct mt7530_priv;
* port
 * @mac_port_validate: Holding the way to set additional validation types for a
 * certain MAC port
- * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain
- * MAC port
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
- * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a
- * certain MAC port
- * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs
- * of the certain MAC port
*/
struct mt753x_info {
enum mt753x_id id;
+ const struct phylink_pcs_ops *pcs_ops;
+
int (*sw_setup)(struct dsa_switch *ds);
int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
- bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state);
+ void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_get_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
int (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
- void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
- void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -823,6 +820,7 @@ struct mt7530_priv {
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
+ struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
struct mutex reg_mutex;
int irq;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 413b0006e9a2..9e28219b223d 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
enum dsa_tag_protocol old_proto = felix->tag_proto;
+ bool cpu_port_active = false;
+ struct dsa_port *dp;
int err;
if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
proto != DSA_TAG_PROTO_OCELOT_8021Q)
return -EPROTONOSUPPORT;
+ /* We don't support multiple CPU ports, yet the DT blob may have
+ * multiple CPU ports defined. The first CPU port is the active one,
+ * the others are inactive. In this case, DSA will call
+ * ->change_tag_protocol() multiple times, once per CPU port.
+ * Since we implement the tagging protocol change towards "ocelot" or
+ * "seville" as effectively initializing the NPI port, what we are
+ * doing is effectively changing who the NPI port is to the last @cpu
+ * argument passed, which is an unused DSA CPU port and not the one
+ * that should actively pass traffic.
+ * Suppress DSA's calls on CPU ports that are inactive.
+ */
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dp->cpu_dp->index == cpu) {
+ cpu_port_active = true;
+ break;
+ }
+ }
+
+ if (!cpu_port_active)
+ return 0;
+
felix_del_tag_protocol(ds, cpu, old_proto);
err = felix_set_tag_protocol(ds, cpu, proto);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 8d382b27e625..52a8566071ed 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
err = dsa_register_switch(ds);
if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}
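dev_err_probe() folds the error message and the return value into one call, and stays quiet for -EPROBE_DEFER (the message is recorded in debugfs under devices_deferred instead of spamming the log). A minimal usage sketch, where do_setup() is a hypothetical probe step:

        err = do_setup(&pdev->dev);     /* do_setup() stands in for any probe step */
        if (err)
                return dev_err_probe(&pdev->dev, err, "setup failed\n");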
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index d3ed0a7f8077..2727d3169c25 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1287,87 +1287,71 @@ qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
if (ret >= 0)
return ret;
- return qca8k_mdio_read(priv, phy, regnum);
+ ret = qca8k_mdio_read(priv, phy, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
}
static int
-qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- /* Use mdio Ethernet when available, fallback to legacy one on error */
- ret = qca8k_phy_eth_command(priv, false, port, regnum, 0);
- if (!ret)
- return ret;
-
- return qca8k_mdio_write(priv, port, regnum, data);
+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
}
static int
-qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- /* Use mdio Ethernet when available, fallback to legacy one on error */
- ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
- if (ret >= 0)
- return ret;
-
- ret = qca8k_mdio_read(priv, port, regnum);
-
- if (ret < 0)
- return 0xffff;
-
- return ret;
+ return qca8k_internal_mdio_read(slave_bus, port, regnum);
}
static int
-qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
+qca8k_mdio_register(struct qca8k_priv *priv)
{
struct dsa_switch *ds = priv->ds;
+ struct device_node *mdio;
struct mii_bus *bus;
bus = devm_mdiobus_alloc(ds->dev);
-
if (!bus)
return -ENOMEM;
bus->priv = (void *)priv;
- bus->name = "qca8k slave mii";
- bus->read = qca8k_internal_mdio_read;
- bus->write = qca8k_internal_mdio_write;
- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
- ds->index);
-
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+ ds->dst->index, ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
-
ds->slave_mii_bus = bus;
- return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ /* Check if the devicetree declares the port:phy mapping */
+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ bus->name = "qca8k slave mii";
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
+ return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ }
+
+ /* If a mapping can't be found, the legacy mapping is used,
+ * using the qca8k_port_to_phy function
+ */
+ bus->name = "qca8k-legacy slave mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
+ return devm_mdiobus_register(priv->dev, bus);
}
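The legacy fallback assumes user port N is wired to internal PHY N - 1, wrapped into the valid MDIO address range. A sketch of the translation the two wrappers above apply; the body of qca8k_port_to_phy() is assumed from this convention, not shown in the hunk:

        /* Assumed legacy translation: user port N -> internal PHY N - 1 */
        static int qca8k_legacy_phy_addr(int port)
        {
                return (port - 1) % PHY_MAX_ADDR;       /* PHY_MAX_ADDR == 32 */
        }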
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
- struct device_node *ports, *port, *mdio;
+ struct device_node *ports, *port;
phy_interface_t mode;
int err;
@@ -1429,24 +1413,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
QCA8K_MDIO_MASTER_EN);
}
- /* Check if the devicetree declare the port:phy mapping */
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
- err = qca8k_mdio_register(priv, mdio);
- if (err)
- of_node_put(mdio);
-
- return err;
- }
-
- /* If a mapping can't be found the legacy mapping is used,
- * using the qca8k_port_to_phy function
- */
- priv->legacy_phy_port_mapping = true;
- priv->ops.phy_read = qca8k_phy_read;
- priv->ops.phy_write = qca8k_phy_write;
-
- return 0;
+ return qca8k_mdio_register(priv);
}
static int
@@ -2346,7 +2313,7 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 1);
- priv->port_sts[port].enabled = 1;
+ priv->port_enabled_map |= BIT(port);
if (dsa_is_user_port(ds, port))
phy_support_asym_pause(phy);
@@ -2360,23 +2327,25 @@ qca8k_port_disable(struct dsa_switch *ds, int port)
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 0);
- priv->port_sts[port].enabled = 0;
+ priv->port_enabled_map &= ~BIT(port);
}
static int
qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct qca8k_priv *priv = ds->priv;
- int i, mtu = 0;
- priv->port_mtu[port] = new_mtu;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[i] > mtu)
- mtu = priv->port_mtu[i];
+ /* We only have a general MTU setting.
+ * DSA always sets the CPU port's MTU to the largest MTU of the slave
+ * ports.
+ * Setting MTU just for the CPU port is sufficient to correctly set a
+ * value for every port.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
/* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
+ return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
}
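Because DSA already propagates the largest user-port MTU to the CPU port, one write of the CPU port's frame size covers the whole switch. The register takes the full on-wire size; for the default MTU the arithmetic works out as:

        /* 1500 (ETH_DATA_LEN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518 */
        int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;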
static int
@@ -3033,16 +3002,6 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
-
- /* Set initial MTU for every port.
- * We have only have a general MTU setting. So track
- * every port and set the max across all port.
- * Set per port MTU to 1500 as the MTU change function
- * will add the overhead and if its set to 1518 then it
- * will apply the overhead again and we will end up with
- * MTU of 1536 instead of 1518
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -3202,8 +3161,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
- priv->ops = qca8k_switch_ops;
- priv->ds->ops = &priv->ops;
+ priv->ds->ops = &qca8k_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
@@ -3243,13 +3201,16 @@ static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
- int i;
+ int port;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (!priv->port_sts[i].enabled)
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Do not enable on resume if the port was
+ * disabled before.
+ */
+ if (!(priv->port_enabled_map & BIT(port)))
continue;
- qca8k_port_set_status(priv, i, enable);
+ qca8k_port_set_status(priv, port, enable);
}
}
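A u8 bitmap comfortably covers this switch's seven ports, so the per-port status structs collapse into three one-liners; a sketch of the bookkeeping, with map and port as placeholders:

        u8 map = 0;
        bool enabled;

        map |= BIT(port);               /* port_enable */
        map &= ~BIT(port);              /* port_disable */
        enabled = map & BIT(port);      /* resume-time check */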
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index f375627174c8..04408e11402a 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -324,10 +324,6 @@ enum qca8k_mid_cmd {
QCA8K_MIB_CAST = 3,
};
-struct ar8xxx_port_status {
- int enabled;
-};
-
struct qca8k_match_data {
u8 id;
bool reduced_package;
@@ -388,17 +384,17 @@ struct qca8k_priv {
u8 mirror_rx;
u8 mirror_tx;
u8 lag_hash_mode;
- bool legacy_phy_port_mapping;
+ /* Each bit corresponds to a port. This switch can support a max of 7 ports.
+ * Bit set: port enabled. Bit clear: port disabled.
+ */
+ u8 port_enabled_map;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
- struct dsa_switch_ops ops;
struct gpio_desc *reset_gpio;
- unsigned int port_mtu[QCA8K_NUM_PORTS];
struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
struct qca8k_mgmt_eth_data mgmt_eth_data;
struct qca8k_mib_eth_data mib_eth_data;
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 1aa79735355f..060165a85fb7 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK
help
Select to enable support for Realtek Ethernet switch chips.
+ Note that at least one interface driver must be enabled for the
+ subdrivers to be loaded. Moreover, an interface driver cannot achieve
+ anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
config NET_DSA_REALTEK_MDIO
- tristate "Realtek MDIO connected switch driver"
- depends on NET_DSA_REALTEK
+ tristate "Realtek MDIO interface driver"
depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches configured
through MDIO.
config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI connected switch driver"
- depends on NET_DSA_REALTEK
+ tristate "Realtek SMI interface driver"
depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches connected
through SMI.
config NET_DSA_REALTEK_RTL8365MB
tristate "Realtek RTL8365MB switch subdriver"
- depends on NET_DSA_REALTEK
- depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL8_4
help
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
config NET_DSA_REALTEK_RTL8366RB
tristate "Realtek RTL8366RB switch subdriver"
- depends on NET_DSA_REALTEK
- depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL4_A
help
- Select to enable support for Realtek RTL8366RB
+ Select to enable support for Realtek RTL8366RB.
+
+endif
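The odd-looking "depends on FOO || !FOO" lines are a tristate constraint, not a tautology: the expression evaluates to y when FOO is y or n, but only to m when FOO is m, which forces the interface driver down to a module whenever a subdriver it calls into is modular. Together with the imply lines on the subdrivers, either half can then be enabled on its own without producing a built-in driver that references symbols living in a module.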
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 31e1f100e48e..c58f49d558d2 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = {
#endif
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
- { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
#endif
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 2243d3da55b2..45992f79ec8d 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -546,20 +546,11 @@ static const struct of_device_id realtek_smi_of_match[] = {
.data = &rtl8366rb_variant,
},
#endif
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{
.compatible = "realtek,rtl8365mb",
.data = &rtl8365mb_variant,
},
- {
- .compatible = "realtek,rtl8367s",
- .data = &rtl8365mb_variant,
- },
#endif
{ /* sentinel */ },
};
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 1111d1f33865..557ca8ff9dec 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if ((master_dev->flags & IFF_UP) == IFF_UP) {
/* slave is not a master & not already a slave: */
if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
equalizer_t *eql = netdev_priv(master_dev);
int ret;
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof(*s));
s->dev = slave_dev;
s->priority = srq.priority;
s->priority_bps = srq.priority;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index bd4cb9d7c35d..827993022386 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +48,14 @@ config CX_ECAT
To compile this driver as a module, choose M here. The module
will be called ec_bhf.
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
@@ -85,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -128,8 +126,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -141,10 +140,10 @@ config FEALNX
Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
cards. <http://www.myson.com.tw/>
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
@@ -164,6 +163,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
@@ -171,10 +171,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 52b9833fda99..8bcda2cb3a2e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -40,6 +40,7 @@
#define AQ_CFG_RX_HDR_SIZE 256U
#define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a418238f6309..1daecd483b8d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
"%sQueue[%d] AllocFails",
"%sQueue[%d] SkbAllocFails",
"%sQueue[%d] Polls",
+ "%sQueue[%d] PageFlips",
+ "%sQueue[%d] PageReuses",
+ "%sQueue[%d] PageFrees",
+ "%sQueue[%d] XdpAbort",
+ "%sQueue[%d] XdpDrop",
+ "%sQueue[%d] XdpPass",
+ "%sQueue[%d] XdpTx",
+ "%sQueue[%d] XdpInvalid",
+ "%sQueue[%d] XdpRedirect",
};
static const char * const aq_ethtool_queue_tx_stat_names[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e65ce7199dac..88595863d8bc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -14,17 +14,22 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
+#include "aq_vec.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
static const struct net_device_ops aq_ndev_ops;
@@ -126,9 +131,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
int err;
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+ netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+ new_mtu);
+ return -EOPNOTSUPP;
+ }
+
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
@@ -204,6 +219,25 @@ err_exit:
return err;
}
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ aq_nic->xdp_prog && features & NETIF_F_LRO) {
+ netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -410,6 +444,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
mqprio->qopt.prio_tc_map);
}
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(ndev);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && ndev->features & NETIF_F_LRO) {
+ netdev_err(ndev,
+ "LRO is not supported with single buffer XDP, disabling\n");
+ ndev->features &= ~NETIF_F_LRO;
+ }
+ }
+
+ need_update = !!aq_nic->xdp_prog != !!prog;
+ if (running && need_update)
+ aq_ndev_close(ndev);
+
+ old_prog = xchg(&aq_nic->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (!old_prog && prog)
+ static_branch_inc(&aq_xdp_locking_key);
+ else if (old_prog && !prog)
+ static_branch_dec(&aq_xdp_locking_key);
+
+ if (running && need_update)
+ return aq_ndev_open(ndev);
+
+ return 0;
+}
+
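Attach and detach follow the usual XDP lifetime protocol: publish the new program with an atomic exchange, release the reference on the displaced one, and keep a counting static key (defined in aq_main.c and declared in aq_main.h earlier in this patch) so the datapath can branch cheaply. The core of it:

        old_prog = xchg(&aq_nic->xdp_prog, prog);       /* publish atomically */
        if (old_prog)
                bpf_prog_put(old_prog);                 /* drop old reference */
        /* static_branch_inc()/_dec() count programs attached across netdevs */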
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -418,10 +502,13 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_fix_features = aq_ndev_fix_features,
.ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
+ .ndo_bpf = aq_xdp,
+ .ndo_xdp_xmit = aq_xdp_xmit,
};
static int __init aq_ndev_init_module(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce73..99870865f66d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -12,6 +12,8 @@
#include "aq_common.h"
#include "aq_nic.h"
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 33f1a1377588..e11cc29d3264 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
@@ -569,6 +569,103 @@ err_exit:
return err;
}
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+ struct xdp_frame *xdpf,
+ struct aq_ring_s *ring)
+{
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff;
+ struct skb_shared_info *sinfo;
+ unsigned int frag_count = 0U;
+ unsigned int nr_frags = 0U;
+ unsigned int ret = 0U;
+ u16 total_len;
+
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ total_len = xdpf->len;
+ dx_buff->len = total_len;
+ if (xdp_frame_has_frags(xdpf)) {
+ nr_frags = sinfo->nr_frags;
+ total_len += sinfo->xdp_frags_size;
+ }
+ dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+ goto exit;
+
+ first = dx_buff;
+ dx_buff->len_pkt = total_len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ for (; nr_frags--; ++frag_count) {
+ skb_frag_t *frag = &sinfo->frags[frag_count];
+ unsigned int frag_len = skb_frag_size(frag);
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
+ buff_size, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = NULL;
+ dx_buff->xdpf = xdpf;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->pa)
+ continue;
+ if (unlikely(dx_buff->is_sop))
+ dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+
+exit:
+ return ret;
+}
+
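The hand-rolled length computation at the top of aq_nic_map_xdp() — linear length plus sinfo->xdp_frags_size when frags are present — matches what the xdp_get_frame_len() helper used in the Tx-completion hunk below returns; a one-line equivalent:

        unsigned int total_len = xdp_get_frame_len(xdpf);       /* linear + frag bytes */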
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
@@ -697,6 +794,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
+ dx_buff->xdpf = NULL;
goto exit;
mapping_error:
@@ -725,6 +823,44 @@ exit:
return ret;
}
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
+ struct net_device *ndev = aq_nic_get_ndev(aq_nic);
+ struct skb_shared_info *sinfo;
+ int cpu = smp_processor_id();
+ int err = NETDEV_TX_BUSY;
+ struct netdev_queue *nq;
+ unsigned int frags = 1;
+
+ if (xdp_frame_has_frags(xdpf)) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ frags += sinfo->nr_frags;
+ }
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX)
+ return err;
+
+ nq = netdev_get_tx_queue(ndev, tx_ring->idx);
+ __netif_tx_lock(nq, cpu);
+
+ aq_ring_update_queue_state(tx_ring);
+
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+ goto out;
+
+ frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+ if (likely(frags))
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+ frags);
+out:
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 1a7148041e3d..935ba889bd9a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -11,6 +11,8 @@
#define AQ_NIC_H
#include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/bpf.h>
#include "aq_common.h"
#include "aq_rss.h"
@@ -128,6 +130,7 @@ struct aq_nic_s {
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
+ struct bpf_prog *xdp_prog;
struct net_device *ndev;
unsigned int aq_vecs;
unsigned int packet_filter;
@@ -177,6 +180,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 797a95142d1f..3a529ee8c834 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -444,22 +444,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev, true);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev, false);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev, true);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev, false);
}
static const struct dev_pm_ops aq_pm_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 77e76c9efd32..ea740210803f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -7,15 +7,37 @@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
-#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
@@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
rxpage->page = NULL;
}
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
- struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
@@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
- rxpage->pg_off = 0;
+ rxpage->pg_off = rx_ring->page_offset;
return 0;
@@ -58,21 +81,26 @@ err_exit:
return ret;
}
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
- int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
/* One means ring is the only user and can reuse */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
- rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
- if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
- (PAGE_SIZE << order)) {
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
+
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
@@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
- rxbuf->rxdata.pg_off = 0;
+ rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
@@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
}
if (!rxbuf->rxdata.page) {
- ret = aq_get_rxpage(&rxbuf->rxdata, order,
- aq_nic_get_dev(self->aq_nic));
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
@@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
@@ -172,11 +200,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
- self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
-
- if (aq_nic_cfg->rxpageorder > self->page_order)
- self->page_order = aq_nic_cfg->rxpageorder;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
+ }
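With XDP enabled each receive buffer is carved out of an order-2 page with explicit head- and tailroom, and the frame_sz later handed to xdp_init_buff() is simply the sum of the three fields set above:

        /*  |<- page_offset ->|<------ frame_max ------>|<- tail_size ->|
         *     XDP headroom         packet data          skb_shared_info
         */
        int frame_sz = self->page_offset + self->frame_max + self->tail_size;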
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@ -298,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop && buff->skb)) {
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
-
dev_kfree_skb_any(buff->skb);
- buff->skb = NULL;
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
}
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@ -339,11 +389,159 @@ static void aq_rx_checksum(struct aq_ring_s *self,
__skb_incr_checksum_unnecessary(skb);
}
-#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-int aq_ring_rx_clean(struct aq_ring_s *self,
- struct napi_struct *napi,
- int *work_done,
- int budget)
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ goto pass;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ /* single buffer XDP program, but packet is multi buffer, aborted */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+pass:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
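Returning ERR_PTR(-result) lets a single pointer carry both outcomes — a real skb to hand up the stack, or an encoded status meaning the frame was consumed (TX, redirect, drop) or refused — so the caller below gets away with one IS_ERR(skb) || !skb check. The generic mechanics, for reference:

        void *p = ERR_PTR(-EBUSY);      /* encode a negative errno in a pointer */
        long err = 0;

        if (IS_ERR(p))
                err = PTR_ERR(p);       /* recover the code: -EBUSY */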
+
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_frag_off_set(frag, buff_->rxdata.pg_off);
+ skb_frag_size_set(frag, buff_->len);
+ sinfo->xdp_frags_size += buff_->len;
+ __skb_frag_set_page(frag, buff_->rxdata.page);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
bool is_rsc_completed = true;
@@ -449,7 +647,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_add_rx_frag(skb, 0, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff->rxdata.page);
}
@@ -469,7 +667,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
@@ -510,6 +708,149 @@ err_exit:
return err;
}
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to queue 0 */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
+
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
@@ -529,7 +870,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
@@ -543,9 +883,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
- buff->len = AQ_CFG_RX_FRAME_MAX;
+ buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order);
+ err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
@@ -600,6 +940,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 93659e58f1ce..0a6c34438c1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -11,6 +11,10 @@
#define AQ_RING_H
#include "aq_common.h"
+#include "aq_vec.h"
+
+#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
struct page;
struct aq_nic_cfg_s;
@@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s {
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
+ struct xdp_frame *xdpf;
};
/* TxC */
struct {
@@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s {
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct aq_ring_stats_tx_s {
@@ -132,10 +143,15 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
- unsigned int page_order;
+ u16 page_order;
+ u16 page_offset;
+ u16 frame_max;
+ u16 tail_size;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
+ struct bpf_prog *xdp_prog;
enum atl_ring_type ring_type;
+ struct xdp_rxq_info xdp_rxq;
};
struct aq_ring_param_s {
@@ -175,6 +191,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
+
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
@@ -182,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f4774cf051c9..e839e1002ec7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -10,11 +10,6 @@
*/
#include "aq_vec.h"
-#include "aq_nic.h"
-#include "aq_ring.h"
-#include "aq_hw.h"
-
-#include <linux/netdevice.h>
struct aq_vec_s {
const struct aq_hw_ops *aq_hw_ops;
@@ -43,8 +38,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (!self) {
err = -EINVAL;
} else {
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -153,9 +148,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+ if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
idx_ring, aq_nic_cfg);
if (!ring) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
err = -ENOMEM;
goto err_exit;
}
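Each Rx ring registers its xdp_rxq_info in the standard two-step order and unwinds symmetrically on any later failure; condensed, with rxq, ndev, queue_idx and napi_id as placeholders:

        err = xdp_rxq_info_reg(&rxq, ndev, queue_idx, napi_id);
        if (err)
                goto fail;
        err = xdp_rxq_info_reg_mem_model(&rxq, MEM_TYPE_PAGE_SHARED, NULL);
        if (err) {
                xdp_rxq_info_unreg(&rxq);       /* undo step one */
                goto fail;
        }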
@@ -182,8 +191,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +233,8 @@ int aq_vec_start(struct aq_vec_s *self)
unsigned int i = 0U;
int err = 0;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +257,8 @@ void aq_vec_stop(struct aq_vec_s *self)
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +277,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,11 +306,13 @@ void aq_vec_ring_free(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- if (i < self->rx_rings)
+ if (i < self->rx_rings) {
+ xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
}
self->tx_rings = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 567f3d4b79a2..78fac609b71d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -13,7 +13,13 @@
#define AQ_VEC_H
#include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
#include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
struct aq_hw_s;
struct aq_hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4625ccb79499..9dfd68f0fda9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759..878a53abec33 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -969,15 +969,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
buff->len =
- rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
if (buff->is_lro) {
/* LRO */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 881ac33fe914..4e9215bce4ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2212,7 +2212,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2381,7 +2381,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2493,7 +2493,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2529,7 +2529,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2dd79af9411b..9a41145dadfc 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(value, offset);
else
- writel(value, offset);
+ writel_relaxed(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(offset);
else
- return readl(offset);
+ return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
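The bcmgenet accessors above now use the _relaxed MMIO variants, which skip the implied barriers that order register I/O against normal (DMA-visible) memory. A hedged sketch of the pattern this shifts onto callers, assuming standard kernel barrier semantics; the doorbell helper here is illustrative, not from this driver:

	static void ring_doorbell(void __iomem *doorbell, u32 prod_idx)
	{
		/* publish descriptor writes to DMA memory first ... */
		dma_wmb();
		/* ... then do the plain MMIO store, no implied wmb() */
		writel_relaxed(prod_idx, doorbell);
	}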
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index a1a38456c9a3..5d5f10180158 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2534,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev)
int err;
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- BUG_ON(!res);
+ if (!res) {
+ printk(KERN_ERR "%s: failed to get resource\n",
+ dev_name(&pldev->dev));
+ err = -EINVAL;
+ goto out_out;
+ }
sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 800d5ced5800..6434e74c04f1 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -337,11 +337,9 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -391,11 +389,9 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -1658,6 +1654,7 @@ static void macb_tx_restart(struct macb_queue *queue)
unsigned int head = queue->tx_head;
unsigned int tail = queue->tx_tail;
struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1665,6 +1662,13 @@ static void macb_tx_restart(struct macb_queue *queue)
if (head == tail)
return;
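+ /* If the controller's read pointer (TBQP) has already caught up
+ * with the software head there is nothing left to restart.
+ */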
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+ if (tbqp == head_idx)
+ return;
+
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
@@ -2745,9 +2749,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
- err = pm_runtime_get_sync(&bp->pdev->dev);
+ err = pm_runtime_resume_and_get(&bp->pdev->dev);
if (err < 0)
- goto pm_exit;
+ return err;
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -4134,11 +4138,9 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
- ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&lp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&lp->pdev->dev);
+ if (ret < 0)
return ret;
- }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
@@ -4586,7 +4588,7 @@ static int zynqmp_init(struct platform_device *pdev)
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
/* Ensure PS-GTR PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_get(&pdev->dev, "sgmii-phy");
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
if (IS_ERR(bp->sgmii_phy)) {
ret = PTR_ERR(bp->sgmii_phy);
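The pm_runtime conversions in this file all follow one pattern: pm_runtime_get_sync() bumps the usage count even when the resume fails, so every error path needed a matching pm_runtime_put_noidle(); pm_runtime_resume_and_get() folds that fixup into the helper. A rough sketch of the equivalence, assuming the stock semantics of the helpers in include/linux/pm_runtime.h:

	static int resume_and_get_like(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev); /* ref taken even on error */

		if (ret < 0) {
			pm_runtime_put_noidle(dev); /* drop the ref on failure */
			return ret;
		}
		return 0; /* caller balances with pm_runtime_put() */
	}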
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 457cb7121000..1281d1565ef8 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
- * This function implements the the reception process.
+ * This function implements the reception process.
* Also it runs the TX completion thread
*/
static int xgmac_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 59683f79959c..60b648b46f75 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -483,7 +483,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
tx_info->ip_family = AF_INET;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 9e2378013642..41714203ace8 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -567,7 +567,7 @@ void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+ size_t len, int flags, int *addr_len);
int chtls_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index c320cc8ca68d..539992dad8ba 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1426,7 +1426,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied)
}
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
@@ -1441,7 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
@@ -1616,7 +1616,7 @@ skip_copy:
* Peek at data in a socket's receive buffer.
*/
static int peekmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags)
+ size_t len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 peek_seq, offset;
@@ -1626,7 +1626,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
long timeo;
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
peek_seq = tp->copied_seq;
do {
@@ -1737,7 +1737,7 @@ found_ok_skb:
}
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
@@ -1750,25 +1750,23 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
if (unlikely(flags & MSG_OOB))
- return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
- addr_len);
+ return tcp_prot.recvmsg(sk, msg, len, flags, addr_len);
if (unlikely(flags & MSG_PEEK))
- return peekmsg(sk, msg, len, nonblock, flags);
+ return peekmsg(sk, msg, len, flags);
if (sk_can_busy_loop(sk) &&
skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
- sk_busy_loop(sk, nonblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
if (is_tls_rx(csk))
- return chtls_pt_recvmsg(sk, msg, len, nonblock,
- flags, addr_len);
+ return chtls_pt_recvmsg(sk, msg, len, flags, addr_len);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
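The chtls changes mirror the tree-wide removal of the separate nonblock argument from the recvmsg paths: non-blocking intent now travels only in MSG_DONTWAIT, and sock_rcvtimeo() maps that to a zero timeout. A minimal sketch of the assumed helper behavior, modeled on the definition in include/net/sock.h:

	static long rcv_timeout(const struct sock *sk, int flags)
	{
		/* a zero timeout means "do not sleep waiting for data" */
		return (flags & MSG_DONTWAIT) ? 0 : sk->sk_rcvtimeo;
	}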
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 86b1d23eba83..1db19463fd46 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -474,8 +474,6 @@ err_out_netdev:
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) ioread32(ee_addr)
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 904f3304727e..49c93aa38862 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1091,8 +1091,7 @@ static int tsnep_mdio_init(struct tsnep_adapter *adapter)
retval = of_mdiobus_register(adapter->mdiobus, np);
out:
- if (np)
- of_node_put(np);
+ of_node_put(np);
return retval;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index d5356db7539a..caf48023f8ea 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
- }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
@@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_clk(priv);
if (err)
goto err_phy_connect;
+
+ /* Disable ast2600 problematic HW arbitration */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
}
/* Default ring sizes */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 763d2c7b5fb1..5750f9a56393 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
- if (fman_node)
+ if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
- if (ptp_node)
+ if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index b668df6193be..8c7fadf2b734 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg {
struct hclge_pf_to_vf_msg {
u16 code;
- u16 vf_mbx_msg_code;
- u16 vf_mbx_msg_subcode;
- u16 resp_status;
- u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ union {
+ /* used for mbx response */
+ struct {
+ u16 vf_mbx_msg_code;
+ u16 vf_mbx_msg_subcode;
+ u16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ };
+ /* used for general mbx */
+ struct {
+ u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ };
};
struct hclge_mbx_vf_to_pf_cmd {
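The anonymous union added above lets the same payload bytes be read either through the structured response fields or as a flat msg_data array, which is what allows hclge_send_mbx_msg() below to copy into msg.msg_data instead of writing through the address of the first response field. A reduced, self-contained sketch of the overlay (field widths here are illustrative, not the real layout):

	#include <stdint.h>

	union payload {
		struct {                /* structured response view */
			uint16_t code;
			uint16_t subcode;
			uint16_t status;
		};
		uint8_t msg_data[16];   /* flat view for bulk copies */
	};

	/* memcpy(p.msg_data, src, len) now clearly covers the whole
	 * payload, not just the bytes starting at &p.code.
	 */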
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 79c64f4e67d2..8a3a446219f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
+ HNAE3_DEV_SUPPORT_CQ_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+#define hnae3_ae_dev_cq_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c15ca710dabb..c8b151d29f53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static void
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 876650eddac4..7a7d4cf9bf35 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_PAUSE_B = 14,
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+ HCLGE_COMM_CAP_CQ_B = 18,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 14dc12c2155d..7e9f9da2f392 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
priv->tqp_vector[i].rx_group.dim.mode = mode;
}
- /* only device version above V3(include V3), GL can switch CQ/EQ
- * period mode.
- */
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ if (hnae3_ae_dev_cq_supported(ae_dev)) {
u32 new_mode;
u64 reg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index f4da77452126..1db8a86f046d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -664,6 +664,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->tx_pending = priv->ring[0].desc_num;
param->rx_pending = priv->ring[rx_queue_index].desc_num;
kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+ kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+ &priv->state);
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -1104,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
+static bool
+hns3_is_ringparam_changed(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct hns3_ring_param *old_ringparam,
+ struct hns3_ring_param *new_ringparam)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ old_ringparam->tx_desc_num = priv->ring[0].desc_num;
+ old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
+ old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
+ new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
+
+ if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
+ old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
+ old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
+ netdev_info(ndev, "ringparam not changed\n");
+ return false;
+ }
+
+ return true;
+}
+
static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -1120,62 +1152,80 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
return 0;
}
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+ return -EOPNOTSUPP;
+
+ if (tx_push == old_state)
+ return 0;
+
+ netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+ old_state ? "on" : "off", tx_push ? "on" : "off");
+
+ if (tx_push)
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+ else
+ clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ return 0;
+}
+
static int hns3_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
+ struct hns3_ring_param old_ringparam, new_ringparam;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_ring *tmp_rings;
bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- u16 queue_num = h->kinfo.num_tqps;
- u32 old_rx_buf_len;
int ret, i;
ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
- /* Hardware requires that its descriptors must be multiple of eight */
- new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
- new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring[0].desc_num;
- old_rx_desc_num = priv->ring[queue_num].desc_num;
- old_rx_buf_len = priv->ring[queue_num].buf_size;
- if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num &&
- kernel_param->rx_buf_len == old_rx_buf_len)
+ ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+ if (ret)
+ return ret;
+
+ if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
+ &old_ringparam, &new_ringparam))
return 0;
tmp_rings = hns3_backup_ringparam(priv);
if (!tmp_rings) {
- netdev_err(ndev,
- "backup ring param failed by allocating memory fail\n");
+ netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
return -ENOMEM;
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n",
- old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num,
- old_rx_buf_len, kernel_param->rx_buf_len);
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
+ old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
+ new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
+ old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
- hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
+ new_ringparam.rx_desc_num);
+ hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret);
- hns3_change_rx_buf_len(ndev, old_rx_buf_len);
- hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
+ old_ringparam.rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
@@ -1385,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
return 0;
}
-static int hns3_check_coalesce_para(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
+static int
+hns3_check_cqe_coalesce_param(struct net_device *netdev,
+ struct kernel_ethtool_coalesce *kernel_coal)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
+ !hnae3_ae_dev_cq_supported(ae_dev)) {
+ netdev_err(netdev, "coalesced cqe mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
int ret;
+ ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
+ if (ret)
+ return ret;
+
ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
@@ -1464,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
if (hns3_nic_resetting(netdev))
return -EBUSY;
- ret = hns3_check_coalesce_para(netdev, cmd);
+ ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
if (ret)
return ret;
@@ -1825,23 +1897,27 @@ static int hns3_set_tunable(struct net_device *netdev,
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
+ netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
+ old_tx_spare_buf_size, new_tx_spare_buf_size);
ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
if (ret ||
(!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
int ret1;
- netdev_warn(netdev,
- "change tx spare buf size fail, revert to old value\n");
+ netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
ret1 = hns3_set_tx_spare_buf_size(netdev,
old_tx_spare_buf_size);
if (ret1) {
- netdev_err(netdev,
- "revert to old tx spare buf size fail\n");
+ netdev_err(netdev, "revert to old tx spare buf size fail\n");
return ret1;
}
return ret;
}
+
+ netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+ priv->ring->tx_spare->len);
+
break;
default:
ret = -EOPNOTSUPP;
@@ -1858,7 +1934,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
-#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN
+#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \
+ ETHTOOL_RING_USE_TX_PUSH)
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
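With ETHTOOL_RING_USE_TX_PUSH advertised, the new knob is reachable through the standard ringparam interface; on an ethtool build recent enough to carry the attribute this is presumably `ethtool -G <dev> tx-push on|off`. Note that the driver only latches the request in the HNS3_NIC_STATE_TX_PUSH_ENABLE state bit, so the transmit path can test a single bit rather than re-querying ethtool state.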
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
index 822d6fcbc73b..da207d1d9aa9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
u8 link_ext_substate;
};
+struct hns3_ring_param {
+ u32 tx_desc_num;
+ u32 rx_desc_num;
+ u32 rx_buf_len;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 42a9e73d8588..6efd768cc07c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures
*
* This function handles all the PF RAS errors in the
- * hw register/s using command.
+ * hw registers using command.
*/
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8cebb180c812..a5dd2c8c244a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set pf mtu for less than vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 6799d16de34b..76d0f17d6be3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -102,7 +102,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
resp_pf_to_vf->msg_len = msg_len;
resp_pf_to_vf->msg.code = mbx_opcode;
- memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 342d7cdf6285..e13d71abd9f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* ensure vf tbl list as empty before init*/
+ /* ensure vf tbl list is empty before init */
ret = hclgevf_clear_vport_list(hdev);
if (ret) {
dev_err(&pdev->dev,
@@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGEVF_TQP_REG_SIZE * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index d5e0a3f762f7..c8055d69255c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
- * to prtect the received_response from race condition
+ * to protect the received_response from race condition
*/
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
* message to PF.
* @hdev: pointer to struct hclgevf_dev
- * @resp_msg: pointer to store the original message type and response status
- * @len: the resp_msg data array length.
+ * @code0: the message opcode the VF sends to the PF.
+ * @code1: the message sub-opcode the VF sends to the PF.
+ * @resp_data: pointer to store the response data from the PF.
+ * @resp_len: the length of resp_data from the PF.
*/
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 77683909ca3d..4840ad4ccd10 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -257,12 +257,14 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
+ u64 prev = 0;
int rc;
if (!reuse_ltb(ltb, size)) {
dev_dbg(dev,
"LTB size changed from 0x%llx to 0x%x, reallocating\n",
ltb->size, size);
+ prev = ltb->size;
free_long_term_buff(adapter, ltb);
}
@@ -283,8 +285,8 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
bitmap_set(adapter->map_ids, ltb->map_id, 1);
dev_dbg(dev,
- "Allocated new LTB [map %d, size 0x%llx]\n",
- ltb->map_id, ltb->size);
+ "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
+ ltb->map_id, ltb->size, prev);
}
/* Ensure ltb is zeroed - specially when reusing it. */
@@ -345,6 +347,208 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = 0;
}
+/**
+ * free_ltb_set - free the given set of long term buffers (LTBs)
+ * @adapter: The ibmvnic adapter containing this ltb set
+ * @ltb_set: The ltb_set to be freed
+ *
+ * Free each LTB in the given set, then release the set itself.
+ */
+
+static void free_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set)
+{
+ int i;
+
+ for (i = 0; i < ltb_set->num_ltbs; i++)
+ free_long_term_buff(adapter, &ltb_set->ltbs[i]);
+
+ kfree(ltb_set->ltbs);
+ ltb_set->ltbs = NULL;
+ ltb_set->num_ltbs = 0;
+}
+
+/**
+ * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
+ *
+ * @adapter: ibmvnic adapter associated with the LTB set
+ * @ltb_set: container object for the set of LTBs
+ * @num_buffs: Number of buffers in the LTB
+ * @buff_size: Size of each buffer in the LTB
+ *
+ * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
+ * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
+ * the new set needs fewer LTBs than the old set, free the excess LTBs.
+ * If the new set needs more than the old set had, allocate the remaining
+ * ones. Try to reuse as many LTBs as possible to avoid reallocation.
+ *
+ * Any changes to this allocation strategy must be reflected in
+ * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
+ */
+static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set, int num_buffs,
+ int buff_size)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ltb_set old_set;
+ struct ibmvnic_ltb_set new_set;
+ int rem_size;
+ int tot_size; /* size of all ltbs */
+ int ltb_size; /* size of one ltb */
+ int nltbs;
+ int rc;
+ int n;
+ int i;
+
+ dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
+ buff_size);
+
+ ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
+ tot_size = num_buffs * buff_size;
+
+ if (ltb_size > tot_size)
+ ltb_size = tot_size;
+
+ nltbs = tot_size / ltb_size;
+ if (tot_size % ltb_size)
+ nltbs++;
+
+ old_set = *ltb_set;
+
+ if (old_set.num_ltbs == nltbs) {
+ new_set = old_set;
+ } else {
+ int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
+
+ new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
+ if (!new_set.ltbs)
+ return -ENOMEM;
+
+ new_set.num_ltbs = nltbs;
+
+ /* Free any excess ltbs in old set */
+ for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
+ free_long_term_buff(adapter, &old_set.ltbs[i]);
+
+ /* Copy remaining ltbs to new set. All LTBs except the
+ * last one are of the same size. alloc_long_term_buff()
+ * will realloc if the size changes.
+ */
+ n = min(old_set.num_ltbs, new_set.num_ltbs);
+ for (i = 0; i < n; i++)
+ new_set.ltbs[i] = old_set.ltbs[i];
+
+ /* Any additional ltbs in the new set will be NULL for
+ * now and will be allocated in alloc_long_term_buff().
+ */
+
+ /* We no longer need the old_set, so free it. Note that we
+ * may have reused some ltbs from the old set and freed the
+ * excess ltbs above, so we only need to free the container
+ * now, not the LTBs themselves (i.e. don't free_ltb_set()!).
+ */
+ kfree(old_set.ltbs);
+ old_set.ltbs = NULL;
+ old_set.num_ltbs = 0;
+
+ /* Install the new set. If allocations fail below, we will
+ * retry later and know what size LTBs we need.
+ */
+ *ltb_set = new_set;
+ }
+
+ i = 0;
+ rem_size = tot_size;
+ while (rem_size) {
+ if (ltb_size > rem_size)
+ ltb_size = rem_size;
+
+ rem_size -= ltb_size;
+
+ rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
+ if (rc)
+ goto out;
+ i++;
+ }
+
+ WARN_ON(i != new_set.num_ltbs);
+
+ return 0;
+out:
+ /* We may have allocated one or more LTBs before failing and we
+ * want to try to reuse them on the next reset, so don't free the ltb set.
+ */
+ return rc;
+}
+
+/**
+ * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
+ * @rxpool: The receive buffer pool containing buffer
+ * @bufidx: Index of buffer in rxpool
+ * @ltbp: (Output) pointer to the long term buffer containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
+ * pool and its corresponding offset. The lookup assumes for now that
+ * each LTB may be a different size; it could be optimized based on the
+ * allocation strategy in alloc_ltb_set().
+ */
+static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON(bufidx >= rxpool->size);
+
+ for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
+ ltb = &rxpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / rxpool->buff_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * rxpool->buff_size;
+}
+
+/**
+ * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
+ * @txpool: The transmit buffer pool containing buffer
+ * @bufidx: Index of buffer in txpool
+ * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [txpool, bufidx] to an LTB in the
+ * pool and its corresponding offset.
+ */
+static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON_ONCE(bufidx >= txpool->num_buffers);
+
+ for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
+ ltb = &txpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / txpool->buf_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * txpool->buf_size;
+}
+
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -361,6 +565,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_sub_crq_queue *rx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -369,7 +574,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
dma_addr_t dma_addr;
unsigned char *dst;
int shift = 0;
- int index;
+ int bufidx;
int i;
if (!pool->active)
@@ -385,14 +590,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- index = pool->free_map[pool->next_free];
+ bufidx = pool->free_map[pool->next_free];
/* We may be reusing the skb from earlier resets. Allocate
* only if necessary. But since the LTB may have changed
* during reset (see init_rx_pools()), update LTB below
* even if reusing skb.
*/
- skb = pool->rx_buff[index].skb;
+ skb = pool->rx_buff[bufidx].skb;
if (!skb) {
skb = netdev_alloc_skb(adapter->netdev,
pool->buff_size);
@@ -407,26 +612,26 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
- offset = index * pool->buff_size;
- dst = pool->long_term_buff.buff + offset;
+ map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
+ dst = ltb->buff + offset;
memset(dst, 0, pool->buff_size);
- dma_addr = pool->long_term_buff.addr + offset;
+ dma_addr = ltb->addr + offset;
/* add the skb to an rx_buff in the pool */
- pool->rx_buff[index].data = dst;
- pool->rx_buff[index].dma = dma_addr;
- pool->rx_buff[index].skb = skb;
- pool->rx_buff[index].pool_index = pool->index;
- pool->rx_buff[index].size = pool->buff_size;
+ pool->rx_buff[bufidx].data = dst;
+ pool->rx_buff[bufidx].dma = dma_addr;
+ pool->rx_buff[bufidx].skb = skb;
+ pool->rx_buff[bufidx].pool_index = pool->index;
+ pool->rx_buff[bufidx].size = pool->buff_size;
/* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
sub_crq->rx_add.correlator =
- cpu_to_be64((u64)&pool->rx_buff[index]);
+ cpu_to_be64((u64)&pool->rx_buff[bufidx]);
sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.map_id = ltb->map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -466,10 +671,10 @@ failure:
sub_crq = &ind_bufp->indir_arr[i];
rx_buff = (struct ibmvnic_rx_buff *)
be64_to_cpu(sub_crq->rx_add.correlator);
- index = (int)(rx_buff - pool->rx_buff);
- pool->free_map[pool->next_free] = index;
- dev_kfree_skb_any(pool->rx_buff[index].skb);
- pool->rx_buff[index].skb = NULL;
+ bufidx = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = bufidx;
+ dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+ pool->rx_buff[bufidx].skb = NULL;
}
adapter->replenish_add_buff_failure += ind_bufp->index;
atomic_add(buffers_added, &pool->available);
@@ -579,7 +784,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(rx_pool->free_map);
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ free_ltb_set(adapter, &rx_pool->ltb_set);
if (!rx_pool->rx_buff)
continue;
@@ -724,8 +929,8 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size);
+ rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
+ rx_pool->size, rx_pool->buff_size);
if (rc)
goto out;
@@ -782,7 +987,7 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
{
kfree(tx_pool->tx_buff);
kfree(tx_pool->free_map);
- free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ free_ltb_set(adapter, &tx_pool->ltb_set);
}
/**
@@ -972,17 +1177,16 @@ update_ltb:
for (i = 0; i < num_pools; i++) {
struct ibmvnic_tx_pool *tso_pool;
struct ibmvnic_tx_pool *tx_pool;
- u32 ltb_size;
tx_pool = &adapter->tx_pool[i];
- ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
- i, tx_pool->long_term_buff.buff,
- tx_pool->num_buffers, tx_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
+ i, tx_pool->num_buffers, tx_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
+ tx_pool->num_buffers, tx_pool->buf_size);
+ if (rc)
+ goto out;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
@@ -991,14 +1195,14 @@ update_ltb:
tx_pool->free_map[j] = j;
tso_pool = &adapter->tso_pool[i];
- ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
- i, tso_pool->long_term_buff.buff,
- tso_pool->num_buffers, tso_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
+ i, tso_pool->num_buffers, tso_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
+ tso_pool->num_buffers, tso_pool->buf_size);
+ if (rc)
+ goto out;
tso_pool->consumer_index = 0;
tso_pool->producer_index = 0;
@@ -1911,6 +2115,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -1926,7 +2131,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- int index = 0;
+ int bufidx = 0;
u8 proto = 0;
/* If a reset is in progress, drop the packet since
@@ -1960,9 +2165,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
else
tx_pool = &adapter->tx_pool[queue_num];
- index = tx_pool->free_map[tx_pool->consumer_index];
+ bufidx = tx_pool->free_map[tx_pool->consumer_index];
- if (index == IBMVNIC_INVALID_MAP) {
+ if (bufidx == IBMVNIC_INVALID_MAP) {
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
@@ -1973,10 +2178,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
- offset = index * tx_pool->buf_size;
- dst = tx_pool->long_term_buff.buff + offset;
+ map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);
+
+ dst = ltb->buff + offset;
memset(dst, 0, tx_pool->buf_size);
- data_dma_addr = tx_pool->long_term_buff.addr + offset;
+ data_dma_addr = ltb->addr + offset;
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -2003,9 +2209,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
- tx_buff = &tx_pool->tx_buff[index];
+ tx_buff = &tx_pool->tx_buff[bufidx];
tx_buff->skb = skb;
- tx_buff->index = index;
+ tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
memset(&tx_crq, 0, sizeof(tx_crq));
@@ -2017,10 +2223,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_is_gso(skb))
tx_crq.v1.correlator =
- cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+ cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
else
- tx_crq.v1.correlator = cpu_to_be32(index);
- tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+ tx_crq.v1.correlator = cpu_to_be32(bufidx);
+ tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
@@ -4031,16 +4237,16 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->desired.rx_entries =
adapter->max_rx_add_entries_per_subcrq;
- max_entries = IBMVNIC_MAX_LTB_SIZE /
+ max_entries = IBMVNIC_LTB_SET_SIZE /
(adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.tx_entries = max_entries;
}
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.rx_entries = max_entries;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 8f5cefb932dd..90d6833fb1db 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -36,9 +36,50 @@
#define IBMVNIC_TSO_BUFS 64
#define IBMVNIC_TSO_POOL_MASK 0x80000000
-#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
-#define IBMVNIC_BUFFER_HLEN 500
+/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx pool
+ * has a set of buffers. The size of each buffer is determined by the MTU.
+ *
+ * Each Rx/Tx pool is also associated with a DMA region that is shared
+ * with the "hardware" (VIOS) and used to send/receive packets. The DMA
+ * region is also referred to as a Long Term Buffer or LTB.
+ *
+ * The size of the DMA region required for an Rx/Tx pool depends on the
+ * number and size (MTU) of the buffers in the pool. At the max levels
+ * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
+ * some padding.
+ *
+ * But the size of a single DMA region is limited by MAX_ORDER in the
+ * kernel (about 16MB currently). To support, say, 4K jumbo frames, we
+ * use a set of LTBs (struct ltb_set) per pool.
+ *
+ * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel
+ * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
+ * (must be <= IBMVNIC_ONE_LTB_MAX)
+ * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
+ *
+ * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
+ * have a fixed size (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
+ *
+ * The Rx and Tx pools can have up to 4096 buffers. The max size of these
+ * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN),
+ * so we set IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
+ *
+ * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
+ * the allocation of the LTB can fail when the system is low on memory.
+ * If it is too small, we would need several mappings for each of the Rx/
+ * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
+ * VNIC protocol.
+ *
+ * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
+ * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
+ * pool for its 4MB. Thus the 16 Rx and 16 Tx queues require 32 * 5 = 160
+ * plus 16 for the TSO pools, for a total of 176 LTB mappings per VNIC.
+ */
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
+#define IBMVNIC_LTB_SET_SIZE (38 << 20)
+#define IBMVNIC_BUFFER_HLEN 500
#define IBMVNIC_RESET_DELAY 100
static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
@@ -798,6 +839,11 @@ struct ibmvnic_long_term_buff {
u8 map_id;
};
+struct ibmvnic_ltb_set {
+ int num_ltbs;
+ struct ibmvnic_long_term_buff *ltbs;
+};
+
struct ibmvnic_tx_buff {
struct sk_buff *skb;
int index;
@@ -810,7 +856,7 @@ struct ibmvnic_tx_pool {
int *free_map;
int consumer_index;
int producer_index;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
int num_buffers;
int buf_size;
} ____cacheline_aligned;
@@ -833,7 +879,7 @@ struct ibmvnic_rx_pool {
int next_free;
int next_alloc;
int active;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;
struct ibmvnic_vpd {
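As a quick standalone check of the mapping budget worked out in the comment above (a sketch with the constants inlined, not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long set_size = 38UL << 20; /* IBMVNIC_LTB_SET_SIZE */
		unsigned long one_ltb = 8UL << 20;   /* IBMVNIC_ONE_LTB_SIZE */
		unsigned long per_pool = (set_size + one_ltb - 1) / one_ltb; /* 5 */
		unsigned long rx_tx = 32 * per_pool; /* 16 Rx + 16 Tx pools */
		unsigned long tso = 16;              /* one 4MB LTB per TSO pool */

		/* 160 + 16 = 176, comfortably under the 255-mapping limit */
		printf("%lu LTB mappings per VNIC\n", rx_tx + tso);
		return 0;
	}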
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d60e2016d03c..e6c8e6d5234f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
- u16 lat_enc_d = 0; /* latency decoded */
+ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+ u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
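Widening max_ltr_enc_d and lat_enc_d to u32 matters because the LTR decode expands a 10-bit value by a power-of-32 scale, which exceeds 16 bits well inside the valid encoding range. A hedged sketch of the arithmetic (the field layout is the conventional LTR format and is assumed, not quoted from this driver):

	static unsigned int ltr_decode(unsigned short enc)
	{
		unsigned int value = enc & 0x3ff;       /* 10-bit value field */
		unsigned int scale = (enc >> 10) & 0x7; /* 3-bit scale field */

		/* e.g. value = 1023, scale = 2: 1023 << 10 = 1047552,
		 * which a u16 would silently truncate.
		 */
		return value << (5 * scale);
	}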
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 55c6bce5da61..18558a019353 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -852,6 +852,7 @@ struct i40e_vsi {
u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_buf_failed;
u64 rx_page_failed;
u64 rx_page_reuse;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6aefffd83615..2819e261a126 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -47,6 +47,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_SFP_X722_A:
hw->mac.type = I40E_MAC_X722;
break;
default:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index be7c6f34d45c..c9dcd6d92c83 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -309,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->stats.bytes,
tx_ring->tx_stats.restart_queue);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
i,
tx_ring->tx_stats.tx_busy,
- tx_ring->tx_stats.tx_done_old);
+ tx_ring->tx_stats.tx_done_old,
+ tx_ring->tx_stats.tx_stopped);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i\n",
i, tx_ring->size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 1bcb0ec0f0c0..2610338002fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -33,6 +33,7 @@
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_SFP_X722_A 0x0DDA
#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e48499624d22..610f00cbaff9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -293,12 +293,14 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
I40E_VSI_STAT("tx_busy", tx_busy),
+ I40E_VSI_STAT("tx_stopped", tx_stopped),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
+ I40E_VSI_STAT("tx_restart", tx_restart),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6778df2177a1..358c2edc118d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -77,6 +77,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@ -785,6 +786,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
unsigned int start;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -804,6 +806,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_stopped = 0;
rx_page = 0;
rx_buf = 0;
rx_reuse = 0;
@@ -828,6 +831,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_stopped += p->tx_stats.tx_stopped;
/* locate Rx ring */
p = READ_ONCE(vsi->rx_rings[q]);
@@ -872,6 +876,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_stopped = tx_stopped;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
vsi->rx_page_reuse = rx_reuse;
@@ -13436,8 +13441,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np->vsi = vsi;
hw_enc_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_SOFT_FEATURES |
NETIF_F_TSO |
@@ -13468,6 +13472,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ I40E_GSO_PARTIAL_FEATURES;
+
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+ netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
/* enable macvlan offloads */
netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0eae5858f2fe..7bc1174edf6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3,6 +3,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
@@ -3015,6 +3016,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
{
struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
+ __be16 protocol;
union {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -3026,7 +3028,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
- u16 gso_segs, gso_size;
+ u16 gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3039,15 +3041,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
ip.v4->tot_len = 0;
ip.v4->check = 0;
+
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
} else {
ip.v6->payload_len = 0;
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
}
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
@@ -3100,10 +3110,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
- gso_segs = skb_shinfo(skb)->gso_segs;
/* update GSO size and bytecount with header size */
- first->gso_segs = gso_segs;
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */
@@ -3187,13 +3196,27 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *exthdr;
u32 offset, cmd = 0;
__be16 frag_off;
+ __be16 protocol;
u8 l4_proto = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
+ /* Set the tx_flags to indicate the IP protocol type. This is
+ * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ else
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
/* compute outer L2 header size */
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
@@ -3373,6 +3396,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
/* Memory barrier before checking head and tail */
smp_mb();
+ ++tx_ring->tx_stats.tx_stopped;
+
/* Check again in a case another CPU has just made room available. */
if (likely(I40E_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3749,7 +3774,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
struct i40e_tx_buffer *first;
u32 td_offset = 0;
u32 tx_flags = 0;
- __be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso, count;
@@ -3791,15 +3815,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
- /* obtain protocol of skb */
- protocol = vlan_get_protocol(skb);
-
- /* setup IPv4/IPv6 offloads */
- if (protocol == htons(ETH_P_IP))
- tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
- tx_flags |= I40E_TX_FLAGS_IPV6;
-
tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index c471c2da313c..41f86e9535a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -290,6 +290,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
int prev_pkt_ctr;
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 190590d32faf..7dfcf78b57fb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2871,7 +2871,6 @@ continue_reset:
running = adapter->state == __IAVF_RUNNING;
if (running) {
- netdev->flags &= ~IFF_UP;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -2988,7 +2987,7 @@ continue_reset:
* to __IAVF_RUNNING
*/
iavf_up_complete(adapter);
- netdev->flags |= IFF_UP;
+
iavf_irq_enable(adapter, true);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
@@ -3004,10 +3003,8 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running) {
+ if (running)
iavf_change_state(adapter, __IAVF_RUNNING);
- netdev->flags |= IFF_UP;
- }
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 5daade32ea62..fba178e07600 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
- if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ if (!vsi || vsi->type != ICE_VSI_PF)
return;
netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
int base_idx, i;
if (!vsi || vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ return 0;
pf = vsi->back;
netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
if (!pf_vsi)
return;
- ice_free_cpu_rx_rmap(pf_vsi);
ice_clear_arfs(pf_vsi);
}
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
return;
ice_remove_arfs(pf);
- if (ice_set_cpu_rx_rmap(pf_vsi)) {
- dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
- return;
- }
ice_init_arfs(pf_vsi);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9a84d746a6c4..6a463b242c7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -361,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
- if (ice_is_reset_in_progress(vsi->back->state))
+ if (ice_is_reset_in_progress(vsi->back->state) ||
+ test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
repr = ice_netdev_to_repr(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index bd58d9d2e565..6a413331572b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int ice_eswitch_rebuild(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 2774cbd5b12a..6d19c58ccacd 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2689,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
+ ice_free_cpu_rx_rmap(vsi);
+
ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2702,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
continue;
/* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
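+ /* With CONFIG_RFS_ACCEL the aRFS cpu_rmap owns the affinity
+ * notifier and releases it when the rmap is freed, so only clear
+ * it here when RFS acceleration is not built in; doing it in both
+ * places would tear the notifier down twice.
+ */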
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d768925785ca..fde839ef0613 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2510,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
+ err = ice_set_cpu_rx_rmap(vsi);
+ if (err) {
+ netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+ vsi->vsi_num, ERR_PTR(err));
+ goto free_q_irqs;
+ }
+
vsi->irqs_ready = true;
return 0;
@@ -3329,7 +3336,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
- netdev->mpls_features = NETIF_F_HW_CSUM;
+ netdev->mpls_features = NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
/* enable features */
netdev->features |= netdev->hw_features;
@@ -3692,20 +3701,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
- status = ice_set_cpu_rx_rmap(vsi);
- if (status) {
- dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
- vsi->vsi_num, status);
- goto unroll_napi_add;
- }
status = ice_init_mac_fltr(pf);
if (status)
- goto free_cpu_rx_map;
+ goto unroll_napi_add;
return 0;
-free_cpu_rx_map:
- ice_free_cpu_rx_rmap(vsi);
unroll_napi_add:
ice_tc_indir_block_unregister(vsi);
unroll_cfg_netdev:
@@ -5167,7 +5168,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
continue;
ice_vsi_free_q_vectors(pf->vsi[v]);
}
- ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
ice_clear_interrupt_scheme(pf);
pci_save_state(pdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 4eb0599714f4..13cdb5ea594d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
orom_data, hw->flash.banks.orom_size);
if (status) {
+ vfree(orom_data);
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f9bf008471c9..3f8b7274ed2f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -8,6 +8,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -1748,18 +1749,24 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- protocol = vlan_get_protocol(skb);
-
- if (protocol == htons(ETH_P_IP))
+ /* Set the tx_flags to indicate the IP protocol type; this is
+ * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
first->tx_flags |= ICE_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
+ else if (ip.v6->version == 6)
first->tx_flags |= ICE_TX_FLAGS_IPV6;
if (skb->encapsulation) {
@@ -1957,6 +1964,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
+ __be16 protocol;
u32 paylen;
u8 l4_start;
int err;
@@ -1972,8 +1980,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
return err;
/* cppcheck-suppress unreadVariable */
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index c26b408ba419..49ba8bfdbf04 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -415,8 +415,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
*/
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
union ice_32b_rx_flex_desc *rx_desc;
- u32 nb_buffs_extra = 0, nb_buffs;
u16 ntu = rx_ring->next_to_use;
u16 total_count = count;
struct xdp_buff **xdp;
@@ -428,6 +428,10 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
rx_desc,
rx_ring->count - ntu);
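+ /* If the chunk up to the end of the ring could not be fully
+ * filled, release only what was written instead of also filling
+ * from the ring head.
+ */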
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
rx_desc = ICE_RX_DESC(rx_ring, 0);
xdp = ice_xdp_buf(rx_ring, 0);
ntu = 0;
@@ -441,6 +445,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
if (ntu == rx_ring->count)
ntu = 0;
+exit:
if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu);
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 66ea566488d1..59d5c467ea6e 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igc_get_hw_semaphore_i225(hw))
- ; /* Empty */
+ /* Releasing the resource requires first getting the HW semaphore.
+ * If we fail to get the semaphore, there is nothing we can do,
+ * except log an error and quit. We are not allowed to hang here
+ * indefinitely, as it may cause denial of service or system crash.
+ */
+ if (igc_get_hw_semaphore_i225(hw)) {
+ hw_dbg("Failed to release SW_FW_SYNC.\n");
+ return;
+ }
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 40dbf4b43234..6961f65d36b9 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0d6e3215e98f..653e9f1e35b5 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
igc_ptp_write_i225(adapter, &ts);
}
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_EN;
+
+ wr32(IGC_PTM_CTRL, ctrl);
+}
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- if (pci_device_is_present(adapter->pdev))
+ if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
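+ /* Stop PCIe PTM dialogs while the device is suspended; the assumed
+ * rationale is to avoid PTM traffic while the link is in a
+ * low-power state.
+ */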
+ igc_ptm_stop(adapter);
+ }
}
/**
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fe0989c0fc25..f58a1c0144ba 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -62,6 +62,7 @@ config MVNETA
select MVMDIO
select PHYLINK
select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
@@ -177,6 +178,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 9f88fe822555..ceba4aa4f026 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeon_ep/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 934f6dd90992..f6a54c7f0c69 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4735,6 +4735,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+ page_pool_ethtool_stats_get_strings(data);
}
}
@@ -4847,6 +4850,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
}
}
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
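+ /* page_pool_get_stats() accumulates into the caller's stats struct,
+ * so this loop produces totals summed over all Rx queues.
+ */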
+ for (i = 0; i < rxq_number; i++)
+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mvneta_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -4857,12 +4871,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
+
+ mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvneta_statistics);
+ return ARRAY_SIZE(mvneta_statistics) +
+ page_pool_ethtool_stats_get_count();
+
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
new file mode 100644
index 000000000000..0d7db815340e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC Driver Configuration
+#
+
+config OCTEON_EP
+ tristate "Marvell Octeon PCI Endpoint NIC Driver"
+ depends on 64BIT
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports the networking functionality of Marvell's
+ Octeon PCI Endpoint NIC.
+
+ For the list of devices supported by this driver, refer to the
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon_ep.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
new file mode 100644
index 000000000000..2026c8118158
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC
+#
+
+obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
+
+octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
new file mode 100644
index 000000000000..6ad88d0fe43f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cn9k_pf.h"
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cn93_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint0",
+ "epf_vfire_rint1",
+ "epf_vfore_rint0",
+ "epf_vfore_rint1",
+ "epf_mbox_rint0",
+ "epf_mbox_rint1",
+ "epf_oei_rint",
+ "epf_dma_rint",
+ "epf_dma_vf_rint0",
+ "epf_dma_vf_rint1",
+ "epf_pp_vf_rint0",
+ "epf_pp_vf_rint1",
+ "epf_misc_rint",
+ "epf_rsvd",
+};
+
+/* Dump useful hardware CSRs for debugging purposes */
+static void cn93_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cn93_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
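+ /* The doorbell CSR is presumably write-1-to-clear: writing all ones
+ * clears any stale pending count left in the ring (assumption based
+ * on this all-ones write in the reset path).
+ */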
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cn93_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_reset_iq(oct, q);
+ cn93_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CN93_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and the initial active config for the 93xx PF. */
+static void octep_init_config_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 val;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.pkind = 0;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;
+
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7);
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CN93_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_R_IN_CTL_IS_64B;
+ reg_val |= CN93_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
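+ /* Writing the value just read back presumably clears the
+ * write-1-to-clear count bits so the counter restarts from zero
+ * (assumption based on the read-then-write-back pattern).
+ */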
+
+ /* INTR_THRESHOLD is set to the maximum (0xFFFFFFFF) to disable the interrupt */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ oq_ctl &= ~0x7fffffULL; /* clear the ISIZE and BSIZE (bits 22-0) */
+ oq_ctl |= (oq->buffer_size & 0xffff); /* populate the BSIZE (bits 15-0) */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ mbox->q_no = q_no;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_pf_mbox_intr(struct octep_device *oct)
+{
+ u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL;
+
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+ for (qno = 0; qno < OCTEP_MAX_VF; qno++) {
+ val = readq(oct->mbox[qno]->mbox_read_reg);
+ dev_dbg(&oct->pdev->dev,
+ "PF MBOX READ: val:%llx from VF:%llx\n", val, qno);
+ }
+
+ writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+}
+
+/* Interrupt handler for all non-queue generic interrupts. */
+static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ goto irq_handled;
+ }
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+
+ goto irq_handled;
+ }
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MBOX INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MBOX_RINT intr: 0x%llx\n", reg_val);
+ cn93_handle_pf_mbox_intr(oct);
+ goto irq_handled;
+ }
+
+ /* Check for OEI INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received OEI_EINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val);
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ goto irq_handled;
+ }
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
+ if (reg_val) {
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+irq_handled:
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset of 93xx */
+static int octep_soft_reset_cn93_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Set core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+ /* clear core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cn93_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+}
+
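+/* Illustrative example (hypothetical values): with max_count = 1024,
+ * octep_read_index = 1020 and last_done = 10, the index computed in
+ * the function below is (1020 + 10) % 1024 = 6. Unsigned subtraction
+ * of the 32-bit hardware counter likewise survives counter wrap.
+ */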
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
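+ /* The loop above waits up to roughly one second (HZ iterations of
+ * a one-jiffy sleep) for the doorbell count to drain before the
+ * ring is enabled.
+ */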
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cn93_pf(oct, q);
+ octep_enable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cn93_pf(oct, q);
+ octep_disable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cn93_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cn93_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cn93_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cn93_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+
+ oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf;
+
+ octep_setup_pci_window_regs_cn93_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cn93_pf(oct);
+ octep_configure_ring_mapping_cn93_pf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
new file mode 100644
index 000000000000..f208f3f9a447
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_CONFIG_H_
+#define _OCTEP_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_32BYTE_INSTR 32
+#define OCTEP_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_IQ_MAX_DESCRIPTORS 1024
+/* Minimum input (Tx) requests to be enqueued to ring doorbell */
+#define OCTEP_DB_MIN 1
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_IQ_INTR_THRESHOLD 0x0
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: use page-sized buffers.
+ * Build the skb from the allocated page buffer once the packet is
+ * received. When a gathered packet is received, use the head page as
+ * the skb head and the page buffers in consecutive Rx descriptors as
+ * fragments.
+ */
+#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_OQ_PKTS_PER_INTR 128
+#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold:
+ * wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
+
+#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs)
+#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs)
+#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf)
+#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf)
+#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names)
+
+#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr)
+
+/* Hardware Tx Queue configuration. */
+struct octep_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* pkind for packets sent to Octeon */
+ u16 pkind;
+
+ /* Minimum number of commands pending to be posted to Octeon before
+ * the driver rings the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+};
+
+/* Tx/Rx configuration */
+struct octep_pf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+
+ /* Starting IOQ number: this changes based on which PEM is used */
+ u16 srn;
+};
+
+/* Octeon Hardware SRIOV config */
+struct octep_sriov_config {
+ /* Max number of VF devices supported */
+ u16 max_vfs;
+
+ /* Number of VF devices enabled */
+ u16 active_vfs;
+
+ /* Max number of rings assigned to VF */
+ u8 max_rings_per_vf;
+
+ /* Number of rings enabled per VF */
+ u8 active_rings_per_vf;
+
+ /* starting ring number of the VFs: ring-0 of VF-0 of the PF */
+ u16 vf_srn;
+};
+
+/* Octeon MSI-x config. */
+struct octep_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+
+ /* Number of Non IOQ interrupts */
+ u16 non_ioq_msix;
+
+ /* Names of Non IOQ interrupts */
+ char **non_ioq_msix_names;
+};
+
+struct octep_ctrl_mbox_config {
+ /* Barmem address for control mbox */
+ void __iomem *barmem_addr;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_config {
+ /* Input Queue attributes. */
+ struct octep_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_oq_config oq;
+
+ /* NIC Port Configuration */
+ struct octep_pf_ring_config pf_ring_cfg;
+
+ /* SRIOV configuration of the PF */
+ struct octep_sriov_config sriov_cfg;
+
+ /* MSI-X interrupt config */
+ struct octep_msix_config msix_cfg;
+
+ /* ctrl mbox config */
+ struct octep_ctrl_mbox_config ctrl_mbox_cfg;
+};
+#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
new file mode 100644
index 000000000000..39322e4dd100
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_ctrl_mbox.h"
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Timeout in msecs for message response */
+#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100
+/* Time in msecs to wait for message response */
+#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10
+
+#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m)
+#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144)
+
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
+#define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \
+ OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
+#define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \
+ (sizeof(struct octep_ctrl_mbox_msg) * (i)))
+
+static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask)
+{
+ return (index + 1) & mask;
+}
+
+static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask)
+{
+ return mask - ((pi - ci) & mask);
+}
+
+static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask)
+{
+ return ((pi - ci) & mask);
+}
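+
+/* Illustrative example (hypothetical values): with elem_cnt = 8
+ * (mask = 7), pi = 2 and ci = 6 after the producer has wrapped,
+ * depth = (2 - 6) & 7 = 4 used slots and space = 7 - 4 = 3 free slots.
+ * One slot is always left unused so that a full queue can be told
+ * apart from an empty one.
+ */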
+
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
+{
+ u64 magic_num, status;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox->barmem) {
+ pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem);
+ return -EINVAL;
+ }
+
+ magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem));
+ if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) {
+ pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num);
+ return -EINVAL;
+ }
+
+ status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem));
+ if (status != OCTEP_CTRL_MBOX_STATUS_READY) {
+ pr_info("octep_ctrl_mbox : Firmware is not ready.\n");
+ return -EINVAL;
+ }
+
+ mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem));
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->h2fq.elem_sz = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1);
+ mutex_init(&mbox->h2fq_lock);
+
+ mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1);
+ mutex_init(&mbox->f2hq_lock);
+
+ mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_q = mbox->barmem +
+ OCTEP_CTRL_MBOX_INFO_SZ +
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ +
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ;
+
+ mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_q = mbox->h2fq.hw_q +
+ ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) *
+ mbox->h2fq.elem_cnt);
+
+ /* ensure ready state is seen after everything is initialized */
+ wmb();
+ writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Init successful.\n");
+
+ return 0;
+}
+
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ unsigned long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS);
+ unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS);
+ struct octep_ctrl_mbox_q *q;
+ unsigned long expire;
+ u64 *mbuf, *word0;
+ u8 __iomem *qidx;
+ u16 pi, ci;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->h2fq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask))
+ return -ENOMEM;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi);
+ mbuf = (u64 *)msg->msg;
+ word0 = &msg->hdr.word0;
+
+ mutex_lock(&mbox->h2fq_lock);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(*word0, qidx);
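+ /* Payload words are written before the header word so that firmware
+ * polling on word0 presumably never observes a partially written
+ * message.
+ */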
+
+ pi = octep_ctrl_mbox_circq_inc(pi, q->mask);
+ writel(pi, q->hw_prod);
+ mutex_unlock(&mbox->h2fq_lock);
+
+ /* don't check for notification response */
+ if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
+ return 0;
+
+ expire = jiffies + timeout;
+ while (true) {
+ *word0 = readq(qidx);
+ if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
+ break;
+ schedule_timeout_interruptible(period);
+ if (signal_pending(current) || time_after(jiffies, expire)) {
+ pr_info("octep_ctrl_mbox: Timed out\n");
+ return -EBUSY;
+ }
+ }
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ return 0;
+}
+
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_q *q;
+ u32 count, pi, ci;
+ u8 __iomem *qidx;
+ u64 *mbuf;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->f2hq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+ count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask);
+ if (!count)
+ return -EAGAIN;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci);
+ mbuf = (u64 *)msg->msg;
+
+ mutex_lock(&mbox->f2hq_lock);
+
+ msg->hdr.word0 = readq(qidx);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ ci = octep_ctrl_mbox_circq_inc(ci, q->mask);
+ writel(ci, q->hw_cons);
+
+ mutex_unlock(&mbox->f2hq_lock);
+
+ if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req)
+ return 0;
+
+ mbox->process_req(mbox->user_ctx, msg);
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(msg->hdr.word0, qidx);
+
+ return 0;
+}
+
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
+{
+ if (!mbox)
+ return -EINVAL;
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+ /* ensure uninit state is written before uninitialization */
+ wmb();
+
+ mutex_destroy(&mbox->h2fq_lock);
+ mutex_destroy(&mbox->f2hq_lock);
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Uninit successful.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
new file mode 100644
index 000000000000..2dc5753cfec6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_MBOX_H__
+#define __OCTEP_CTRL_MBOX_H__
+
+/* barmem structure
+ * |===========================================|
+ * |Info (16 + 120 + 120 = 256 bytes) |
+ * |-------------------------------------------|
+ * |magic number (8 bytes) |
+ * |bar memory size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |-------------------------------------------|
+ * |host version (8 bytes) |
+ * |host status (8 bytes) |
+ * |host reserved (104 bytes) |
+ * |-------------------------------------------|
+ * |fw version (8 bytes) |
+ * |fw status (8 bytes) |
+ * |fw reserved (104 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Fw to Host Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ * |===========================================|
+ * |Fw to Host Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ */
+
+#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull
+
+/* Size of mbox info in bytes */
+#define OCTEP_CTRL_MBOX_INFO_SZ 256
+/* Size of mbox host to fw queue info in bytes */
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
+/* Size of mbox fw to host queue info in bytes */
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
+/* Size of mbox queue in bytes */
+#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt) (((sz) + 8) * (cnt))
+/* Size of mbox in bytes */
+#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt) (OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \
+ OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt))
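
Each region's byte offset follows from the layout comment above by accumulating the preceding sizes. The offset helpers used by octep_ctrl_mbox.c (OCTEP_CTRL_MBOX_Q_OFFSET() and friends) are defined there, outside this excerpt; a sketch of what they plausibly reduce to:

/* Sketch derived from the layout comment above; the driver's real
 * offset macros live in octep_ctrl_mbox.c and may differ in detail.
 */
#define MBOX_H2FQ_INFO_OFFSET(m)  ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
#define MBOX_F2HQ_INFO_OFFSET(m)  (MBOX_H2FQ_INFO_OFFSET(m) + \
                                   OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
#define MBOX_H2FQ_OFFSET(m)       (MBOX_F2HQ_INFO_OFFSET(m) + \
                                   OCTEP_CTRL_MBOX_F2HQ_INFO_SZ)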
+
+/* Valid request message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0)
+/* Valid response message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1)
+/* Valid notification, no response required */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2)
+
+enum octep_ctrl_mbox_status {
+ OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
+ OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_STATUS_UNINIT
+};
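
octep_ctrl_mbox_init(), earlier in octep_ctrl_mbox.c and outside this excerpt, walks these states to handshake with the firmware, while octep_ctrl_mbox_uninit() above writes UNINIT and then INVALID on teardown. A hedged sketch of the host side of that handshake, assuming a firmware-status offset helper analogous to the OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET() used by uninit:

/* Sketch only; OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET() is assumed by
 * analogy with the host-status helper and may not match the driver.
 */
static int octep_ctrl_mbox_fw_ready(struct octep_ctrl_mbox *mbox)
{
        u64 fw_status;

        writeq(OCTEP_CTRL_MBOX_STATUS_READY,
               OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
        fw_status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem));
        return (fw_status == OCTEP_CTRL_MBOX_STATUS_READY) ? 0 : -EAGAIN;
}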
+
+/* mbox message */
+union octep_ctrl_mbox_msg_hdr {
+ u64 word0;
+ struct {
+ /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
+ u32 flags;
+ /* size of message in words excluding header */
+ u32 sizew;
+ };
+};
+
+/* mbox message */
+struct octep_ctrl_mbox_msg {
+ /* mbox transaction header */
+ union octep_ctrl_mbox_msg_hdr hdr;
+ /* pointer to message buffer */
+ void *msg;
+};
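
A sender fills hdr.flags and hdr.sizew and points msg at a buffer of sizew 8-byte words. With the NOTIFY flag, octep_ctrl_mbox_send() returns as soon as the message is queued, without polling for a response; a minimal sketch (the two-word payload is illustrative):

/* Minimal sketch: fire-and-forget notification; payload is illustrative */
static int octep_send_notify(struct octep_ctrl_mbox *mbox, u64 *payload)
{
        struct octep_ctrl_mbox_msg msg = {};

        msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY;
        msg.hdr.sizew = 2;      /* payload words, excluding the header */
        msg.msg = payload;

        return octep_ctrl_mbox_send(mbox, &msg);
}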
+
+/* Mbox queue */
+struct octep_ctrl_mbox_q {
+ /* q element size, should be aligned to unsigned long */
+ u16 elem_sz;
+ /* q element count, should be power of 2 */
+ u16 elem_cnt;
+ /* q mask */
+ u16 mask;
+ /* producer address in bar mem */
+ u8 __iomem *hw_prod;
+ /* consumer address in bar mem */
+ u8 __iomem *hw_cons;
+ /* q base address in bar mem */
+ u8 __iomem *hw_q;
+};
+
+struct octep_ctrl_mbox {
+ /* host driver version */
+ u64 version;
+ /* size of bar memory */
+ u32 barmem_sz;
+ /* pointer to BAR memory */
+ u8 __iomem *barmem;
+ /* user context for callback, can be null */
+ void *user_ctx;
+ /* callback handler for processing request, called from octep_ctrl_mbox_recv */
+ int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg);
+ /* host-to-fw queue */
+ struct octep_ctrl_mbox_q h2fq;
+ /* fw-to-host queue */
+ struct octep_ctrl_mbox_q f2hq;
+ /* lock for h2fq */
+ struct mutex h2fq_lock;
+ /* lock for f2hq */
+ struct mutex f2hq_lock;
+};
+
+/* Initialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
+
+/* Send mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Retrieve mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Uninitialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);
+
+#endif /* __OCTEP_CTRL_MBOX_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
new file mode 100644
index 000000000000..7c00c896ab98
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+int octep_get_link_status(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ return resp->link.state;
+}
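
Note the pattern every getter in this file relies on: octep_ctrl_mbox_send() copies the firmware's response back into the caller's buffer, so the request structure is simply reinterpreted as the response once the call returns. That is only safe while the response fits inside the request; a compile-time guard along these lines (not present in the driver) would pin the assumption down:

/* Illustrative compile-time guard, not part of the driver */
static_assert(sizeof(struct octep_ctrl_net_h2f_resp) <=
              sizeof(struct octep_ctrl_net_h2f_req),
              "mbox response must fit in the request buffer");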
+
+void octep_set_link_status(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+void octep_set_rx_state(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ memcpy(addr, resp->mac.addr, ETH_ALEN);
+
+ return err;
+}
+
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_SET;
+ memcpy(&req.mac.addr, addr, ETH_ALEN);
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_set_mtu(struct octep_device *oct, int mtu)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.mtu.val = mtu;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_if_stats(struct octep_device *oct)
+{
+ void __iomem *iface_rx_stats;
+ void __iomem *iface_tx_stats;
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+ req.get_stats.offset = oct->ctrl_mbox_ifstats_offset;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset;
+ iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset +
+ sizeof(struct octep_iface_rx_stats);
+ memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats));
+ memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats));
+
+ return err;
+}
+
+int octep_get_link_info(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ oct->link_info.supported_modes = resp->link_info.supported_modes;
+ oct->link_info.advertised_modes = resp->link_info.advertised_modes;
+ oct->link_info.autoneg = resp->link_info.autoneg;
+ oct->link_info.pause = resp->link_info.pause;
+ oct->link_info.speed = resp->link_info.speed;
+
+ return err;
+}
+
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link_info.info.advertised_modes = link_info->advertised_modes;
+ req.link_info.info.autoneg = link_info->autoneg;
+ req.link_info.info.pause = link_info->pause;
+ req.link_info.info.speed = link_info->speed;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
new file mode 100644
index 000000000000..f23b58381322
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_NET_H__
+#define __OCTEP_CTRL_NET_H__
+
+/* Supported commands */
+enum octep_ctrl_net_cmd {
+ OCTEP_CTRL_NET_CMD_GET = 0,
+ OCTEP_CTRL_NET_CMD_SET,
+};
+
+/* Supported states */
+enum octep_ctrl_net_state {
+ OCTEP_CTRL_NET_STATE_DOWN = 0,
+ OCTEP_CTRL_NET_STATE_UP,
+};
+
+/* Supported replies */
+enum octep_ctrl_net_reply {
+ OCTEP_CTRL_NET_REPLY_OK = 0,
+ OCTEP_CTRL_NET_REPLY_GENERIC_FAIL,
+ OCTEP_CTRL_NET_REPLY_INVALID_PARAM,
+};
+
+/* Supported host to fw commands */
+enum octep_ctrl_net_h2f_cmd {
+ OCTEP_CTRL_NET_H2F_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_H2F_CMD_MTU,
+ OCTEP_CTRL_NET_H2F_CMD_MAC,
+ OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+};
+
+/* Supported fw to host commands */
+enum octep_ctrl_net_f2h_cmd {
+ OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+};
+
+struct octep_ctrl_net_req_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_{h2f,f2h}_cmd */
+ u16 cmd;
+ /* reserved */
+ u16 rsvd0;
+};
+
+/* get/set mtu request */
+struct octep_ctrl_net_h2f_req_cmd_mtu {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get/set mac request */
+struct octep_ctrl_net_h2f_req_cmd_mac {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get if_stats, xstats, q_stats request */
+struct octep_ctrl_net_h2f_req_cmd_get_stats {
+ /* offset into barmem where fw should copy over stats */
+ u32 offset;
+};
+
+/* get/set link state, rx state */
+struct octep_ctrl_net_h2f_req_cmd_state {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* link info */
+struct octep_ctrl_net_link_info {
+ /* Bitmap of Supported link speeds/modes */
+ u64 supported_modes;
+ /* Bitmap of Advertised link speeds/modes */
+ u64 advertised_modes;
+ /* Autonegotiation state; bit 0=disabled; bit 1=enabled */
+ u8 autoneg;
+ /* Pause frames setting; bit 0=disabled; bit 1=enabled */
+ u8 pause;
+ /* Negotiated link speed in Mbps */
+ u32 speed;
+};
+
+/* get/set link info */
+struct octep_ctrl_net_h2f_req_cmd_link_info {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_link_info */
+ struct octep_ctrl_net_link_info info;
+};
+
+/* Host to fw request data */
+struct octep_ctrl_net_h2f_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_req_cmd_mac mac;
+ struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats;
+ struct octep_ctrl_net_h2f_req_cmd_state link;
+ struct octep_ctrl_net_h2f_req_cmd_state rx;
+ struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ };
+} __packed;
+
+struct octep_ctrl_net_resp_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_{h2f,f2h}_cmd */
+ u16 cmd;
+ /* octep_ctrl_net_reply */
+ u16 reply;
+};
+
+/* get mtu response */
+struct octep_ctrl_net_h2f_resp_cmd_mtu {
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get mac response */
+struct octep_ctrl_net_h2f_resp_cmd_mac {
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get link state, rx state response */
+struct octep_ctrl_net_h2f_resp_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Host to fw response data */
+struct octep_ctrl_net_h2f_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+ struct octep_ctrl_net_h2f_resp_cmd_state link;
+ struct octep_ctrl_net_h2f_resp_cmd_state rx;
+ struct octep_ctrl_net_link_info link_info;
+ };
+} __packed;
+
+/* link state notification */
+struct octep_ctrl_net_f2h_req_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Fw to host request data */
+struct octep_ctrl_net_f2h_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_f2h_req_cmd_state link;
+ };
+};
+
+/* Fw to host response data */
+struct octep_ctrl_net_f2h_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+};
+
+/* Size of host to fw octep_ctrl_mbox queue element */
+union octep_ctrl_net_h2f_data_sz {
+ struct octep_ctrl_net_h2f_req h2f_req;
+ struct octep_ctrl_net_h2f_resp h2f_resp;
+};
+
+/* Size of fw to host octep_ctrl_mbox queue element */
+union octep_ctrl_net_f2h_data_sz {
+ struct octep_ctrl_net_f2h_req f2h_req;
+ struct octep_ctrl_net_f2h_resp f2h_resp;
+};
+
+/* size of host to fw data in words */
+#define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size of fw to host data in words */
+#define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size in words of get/set mtu request */
+#define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2
+/* size in words of get/set mac request */
+#define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2
+/* size in words of get stats request */
+#define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2
+/* size in words of get/set state request */
+#define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2
+/* size in words of get/set link info request */
+#define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4
+
+/* size in words of get mtu response */
+#define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2
+/* size in words of set mtu response */
+#define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1
+/* size in words of get mac response */
+#define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2
+/* size in words of set mac response */
+#define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1
+/* size in words of get state response */
+#define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2
+/* size in words of set state response */
+#define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1
+/* size in words of get link info response */
+#define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4
+/* size in words of set link info response */
+#define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1
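
These *_SZW constants hand-count the 8-byte mbox words each message occupies: the octep_ctrl_net header plus the command body, rounded up, with the mbox's own word0 header excluded per the sizew definition. A sketch of how one such count could be checked against the struct definitions at compile time (the OCTEP_CTRL_NET_WORDS() helper is invented for illustration, not part of the driver):

/* Illustrative only: hdr (8 bytes) + cmd_mtu (4 bytes) rounds up to
 * two words, matching OCTEP_CTRL_NET_H2F_MTU_REQ_SZW above.
 */
#define OCTEP_CTRL_NET_WORDS(sz)        (((sz) + 7) / 8)
static_assert(OCTEP_CTRL_NET_WORDS(sizeof(struct octep_ctrl_net_req_hdr) +
                                   sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu)) ==
              OCTEP_CTRL_NET_H2F_MTU_REQ_SZW,
              "hand-counted MTU request size went stale");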
+
+/** Get link status from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: link status: 0=down, 1=up; -errno on failure.
+ */
+int octep_get_link_status(struct octep_device *oct);
+
+/** Set link status in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_link_status(struct octep_device *oct, bool up);
+
+/** Set rx state in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_rx_state(struct octep_device *oct, bool up);
+
+/** Get mac address from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mac address in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mtu in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param mtu: new MTU value.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mtu(struct octep_device *oct, int mtu);
+
+/** Get interface statistics from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_if_stats(struct octep_device *oct);
+
+/** Get link info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_link_info(struct octep_device *oct);
+
+/** Set link info in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info);
+
+#endif /* __OCTEP_CTRL_NET_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
new file mode 100644
index 000000000000..87ef129b269a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "tx_hw_underflow",
+ "tx_hw_control",
+ "tx_less_than_64",
+ "tx_equal_64",
+ "tx_equal_65_to_127",
+ "tx_equal_128_to_255",
+ "tx_equal_256_to_511",
+ "tx_equal_512_to_1023",
+ "tx_equal_1024_to_1518",
+ "tx_greater_than_1518",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_hw_mcast",
+ "rx_pause_pkts",
+ "rx_pause_bytes",
+ "rx_dropped_pkts_fifo_full",
+ "rx_dropped_bytes_fifo_full",
+ "rx_err_pkts",
+};
+
+#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN, "%s",
+ octep_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+octep_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_tx_stats *iface_tx_stats;
+ struct octep_iface_rx_stats *iface_rx_stats;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_packets = 0;
+ rx_bytes = 0;
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ tx_busy_errors += iq->stats.tx_busy;
+
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_packets;
+ data[i++] = tx_packets;
+ data[i++] = rx_bytes;
+ data[i++] = tx_bytes;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full +
+ iface_rx_stats->err_pkts;
+ data[i++] = iface_tx_stats->xscol +
+ iface_tx_stats->xsdef;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_tx_stats->undflw;
+ data[i++] = iface_tx_stats->ctl;
+ data[i++] = iface_tx_stats->hist_lt64;
+ data[i++] = iface_tx_stats->hist_eq64;
+ data[i++] = iface_tx_stats->hist_65to127;
+ data[i++] = iface_tx_stats->hist_128to255;
+ data[i++] = iface_tx_stats->hist_256to511;
+ data[i++] = iface_tx_stats->hist_512to1023;
+ data[i++] = iface_tx_stats->hist_1024to1518;
+ data[i++] = iface_tx_stats->hist_gt1518;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->mcast_pkts;
+ data[i++] = iface_rx_stats->pause_pkts;
+ data[i++] = iface_rx_stats->pause_octets;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ data[i++] = iface_rx_stats->err_pkts;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \
+{ \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ u64 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ if (link_info->pause) {
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static int octep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info link_info_new;
+ struct octep_iface_link_info *link_info;
+ u64 advertised = 0;
+ u8 autoneg = 0;
+ int err;
+
+ link_info = &oct->link_info;
+ memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info));
+
+ /* Only Full duplex is supported;
+ * Assume full duplex when duplex is unknown.
+ */
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_UNKNOWN)
+ return -EOPNOTSUPP;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED))
+ return -EOPNOTSUPP;
+ autoneg = 1;
+ }
+
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseR_FEC))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseLR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseLR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseLR_ER_FR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseLR4_ER4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4);
+
+ if (advertised == link_info->advertised_modes &&
+ cmd->base.speed == link_info->speed &&
+ cmd->base.autoneg == link_info->autoneg)
+ return 0;
+
+ link_info_new.advertised_modes = advertised;
+ link_info_new.speed = cmd->base.speed;
+ link_info_new.autoneg = autoneg;
+
+ err = octep_set_link_info(oct, &link_info_new);
+ if (err)
+ return err;
+
+ memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info));
+ return 0;
+}
+
+static const struct ethtool_ops octep_ethtool_ops = {
+ .get_drvinfo = octep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_get_strings,
+ .get_sset_count = octep_get_sset_count,
+ .get_ethtool_stats = octep_get_ethtool_stats,
+ .get_link_ksettings = octep_get_link_ksettings,
+ .set_link_ksettings = octep_set_link_ksettings,
+};
+
+void octep_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
new file mode 100644
index 000000000000..e020c81f3455
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+struct workqueue_struct *octep_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to the interrupt handler, from which NAPI
+ * poll is scheduled; it includes quick access to the private data of the
+ * Tx/Rx queue corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_alloc_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+ struct octep_ioq_vector *ioq_vector;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_free_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_enable_msix_range(struct octep_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* Generic interrupts apart from input/output queues */
+ num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
+ oct->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_disable_msix(struct octep_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all non-queue (generic) interrupts.
+ */
+static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.non_ioq_intr_handler(oct);
+}
+
+/**
+ * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data contains pointers to Tx/Rx queue private data
+ * and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_ioq_vector *ioq_vector = data;
+ struct octep_device *oct = ioq_vector->octep_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_request_irqs(struct octep_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ char **non_ioq_msix_names;
+ int num_non_ioq_msix;
+ int ret, i;
+
+ num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
+ non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
+
+ oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
+ OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
+ if (!oct->non_ioq_irq_names)
+ goto alloc_err;
+
+ /* First few MSI-X interrupts are non-queue interrupts */
+ for (i = 0; i < num_non_ioq_msix; i++) {
+ char *irq_name;
+
+ irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
+ "%s-%s", netdev->name, non_ioq_msix_names[i]);
+ ret = request_irq(msix_entry->vector,
+ octep_non_ioq_intr_handler, 0,
+ irq_name, oct);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for %s; err=%d",
+ irq_name, ret);
+ goto non_ioq_irq_err;
+ }
+ }
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i + num_non_ioq_msix];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (i > num_non_ioq_msix) {
+ --i;
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+non_ioq_irq_err:
+ while (i) {
+ --i;
+ free_irq(oct->msix_entries[i].vector, oct);
+ }
+alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_free_irqs(struct octep_device *oct)
+{
+ int i;
+
+ /* First few MSI-X interrupts are non queue interrupts; free them */
+ for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
+ free_irq(oct->msix_entries[i].vector, oct);
+ kfree(oct->non_ioq_irq_names);
+
+ /* Free IRQs for Input/Output (Tx/Rx) queues */
+ for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector,
+ oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_setup_irqs(struct octep_device *oct)
+{
+ if (octep_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_disable_msix(oct);
+enable_msix_err:
+ octep_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_clean_irqs(struct octep_device *oct)
+{
+ octep_free_irqs(oct);
+ octep_disable_msix(oct);
+ octep_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to the RESEND bit */
+ wmb();
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
+ rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
+
+ /* Need more polling if Tx completion processing is still pending or
+ * at least 'budget' Rx packets were processed.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ napi_complete(napi);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+}
+
+/**
+ * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_add(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
+ octep_napi_poll, 64);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_delete(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_enable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_disable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * octep_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_open(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_napi_add(oct);
+ octep_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_set_rx_state(oct, true);
+
+ ret = octep_get_link_status(oct);
+ if (!ret)
+ octep_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_oq_dbell_init(oct);
+
+ ret = octep_get_link_status(oct);
+ if (ret)
+ octep_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+ octep_clean_irqs(oct);
+setup_irq_err:
+ octep_free_oqs(oct);
+setup_oq_err:
+ octep_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_stop(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_set_link_status(oct, false);
+ octep_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+
+ octep_clean_irqs(oct);
+ octep_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_free_oqs(oct);
+ octep_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static inline int octep_iq_full_check(struct octep_iq *iq)
+{
+ if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ return 0;
+
+ /* Stop the queue if unable to send */
+ netif_stop_subqueue(iq->netdev, iq->q_no);
+
+ /* check again and restart the queue, in case NAPI has just freed
+ * enough Tx ring entries.
+ */
+ if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ netif_start_subqueue(iq->netdev, iq->q_no);
+ iq->stats.restart_cnt++;
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_tx_sglist_desc *sglist;
+ struct octep_tx_buffer *tx_buffer;
+ struct octep_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_instr_hdr *ih;
+ struct octep_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ u16 q_no, wi;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+ if (octep_iq_full_check(iq)) {
+ iq->stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->tlen = skb->len;
+ ih->pkind = oct->pkind;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+ memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
+
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
+ atomic_inc(&iq->instr_pending);
+ wi++;
+ if (wi == iq->max_count)
+ wi = 0;
+ iq->host_write_index = wi;
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ iq->stats.instr_posted++;
+ skb_tx_timestamp(skb);
+ return NETDEV_TX_OK;
+
+dma_map_sg_err:
+ /* Unwind the mappings made so far; lengths were stored at
+ * len[3 - (i & 3)], matching the mapping loop above.
+ */
+ if (si > 0) {
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ }
+ while (si > 1) {
+ si--;
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ }
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
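
The scatter/gather path above packs four buffer pointers and four 16-bit lengths into each sglist entry, which is why fragment i lands in sglist[i >> 2] and why its length is stored at len[3 - (i & 3)]: the four lengths then read back as a single 64-bit word in the order the hardware expects. A sketch of the descriptor layout this indexing implies (the driver's actual definition lives in octep_tx.h, outside this excerpt):

/* Sketch of the layout implied by the indexing above; see octep_tx.h
 * for the driver's real definition.
 */
struct octep_tx_sglist_desc {
        u16 len[4];             /* lengths of up to four buffers */
        dma_addr_t dma_ptr[4];  /* DMA addresses of those buffers */
};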
+
+/**
+ * octep_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ struct octep_device *oct = netdev_priv(netdev);
+ int q;
+
+ octep_get_if_stats(oct);
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->collisions = oct->iface_tx_stats.xscol;
+ stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+}
+
+/**
+ * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, that potentially clears a Tx queue timeout
+ * condition.
+ **/
+static void octep_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_stop(netdev);
+ octep_open(netdev);
+ }
+ rtnl_unlock();
+}
+
+/**
+ * octep_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ queue_work(octep_wq, &oct->tx_timeout_task);
+}
+
+static int octep_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ int err = 0;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+
+ return err;
+}
+
+static const struct net_device_ops octep_netdev_ops = {
+ .ndo_open = octep_open,
+ .ndo_stop = octep_stop,
+ .ndo_start_xmit = octep_start_xmit,
+ .ndo_get_stats64 = octep_get_stats64,
+ .ndo_tx_timeout = octep_tx_timeout,
+ .ndo_set_mac_address = octep_set_mac,
+ .ndo_change_mtu = octep_change_mtu,
+};
+
+/**
+ * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
+ *
+ * @work: pointer to ctrl mbox work_struct
+ *
+ * Poll ctrl mbox message queue and handle control messages from firmware.
+ */
+static void octep_ctrl_mbox_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ ctrl_mbox_task);
+ struct net_device *netdev = oct->netdev;
+ struct octep_ctrl_net_f2h_req req = {};
+ struct octep_ctrl_mbox_msg msg;
+ int ret = 0;
+
+ msg.msg = &req;
+ while (true) {
+ ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg);
+ if (ret)
+ break;
+
+ switch (req.hdr.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ if (netif_running(netdev)) {
+ if (req.link.state) {
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ netif_carrier_on(netdev);
+ } else {
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ netif_carrier_off(netdev);
+ }
+ }
+ break;
+ default:
+ pr_info("Unknown mbox req : %u\n", req.hdr.cmd);
+ break;
+ }
+ }
+}
+
+/**
+ * octep_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_device_setup(struct octep_device *oct)
+{
+ struct octep_ctrl_mbox *ctrl_mbox;
+ struct pci_dev *pdev = oct->pdev;
+ int i, ret;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR regions */
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ oct->mmio[i].hw_addr =
+ ioremap(pci_resource_start(oct->pdev, i * 2),
+ pci_resource_len(oct->pdev, i * 2));
+ oct->mmio[i].mapped = 1;
+ }
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ dev_info(&pdev->dev,
+ "Setting up OCTEON CN93XX PF PASS%d.%d\n",
+ OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ octep_device_setup_cn93_pf(oct);
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "%s: unsupported device\n", __func__);
+ goto unsupported_dev;
+ }
+
+ oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
+
+ /* Initialize control mbox */
+ ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+ ret = octep_ctrl_mbox_init(ctrl_mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+ return ret;
+ }
+ oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
+ ctrl_mbox->h2fq.elem_cnt,
+ ctrl_mbox->f2hq.elem_sz,
+ ctrl_mbox->f2hq.elem_cnt);
+
+ return 0;
+
+unsupported_dev:
+ return -EOPNOTSUPP;
+}
+
+/**
+ * octep_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_device_cleanup(struct octep_device *oct)
+{
+ int i;
+
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ for (i = 0; i < OCTEP_MAX_VF; i++) {
+ vfree(oct->mbox[i]);
+ oct->mbox[i] = NULL;
+ }
+
+ octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+
+ oct->hw_ops.soft_reset(oct);
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ if (oct->mmio[i].mapped)
+ iounmap(oct->mmio[i].hw_addr);
+ }
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+/**
+ * octep_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_device *octep_dev = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto err_dma_mask;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto err_pci_regions;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_device),
+ OCTEP_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_dev = netdev_priv(netdev);
+ octep_dev->netdev = netdev;
+ octep_dev->pdev = pdev;
+ octep_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_dev);
+
+ err = octep_device_setup(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto err_octep_config;
+ }
+ INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
+
+ netdev->netdev_ops = &octep_netdev_ops;
+ octep_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ netdev->hw_features = NETIF_F_SG;
+ netdev->features |= netdev->hw_features;
+ netdev->min_mtu = OCTEP_MIN_MTU;
+ netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->mtu = OCTEP_DEFAULT_MTU;
+
+ octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_dev->mac_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto register_dev_err;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+register_dev_err:
+ octep_device_cleanup(octep_dev);
+err_octep_config:
+ free_netdev(netdev);
+err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
+err_pci_regions:
+err_dma_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * octep_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_remove(struct pci_dev *pdev)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ cancel_work_sync(&oct->tx_timeout_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_driver = {
+ .name = OCTEP_DRV_NAME,
+ .id_table = octep_pci_id_tbl,
+ .probe = octep_probe,
+ .remove = octep_remove,
+};
+
+/**
+ * octep_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);
+
+ /* work queue for all deferred tasks */
+ octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
+ if (!octep_wq) {
+ pr_err("%s: Failed to create common workqueue\n",
+ OCTEP_DRV_NAME);
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&octep_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_DRV_NAME, ret);
+ destroy_workqueue(octep_wq);
+ return ret;
+ }
+
+ pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);
+
+ return ret;
+}
+
+/**
+ * octep_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);
+
+ pci_unregister_driver(&octep_driver);
+ destroy_workqueue(octep_wq);
+
+ pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
+}
+
+module_init(octep_init_module);
+module_exit(octep_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
new file mode 100644
index 000000000000..025626a61383
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_MAIN_H_
+#define _OCTEP_MAIN_H_
+
+#include "octep_tx.h"
+#include "octep_rx.h"
+#include "octep_ctrl_mbox.h"
+
+#define OCTEP_DRV_NAME "octeon_ep"
+#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"
+
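+/* The packed OCTEP_PCIID_* values are (device ID << 16) | vendor ID;
+ * 0x177d is the Cavium (Marvell) PCI vendor ID.
+ */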
+#define OCTEP_PCIID_CN93_PF 0xB200177d
+#define OCTEP_PCIID_CN93_VF 0xB203177d
+
+#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+
+#define OCTEP_MAX_QUEUES 63
+#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_VF 64
+
+#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ
+
+/* Flags to disable and enable Interrupts */
+#define OCTEP_INPUT_INTR (1)
+#define OCTEP_OUTPUT_INTR (2)
+#define OCTEP_MBOX_INTR (4)
+#define OCTEP_ALL_INTR 0xff
+
+#define OCTEP_IQ_INTR_RESEND_BIT 59
+#define OCTEP_OQ_INTR_RESEND_BIT 59
+
+#define OCTEP_MMIO_REGIONS 3
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to a different virtual address range in
+ * the kernel.
+ */
+struct octep_mmio {
+ /* The kernel virtual address to which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_pci_win_regs {
+ u8 __iomem *pci_win_wr_addr;
+ u8 __iomem *pci_win_rd_addr;
+ u8 __iomem *pci_win_wr_data;
+ u8 __iomem *pci_win_rd_data;
+};
+
+struct octep_hw_ops {
+ void (*setup_iq_regs)(struct octep_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ int (*soft_reset)(struct octep_device *oct);
+ void (*reinit_regs)(struct octep_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_iq *iq);
+
+ void (*enable_interrupts)(struct octep_device *oct);
+ void (*disable_interrupts)(struct octep_device *oct);
+
+ void (*enable_io_queues)(struct octep_device *oct);
+ void (*disable_io_queues)(struct octep_device *oct);
+ void (*enable_iq)(struct octep_device *oct, int q);
+ void (*disable_iq)(struct octep_device *oct, int q);
+ void (*enable_oq)(struct octep_device *oct, int q);
+ void (*disable_oq)(struct octep_device *oct, int q);
+ void (*reset_io_queues)(struct octep_device *oct);
+ void (*dump_registers)(struct octep_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_mbox_data {
+ u32 cmd;
+ u32 total_len;
+ u32 recv_len;
+ u32 rsvd;
+ u64 *data;
+};
+
+/* Octeon device mailbox */
+struct octep_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ u32 q_no;
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ struct octep_mbox_data mbox_data;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_ioq_vector {
+ char name[OCTEP_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_device *octep_dev;
+ struct octep_iq *iq;
+ struct octep_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_link_mode_bit_indices {
+ OCTEP_LINK_MODE_10GBASE_T = 0,
+ OCTEP_LINK_MODE_10GBASE_R,
+ OCTEP_LINK_MODE_10GBASE_CR,
+ OCTEP_LINK_MODE_10GBASE_KR,
+ OCTEP_LINK_MODE_10GBASE_LR,
+ OCTEP_LINK_MODE_10GBASE_SR,
+ OCTEP_LINK_MODE_25GBASE_CR,
+ OCTEP_LINK_MODE_25GBASE_KR,
+ OCTEP_LINK_MODE_25GBASE_SR,
+ OCTEP_LINK_MODE_40GBASE_CR4,
+ OCTEP_LINK_MODE_40GBASE_KR4,
+ OCTEP_LINK_MODE_40GBASE_LR4,
+ OCTEP_LINK_MODE_40GBASE_SR4,
+ OCTEP_LINK_MODE_50GBASE_CR2,
+ OCTEP_LINK_MODE_50GBASE_KR2,
+ OCTEP_LINK_MODE_50GBASE_SR2,
+ OCTEP_LINK_MODE_50GBASE_CR,
+ OCTEP_LINK_MODE_50GBASE_KR,
+ OCTEP_LINK_MODE_50GBASE_LR,
+ OCTEP_LINK_MODE_50GBASE_SR,
+ OCTEP_LINK_MODE_100GBASE_CR4,
+ OCTEP_LINK_MODE_100GBASE_KR4,
+ OCTEP_LINK_MODE_100GBASE_LR4,
+ OCTEP_LINK_MODE_100GBASE_SR4,
+ OCTEP_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: physical link is up/down. */
+ u8 oper_up;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_device {
+ struct octep_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_mmio mmio[OCTEP_MMIO_REGIONS];
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* Pointers to Octeon Tx queues */
+ struct octep_iq *iq[OCTEP_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_oq *oq[OCTEP_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* PCI Window registers to access some hardware CSRs */
+ struct octep_pci_win_regs pci_win_regs;
+ /* Hardware operations */
+ struct octep_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* IOq information for its corresponding MSI-X interrupt. */
+ struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_mbox *mbox[OCTEP_MAX_VF];
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* control mbox to talk to firmware over the PF */
+ struct octep_ctrl_mbox ctrl_mbox;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Work entry to handle ctrl mbox interrupt */
+ struct work_struct ctrl_mbox_task;
+};
+
+static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_MINOR_REV(struct octep_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
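+
+/* Example: rev_id 0x01 decodes as major 1 (a raw major of 0 is mapped
+ * to 1) and minor 1, i.e. the "PASS1.1" printed during device setup.
+ */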
+
+/* Octeon CSR read/write access APIs */
+#define octep_write_csr(octep_dev, reg_off, value) \
+ writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_write_csr64(octep_dev, reg_off, val64) \
+ writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr(octep_dev, reg_off) \
+ readl((octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr64(octep_dev, reg_off) \
+ readq((octep_dev)->mmio[0].hw_addr + (reg_off))
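+
+/* Illustrative use only (assumes BAR0 is mapped into mmio[0]):
+ *
+ *   u64 rinfo = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ *   octep_write_csr64(oct, CN93_SDP_EPF_RINFO, rinfo);
+ */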
+
+/* Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+static inline u64
+OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr)
+{
+ u64 val64;
+
+ addr |= 1ull << 53; /* read 8 bytes */
+ writeq(addr, oct->pci_win_regs.pci_win_rd_addr);
+ val64 = readq(oct->pci_win_regs.pci_win_rd_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64);
+
+ return val64;
+}
+
+/* Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to write
+ * @param val - Value to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+static inline void
+OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val)
+{
+ writeq(addr, oct->pci_win_regs.pci_win_wr_addr);
+ writeq(val, oct->pci_win_regs.pci_win_wr_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val);
+}
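+
+/* Illustrative use only: read-modify-write of a chip-internal CSR via
+ * the BAR0 window, assuming pci_win_regs has been initialized:
+ *
+ *   u64 v = OCTEP_PCI_WIN_READ(oct, CN93_RST_BOOT);
+ *   OCTEP_PCI_WIN_WRITE(oct, CN93_RST_BOOT, v);
+ */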
+
+extern struct workqueue_struct *octep_wq;
+
+int octep_device_setup(struct octep_device *oct);
+int octep_setup_iqs(struct octep_device *oct);
+void octep_free_iqs(struct octep_device *oct);
+void octep_clean_iqs(struct octep_device *oct);
+int octep_setup_oqs(struct octep_device *oct);
+void octep_free_oqs(struct octep_device *oct);
+void octep_oq_dbell_init(struct octep_device *oct);
+void octep_device_setup_cn93_pf(struct octep_device *oct);
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
+int octep_oq_process_rx(struct octep_oq *oq, int budget);
+void octep_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _OCTEP_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
new file mode 100644
index 000000000000..cc51149790ff
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT 0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR 0x38
+#define CN93_CONFIG_PCIE_CAP 0x70
+#define CN93_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN93_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET (0x1ULL << 17)
+#define CN93_EPF_OFFSET (0x1ULL << 25)
+#define CN93_MAC_OFFSET (0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH 0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64 0x20000
+#define CN93_SDP_WIN_RD_ADDR64 0x20010
+#define CN93_SDP_WIN_WR_DATA64 0x20020
+#define CN93_SDP_WIN_WR_MASK_REG 0x20030
+#define CN93_SDP_WIN_RD_DATA64 0x20040
+
+#define CN93_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO 0x205F0
+
+#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
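+
+/* Example decode of a hypothetical RINFO value 0x0040000800000010:
+ * SRN = 0x10, RPVF = 8, NVFS = 0x40.
+ */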
+
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_SDP_R_IN_CNTS_START 0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring) \
+ (CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring) \
+ (CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring) \
+ (CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
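+
+/* Per-ring register copies are CN93_RING_OFFSET (128 KB) apart, e.g.
+ * CN93_SDP_R_IN_CONTROL(2) = 0x10000 + 2 * 0x20000 = 0x50000.
+ */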
+
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value of 4.
+ */
+#define CN93_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CN93_R_IN_CTL_NSR (0x1ULL << 3)
+#define CN93_R_IN_CTL_ESR (0x1ULL << 1)
+#define CN93_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CN93_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_SDP_R_OUT_CONTROL(ring) \
+ (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_ENABLE(ring) \
+ (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS(ring) \
+ (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When a MBOX write from PF happens to a VF,
+ * the corresponding bit will be set in this register as well as in
+ * the PF_VF_INT register.
+ *
+ * This is a RO register; the interrupt can be cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* The first two registers are for PF to VF signalling; the last one
+ * carries data from VF to PF.
+ */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START 0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring) \
+ (CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START 0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring) \
+ (CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring) \
+ (CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring) \
+ (CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START 0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT 0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START 0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT 0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CN93_SDP_EPF_OEI_RINT 0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390
+
+#define CN93_SDP_EPF_DMA_RINT 0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440
+#define CN93_SDP_EPF_DMA_CNT_START 0x20460
+#define CN93_SDP_EPF_DMA_TIM_START 0x20480
+
+#define CN93_SDP_EPF_MISC_RINT 0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0
+
+#define CN93_SDP_EPF_MBOX_RINT(index) \
+ (CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index) \
+ (CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT BIT_ULL(62)
+#define CN93_INTR_R_IN_INT BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT BIT_ULL(60)
+#define CN93_INTR_R_RESEND BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START 0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START 0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CN93_SDP_EPVF_RING(ring) \
+ (CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring) \
+ (CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring) \
+ (CN93_SDP_IN_RATE_LIMIT_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_MAC_PF_RING_CTL(mac) \
+ (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET))
+
+#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF)
+#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
+#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
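+
+/* Example decode of a hypothetical ring control value 0x00281002:
+ * NPFS = 2, SRN = 0x10, RPPF = 0x28.
+ */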
+
+/* Number of non-queue interrupts in CN93xx */
+#define CN93_NUM_NON_IOQ_INTR 16
+#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
new file mode 100644
index 000000000000..945947ec7723
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+static void octep_oq_reset_indices(struct octep_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -1, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -1;
+}
+
+/**
+ * octep_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_setup_oq(struct octep_device *oct, int q_no)
+{
+ struct octep_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an additional header is filled in by Octeon after the length field
+ * in Rx packets. This header carries the extra packet information.
+ */
+ if (oct->caps_enabled)
+ oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -1;
+}
+
+/**
+ * octep_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_oq_free_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_oq_reset_indices(oq);
+}
+
+/**
+ * octep_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_free_oq(struct octep_oq *oq)
+{
+ struct octep_device *oct = oq->octep_dev;
+ int q_no = oq->q_no;
+
+ octep_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_oqs(struct octep_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_free_oq(oct->oq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_oq_dbell_init(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_oqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter is approaching its max value.
+ * The counter is not cleared on every read, to save PCI write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the count returned by the hardware accounts only for full packets,
+ * not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_oq_process_rx(struct octep_device *oct,
+ struct octep_oq *oq, u16 pkts_to_process)
+{
+ struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ struct octep_rx_buffer *buff_info;
+ struct octep_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ struct sk_buff *skb;
+ u16 data_offset;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = &oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Convert the length field from big-endian to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE +
+ OCTEP_OQ_RESP_HW_EXT_SIZE;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (resp_hw_ext &&
+ resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets that can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until the budget is exhausted or no new
+ * packets are seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_oq_process_rx(struct octep_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
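+
+/* A minimal sketch of how a NAPI poll handler could drive the Rx/Tx
+ * processing above; the driver's real poll handler lives elsewhere and
+ * additionally re-arms the queue interrupts. Field names follow
+ * struct octep_ioq_vector.
+ *
+ *   static int octep_napi_poll_sketch(struct napi_struct *napi, int budget)
+ *   {
+ *           struct octep_ioq_vector *v =
+ *                   container_of(napi, struct octep_ioq_vector, napi);
+ *           int rx_done = octep_oq_process_rx(v->oq, budget);
+ *
+ *           octep_iq_process_completions(v->iq, budget);
+ *           if (rx_done < budget)
+ *                   napi_complete_done(napi, rx_done);
+ *           return rx_done;
+ *   }
+ */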
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
new file mode 100644
index 000000000000..782a24f27f3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_RX_H_
+#define _OCTEP_RX_H_
+
+/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the receive buffer page that hardware
+ * fills (an skb is later built around this buffer).
+ * @info_ptr: DMA address of host memory, used by hw to update pkt count.
+ * This is currently unused, to save PCI writes.
+ */
+struct octep_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
+
+#define OCTEP_CSUM_L4_VERIFIED 0x1
+#define OCTEP_CSUM_IP_VERIFIED 0x2
+#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
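+
+/* The Rx path treats a packet as verified only when both bits are set,
+ * i.e. csum_verified == OCTEP_CSUM_VERIFIED; only then is skb->ip_summed
+ * set to CHECKSUM_UNNECESSARY.
+ */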
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware published support for it,
+ * and immediately follows the length field (struct octep_oq_resp_hw)
+ * at the start of the Rx buffer.
+ */
+struct octep_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 reserved:62;
+
+ /* checksum verified. */
+ u64 csum_verified:2;
+};
+
+#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian and so needs to be converted to CPU endianness.
+ * Octeon writes this at the beginning of the Rx buffer (skb->data).
+ */
+ */
+struct octep_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
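+
+/* Resulting Rx buffer layout consumed by the Rx path (sketch):
+ *
+ *   [resp_hw: 8B length][resp_hw_ext: 8B, if caps enabled][packet data]
+ *
+ * Packet data thus starts at OCTEP_OQ_RESP_HW_SIZE, plus
+ * OCTEP_OQ_RESP_HW_EXT_SIZE when extended capabilities are enabled.
+ */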
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_rx_buffer {
+ struct page *page;
+
+ /* Length from the Rx hardware descriptor, converted to CPU endianness */
+ u64 len;
+};
+
+#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer))
+
+/* Output Queue statistics. Each output queue has three stats fields. */
+struct octep_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * Octeon OQ.
+ */
+struct octep_oq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_OQ_SIZE (sizeof(struct octep_oq))
+#endif /* _OCTEP_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
new file mode 100644
index 000000000000..511552bc3e87
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Reset the various indices of the Tx queue data structure. */
+static void octep_iq_reset_indices(struct octep_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+ atomic_set(&iq->instr_pending, 0);
+}
+
+/**
+ * octep_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_device *oct = iq->octep_dev;
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ atomic_sub(compl_pkts, &iq->instr_pending);
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+
+ if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
+ ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ netif_wake_subqueue(iq->netdev, iq->q_no);
+ return !budget;
+}
+
+/**
+ * octep_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_iq_free_pending(struct octep_iq *iq)
+{
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ atomic_set(&iq->instr_pending, 0);
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers of Tx queue descriptors pending completion and
+ * reset the queue indices.
+ */
+void octep_clean_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_iq_free_pending(oct->iq[i]);
+ octep_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_setup_iq() - Set up a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be set up.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_setup_iq(struct octep_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
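+ /* ring size is assumed to be a power of 2 so the mask can wrap indices */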
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* Allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Set up sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_free_iq(struct octep_iq *iq)
+{
+ struct octep_device *oct = iq->octep_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ if (iq->buff_info)
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_setup_iqs() - Set up resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_iqs(struct octep_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
new file mode 100644
index 000000000000..2ef57980eb47
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_TX_H_
+#define _OCTEP_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list */
+struct octep_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1) / 4, where the
+ * '+1' is for the main skb, which also goes to Octeon hardware as a gather
+ * buffer. To allocate sufficient SGLIST entries for a packet with max
+ * fragments, align up by adding 3 before calculating the max SGLIST
+ * entries per packet.
+ */
+#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_SGLIST_SIZE_PER_PKT \
+ (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc))
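+
+/* For example, with MAX_SKB_FRAGS == 17 (typical with 4 KiB pages), a packet
+ * needs (17 + 1 + 3) / 4 = 5 sglist descriptors, i.e. 20 pointer/len slots
+ * for the main skb plus up to 17 fragments.
+ */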
+
+struct octep_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer))
+
+/* Hardware interface Tx statistics */
+struct octep_iface_tx_stats {
+ /* Packets dropped due to excessive collisions */
+ u64 xscol;
+
+ /* Packets dropped due to excessive deferral */
+ u64 xsdef;
+
+ /* Packets sent that experienced multiple collisions before successful
+ * transmission
+ */
+ u64 mcol;
+
+ /* Packets sent that experienced a single collision before successful
+ * transmission
+ */
+ u64 scol;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Packets sent with an octet count < 64 */
+ u64 hist_lt64;
+
+ /* Packets sent with an octet count == 64 */
+ u64 hist_eq64;
+
+ /* Packets sent with an octet count of 65–127 */
+ u64 hist_65to127;
+
+ /* Packets sent with an octet count of 128–255 */
+ u64 hist_128to255;
+
+ /* Packets sent with an octet count of 256–511 */
+ u64 hist_256to511;
+
+ /* Packets sent with an octet count of 512–1023 */
+ u64 hist_512to1023;
+
+ /* Packets sent with an octet count of 1024–1518 */
+ u64 hist_1024to1518;
+
+ /* Packets sent with an octet count of > 1518 */
+ u64 hist_gt1518;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets sent that experienced a transmit underflow and were
+ * truncated
+ */
+ u64 undflw;
+
+ /* Control/PAUSE packets sent */
+ u64 ctl;
+};
+
+/* Input Queue statistics. */
+struct octep_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) of
+ * an Octeon device has one such structure to represent it.
+ */
+struct octep_iq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_iq_stats stats;
+
+ /* This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* Interrupt level register for this ring. */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator: 1=gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+/* Hardware Tx completion response header */
+struct octep_instr_resp_hdr {
+ /* Request ID */
+ u64 rid:16;
+
+ /* PCIe port to use for response */
+ u64 pcie_port:3;
+
+ /* Scatter indicator 1=scatter */
+ u64 scatter:1;
+
+ /* Size of Expected result OR no. of entries in scatter list */
+ u64 rlenssz:14;
+
+ /* Desired destination port for result */
+ u64 dport:6;
+
+ /* Opcode Specific parameters */
+ u64 param:8;
+
+ /* Opcode for the return packet */
+ u64 opcode:16;
+};
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional and filled by the driver based on firmware/hardware
+ * capabilities. These optional headers are together called Front Data,
+ * and their size is described by ih->fsz.
+ */
+struct octep_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_instr_hdr ih;
+ u64 ih64;
+ };
+
+ /* Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /* Input Instruction Response Header. */
+ struct octep_instr_resp_hdr irh;
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
+#endif /* _OCTEP_TX_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 6c5618cf4f08..3754d8aec76d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/inetdevice.h>
+#include <net/inet_dscp.h>
#include <net/switchdev.h>
#include <linux/rhashtable.h>
@@ -26,7 +27,7 @@ struct prestera_kern_fib_cache {
/* Indicate if route is not overlapped by another table */
struct rhash_head ht_node; /* node of prestera_router */
struct fib_info *fi;
- u8 kern_tos;
+ dscp_t kern_dscp;
u8 kern_type;
bool reachable;
};
@@ -88,7 +89,7 @@ prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_create(struct prestera_switch *sw,
struct prestera_kern_fib_cache_key *key,
- struct fib_info *fi, u8 tos, u8 type)
+ struct fib_info *fi, dscp_t dscp, u8 type)
{
struct prestera_kern_fib_cache *fib_cache;
int err;
@@ -100,7 +101,7 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw,
memcpy(&fib_cache->key, key, sizeof(*key));
fib_info_hold(fi);
fib_cache->fi = fi;
- fib_cache->kern_tos = tos;
+ fib_cache->kern_dscp = dscp;
fib_cache->kern_type = type;
err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
@@ -132,7 +133,7 @@ __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
fri.tb_id = fc->key.kern_tb_id;
fri.dst = fc->key.addr.u.ipv4;
fri.dst_len = fc->key.prefix_len;
- fri.tos = fc->kern_tos;
+ fri.dscp = fc->kern_dscp;
fri.type = fc->kern_type;
/* flags begin */
fri.offload = offload;
@@ -305,7 +306,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
if (replace) {
fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
fen_info->fi,
- fen_info->tos,
+ fen_info->dscp,
fen_info->type);
if (!fib_cache) {
dev_err(sw->dev->dev, "fib_cache == NULL");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 209d00f56f62..18eebcaa6a76 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3149,7 +3149,7 @@ static int mtk_probe(struct platform_device *pdev)
struct regmap *cci;
cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,cci-control");
+ "cci-control-port");
/* enable CPU/bus coherency */
if (!IS_ERR(cci))
regmap_write(cci, 0, 3);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 282a1f34a88a..683f89f8e3b2 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -600,6 +600,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
struct ethhdr *eh;
bool found = false;
u8 *tag;
@@ -609,7 +610,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
goto out;
- hlist_for_each_entry(entry, head, list) {
+ hlist_for_each_entry_safe(entry, n, head, list) {
if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
MTK_FOE_STATE_BIND))
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index f0eacf819cd9..8f0cd3196aac 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -144,16 +144,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
u32 txd_size;
+ u32 ctrl;
txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
- desc->buf0 = buf_phys;
- desc->buf1 = buf_phys + txd_size;
- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
- txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
- MTK_WED_BUF_SIZE - txd_size) |
- MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
desc++;
@@ -184,12 +185,14 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev)
for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
void *page = page_list[page_idx++];
+ dma_addr_t buf_addr;
if (!page)
break;
- dma_unmap_page(dev->hw->dev, desc[i].buf0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(page);
}
@@ -813,7 +816,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
return;
regs = syscon_regmap_lookup_by_phandle(np, NULL);
- if (!regs)
+ if (IS_ERR(regs))
return;
rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
@@ -824,6 +827,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
goto unlock;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ goto unlock;
hw->node = np;
hw->regs = regs;
hw->eth = eth;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 4ba1a78c6515..bfc0cd5ec423 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -16,13 +16,9 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
-config MLX5_ACCEL
- bool
-
config MLX5_FPGA
bool "Mellanox Technologies Innova support"
depends on MLX5_CORE
- select MLX5_ACCEL
help
Build support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -143,71 +139,21 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
-config MLX5_FPGA_IPSEC
- bool "Mellanox Technologies IPsec Innova support"
- depends on MLX5_CORE
- depends on MLX5_FPGA
- help
- Build IPsec support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-
-config MLX5_IPSEC
- bool "Mellanox Technologies IPsec Connect-X support"
- depends on MLX5_CORE_EN
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- select MLX5_ACCEL
- help
- Build IPsec support for the Connect-X family of network cards by Mellanox
- Technologies.
- Note: If you select this option, the mlx5_core driver will include
- IPsec support for the Connect-X family.
-
config MLX5_EN_IPSEC
- bool "IPSec XFRM cryptography-offload acceleration"
+ bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
help
Build support for IPsec cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
-
-config MLX5_FPGA_TLS
- bool "Mellanox Technologies TLS Innova support"
- depends on TLS_DEVICE
- depends on TLS=y || MLX5_CORE=m
- depends on MLX5_CORE_EN
- depends on MLX5_FPGA
- select MLX5_EN_TLS
- help
- Build TLS support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-config MLX5_TLS
+config MLX5_EN_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- select MLX5_ACCEL
- select MLX5_EN_TLS
- help
- Build TLS support for the Connect-X family of network cards by Mellanox
- Technologies.
-
-config MLX5_EN_TLS
- bool
help
Build support for TLS cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 4bc666714a35..81620c25c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
#
# Netdev extra
@@ -88,17 +88,13 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
-mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
-mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
-
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o en_accel/ipsec_fs.o
+ en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
+ en_accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
deleted file mode 100644
index 82b185121edb..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __MLX5E_ACCEL_H__
-#define __MLX5E_ACCEL_H__
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
-{
- __be16 *ethtype;
-
- if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
- return false;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
- return false;
- return true;
-}
-
-static inline void remove_metadata_hdr(struct sk_buff *skb)
-{
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
-
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
-}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
deleted file mode 100644
index 09f5ce97af46..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/ipsec.h"
-#include "mlx5_core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec_offload.h"
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
- int err = 0;
-
- ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
- mlx5_ipsec_offload_ops(mdev) :
- mlx5_fpga_ipsec_ops(mdev);
-
- if (!ipsec_ops || !ipsec_ops->init) {
- mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
- return;
- }
-
- err = ipsec_ops->init(mdev);
- if (err) {
- mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
- return;
- }
-
- mdev->ipsec_ops = ipsec_ops;
-}
-
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->cleanup)
- return;
-
- ipsec_ops->cleanup(mdev);
-}
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->device_caps)
- return 0;
-
- return ipsec_ops->device_caps(mdev);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_count)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_count(mdev);
-}
-
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_read)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_read(mdev, counters, count);
-}
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- __be32 saddr[4] = {}, daddr[4] = {};
-
- if (!ipsec_ops || !ipsec_ops->create_hw_context)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (!xfrm->attrs.is_ipv6) {
- saddr[3] = xfrm->attrs.saddr.a4;
- daddr[3] = xfrm->attrs.daddr.a4;
- } else {
- memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
- memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
- }
-
- return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
- xfrm->attrs.is_ipv6, sa_handle);
-}
-
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->free_hw_context)
- return;
-
- ipsec_ops->free_hw_context(context);
-}
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- struct mlx5_accel_esp_xfrm *xfrm;
-
- if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
- return ERR_PTR(-EOPNOTSUPP);
-
- xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
- if (IS_ERR(xfrm))
- return xfrm;
-
- xfrm->mdev = mdev;
- return xfrm;
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
-
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
- return;
-
- ipsec_ops->esp_destroy_xfrm(xfrm);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
-
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
- return -EOPNOTSUPP;
-
- return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
deleted file mode 100644
index fbb9c5415d53..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_IPSEC_H__
-#define __MLX5_ACCEL_IPSEC_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/accel.h>
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count);
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle);
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_ipsec_ops {
- u32 (*device_caps)(struct mlx5_core_dev *mdev);
- unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
- int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
- void* (*create_hw_context)(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle);
- void (*free_hw_context)(void *context);
- int (*init)(struct mlx5_core_dev *mdev);
- void (*cleanup)(struct mlx5_core_dev *mdev);
- struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
- int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
- void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
-};
-
-#else
-
-#define MLX5_IPSEC_DEV(mdev) false
-
-static inline void *
-mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- return NULL;
-}
-
-static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
-
-static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
-
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
deleted file mode 100644
index 970c66d19c1d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_OFFLOAD_H__
-#define __MLX5_IPSEC_OFFLOAD_H__
-
-#include <linux/mlx5/driver.h>
-#include "accel/ipsec.h"
-
-#ifdef CONFIG_MLX5_IPSEC
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!MLX5_CAP_GEN(mdev, ipsec_offload))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return false;
-
- return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
- MLX5_CAP_ETH(mdev, insert_trailer);
-}
-
-#else
-static inline const struct mlx5_accel_ipsec_ops *
-mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- return false;
-}
-
-#endif /* CONFIG_MLX5_IPSEC */
-#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
deleted file mode 100644
index 6c2b86a26863..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/tls.h"
-#include "mlx5_core.h"
-#include "lib/mlx5.h"
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-#include "fpga/tls.h"
-
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid,
- direction_sx);
-}
-
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx)
-{
- mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
-}
-
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
-}
-
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_is_tls_device(mdev) ||
- mlx5_accel_is_ktls_device(mdev);
-}
-
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_device_caps(mdev);
-}
-
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_init(mdev);
-}
-
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- mlx5_fpga_tls_cleanup(mdev);
-}
-#endif
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
-{
- u32 sz_bytes;
- void *key;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- struct tls12_crypto_info_aes_gcm_128 *info =
- (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- case TLS_CIPHER_AES_GCM_256: {
- struct tls12_crypto_info_aes_gcm_256 *info =
- (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- default:
- return -EINVAL;
- }
-
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
-}
-
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
-{
- mlx5_destroy_encryption_key(mdev, key_id);
-}
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
deleted file mode 100644
index fd874f0c380a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_TLS_H__
-#define __MLX5_ACCEL_TLS_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/tls.h>
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
-
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_tx);
-}
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_rx);
-}
-
-static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_accel_is_ktls_tx(mdev) &&
- !mlx5_accel_is_ktls_rx(mdev))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
-}
-
-static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info)
-{
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (crypto_info->version == TLS_1_2_VERSION)
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
- break;
- }
-
- return false;
-}
-#else
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline int
-mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id) { return -ENOTSUPP; }
-static inline void
-mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
-
-static inline bool
-mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
-static inline bool
-mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info) { return false; }
-#endif
-
-enum {
- MLX5_ACCEL_TLS_TX = BIT(0),
- MLX5_ACCEL_TLS_RX = BIT(1),
- MLX5_ACCEL_TLS_V12 = BIT(2),
- MLX5_ACCEL_TLS_V13 = BIT(3),
- MLX5_ACCEL_TLS_LRO = BIT(4),
- MLX5_ACCEL_TLS_IPV6 = BIT(5),
- MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
- MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
-};
-
-struct mlx5_ifc_tls_flow_bits {
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 reserved_at_2[0x1e];
-};
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx);
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
-
-#else
-
-static inline int
-mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx) { return -ENOTSUPP; }
-static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx) { }
-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn) { return 0; }
-static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_accel_is_ktls_device(mdev);
-}
-static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
-static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
-#endif
-
-#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8653ac0fd865..50818081bdc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -354,7 +354,6 @@ enum {
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
- MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 08fd1370a8b0..1e8700957280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -5,8 +5,7 @@
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec_offload.h"
static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -207,7 +206,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
u16 stop_room;
- stop_room = mlx5e_tls_get_stop_room(mdev, params);
+ stop_room = mlx5e_ktls_get_stop_room(mdev, params);
stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
/* A MPWQE can take up to the maximum-sized WQE + all the normal
@@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
return false;
- if (mlx5_fpga_is_ipsec_device(mdev))
- return false;
-
if (params->xdp_prog) {
/* XSK params are not considered here. If striding RQ is in use,
* and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
@@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;
- if (mlx5_fpga_is_ipsec_device(mdev))
- byte_count += MLX5E_METADATA_ETHER_LEN;
-
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -696,8 +689,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- !!MLX5_IPSEC_DEV(mdev);
+ allow_swp =
+ mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -804,7 +797,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -833,7 +826,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
- param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
+ param->is_tls = mlx5e_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 62cde3e87c2e..04c0a5e1c89a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -37,8 +37,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls.h"
+#include "en_accel/ktls_txrx.h"
#include "en.h"
#include "en/txrx.h"
@@ -124,8 +124,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
#ifdef CONFIG_MLX5_EN_TLS
/* May send SKBs and WQEs. */
- if (mlx5e_tls_skb_offloaded(skb))
- if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ if (mlx5e_ktls_skb_offloaded(skb))
+ if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
+ &state->tls)))
return false;
#endif
@@ -174,7 +175,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
+ mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4c4ee524176c..3ae6067c7e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -102,7 +102,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 299e3f0fcb5c..c280a18ff002 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -226,8 +226,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_ESN)) {
+ !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -275,8 +274,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.family == AF_INET6 &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_IPV6)) {
+ !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) {
netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
return -EINVAL;
}
@@ -286,9 +284,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return 0;
-
return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
sa_entry->ipsec_obj_id,
&sa_entry->ipsec_rule);
@@ -297,9 +292,6 @@ static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return;
-
mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
&sa_entry->ipsec_rule);
}
@@ -333,9 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
- sa_entry->xfrm =
- mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
+ sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sa_entry;
@@ -414,7 +404,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec = NULL;
- if (!MLX5_IPSEC_DEV(priv->mdev)) {
+ if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
return 0;
}
@@ -425,10 +415,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
- ida_init(&ipsec->halloc);
ipsec->en_priv = priv;
- ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
@@ -452,7 +439,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
mlx5e_accel_ipsec_fs_cleanup(priv);
destroy_workqueue(ipsec->wq);
- ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
}
@@ -531,7 +517,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
return;
@@ -550,15 +536,13 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
}
- if (mlx5_is_ipsec_device(mdev))
- netdev->gso_partial_features |= NETIF_F_GSO_ESP;
-
+ netdev->gso_partial_features |= NETIF_F_GSO_ESP;
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 6164c7f59efb..a0e9dade09e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -40,7 +40,7 @@
#include <net/xfrm.h>
#include <linux/idr.h>
-#include "accel/ipsec.h"
+#include "ipsec_offload.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
@@ -55,24 +55,6 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
atomic64_t ipsec_tx_drop_trailer;
- atomic64_t ipsec_tx_drop_metadata;
-};
-
-struct mlx5e_ipsec_stats {
- u64 ipsec_dec_in_packets;
- u64 ipsec_dec_out_packets;
- u64 ipsec_dec_bypass_packets;
- u64 ipsec_enc_in_packets;
- u64 ipsec_enc_out_packets;
- u64 ipsec_enc_bypass_packets;
- u64 ipsec_dec_drop_packets;
- u64 ipsec_dec_auth_fail_packets;
- u64 ipsec_enc_drop_packets;
- u64 ipsec_add_sa_success;
- u64 ipsec_add_sa_fail;
- u64 ipsec_del_sa_success;
- u64 ipsec_del_sa_fail;
- u64 ipsec_cmd_drop;
};
struct mlx5e_accel_fs_esp;
@@ -81,11 +63,8 @@ struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
- bool no_trailer;
- spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
- struct ida halloc;
+ spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
- struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
struct mlx5e_ipsec_tx *tx_fs;
@@ -116,7 +95,6 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_rule ipsec_rule;
};
-void mlx5e_ipsec_build_inverse_table(void);
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -125,11 +103,6 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
#else
-
-static inline void mlx5e_ipsec_build_inverse_table(void)
-{
-}
-
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 17da23dff0ed..66b529e36ea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
-#include "accel/ipsec_offload.h"
+#include "ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"
@@ -700,9 +700,6 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
- return -EOPNOTSUPP;
-
err = fs_init_tx(priv);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
index 3389b3bb3ef8..b70953979709 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
@@ -6,10 +6,9 @@
#include "en.h"
#include "ipsec.h"
-#include "accel/ipsec_offload.h"
+#include "ipsec_offload.h"
#include "en/fs.h"
-#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
@@ -19,8 +18,4 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule);
-#else
-static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
-#endif
#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index d6667d38e1de..37c9880719cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -1,14 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "ipsec_offload.h"
#include "lib/mlx5.h"
#include "en_accel/ipsec_fs.h"
-#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
- MLX5_ACCEL_IPSEC_CAP_LSO)
-
struct mlx5_ipsec_sa_ctx {
struct rhash_head hash;
u32 enc_key_id;
@@ -25,25 +22,37 @@ struct mlx5_ipsec_esp_xfrm {
struct mlx5_accel_esp_xfrm accel_xfrm;
};
-static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
- u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
+ u32 caps;
+
+ if (!MLX5_CAP_GEN(mdev, ipsec_offload))
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return 0;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return 0;
- if (!mlx5_is_ipsec_device(mdev))
+ if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) ||
+ !MLX5_CAP_ETH(mdev, insert_trailer))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
+ caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 |
+ MLX5_ACCEL_IPSEC_CAP_LSO;
+
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
- if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
+ if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
- caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
/* We can accommodate up to 2^24 different IPsec objects
* because we use up to 24 bit in flow table metadata
@@ -52,6 +61,7 @@ static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
return caps;
}
+EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static int
mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
@@ -94,8 +104,7 @@ mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
static struct mlx5_accel_esp_xfrm *
mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err = 0;
@@ -274,11 +283,6 @@ static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
mutex_unlock(&mxfrm->lock);
}
-static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_obj_attrs *attrs,
u32 ipsec_id)
@@ -366,20 +370,51 @@ change_sw_xfrm_attrs:
return err;
}
-static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
- .device_caps = mlx5_ipsec_offload_device_caps,
- .create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
- .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
- .init = mlx5_ipsec_offload_init,
- .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
- .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
- .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
-};
+void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *xfrm,
+ u32 *sa_handle)
+{
+ __be32 saddr[4] = {}, daddr[4] = {};
+
+ if (!xfrm->attrs.is_ipv6) {
+ saddr[3] = xfrm->attrs.saddr.a4;
+ daddr[3] = xfrm->attrs.daddr.a4;
+ } else {
+ memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
+ memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
+ }
+
+ return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr,
+ xfrm->attrs.spi,
+ xfrm->attrs.is_ipv6, sa_handle);
+}
+
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
+{
+ mlx5_ipsec_offload_delete_sa_ctx(context);
+}
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
+struct mlx5_accel_esp_xfrm *
+mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
- if (!mlx5_ipsec_offload_device_caps(mdev))
- return NULL;
+ struct mlx5_accel_esp_xfrm *xfrm;
- return &ipsec_offload_ops;
+ xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs);
+ if (IS_ERR(xfrm))
+ return xfrm;
+
+ xfrm->mdev = mdev;
+ return xfrm;
+}
+
+void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+{
+ mlx5_ipsec_offload_esp_destroy_xfrm(xfrm);
+}
+
+int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs);
}
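
The rewritten mlx5_ipsec_device_caps() folds the whole prerequisite cascade (general IPsec offload bit, DEK support, the IPSEC general object type, crypto offload, trailer insertion, and the TX/RX flow-table bits) into one exported entry point that returns 0 when any prerequisite is missing. A minimal sketch of how a caller can gate setup on it; mlx5e_demo_ipsec_setup() is hypothetical, not driver code:

/* Sketch only: illustrative caller of the unified caps check above. */
static int mlx5e_demo_ipsec_setup(struct mlx5_core_dev *mdev)
{
	u32 caps = mlx5_ipsec_device_caps(mdev);

	if (!(caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return -EOPNOTSUPP;	/* no usable crypto offload */

	if (caps & MLX5_ACCEL_IPSEC_CAP_ESN)
		pr_debug("extended sequence numbers available\n");

	/* SA handles must fit in the 24 bits of flow-table metadata,
	 * hence the WARN_ON_ONCE() on log_max_ipsec_offload above.
	 */
	return 0;
}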
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h
new file mode 100644
index 000000000000..7dac104e6ef1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_IPSEC_OFFLOAD_H__
+#define __MLX5_IPSEC_OFFLOAD_H__
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/accel.h>
+
+void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *xfrm,
+ u32 *sa_handle);
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
+#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
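
The new header deliberately exposes only the SA hardware-context pair; the create wrapper above packs an IPv4 address into the last __be32 word of a 4-word array so IPv4 and IPv6 share one address format. A hedged usage sketch, assuming the usual ERR_PTR convention for the returned context and an already created xfrm:

/* Sketch: create and release one offloaded SA context (illustrative). */
u32 sa_handle;
void *ctx;

ctx = mlx5_accel_esp_create_hw_context(mdev, xfrm, &sa_handle);
if (IS_ERR(ctx))
	return PTR_ERR(ctx);

/* ... datapath identifies the SA by sa_handle in flow-table metadata ... */

mlx5_accel_esp_free_hw_context(mdev, ctx);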
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index b56fea142c24..9b65c765cbd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -34,78 +34,16 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
-#include "accel/ipsec_offload.h"
+#include "ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
-#include "accel/accel.h"
#include "en.h"
enum {
- MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
- MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
- MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
-};
-
-struct mlx5e_ipsec_rx_metadata {
- unsigned char nexthdr;
- __be32 sa_handle;
-} __packed;
-
-enum {
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
-struct mlx5e_ipsec_tx_metadata {
- __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */
- __be16 seq; /* LSBs of the first TCP seq, only for LSO */
- u8 esp_next_proto; /* Next protocol of ESP */
-} __packed;
-
-struct mlx5e_ipsec_metadata {
- unsigned char syndrome;
- union {
- unsigned char raw[5];
- /* from FPGA to host, on successful decrypt */
- struct mlx5e_ipsec_rx_metadata rx;
- /* from host to FPGA */
- struct mlx5e_ipsec_tx_metadata tx;
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-#define MAX_LSO_MSS 2048
-
-/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
-static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
-
-static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
-{
- return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
-}
-
-static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *eth;
-
- if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
- return ERR_PTR(-ENOMEM);
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
- skb->mac_header -= sizeof(*mdata);
- mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(*mdata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
-
- memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
- return mdata;
-}
-
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
unsigned int alen = crypto_aead_authsize(x->data);
@@ -244,40 +182,6 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
skb_store_bits(skb, iv_offset, &seqno, 8);
}
-static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata,
- struct xfrm_offload *xo)
-{
- struct ip_esp_hdr *esph;
- struct tcphdr *tcph;
-
- if (skb_is_gso(skb)) {
- /* Add LSO metadata indication */
- esph = ip_esp_hdr(skb);
- tcph = inner_tcp_hdr(skb);
- netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
- skb->network_header,
- skb->transport_header,
- skb->inner_network_header,
- skb->inner_transport_header);
- netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
- skb->len, skb_shinfo(skb)->gso_size,
- ntohs(tcph->source), ntohs(tcph->dest),
- ntohl(tcph->seq), ntohl(esph->seq_no));
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
- mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
- mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
- } else {
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
- }
- mdata->content.tx.esp_next_proto = xo->proto;
-
- netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
- mdata->syndrome, mdata->content.tx.esp_next_proto,
- ntohs(mdata->content.tx.mss_inv),
- ntohs(mdata->content.tx.seq));
-}
-
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
struct mlx5e_accel_tx_ipsec_state *ipsec_st,
struct mlx5_wqe_inline_seg *inlseg)
@@ -298,16 +202,14 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
ipsec_st->x = x;
ipsec_st->xo = xo;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- aead = x->data;
- alen = crypto_aead_authsize(aead);
- blksize = ALIGN(crypto_aead_blocksize(aead), 4);
- clen = ALIGN(skb->len + 2, blksize);
- plen = max_t(u32, clen - skb->len, 4);
- tailen = plen + alen;
- ipsec_st->plen = plen;
- ipsec_st->tailen = tailen;
- }
+ aead = x->data;
+ alen = crypto_aead_authsize(aead);
+ blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ clen = ALIGN(skb->len + 2, blksize);
+ plen = max_t(u32, clen - skb->len, 4);
+ tailen = plen + alen;
+ ipsec_st->plen = plen;
+ ipsec_st->tailen = tailen;
return 0;
}
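
The now-unconditional tail computation is standard ESP trailer arithmetic: the payload plus the 2-byte pad-length/next-header trailer is rounded up to the cipher block size (kept at least 4-byte aligned), the pad length is floored at 4, and the ICV length rides on top. A standalone worked example of the same math (userspace, illustrative values):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))	/* round up to a */

int main(void)
{
	uint32_t skb_len = 1000;	/* example packet length */
	uint32_t alen = 16;		/* AES-GCM ICV size */
	uint32_t blksize = ALIGN_UP(1, 4); /* GCM blocksize is 1 -> 4 */
	uint32_t clen = ALIGN_UP(skb_len + 2, blksize);
	uint32_t plen = (clen - skb_len > 4) ? (clen - skb_len) : 4;

	/* plen covers padding + 2 trailer bytes; prints plen=4 tailen=20 */
	printf("plen=%u tailen=%u\n", plen, plen + alen);
	return 0;
}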
@@ -340,19 +242,17 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
((struct iphdr *)skb_network_header(skb))->protocol :
((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
- eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
- encap = x->encap;
- if (!encap) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
- } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
- }
+ eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+ encap = x->encap;
+ if (!encap) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+ } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
}
}
@@ -363,7 +263,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_sa_entry *sa_entry;
- struct mlx5e_ipsec_metadata *mdata;
struct xfrm_state *x;
struct sec_path *sp;
@@ -392,19 +291,8 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
- if (MLX5_CAP_GEN(priv->mdev, fpga)) {
- mdata = mlx5e_ipsec_add_metadata(skb);
- if (IS_ERR(mdata)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
- goto drop;
- }
- }
-
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
- if (MLX5_CAP_GEN(priv->mdev, fpga))
- mlx5e_ipsec_set_metadata(skb, mdata, xo);
-
mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
return true;
@@ -414,79 +302,6 @@ drop:
return false;
}
-static inline struct xfrm_state *
-mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct xfrm_offload *xo;
- struct xfrm_state *xs;
- struct sec_path *sp;
- u32 sa_handle;
-
- sp = secpath_set(skb);
- if (unlikely(!sp)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
- return NULL;
- }
-
- sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
- xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
- if (unlikely(!xs)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
- return NULL;
- }
-
- sp = skb_sec_path(skb);
- sp->xvec[sp->len++] = xs;
- sp->olen++;
-
- xo = xfrm_offload(skb);
- xo->flags = CRYPTO_DONE;
- switch (mdata->syndrome) {
- case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- if (likely(priv->ipsec->no_trailer)) {
- xo->flags |= XFRM_ESP_NO_TRAILER;
- xo->proto = mdata->content.rx.nexthdr;
- }
- break;
- case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
- xo->status = CRYPTO_INVALID_PROTOCOL;
- break;
- default:
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
- return NULL;
- }
- return xs;
-}
-
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct xfrm_state *xs;
-
- if (!is_metadata_hdr_valid(skb))
- return skb;
-
- /* Use the metadata */
- mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
- xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
- if (unlikely(!xs)) {
- kfree_skb(skb);
- return NULL;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-
- return skb;
-}
-
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
@@ -528,8 +343,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
- if (WARN_ON_ONCE(priv->ipsec->no_trailer))
- xo->flags |= XFRM_ESP_NO_TRAILER;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
@@ -541,21 +354,3 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
-
-void mlx5e_ipsec_build_inverse_table(void)
-{
- u16 mss_inv;
- u32 mss;
-
- /* Calculate 1/x inverse table for use in GSO data path.
- * Using this table, we provide the IPSec accelerator with the value of
- * 1/gso_size so that it can infer the position of each segment inside
- * the GSO, and increment the ESP sequence number, and generate the IV.
- * The HW needs this value in Q0.16 fixed-point number format
- */
- mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
- for (mss = 2; mss < MAX_LSO_MSS; mss++) {
- mss_inv = div_u64(1ULL << 32, mss) >> 16;
- mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
- }
-}
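
The deleted inverse table is plain Q0.16 fixed point: for each MSS the driver stored floor(2^16 / mss), i.e. 1/mss scaled by 65536 and truncated (MSS 1 is special-cased to 0xFFFF because 65536 itself does not fit in 16 bits). The removed loop's (1ULL << 32) / mss >> 16 equals floor(65536 / mss) for any mss >= 2. A standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mss = 1460;	/* typical Ethernet TCP MSS */
	/* same computation as the removed kernel loop */
	uint16_t mss_inv = (uint16_t)(((1ULL << 32) / mss) >> 16);

	/* prints: mss=1460 mss_inv=0x002c (44/65536 ~ 1/1489) */
	printf("mss=%u mss_inv=0x%04x (%u/65536 ~ 1/%u)\n",
	       mss, mss_inv, mss_inv, 65536 / mss_inv);
	return 0;
}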
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 428881e0adcb..0ae4e12ce528 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -53,9 +53,6 @@ struct mlx5e_accel_tx_ipsec_state {
#ifdef CONFIG_MLX5_EN_IPSEC
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt);
-
void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 5cb936541b9e..3aace1c2a763 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -35,27 +35,9 @@
#include <net/sock.h>
#include "en.h"
-#include "accel/ipsec.h"
+#include "ipsec_offload.h"
#include "fpga/sdk.h"
#include "en_accel/ipsec.h"
-#include "fpga/ipsec.h"
-
-static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
-};
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
@@ -65,13 +47,11 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
};
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -103,45 +83,4 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
-{
- return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
-{
- int ret = 0;
-
- if (priv->ipsec)
- ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
- NUM_IPSEC_HW_COUNTERS);
- if (ret)
- memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
-{
- unsigned int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
-
- return idx;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
-{
- int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
- mlx5e_ipsec_hw_stats_desc,
- i);
- return idx;
-}
-
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
-MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
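
The surviving software group uses the same descriptor-plus-offset pattern as the deleted hardware group: each counter_desc pairs an ethtool string with a byte offset into the stats struct, and MLX5E_READ_CTR_ATOMIC64() dereferences through that offset. A minimal standalone illustration of the pattern (plain uint64_t stands in for atomic64_t; all names here are demo-only):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_sw_stats {
	uint64_t rx_drop;
	uint64_t tx_drop;
};

struct demo_counter_desc {
	const char *format;
	size_t offset;
};

static const struct demo_counter_desc demo_desc[] = {
	{ "rx_drop", offsetof(struct demo_sw_stats, rx_drop) },
	{ "tx_drop", offsetof(struct demo_sw_stats, tx_drop) },
};

int main(void)
{
	struct demo_sw_stats stats = { .rx_drop = 3, .tx_drop = 7 };
	size_t i;

	/* byte-offset indirection, as in MLX5E_READ_CTR_ATOMIC64() */
	for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
		printf("%s: %llu\n", demo_desc[i].format,
		       (unsigned long long)
		       *(const uint64_t *)((const char *)&stats +
					   demo_desc[i].offset));
	return 0;
}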
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index d93aadbf10da..814f2a56f633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -2,11 +2,49 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
-#include "en_accel/tls.h"
+#include "lib/mlx5.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id)
+{
+ u32 sz_bytes;
+ void *key;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5_create_encryption_key(mdev, key, sz_bytes,
+ MLX5_ACCEL_OBJ_TLS_KEY,
+ p_key_id);
+}
+
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_destroy_encryption_key(mdev, key_id);
+}
+
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
struct tls_crypto_info *crypto_info,
@@ -59,15 +97,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev))
return;
- if (mlx5e_accel_is_ktls_tx(mdev)) {
+ if (mlx5e_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
@@ -92,7 +130,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
@@ -112,7 +150,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
@@ -120,3 +158,24 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
}
+
+int mlx5e_ktls_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls;
+
+ if (!mlx5e_is_ktls_device(priv->mdev))
+ return 0;
+
+ tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+ if (!tls)
+ return -ENOMEM;
+
+ priv->tls = tls;
+ return 0;
+}
+
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
+{
+ kfree(priv->tls);
+ priv->tls = NULL;
+}
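
mlx5_ktls_create_key() only sizes the key material from the crypto_info variant and forwards it to the encryption-key (DEK) machinery; anything but AES-GCM-128/256 fails with -EINVAL. A hedged caller sketch (kernel context; in the driver the tls_crypto_info arrives from the TLS stack via tls_dev_add, so the literal struct here is illustrative):

/* Sketch: import a TLS 1.2 AES-GCM-128 key as a device DEK. */
struct tls12_crypto_info_aes_gcm_128 info = {
	.info.version = TLS_1_2_VERSION,
	.info.cipher_type = TLS_CIPHER_AES_GCM_128,
	/* .key/.iv/.salt/.rec_seq are filled by the TLS stack */
};
u32 key_id;
int err;

err = mlx5_ktls_create_key(mdev, &info.info, &key_id);
if (err)
	return err;

/* ... program key_id into the TIS/TIR context ... */

mlx5_ktls_destroy_key(mdev, key_id);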
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 5833deb2354c..d016624fbc9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -4,9 +4,42 @@
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
+#include <linux/tls.h>
+#include <net/tls.h>
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id);
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
+{
+ if (is_kdump_kernel())
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+}
+
+static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info)
+{
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ break;
+ }
+
+ return false;
+}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
@@ -16,26 +49,36 @@ struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_tx(mdev);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_rx(mdev);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
}
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(mdev);
-}
+struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_ctx;
+ atomic64_t tx_tls_del;
+ atomic64_t rx_tls_ctx;
+ atomic64_t rx_tls_del;
+};
-#else
+struct mlx5e_tls {
+ struct mlx5e_tls_sw_stats sw_stats;
+ struct workqueue_struct *rx_wq;
+};
+int mlx5e_ktls_init(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
@@ -64,10 +107,23 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ return 0;
+}
+static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ return 0;
+}
#endif
#endif /* __MLX5E_KTLS_H__ */
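
Taken together, the new inlines give callers a two-level gate: mlx5e_is_ktls_device() is the one-time device check (capabilities plus the kdump bail-out) and mlx5e_ktls_type_check() is the per-connection cipher check. A sketch of how an offload add path could chain them; demo_tls_add_gate() is illustrative:

/* Sketch: the two-level gate an offload add path applies. */
static int demo_tls_add_gate(struct mlx5_core_dev *mdev,
			     struct tls_crypto_info *crypto_info)
{
	if (!mlx5e_is_ktls_device(mdev))
		return -EOPNOTSUPP;	/* no kTLS caps, or kdump kernel */

	if (!mlx5e_ktls_type_check(mdev, crypto_info))
		return -EOPNOTSUPP;	/* only TLS 1.2 AES-GCM-128 passes */

	return 0;
}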
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 96064a2033f7..0bb0633b7542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -3,7 +3,7 @@
#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 56e7b2aee85f..2ab46c4247ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -36,14 +36,7 @@
#include "en.h"
#include "fpga/sdk.h"
-#include "en_accel/tls.h"
-
-static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
-};
+#include "en_accel/ktls.h"
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
@@ -55,51 +48,43 @@ static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
-{
- if (!priv->tls)
- return NULL;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return mlx5e_ktls_sw_stats_desc;
- return mlx5e_tls_sw_stats_desc;
-}
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
- return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
+
+ return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- stats_desc[i].format);
+ mlx5e_ktls_sw_stats_desc[i].format);
return n;
}
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- data[idx++] =
- MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- stats_desc, i);
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_ktls_sw_stats_desc,
+ i);
return n;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index aaf11c66bf4c..4b6f0d1ea59a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -27,7 +27,7 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
{
u16 num_dumps, stop_room = 0;
- if (!mlx5e_accel_is_ktls_tx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev))
return 0;
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
@@ -448,14 +448,26 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct tls_context *tls_ctx;
+ int datalen;
u32 seq;
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ return true;
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ goto err_out;
+
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
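
The datalen computation hoisted into mlx5e_ktls_handle_tx_skb() isolates the TLS record bytes: skb_transport_offset() measures from skb->data, so subtracting it together with the options-inclusive TCP header length leaves exactly the payload past TCP. A zero result (for instance a bare ACK) is sent out unmodified. The same two lines from the hunk above, annotated:

/* bytes after the TCP header == TLS record payload */
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
	return true;	/* e.g. pure ACK: nothing to offload */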
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 08c9d5134479..2dd78dd4ad65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -16,8 +16,8 @@ struct mlx5e_accel_tx_tls_state {
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
@@ -48,6 +48,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
}
+
+static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
+static inline void
+mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
+}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -69,6 +81,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
return false;
}
+static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 *cqe_bcnt)
+{
+}
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
index e5c180f2403b..0dc715c4c10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
@@ -6,7 +6,6 @@
#include <net/tls.h>
#include "en.h"
-#include "accel/tls.h"
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
deleted file mode 100644
index b8fc863aa68d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/netdevice.h>
-#include <net/ipv6.h>
-#include "en_accel/tls.h"
-#include "accel/tls.h"
-
-static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 0);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
-{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 1);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-}
-#endif
-
-static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
- MLX5_FLD_SZ_BYTES(tls_flow, src_port));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
- MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
-}
-
-static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
-{
- switch (sk->sk_family) {
- case AF_INET:
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
-#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
- if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
- }
- if (!(caps & MLX5_ACCEL_TLS_IPV6))
- goto error_out;
-
- mlx5e_tls_set_ipv6_flow(flow, sk);
- break;
-#endif
- default:
- goto error_out;
- }
-
- mlx5e_tls_set_flow_tcp_ports(flow, sk);
- return 0;
-error_out:
- return -EINVAL;
-}
-
-static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 caps = mlx5_accel_tls_device_caps(mdev);
- int ret = -ENOMEM;
- void *flow;
- u32 swid;
-
- flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
- if (!flow)
- return ret;
-
- ret = mlx5e_tls_set_flow(flow, sk, caps);
- if (ret)
- goto free_flow;
-
- ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
- if (ret < 0)
- goto free_flow;
-
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context_tx *tx_ctx =
- mlx5e_get_tls_tx_context(tls_ctx);
-
- tx_ctx->swid = htonl(swid);
- tx_ctx->expected_seq = start_offload_tcp_sn;
- } else {
- struct mlx5e_tls_offload_context_rx *rx_ctx =
- mlx5e_get_tls_rx_context(tls_ctx);
-
- rx_ctx->handle = htonl(swid);
- }
-
- return 0;
-free_flow:
- kfree(flow);
- return ret;
-}
-
-static void mlx5e_tls_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- unsigned int handle;
-
- handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
- mlx5e_get_tls_tx_context(tls_ctx)->swid :
- mlx5e_get_tls_rx_context(tls_ctx)->handle);
-
- mlx5_accel_tls_del_flow(priv->mdev, handle,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
-}
-
-static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
- u32 seq, u8 *rcd_sn_data,
- enum tls_offload_ctx_dir direction)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_rx *rx_ctx;
- __be64 rcd_sn = *(__be64 *)rcd_sn_data;
-
- if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
- return -EINVAL;
- rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
-
- netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
- be64_to_cpu(rcd_sn));
- mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
-
- return 0;
-}
-
-static const struct tlsdev_ops mlx5e_tls_ops = {
- .tls_dev_add = mlx5e_tls_add,
- .tls_dev_del = mlx5e_tls_del,
- .tls_dev_resync = mlx5e_tls_resync,
-};
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- struct net_device *netdev = priv->netdev;
- u32 caps;
-
- if (mlx5e_accel_is_ktls_device(priv->mdev)) {
- mlx5e_ktls_build_netdev(priv);
- return;
- }
-
- /* FPGA */
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return;
-
- caps = mlx5_accel_tls_device_caps(priv->mdev);
- if (caps & MLX5_ACCEL_TLS_TX) {
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
- }
-
- if (caps & MLX5_ACCEL_TLS_RX) {
- netdev->features |= NETIF_F_HW_TLS_RX;
- netdev->hw_features |= NETIF_F_HW_TLS_RX;
- }
-
- if (!(caps & MLX5_ACCEL_TLS_LRO)) {
- netdev->features &= ~NETIF_F_LRO;
- netdev->hw_features &= ~NETIF_F_LRO;
- }
-
- netdev->tlsdev_ops = &mlx5e_tls_ops;
-}
-
-int mlx5e_tls_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls;
-
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- priv->tls = tls;
- return 0;
-}
-
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls = priv->tls;
-
- if (!tls)
- return;
-
- kfree(tls);
- priv->tls = NULL;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
deleted file mode 100644
index 62ecf14bf86a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#ifndef __MLX5E_TLS_H__
-#define __MLX5E_TLS_H__
-
-#include "accel/tls.h"
-#include "en_accel/ktls.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-#include <net/tls.h>
-#include "en.h"
-
-struct mlx5e_tls_sw_stats {
- atomic64_t tx_tls_ctx;
- atomic64_t tx_tls_del;
- atomic64_t tx_tls_drop_metadata;
- atomic64_t tx_tls_drop_resync_alloc;
- atomic64_t tx_tls_drop_no_sync_data;
- atomic64_t tx_tls_drop_bypass_required;
- atomic64_t rx_tls_ctx;
- atomic64_t rx_tls_del;
- atomic64_t rx_tls_drop_resync_request;
- atomic64_t rx_tls_resync_request;
- atomic64_t rx_tls_resync_reply;
- atomic64_t rx_tls_auth_fail;
-};
-
-struct mlx5e_tls {
- struct mlx5e_tls_sw_stats sw_stats;
- struct workqueue_struct *rx_wq;
-};
-
-struct mlx5e_tls_offload_context_tx {
- struct tls_offload_context_tx base;
- u32 expected_seq;
- __be32 swid;
-};
-
-static inline struct mlx5e_tls_offload_context_tx *
-mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
- return container_of(tls_offload_ctx_tx(tls_ctx),
- struct mlx5e_tls_offload_context_tx,
- base);
-}
-
-struct mlx5e_tls_offload_context_rx {
- struct tls_offload_context_rx base;
- __be32 handle;
-};
-
-static inline struct mlx5e_tls_offload_context_rx *
-mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
- return container_of(tls_offload_ctx_rx(tls_ctx),
- struct mlx5e_tls_offload_context_rx,
- base);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv)
-{
- return priv->tls;
-}
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
-int mlx5e_tls_init(struct mlx5e_priv *priv);
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv);
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
-
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() &&
- mlx5_accel_is_tls_device(mdev);
-}
-
-#else
-
-static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- if (!is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(priv->mdev))
- mlx5e_ktls_build_netdev(priv);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; }
-static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
-static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
-static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
-static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif
-
-#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
deleted file mode 100644
index a05580cea481..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
-#include "accel/accel.h"
-
-#include <net/inet6_hashtables.h>
-#include <linux/ipv6.h>
-
-#define SYNDROM_DECRYPTED 0x30
-#define SYNDROM_RESYNC_REQUEST 0x31
-#define SYNDROM_AUTH_FAILED 0x32
-
-#define SYNDROME_OFFLOAD_REQUIRED 32
-#define SYNDROME_SYNC 33
-
-struct sync_info {
- u64 rcd_sn;
- s32 sync_len;
- int nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
-};
-
-struct recv_metadata_content {
- u8 syndrome;
- u8 reserved;
- __be32 sync_seq;
-} __packed;
-
-struct send_metadata_content {
- /* One byte of syndrome followed by 3 bytes of swid */
- __be32 syndrome_swid;
- __be16 first_seq;
-} __packed;
-
-struct mlx5e_tls_metadata {
- union {
- /* from fpga to host */
- struct recv_metadata_content recv;
- /* from host to fpga */
- struct send_metadata_content send;
- unsigned char raw[6];
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
-{
- struct mlx5e_tls_metadata *pet;
- struct ethhdr *eth;
-
- if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
- return -ENOMEM;
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
- skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
- pet = (struct mlx5e_tls_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->content.send.syndrome_swid =
- htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
-
- return 0;
-}
-
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
- u32 tcp_seq, struct sync_info *info)
-{
- int remaining, i = 0, ret = -EINVAL;
- struct tls_record_info *record;
- unsigned long flags;
- s32 sync_size;
-
- spin_lock_irqsave(&context->base.lock, flags);
- record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
-
- if (unlikely(!record))
- goto out;
-
- sync_size = tcp_seq - tls_record_start_seq(record);
- info->sync_len = sync_size;
- if (unlikely(sync_size < 0)) {
- if (tls_record_is_start_marker(record))
- goto done;
-
- goto out;
- }
-
- remaining = sync_size;
- while (remaining > 0) {
- info->frags[i] = record->frags[i];
- __skb_frag_ref(&info->frags[i]);
- remaining -= skb_frag_size(&info->frags[i]);
-
- if (remaining < 0)
- skb_frag_size_add(&info->frags[i], remaining);
-
- i++;
- }
- info->nr_frags = i;
-done:
- ret = 0;
-out:
- spin_unlock_irqrestore(&context->base.lock, flags);
- return ret;
-}
-
-static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
- struct sk_buff *nskb, u32 tcp_seq,
- int headln, __be64 rcd_sn)
-{
- struct mlx5e_tls_metadata *pet;
- u8 syndrome = SYNDROME_SYNC;
- struct iphdr *iph;
- struct tcphdr *th;
- int data_len, mss;
-
- nskb->dev = skb->dev;
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb, skb_network_offset(skb));
- skb_set_transport_header(nskb, skb_transport_offset(skb));
- memcpy(nskb->data, skb->data, headln);
- memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
-
- iph = ip_hdr(nskb);
- iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
- th = tcp_hdr(nskb);
- data_len = nskb->len - headln;
- tcp_seq -= data_len;
- th->seq = htonl(tcp_seq);
-
- mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
- skb_shinfo(nskb)->gso_size = 0;
- if (data_len > mss) {
- skb_shinfo(nskb)->gso_size = mss;
- skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
- }
- skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
-
- pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
- memcpy(pet, &syndrome, sizeof(syndrome));
- pet->content.send.first_seq = htons(tcp_seq);
-
- /* MLX5 devices don't care about the checksum partial start, offset
- * and pseudo header
- */
- nskb->ip_summed = CHECKSUM_PARTIAL;
-
- nskb->queue_mapping = skb->queue_mapping;
-}
-
-static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tls *tls)
-{
- u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
- struct sync_info info;
- struct sk_buff *nskb;
- int linear_len = 0;
- int headln;
- int i;
-
- sq->stats->tls_ooo++;
-
- if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
- * It should be safe to drop the packet in this case
- */
- atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
- goto err_out;
- }
-
- if (unlikely(info.sync_len < 0)) {
- u32 payload;
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- payload = skb->len - headln;
- if (likely(payload <= -info.sync_len))
- /* SKB payload doesn't require offload
- */
- return true;
-
- atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
- goto err_out;
- }
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
- goto err_out;
- }
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- linear_len += headln + sizeof(info.rcd_sn);
- nskb = alloc_skb(linear_len, GFP_ATOMIC);
- if (unlikely(!nskb)) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
- goto err_out;
- }
-
- context->expected_seq = tcp_seq + skb->len - headln;
- skb_put(nskb, linear_len);
- for (i = 0; i < info.nr_frags; i++)
- skb_shinfo(nskb)->frags[i] = info.frags[i];
-
- skb_shinfo(nskb)->nr_frags = info.nr_frags;
- nskb->data_len = info.sync_len;
- nskb->len += info.sync_len;
- sq->stats->tls_resync_bytes += nskb->len;
- mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
- cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit_simple(sq, nskb, true);
-
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_tx *context;
- struct tls_context *tls_ctx;
- u32 expected_seq;
- int datalen;
- u32 skb_seq;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- return true;
-
- mlx5e_tx_mpwqe_ensure_complete(sq);
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
- if (mlx5e_accel_is_ktls_tx(sq->mdev))
- return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
-
- /* FPGA */
- skb_seq = ntohl(tcp_hdr(skb)->seq);
- context = mlx5e_get_tls_tx_context(tls_ctx);
- expected_seq = context->expected_seq;
-
- if (unlikely(expected_seq != skb_seq))
- return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
- dev_kfree_skb_any(skb);
- return false;
- }
-
- context->expected_seq = skb_seq + datalen;
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-static int tls_update_resync_sn(struct net_device *netdev,
- struct sk_buff *skb,
- struct mlx5e_tls_metadata *mdata)
-{
- struct sock *sk = NULL;
- struct iphdr *iph;
- struct tcphdr *th;
- __be32 seq;
-
- if (mdata->ethertype != htons(ETH_P_IP))
- return -EINVAL;
-
- iph = (struct iphdr *)(mdata + 1);
-
- th = ((void *)iph) + iph->ihl * 4;
-
- if (iph->version == 4) {
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
-
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
- &ipv6h->saddr, th->source,
- &ipv6h->daddr, ntohs(th->dest),
- netdev->ifindex, 0);
-#endif
- }
- if (!sk || sk->sk_state == TCP_TIME_WAIT) {
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
- goto out;
- }
-
- skb->sk = sk;
- skb->destructor = sock_edemux;
-
- memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
- tls_offload_rx_resync_request(sk, seq);
-out:
- return 0;
-}
-
-/* FPGA tls rx handler */
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt)
-{
- struct mlx5e_tls_metadata *mdata;
- struct mlx5e_priv *priv;
-
- /* Use the metadata */
- mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
- switch (mdata->content.recv.syndrome) {
- case SYNDROM_DECRYPTED:
- skb->decrypted = 1;
- break;
- case SYNDROM_RESYNC_REQUEST:
- tls_update_resync_sn(rq->netdev, skb, mdata);
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
- break;
- case SYNDROM_AUTH_FAILED:
- /* Authentication failure will be observed and verified by kTLS */
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
- break;
- default:
- /* Bypass the metadata header to others */
- return;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-}
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- if (!mlx5e_accel_is_tls_device(mdev))
- return 0;
-
- if (mlx5e_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(mdev, params);
-
- /* FPGA */
- /* Resync SKB. */
- return mlx5e_stop_room_for_max_wqe(mdev);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
deleted file mode 100644
index 0ca0a023fb8d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5E_TLS_RXTX_H__
-#define __MLX5E_TLS_RXTX_H__
-
-#include "accel/accel.h"
-#include "en_accel/ktls_txrx.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-
-#include <linux/skbuff.h>
-#include "en.h"
-#include "en/txrx.h"
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
-
-static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
-{
- return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
-static inline void
-mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state)
-{
- cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
-}
-
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt);
-
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
-{
- if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */
- return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
-
- if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb)))
- return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
-}
-
-#else
-
-static inline bool
-mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- return 0;
-}
-
-#endif /* CONFIG_MLX5_EN_TLS */
-
-#endif /* __MLX5E_TLS_RXTX_H__ */
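
The deleted header follows the standard kernel stub pattern: real prototypes under the config option, empty static inlines otherwise, so callers never need #ifdefs of their own. A generic sketch of the idiom (CONFIG_FOO and foo_init are placeholders, not anything in this driver):

    struct foo_dev;                       /* opaque to callers */

    #ifdef CONFIG_FOO
    int foo_init(struct foo_dev *dev);    /* real implementation in foo.c */
    #else
    static inline int foo_init(struct foo_dev *dev) { return 0; } /* no-op */
    #endif
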
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2f1dedc721d1..12b72a0bcb1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -47,9 +47,8 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ktls.h"
+#include "en_accel/ipsec_offload.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
@@ -68,7 +67,6 @@
#include "en/ptp.h"
#include "qos.h"
#include "en/trap.h"
-#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -1036,9 +1034,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
- __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
-
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
@@ -1334,7 +1329,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
- if (MLX5_IPSEC_DEV(c->priv->mdev))
+ if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
@@ -4471,12 +4466,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
- netdev_warn(netdev,
- "XDP is not available on Innova cards with IPsec support\n");
- return -EINVAL;
- }
-
new_params = priv->channels.params;
new_params.xdp_prog = prog;
@@ -4934,7 +4923,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_ipsec_build_netdev(priv);
- mlx5e_tls_build_netdev(priv);
+ mlx5e_ktls_build_netdev(priv);
}
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4996,7 +4985,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
- err = mlx5e_tls_init(priv);
+ err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5007,7 +4996,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
- mlx5e_tls_cleanup(priv);
+ mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv);
}
@@ -5704,7 +5693,6 @@ int mlx5e_init(void)
{
int ret;
- mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 6b7e7ea6ded2..47f7b4c034cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1112,7 +1112,6 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(ptp),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 56bb58704bf9..a5f6fd16b665 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -48,10 +48,9 @@
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@@ -1416,7 +1415,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;
- mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(get_cqe_tls_offload(cqe)))
+ mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
@@ -2383,46 +2383,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
};
#endif /* CONFIG_MLX5_CORE_IPOIB */
-#ifdef CONFIG_MLX5_EN_IPSEC
-
-static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
- struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- struct mlx5e_wqe_frag_info *wi;
- struct sk_buff *skb;
- u32 cqe_bcnt;
- u16 ci;
-
- ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
- wi = get_frag(rq, ci);
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
-
- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- rq->stats->wqe_err++;
- goto wq_free_wqe;
- }
-
- skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
- mlx5e_skb_from_cqe_linear,
- mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
- if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
- goto wq_free_wqe;
-
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
- if (unlikely(!skb))
- goto wq_free_wqe;
-
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- napi_gro_receive(rq->cq.napi, skb);
-
-wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi, true);
- mlx5_wq_cyc_pop(wq);
-}
-
-#endif /* CONFIG_MLX5_EN_IPSEC */
-
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
struct net_device *netdev = rq->netdev;
@@ -2439,10 +2399,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
- return -EINVAL;
- }
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
if (!rq->handle_rx_cqe) {
@@ -2466,14 +2422,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-#ifdef CONFIG_MLX5_EN_IPSEC
- if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
- priv->ipsec)
- rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
- else
-#endif
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of RQ is not set\n");
return -EINVAL;
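
The deleted rx-cqe handler above leans on INDIRECT_CALL_2(): under retpolines an indirect call is costly, so the wrapper first compares the function pointer against the two expected targets and calls them directly. A simplified sketch of the idea (the real macro lives in include/linux/indirect_call_wrapper.h):

    /* Simplified: try the two likely targets before an indirect call. */
    #define MY_INDIRECT_CALL_2(f, f2, f1, ...)                 \
            ((f) == (f2) ? (f2)(__VA_ARGS__) :                 \
             (f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))
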
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index bdc870f9c2f3..57fa0489eeb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,7 +32,7 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"
@@ -1900,17 +1900,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
- return mlx5e_tls_get_count(priv);
+ return mlx5e_ktls_get_count(priv);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
- return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+ return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
- return idx + mlx5e_tls_get_stats(priv, data + idx);
+ return idx + mlx5e_ktls_get_stats(priv, data + idx);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@@ -2443,7 +2443,6 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index a7a025d15c14..e48b15b55b6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -482,7 +482,6 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
-extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 2a984e82ae16..750c32050165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -57,9 +57,6 @@ struct mlx5_fpga_device {
u32 mkey;
struct mlx5_uars_page *uar;
} conn_res;
-
- struct mlx5_fpga_ipsec *ipsec;
- struct mlx5_fpga_tls *tls;
};
#define mlx5_fpga_dbg(__adev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
deleted file mode 100644
index 8ec148010d62..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/rhashtable.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/fs_helpers.h>
-#include <linux/mlx5/fs.h>
-#include <linux/rbtree.h>
-
-#include "mlx5_core.h"
-#include "fs_cmd.h"
-#include "fpga/ipsec.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-
-enum mlx5_fpga_ipsec_cmd_status {
- MLX5_FPGA_IPSEC_CMD_PENDING,
- MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
- MLX5_FPGA_IPSEC_CMD_COMPLETE,
-};
-
-struct mlx5_fpga_ipsec_cmd_context {
- struct mlx5_fpga_dma_buf buf;
- enum mlx5_fpga_ipsec_cmd_status status;
- struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
- int status_code;
- struct completion complete;
- struct mlx5_fpga_device *dev;
- struct list_head list; /* Item in pending_cmds */
- u8 command[];
-};
-
-struct mlx5_fpga_esp_xfrm;
-
-struct mlx5_fpga_ipsec_sa_ctx {
- struct rhash_head hash;
- struct mlx5_ifc_fpga_ipsec_sa hw_sa;
- u32 sa_handle;
- struct mlx5_core_dev *dev;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-};
-
-struct mlx5_fpga_esp_xfrm {
- unsigned int num_rules;
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mutex lock; /* xfrm lock */
- struct mlx5_accel_esp_xfrm accel_xfrm;
-};
-
-struct mlx5_fpga_ipsec_rule {
- struct rb_node node;
- struct fs_fte *fte;
- struct mlx5_fpga_ipsec_sa_ctx *ctx;
-};
-
-static const struct rhashtable_params rhash_sa = {
- /* Keep the "cmd" field out of the key, as its
- * value is not constant during the lifetime
- * of the key object.
- */
- .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
- .automatic_shrinking = true,
- .min_size = 1,
-};
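
A worked example helps with the key arithmetic above: excluding the leading cmd word means shrinking the key length by sizeof(cmd) and pushing the key offset past it, so the hash covers everything in hw_sa except cmd. The same trick in self-contained, runnable form (struct names invented for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sa  { uint32_t cmd; uint8_t body[60]; };  /* stand-in for hw_sa */
    struct ctx { struct sa hw_sa; };

    int main(void)
    {
            /* Key skips 'cmd': length shrinks, offset grows, by sizeof(cmd). */
            size_t key_len    = sizeof(struct sa) - sizeof(uint32_t);
            size_t key_offset = offsetof(struct ctx, hw_sa) + sizeof(uint32_t);

            printf("key_len=%zu key_offset=%zu\n", key_len, key_offset);
            return 0;
    }
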
-
-struct mlx5_fpga_ipsec {
- struct mlx5_fpga_device *fdev;
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
- struct mlx5_fpga_conn *conn;
-
- struct notifier_block fs_notifier_ingress_bypass;
- struct notifier_block fs_notifier_egress;
-
- /* Map hardware SA --> SA context
- * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
- * We use this hash to avoid duplicate SAs in the FPGA,
- * which aren't allowed.
- */
- struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
- struct mutex sa_hash_lock;
-
- /* Tree holding all rules for this fpga device
- * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
- */
- struct rb_root rules_rb;
- struct mutex rules_rb_lock; /* rules lock */
-
- struct ida halloc;
-};
-
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
- return false;
-
- return true;
-}
-
-static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
-
- if (status) {
- context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
- buf);
- mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
- status);
- context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
- complete(&context->complete);
- }
-}
-
-static inline
-int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
-{
- switch (syndrome) {
- case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
- return 0;
- case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
- return -EEXIST;
- case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
- return -EINVAL;
- case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
- return -EIO;
- }
- return -EIO;
-}
-
-static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
- struct mlx5_fpga_ipsec_cmd_context *context;
- enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
- struct mlx5_fpga_device *fdev = cb_arg;
- unsigned long flags;
-
- if (buf->sg[0].size < sizeof(*resp)) {
- mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
- buf->sg[0].size, sizeof(*resp));
- return;
- }
-
- mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
- ntohl(resp->syndrome));
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
- struct mlx5_fpga_ipsec_cmd_context,
- list);
- if (context)
- list_del(&context->list);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (!context) {
- mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
- return;
- }
- mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
-
- syndrome = ntohl(resp->syndrome);
- context->status_code = syndrome_to_errno(syndrome);
- context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
- memcpy(&context->resp, resp, sizeof(*resp));
-
- if (context->status_code)
- mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
- syndrome);
-
- complete(&context->complete);
-}
-
-static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
- const void *cmd, int cmd_size)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned long flags;
- int res;
-
- if (!fdev || !fdev->ipsec)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (cmd_size & 3)
- return ERR_PTR(-EINVAL);
-
- context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
- context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
- context->dev = fdev;
- context->buf.complete = mlx5_fpga_ipsec_send_complete;
- init_completion(&context->complete);
- memcpy(&context->command, cmd, cmd_size);
- context->buf.sg[0].size = cmd_size;
- context->buf.sg[0].data = &context->command;
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
- if (!res)
- list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (res) {
- mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
- kfree(context);
- return ERR_PTR(res);
- }
-
- /* Context should be freed by the caller after completion. */
- return context;
-}
-
-static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
-{
- struct mlx5_fpga_ipsec_cmd_context *context = ctx;
- unsigned long timeout =
- msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
- int res;
-
- res = wait_for_completion_timeout(&context->complete, timeout);
- if (!res) {
- mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
- return -ETIMEDOUT;
- }
-
- if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
- res = context->status_code;
- else
- res = -EIO;
-
- return res;
-}
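
Taken together, the two helpers above form this file's command pattern: cmd_exec() queues the mailbox and returns a context, cmd_wait() blocks on its completion, and the caller frees the context afterwards. Schematically (this mirrors mlx5_fpga_ipsec_set_caps() further down rather than adding anything new):

    ctx = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
    if (IS_ERR(ctx))
            return PTR_ERR(ctx);
    err = mlx5_fpga_ipsec_cmd_wait(ctx);   /* response now embedded in ctx */
    kfree(ctx);
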
-
-static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
-{
- if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
- return true;
- return false;
-}
-
-static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
- int opcode)
-{
- struct mlx5_core_dev *dev = fdev->mdev;
- struct mlx5_ifc_fpga_ipsec_sa *sa;
- struct mlx5_fpga_ipsec_cmd_context *cmd_context;
- size_t sa_cmd_size;
- int err;
-
- hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
- if (is_v2_sadb_supported(fdev->ipsec))
- sa_cmd_size = sizeof(*hw_sa);
- else
- sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
-
- cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
- mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
- if (IS_ERR(cmd_context))
- return PTR_ERR(cmd_context);
-
- err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
- if (err)
- goto out;
-
- sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
- if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
- mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
- ntohl(sa->ipsec_sa_v1.sw_sa_handle),
- ntohl(cmd_context->resp.sw_sa_handle));
- err = -EIO;
- }
-
-out:
- kfree(cmd_context);
- return err;
-}
-
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- u32 ret = 0;
-
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
- ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
- } else {
- return ret;
- }
-
- if (!fdev->ipsec)
- return ret;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
- ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
- ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
- ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
- ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
- ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
-
- return ret;
-}
-
-static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- number_of_ipsec_counters);
-}
-
-static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int counters_count)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned int i;
- __be32 *data;
- u32 count;
- u64 addr;
- int ret;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_low) +
- ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_high) << 32);
-
- count = mlx5_fpga_ipsec_counters_count(mdev);
-
- data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
- MLX5_FPGA_ACCESS_TYPE_DONTCARE);
- if (ret < 0) {
- mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
- ret);
- goto out;
- }
- ret = 0;
-
- if (count > counters_count)
- count = counters_count;
-
- /* Each counter is low word, then high. But each word is big-endian */
- for (i = 0; i < count; i++)
- counters[i] = (u64)ntohl(data[i * 2]) |
- ((u64)ntohl(data[i * 2 + 1]) << 32);
-
-out:
- kfree(data);
- return ret;
-}
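
The counter layout above (per counter: big-endian low word, then big-endian high word) is easy to get backwards, so here is a runnable illustration with concrete values; only the htonl()/ntohl() semantics are assumed:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t data[2] = { htonl(0x00000001), htonl(0x00000002) };
            uint64_t counter = (uint64_t)ntohl(data[0]) |
                               ((uint64_t)ntohl(data[1]) << 32);

            printf("counter = 0x%llx\n", (unsigned long long)counter);
            /* prints 0x200000001: low word 1, high word 2 */
            return 0;
    }
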
-
-static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
- int err;
-
- cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
- cmd.flags = htonl(flags);
- context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context))
- return PTR_ERR(context);
-
- err = mlx5_fpga_ipsec_cmd_wait(context);
- if (err)
- goto out;
-
- if ((context->resp.flags & cmd.flags) != cmd.flags) {
- mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
- cmd.flags,
- context->resp.flags);
- err = -EIO;
- }
-
-out:
- kfree(context);
- return err;
-}
-
-static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
-{
- u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
- u32 flags = 0;
-
- if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
- flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
-
- return mlx5_fpga_ipsec_set_caps(mdev, flags);
-}
-
-static void
-mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
-
- /* key */
- memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
- aes_gcm->key_len / 8);
- /* Duplicate 128 bit key twice according to HW layout */
- if (aes_gcm->key_len == 128)
- memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
- aes_gcm->aes_key, aes_gcm->key_len / 8);
-
- /* salt and seq_iv */
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
- sizeof(aes_gcm->seq_iv));
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
- sizeof(aes_gcm->salt));
-
- /* esn */
- if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags |=
- (xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = htonl(xfrm_attrs->esn);
- } else {
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags &=
- ~(xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = 0;
- }
-
- /* rx handle */
- hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
-
- /* enc mode */
- switch (aes_gcm->key_len) {
- case 128:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
- break;
- case 256:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
- break;
- }
-
- /* flags */
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
- MLX5_FPGA_IPSEC_SA_SPI_EN |
- MLX5_FPGA_IPSEC_SA_IP_ESP;
-
- if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
- else
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
-}
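
One subtlety above is the key duplication: the hardware key slot is sized for a 256-bit key, so a 128-bit key is written twice, back to back. An isolated sketch of just that step (the 32-byte slot size is assumed from the duplication at offset 16):

    #include <stdint.h>
    #include <string.h>

    /* Fill a 32-byte HW key slot; 128-bit keys are duplicated per HW layout. */
    static void fill_key_slot(uint8_t slot[32], const uint8_t *key,
                              unsigned int key_bits)
    {
            memcpy(slot, key, key_bits / 8);
            if (key_bits == 128)
                    memcpy(slot + 16, key, 16);
    }
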
-
-static void
-mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
-
- /* IPs */
- memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
- memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));
-
- /* SPI */
- hw_sa->ipsec_sa_v1.spi = spi;
-
- /* flags */
- if (is_ipv6)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
-}
-
-static bool is_full_mask(const void *p, size_t len)
-{
- WARN_ON(len % 4);
-
- return !memchr_inv(p, 0xff, len);
-}
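
is_full_mask() relies on the kernel's memchr_inv(), which returns NULL when every byte matches the given value. A userspace stand-in, for readers without the kernel source at hand:

    #include <stdbool.h>
    #include <stddef.h>

    /* Equivalent of memchr_inv(p, c, len): first byte != c, or NULL. */
    static const void *memchr_inv_sketch(const void *p, int c, size_t len)
    {
            const unsigned char *s = p;
            size_t i;

            for (i = 0; i < len; i++)
                    if (s[i] != (unsigned char)c)
                            return s + i;
            return NULL;
    }

    static bool is_full_mask_sketch(const void *p, size_t len)
    {
            return !memchr_inv_sketch(p, 0xff, len);   /* all bytes 0xff? */
    }
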
-
-static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
- const u32 *match_c,
- const u32 *match_v)
-{
- const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- misc_parameters);
- const void *headers_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param,
- match_v,
- outer_headers);
-
- if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
- const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4);
- const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-
- if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)) ||
- !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)))
- return false;
- } else {
- const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv6_layout.ipv6);
- const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
-
- if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)) ||
- !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)))
- return false;
- }
-
- if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
- outer_esp_spi),
- MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v)
-{
- u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
- bool ipv6_flow;
-
- ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
-
- if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
- mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
- mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
- mlx5_fs_is_vxlan_flow(match_c) ||
- !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
- ipv6_flow))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
- mlx5_fs_is_outer_ipsec_flow(match_c))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
- ipv6_flow)
- return false;
-
- if (!validate_fpga_full_mask(dev, match_c, match_v))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_context *flow_context)
-{
- const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
- bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
- int ret;
-
- ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
- match_v);
- if (!ret)
- return ret;
-
- if (is_dmac || is_smac ||
- (match_criteria_enable &
- ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
- (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
- return false;
-
- return true;
-}
-
-static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle)
-{
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(accel_xfrm, typeof(*fpga_xfrm),
- accel_xfrm);
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode, err;
- void *context;
-
- /* alloc SA */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- /* build candidate SA */
- mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
- saddr, daddr, spi, is_ipv6,
- &sa_ctx->hw_sa);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
- /* all rules must use the same IPs and SPI */
- if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
- sizeof(sa_ctx->hw_sa))) {
- context = ERR_PTR(-EINVAL);
- goto exists;
- }
-
- ++fpga_xfrm->num_rules;
- context = fpga_xfrm->sa_ctx;
- goto exists;
- }
-
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
- err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
- if (err < 0) {
- context = ERR_PTR(err);
- goto exists;
- }
-
- sa_ctx->sa_handle = err;
- if (sa_handle)
- *sa_handle = sa_ctx->sa_handle;
- }
- /* This is an unbound fpga_xfrm; try to add it to the hash */
- mutex_lock(&fipsec->sa_hash_lock);
-
- err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa);
- if (err) {
- /* Can't bind a different accel_xfrm to an already existing
- * sa_ctx, because we can't support multiple keymats for
- * the same IPs and SPI
- */
- context = ERR_PTR(-EEXIST);
- goto unlock_hash;
- }
-
- /* Bind accel_xfrm to sa_ctx */
- opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- context = ERR_PTR(err);
- goto delete_hash;
- }
-
- mutex_unlock(&fipsec->sa_hash_lock);
-
- ++fpga_xfrm->num_rules;
- fpga_xfrm->sa_ctx = sa_ctx;
- sa_ctx->fpga_xfrm = fpga_xfrm;
-
- mutex_unlock(&fpga_xfrm->lock);
-
- return sa_ctx;
-
-delete_hash:
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
-unlock_hash:
- mutex_unlock(&fipsec->sa_hash_lock);
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-exists:
- mutex_unlock(&fpga_xfrm->lock);
- kfree(sa_ctx);
- return context;
-}
-
-static void *
-mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- bool is_egress)
-{
- struct mlx5_accel_esp_xfrm *accel_xfrm;
- __be32 saddr[4], daddr[4], spi;
- struct mlx5_flow_group *fg;
- bool is_ipv6 = false;
-
- fs_get_obj(fg, fte->node.parent);
- /* validate */
- if (is_egress &&
- !mlx5_is_fpga_egress_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val,
- &fte->action,
- &fte->flow_context))
- return ERR_PTR(-EINVAL);
- else if (!mlx5_is_fpga_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val))
- return ERR_PTR(-EINVAL);
-
- /* get xfrm context */
- accel_xfrm =
- (struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
-
- /* IPs */
- if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
- fte->val)) {
- memcpy(&saddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
- sizeof(saddr[3]));
- memcpy(&daddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- sizeof(daddr[3]));
- } else {
- memcpy(saddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(saddr));
- memcpy(daddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(daddr));
- is_ipv6 = true;
- }
-
- /* SPI */
- spi = MLX5_GET_BE(typeof(spi),
- fte_match_param, fte->val,
- misc_parameters.outer_esp_spi);
-
- /* create */
- return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
- saddr, daddr,
- spi, is_ipv6, NULL);
-}
-
-static void
-mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
-{
- struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
- int err;
-
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- WARN_ON(err);
- return;
- }
-
- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
- MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-
- mutex_lock(&fipsec->sa_hash_lock);
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
- mutex_unlock(&fipsec->sa_hash_lock);
-}
-
-static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
-
- mutex_lock(&fpga_xfrm->lock);
- if (!--fpga_xfrm->num_rules) {
- mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
- kfree(fpga_xfrm->sa_ctx);
- fpga_xfrm->sa_ctx = NULL;
- }
- mutex_unlock(&fpga_xfrm->lock);
-}
-
-static inline struct mlx5_fpga_ipsec_rule *
-_rule_search(struct rb_root *root, struct fs_fte *fte)
-{
- struct rb_node *node = root->rb_node;
-
- while (node) {
- struct mlx5_fpga_ipsec_rule *rule =
- container_of(node, struct mlx5_fpga_ipsec_rule,
- node);
-
- if (rule->fte < fte)
- node = node->rb_left;
- else if (rule->fte > fte)
- node = node->rb_right;
- else
- return rule;
- }
- return NULL;
-}
-
-static struct mlx5_fpga_ipsec_rule *
-rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
-{
- struct mlx5_fpga_ipsec_rule *rule;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rule = _rule_search(&ipsec_dev->rules_rb, fte);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return rule;
-}
-
-static inline int _rule_insert(struct rb_root *root,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_node **new = &root->rb_node, *parent = NULL;
-
- /* Figure out where to put new node */
- while (*new) {
- struct mlx5_fpga_ipsec_rule *this =
- container_of(*new, struct mlx5_fpga_ipsec_rule,
- node);
-
- parent = *new;
- if (rule->fte < this->fte)
- new = &((*new)->rb_left);
- else if (rule->fte > this->fte)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
-
- /* Add new node and rebalance tree. */
- rb_link_node(&rule->node, parent, new);
- rb_insert_color(&rule->node, root);
-
- return 0;
-}
-
-static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- int ret;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- ret = _rule_insert(&ipsec_dev->rules_rb, rule);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return ret;
-}
-
-static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_root *root = &ipsec_dev->rules_rb;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rb_erase(&rule->node, root);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-}
-
-static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- _rule_delete(ipsec_dev, rule);
- kfree(rule);
-}
-
-struct mailbox_mod {
- uintptr_t saved_esp_id;
- u32 saved_action;
- u32 saved_outer_esp_spi_value;
-};
-
-static void restore_spec_mailbox(struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
- mbox_mod->saved_outer_esp_spi_value);
- fte->action.action |= mbox_mod->saved_action;
- fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
-}
-
-static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- mbox_mod->saved_esp_id = fte->action.esp_id;
- mbox_mod->saved_action = fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- mbox_mod->saved_outer_esp_spi_value =
- MLX5_GET(fte_match_set_misc, misc_params_v,
- outer_esp_spi);
-
- fte->action.esp_id = 0;
- fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- if (!MLX5_CAP_FLOWTABLE(mdev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
-}
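
modify_spec_mailbox() and restore_spec_mailbox() always bracket the firmware call so the software copy of the FTE is never left with the ESP action and SPI stripped; the create/update/delete paths below all use the same sandwich. Schematically:

    modify_spec_mailbox(dev, fte, &mbox_mod);   /* hide ESP bits from FW */
    ret = create_fte(ns, ft, fg, fte);          /* or update_fte()/delete_fte() */
    restore_spec_mailbox(fte, &mbox_mod);       /* put the SW copy back */
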
-
-static enum fs_flow_table_type egress_to_fs_ft(bool egress)
-{
- return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
-}
-
-static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg,
- bool is_egress)
-{
- int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft, u32 *in,
- struct mlx5_flow_group *fg) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
- char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria.misc_parameters);
- struct mlx5_core_dev *dev = ns->dev;
- u32 saved_outer_esp_spi_mask;
- u8 match_criteria_enable;
- int ret;
-
- if (MLX5_CAP_FLOWTABLE(dev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- return create_flow_group(ns, ft, in, fg);
-
- match_criteria_enable =
- MLX5_GET(create_flow_group_in, in, match_criteria_enable);
- saved_outer_esp_spi_mask =
- MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
- if (!match_criteria_enable || !saved_outer_esp_spi_mask)
- return create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
-
- if (!(*misc_params_c) &&
- !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
- MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
-
- ret = create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
- MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*create_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return create_fte(ns, ft, fg, fte);
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
- return -ENOMEM;
-
- rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
- if (IS_ERR(rule->ctx)) {
- int err = PTR_ERR(rule->ctx);
-
- kfree(rule);
- return err;
- }
-
- rule->fte = fte;
- WARN_ON(rule_insert(fipsec, rule));
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = create_fte(ns, ft, fg, fte);
- restore_spec_mailbox(fte, &mbox_mod);
- if (ret) {
- _rule_delete(fipsec, rule);
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- kfree(rule);
- }
-
- return ret;
-}
-
-static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*update_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
- struct mlx5_core_dev *dev = ns->dev;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return update_fte(ns, ft, fg, modify_mask, fte);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = update_fte(ns, ft, fg, modify_mask, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return delete_fte(ns, ft, fte);
-
- rule = rule_search(fipsec, fte);
- if (!rule)
- return -ENOENT;
-
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- rule_delete(fipsec, rule);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = delete_fte(ns, ft, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
-}
-
-static struct mlx5_flow_cmds fpga_ipsec_ingress;
-static struct mlx5_flow_cmds fpga_ipsec_egress;
-
-const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- switch (type) {
- case FS_FT_NIC_RX:
- return &fpga_ipsec_ingress;
- case FS_FT_NIC_TX:
- return &fpga_ipsec_egress;
- default:
- WARN_ON(true);
- return NULL;
- }
-}
-
-static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn *conn;
- int err;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return 0;
-
- fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
- if (!fdev->ipsec)
- return -ENOMEM;
-
- fdev->ipsec->fdev = fdev;
-
- err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
- fdev->ipsec->caps);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
- err);
- goto error;
- }
-
- INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
- spin_lock_init(&fdev->ipsec->pending_cmds_lock);
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_ipsec_recv;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
- err);
- goto error;
- }
- fdev->ipsec->conn = conn;
-
- err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
- if (err)
- goto err_destroy_conn;
- mutex_init(&fdev->ipsec->sa_hash_lock);
-
- fdev->ipsec->rules_rb = RB_ROOT;
- mutex_init(&fdev->ipsec->rules_rb_lock);
-
- err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
- err);
- goto err_destroy_hash;
- }
-
- ida_init(&fdev->ipsec->halloc);
-
- return 0;
-
-err_destroy_hash:
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
-err_destroy_conn:
- mlx5_fpga_sbu_conn_destroy(conn);
-
-error:
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
- return err;
-}
-
-static void destroy_rules_rb(struct rb_root *root)
-{
- struct mlx5_fpga_ipsec_rule *r, *tmp;
-
- rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
- rb_erase(&r->node, root);
- mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
- kfree(r);
- }
-}
-
-static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return;
-
- ida_destroy(&fdev->ipsec->halloc);
- destroy_rules_rb(&fdev->ipsec->rules_rb);
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
- mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
-}
-
-void mlx5_fpga_ipsec_build_fs_cmds(void)
-{
- /* ingress */
- fpga_ipsec_ingress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
- fpga_ipsec_ingress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
- fpga_ipsec_ingress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
- fpga_ipsec_ingress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_ingress;
- fpga_ipsec_ingress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
- fpga_ipsec_ingress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_ingress;
- fpga_ipsec_ingress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_ingress;
- fpga_ipsec_ingress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_ingress;
- fpga_ipsec_ingress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
-
- /* egress */
- fpga_ipsec_egress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
- fpga_ipsec_egress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
- fpga_ipsec_egress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
- fpga_ipsec_egress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_egress;
- fpga_ipsec_egress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
- fpga_ipsec_egress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_egress;
- fpga_ipsec_egress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_egress;
- fpga_ipsec_egress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_egress;
- fpga_ipsec_egress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
-}
-
-static int
-mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->tfc_pad) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.icv_len != 128) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
- v2_command))) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-
- if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
- mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
- mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
- if (!fpga_xfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&fpga_xfrm->lock);
- memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
- sizeof(fpga_xfrm->accel_xfrm.attrs));
-
- return &fpga_xfrm->accel_xfrm;
-}
-
-static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(xfrm, struct mlx5_fpga_esp_xfrm,
- accel_xfrm);
- /* assuming no sa_ctx is connected to this xfrm_ctx */
- kfree(fpga_xfrm);
-}
-
-static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- struct mlx5_core_dev *mdev = xfrm->mdev;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
- struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
- mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
- return -EOPNOTSUPP;
- }
-
- if (is_v2_sadb_supported(fipsec)) {
- mlx5_core_warn(mdev, "Modify esp is not supported\n");
- return -EOPNOTSUPP;
- }
-
- fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (!fpga_xfrm->sa_ctx)
- /* Unbound xfrm; change only the sw attrs */
- goto change_sw_xfrm_attrs;
-
- /* copy original hw sa */
- memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
- mutex_lock(&fipsec->sa_hash_lock);
- /* remove original hw sa from hash */
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa));
- /* update hw_sa with new xfrm attrs */
- mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
- &fpga_xfrm->sa_ctx->hw_sa);
- /* try to insert new hw_sa to hash */
- err = rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa);
- if (err)
- goto rollback_sa;
-
- /* modify device with new hw_sa */
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
- fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err)
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
-rollback_sa:
- if (err) {
- /* return original hw_sa to hash */
- memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
- sizeof(org_hw_sa));
- WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
- }
- mutex_unlock(&fipsec->sa_hash_lock);
-
-change_sw_xfrm_attrs:
- if (!err)
- memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
- mutex_unlock(&fpga_xfrm->lock);
- return err;
-}
-
-static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
- .device_caps = mlx5_fpga_ipsec_device_caps,
- .counters_count = mlx5_fpga_ipsec_counters_count,
- .counters_read = mlx5_fpga_ipsec_counters_read,
- .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
- .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
- .init = mlx5_fpga_ipsec_init,
- .cleanup = mlx5_fpga_ipsec_cleanup,
- .esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
- .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
- .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
-};
-
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return NULL;
-
- return &fpga_ipsec_ops;
-}
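
The ops table above is what kept the accel layer backend-agnostic: it asked the FPGA backend for its vtable and fell back when none was offered. A hedged sketch of such a caller (the surrounding error handling is illustrative, not the accel layer's real code):

    const struct mlx5_accel_ipsec_ops *ops = mlx5_fpga_ipsec_ops(mdev);

    if (ops && ops->init)
            err = ops->init(mdev);   /* backend present */
    else
            err = 0;                 /* no FPGA IPsec: nothing to do */
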
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
deleted file mode 100644
index 8931b5584477..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_IPSEC_H__
-#define __MLX5_FPGA_IPSEC_H__
-
-#include "accel/ipsec.h"
-#include "fs_cmd.h"
-
-#ifdef CONFIG_MLX5_FPGA_IPSEC
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
-const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
-void mlx5_fpga_ipsec_build_fs_cmds(void);
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
-#else
-static inline
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{ return NULL; }
-static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- return mlx5_fs_cmd_get_default(type);
-}
-
-static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {}
-static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif /* CONFIG_MLX5_FPGA_IPSEC */
-#endif /* __MLX5_FPGA_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
deleted file mode 100644
index 29b7339ebfa3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ /dev/null
@@ -1,622 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-#include "fpga/tls.h"
-#include "fpga/cmd.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-#include "accel/tls.h"
-
-struct mlx5_fpga_tls_command_context;
-
-typedef void (*mlx5_fpga_tls_command_complete)
- (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *ctx,
- struct mlx5_fpga_dma_buf *resp);
-
-struct mlx5_fpga_tls_command_context {
- struct list_head list;
- /* There is no guarantee on the order between the TX completion
- * and the command response.
- * The TX completion is going to touch cmd->buf even in
- * the case of successful transmission.
- * So instead of requiring separate allocations for cmd
- * and cmd->buf we've decided to use a reference counter
-	 * and cmd->buf we've decided to use a reference counter.
- refcount_t ref;
- struct mlx5_fpga_dma_buf buf;
- mlx5_fpga_tls_command_complete complete;
-};
-
-static void
-mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
-{
- if (refcount_dec_and_test(&ctx->ref))
- kfree(ctx);
-}
-
-static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_fpga_conn *conn = fdev->tls->conn;
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- ctx = list_first_entry(&tls->pending_cmds,
- struct mlx5_fpga_tls_command_context, list);
- list_del(&ctx->list);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
- ctx->complete(conn, fdev, ctx, resp);
-}
-
-static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_tls_command_context *ctx =
- container_of(buf, struct mlx5_fpga_tls_command_context, buf);
-
- mlx5_fpga_tls_put_command_ctx(ctx);
-
- if (unlikely(status))
- mlx5_fpga_tls_cmd_complete(fdev, NULL);
-}
-
-static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- mlx5_fpga_tls_command_complete complete)
-{
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
- int ret;
-
- refcount_set(&cmd->ref, 2);
- cmd->complete = complete;
- cmd->buf.complete = mlx5_fpga_cmd_send_complete;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
- * to make sure commands are inserted to the tls->pending_cmds list
- * and the command QP in the same order.
- */
- ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
- if (likely(!ret))
- list_add_tail(&cmd->list, &tls->pending_cmds);
- else
- complete(tls->conn, fdev, cmd, NULL);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
-}
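
As the comment in struct mlx5_fpga_tls_command_context explains, the TX completion and the command response may arrive in either order, and both touch the command buffer, so the context starts with a reference count of 2 and whichever path drops the last reference frees it. Below is a standalone C11 model of that lifecycle (build with -pthread); cmd_put, tx_completion and response_handler are invented names for illustration, not driver code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	atomic_int ref;		/* starts at 2: TX completion + response */
	int payload;
};

static void cmd_put(struct cmd *c)
{
	/* Free only when the second (last) holder drops its reference. */
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		printf("freeing cmd (payload=%d)\n", c->payload);
		free(c);
	}
}

static void *tx_completion(void *arg)
{
	cmd_put(arg);
	return NULL;
}

static void *response_handler(void *arg)
{
	cmd_put(arg);
	return NULL;
}

int main(void)
{
	struct cmd *c = malloc(sizeof(*c));
	pthread_t t1, t2;

	atomic_init(&c->ref, 2);
	c->payload = 42;

	/* The two paths may run in either order; the refcount makes
	 * the free safe regardless. */
	pthread_create(&t1, NULL, tx_completion, c);
	pthread_create(&t2, NULL, response_handler, c);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
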
-
-/* Start of context identifiers range (inclusive) */
-#define SWID_START 0
-/* End of context identifiers range (exclusive) */
-#define SWID_END BIT(24)
-
-static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
- void *ptr)
-{
- unsigned long flags;
- int ret;
-
-	/* TLS metadata format is 1 byte for syndrome followed
-	 * by 3 bytes of swid (software ID);
-	 * the swid must not exceed 3 bytes.
-	 * See tls_rxtx.c:insert_pet() for details.
-	 */
- BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(idr_spinlock, flags);
- ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irqrestore(idr_spinlock, flags);
- idr_preload_end();
-
- return ret;
-}
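
The BUILD_BUG_ON in the allocator above pins SWID_END to the 3 swid bytes available in the TLS metadata word (1 syndrome byte followed by 3 swid bytes). The same compile-time guarantee can be modeled in plain C11 with _Static_assert; this is a sketch of the check, not the kernel macro.

#include <stdint.h>

#define SWID_START 0u
#define SWID_END   (1u << 24)	/* exclusive: swids occupy 3 bytes */

/* Fails to compile if any allocatable swid would spill into the
 * syndrome byte (the top 8 bits of the 32-bit metadata word). */
_Static_assert(((SWID_END - 1) & 0xFF000000u) == 0,
	       "swid range must fit in 24 bits");

int main(void)
{
	return (int)SWID_START;
}
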
-
-static void *mlx5_fpga_tls_release_swid(struct idr *idr,
- spinlock_t *idr_spinlock, u32 swid)
-{
- unsigned long flags;
- void *ptr;
-
- spin_lock_irqsave(idr_spinlock, flags);
- ptr = idr_remove(idr, swid);
- spin_unlock_irqrestore(idr_spinlock, flags);
- return ptr;
-}
-
-static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf, u8 status)
-{
- kfree(buf);
-}
-
-static void
-mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- if (resp) {
- u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
-
- if (syndrome)
- mlx5_fpga_err(fdev,
- "Teardown stream failed with syndrome = %d",
- syndrome);
- }
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
-{
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
- MLX5_BYTE_OFF(tls_flow, ipv6));
-
- MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
- MLX5_SET(tls_cmd, cmd, direction_sx,
- MLX5_GET(tls_flow, flow, direction_sx));
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- struct mlx5_fpga_dma_buf *buf;
- int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
- void *flow;
- void *cmd;
- int ret;
-
- buf = kzalloc(size, GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
-
- cmd = (buf + 1);
-
- rcu_read_lock();
- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- if (unlikely(!flow)) {
- rcu_read_unlock();
- WARN_ONCE(1, "Received NULL pointer for handle\n");
- kfree(buf);
- return -EINVAL;
- }
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- rcu_read_unlock();
-
- MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
- MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
- MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- buf->complete = mlx_tls_kfree_complete;
-
- ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
- if (ret < 0)
- kfree(buf);
-
- return ret;
-}
-
-static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
- void *flow, u32 swid, gfp_t flags)
-{
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_dma_buf *buf;
- void *cmd;
-
- ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
- if (!ctx)
- return;
-
- buf = &ctx->buf;
- cmd = (ctx + 1);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
- MLX5_SET(tls_cmd, cmd, swid, swid);
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- kfree(flow);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
- mlx5_fpga_tls_teardown_completion);
-}
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- void *flow;
-
- if (direction_sx)
- flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock,
- swid);
- else
- flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock,
- swid);
-
- if (!flow) {
- mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
- swid);
- return;
- }
-
- synchronize_rcu(); /* before kfree(flow) */
- mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
-}
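
mlx5_fpga_tls_del_flow orders the teardown carefully: the flow is first unpublished from the IDR, then synchronize_rcu() waits out any readers (such as mlx5_fpga_tls_resync_rx, which dereferences the flow under rcu_read_lock()), and only then is the flow copied into the teardown command and freed. A userspace analogue of that ordering, assuming the liburcu library is available (the flow/table names are invented):

/* Build with liburcu, e.g.: gcc demo.c -lurcu */
#include <urcu.h>		/* userspace RCU */
#include <stdio.h>
#include <stdlib.h>

struct flow {
	int swid;
};

static struct flow *table;	/* stands in for the rx IDR slot */

static void reader(void)
{
	rcu_read_lock();
	struct flow *f = rcu_dereference(table);
	if (f)
		printf("reader sees swid %d\n", f->swid);
	rcu_read_unlock();
}

static void deleter(void)
{
	struct flow *f = table;

	rcu_assign_pointer(table, NULL);	/* unpublish (idr_remove) */
	synchronize_rcu();			/* wait out all readers */
	free(f);				/* now safe to reuse */
}

int main(void)
{
	rcu_register_thread();
	table = malloc(sizeof(*table));
	table->swid = 7;
	reader();
	deleter();
	rcu_unregister_thread();
	return 0;
}
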
-
-enum mlx5_fpga_setup_stream_status {
- MLX5_FPGA_CMD_PENDING,
- MLX5_FPGA_CMD_SEND_FAILED,
- MLX5_FPGA_CMD_RESPONSE_RECEIVED,
- MLX5_FPGA_CMD_ABANDONED,
-};
-
-struct mlx5_setup_stream_context {
- struct mlx5_fpga_tls_command_context cmd;
- atomic_t status;
- u32 syndrome;
- struct completion comp;
-};
-
-static void
-mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_setup_stream_context *ctx =
- container_of(cmd, struct mlx5_setup_stream_context, cmd);
- int status = MLX5_FPGA_CMD_SEND_FAILED;
- void *tls_cmd = ctx + 1;
-
-	/* If we failed to send the command, resp == NULL */
- if (resp) {
- ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
- status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
- }
-
- status = atomic_xchg_release(&ctx->status, status);
- if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
- complete(&ctx->comp);
- return;
- }
-
- mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
- ctx->syndrome);
-
- if (!ctx->syndrome) {
- /* The process was killed while waiting for the context to be
- * added, and the add completed successfully.
-	 * We need to destroy the HW context, and we can't reuse
- * the command context because we might not have received
- * the tx completion yet.
- */
- mlx5_fpga_tls_del_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC,
- MLX5_GET(tls_cmd, tls_cmd,
- direction_sx));
- }
-
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
- struct mlx5_setup_stream_context *ctx)
-{
- struct mlx5_fpga_dma_buf *buf;
- void *cmd = ctx + 1;
- int status, ret = 0;
-
- buf = &ctx->cmd.buf;
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);
-
- init_completion(&ctx->comp);
- atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
- ctx->syndrome = -1;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
- mlx5_fpga_tls_setup_completion);
- wait_for_completion_killable(&ctx->comp);
-
- status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
- if (unlikely(status == MLX5_FPGA_CMD_PENDING))
- /* ctx is going to be released in mlx5_fpga_tls_setup_completion */
- return -EINTR;
-
- if (unlikely(ctx->syndrome))
- ret = -ENOMEM;
-
- mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
- return ret;
-}
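
The killable wait above resolves the race between an interrupted caller and a late response with a single atomic exchange on ctx->status: the completion handler publishes RESPONSE_RECEIVED (or SEND_FAILED), the waiter publishes ABANDONED, and whichever side observes the other's published value takes over the cleanup. A compact userspace model of that handshake follows; the names are invented and this is not the driver code.

#include <stdatomic.h>
#include <stdio.h>

enum { PENDING, SEND_FAILED, RESPONSE_RECEIVED, ABANDONED };

static atomic_int status = PENDING;

/* Completion path: publish the result, but back off if the waiter
 * already gave up -- then the completion path owns the cleanup. */
static void complete_cmd(int result)
{
	int old = atomic_exchange(&status, result);

	if (old == ABANDONED)
		printf("waiter left: completion path does the cleanup\n");
	else
		printf("waiter still present: wake it up\n");
}

/* Waiter path: if the wait was interrupted, mark the command
 * ABANDONED; whoever sees the other's flag performs the cleanup. */
static void waiter(int interrupted)
{
	if (!interrupted)
		return;

	int old = atomic_exchange(&status, ABANDONED);

	if (old == PENDING)
		printf("no response yet: completion path will clean up\n");
	else
		printf("response raced in (%d): waiter cleans up\n", old);
}

int main(void)
{
	waiter(1);			 /* killed before the response */
	complete_cmd(RESPONSE_RECEIVED); /* completion sees ABANDONED */
	return 0;
}
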
-
-static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
- struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;
-
- mlx5_fpga_tls_cmd_complete(fdev, buf);
-}
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
- return false;
-
- return true;
-}
-
-static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
- u32 *p_caps)
-{
- int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
- u32 caps = 0;
- void *buf;
-
- buf = kzalloc(cap_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
- if (err)
- goto out;
-
- if (MLX5_GET(tls_extended_cap, buf, tx))
- caps |= MLX5_ACCEL_TLS_TX;
- if (MLX5_GET(tls_extended_cap, buf, rx))
- caps |= MLX5_ACCEL_TLS_RX;
- if (MLX5_GET(tls_extended_cap, buf, tls_v12))
- caps |= MLX5_ACCEL_TLS_V12;
- if (MLX5_GET(tls_extended_cap, buf, tls_v13))
- caps |= MLX5_ACCEL_TLS_V13;
- if (MLX5_GET(tls_extended_cap, buf, lro))
- caps |= MLX5_ACCEL_TLS_LRO;
- if (MLX5_GET(tls_extended_cap, buf, ipv6))
- caps |= MLX5_ACCEL_TLS_IPV6;
-
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
- caps |= MLX5_ACCEL_TLS_AES_GCM128;
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
- caps |= MLX5_ACCEL_TLS_AES_GCM256;
-
- *p_caps = caps;
- err = 0;
-out:
- kfree(buf);
- return err;
-}
-
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_conn *conn;
- struct mlx5_fpga_tls *tls;
- int err = 0;
-
- if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
- if (err)
- goto error;
-
- if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
- err = -ENOTSUPP;
- goto error;
- }
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
- err);
- goto error;
- }
-
- tls->conn = conn;
- spin_lock_init(&tls->pending_cmds_lock);
- INIT_LIST_HEAD(&tls->pending_cmds);
-
- idr_init(&tls->tx_idr);
- idr_init(&tls->rx_idr);
- spin_lock_init(&tls->tx_idr_spinlock);
- spin_lock_init(&tls->rx_idr_spinlock);
- fdev->tls = tls;
- return 0;
-
-error:
- kfree(tls);
- return err;
-}
-
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->tls)
- return;
-
- mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
- kfree(fdev->tls);
- fdev->tls = NULL;
-}
-
-static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
- struct tls_crypto_info *info,
- __be64 *rcd_sn)
-{
- struct tls12_crypto_info_aes_gcm_128 *crypto_info =
- (struct tls12_crypto_info_aes_gcm_128 *)info;
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
- TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
- crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- /* in AES-GCM 128 we need to write the key twice */
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
- TLS_CIPHER_AES_GCM_128_KEY_SIZE,
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
-}
-
-static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
- struct tls_crypto_info *crypto_info)
-{
- __be64 rcd_sn;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
- return -EINVAL;
- mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 swid, u32 tcp_sn)
-{
- u32 caps = mlx5_fpga_tls_device_caps(mdev);
- struct mlx5_setup_stream_context *ctx;
- int ret = -ENOMEM;
- size_t cmd_size;
- void *cmd;
-
- cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
- ctx = kzalloc(cmd_size, GFP_KERNEL);
- if (!ctx)
- goto out;
-
- cmd = ctx + 1;
- ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
- if (ret)
- goto free_ctx;
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
-
- MLX5_SET(tls_cmd, cmd, swid, swid);
- MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);
-
- return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
-
-free_ctx:
- kfree(ctx);
-out:
- return ret;
-}
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- int ret = -ENOMEM;
- u32 swid;
-
- if (direction_sx)
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, flow);
- else
- ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, flow);
-
- if (ret < 0)
- return ret;
-
- swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
-
- ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
- if (ret && ret != -EINTR)
- goto free_swid;
-
- *p_swid = swid;
- return 0;
-free_swid:
- if (direction_sx)
- mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, swid);
- else
- mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, swid);
-
- return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
deleted file mode 100644
index 5714cf391d1b..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_TLS_H__
-#define __MLX5_FPGA_TLS_H__
-
-#include <linux/mlx5/driver.h>
-
-#include <net/tls.h>
-#include "fpga/core.h"
-
-struct mlx5_fpga_tls {
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps;
- struct mlx5_fpga_conn *conn;
-
- struct idr tx_idr;
- struct idr rx_idr;
- spinlock_t tx_idr_spinlock; /* protects the IDR */
- spinlock_t rx_idr_spinlock; /* protects the IDR */
-};
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx);
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
-
-static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mdev->fpga->tls->caps;
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-
-#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a0ac17c3f12f..33e9f86cf7d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -878,9 +878,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
-#ifdef CONFIG_MLX5_IPSEC
case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
-#endif
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 816d991f7621..297e6a468a3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,8 +40,6 @@
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -188,24 +186,18 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
-#ifdef CONFIG_MLX5_IPSEC
.ar_size = 2,
-#else
- .ar_size = 1,
-#endif
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
-#ifdef CONFIG_MLX5_IPSEC
ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
KERNEL_TX_IPSEC_NUM_LEVELS))),
-#endif
}
};
@@ -2519,10 +2511,6 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
- (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
- cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
-
/* Create the root namespace */
root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
@@ -3172,8 +3160,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 614687e0e3d9..cfb8bedba512 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,7 +35,6 @@
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"
-#include "accel/tls.h"
enum {
MCQS_IDENTIFIER_BOOT_IMG = 0x1,
@@ -249,7 +248,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
+ if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2589e39eb9c7..d504c8cb8f96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,9 +62,7 @@
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ipsec_offload.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
@@ -1183,14 +1181,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
- mlx5_accel_ipsec_init(dev);
-
- err = mlx5_accel_tls_init(dev);
- if (err) {
- mlx5_core_err(dev, "TLS device start failed %d\n", err);
- goto err_tls_start;
- }
-
err = mlx5_init_fs(dev);
if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n");
@@ -1238,9 +1228,6 @@ err_vhca:
err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
- mlx5_accel_tls_cleanup(dev);
-err_tls_start:
- mlx5_accel_ipsec_cleanup(dev);
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
@@ -1266,8 +1253,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
- mlx5_accel_ipsec_cleanup(dev);
- mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
@@ -1947,7 +1932,6 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
- mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 196adeb33495..1a465fd5d8b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
- core_acl_flex_actions.o core_env.o
+ core_acl_flex_actions.o core_env.o \
+ core_linecards.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b13e0f8d232a..fc52832241b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -48,6 +48,7 @@ struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
u16 local_port;
+ struct mlxsw_linecard *linecard;
};
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
@@ -82,6 +83,7 @@ struct mlxsw_core {
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
struct mlxsw_thermal *thermal;
+ struct mlxsw_linecards *linecards;
struct mlxsw_core_port *ports;
unsigned int max_ports;
atomic_t active_ports_count;
@@ -94,6 +96,17 @@ struct mlxsw_core {
/* driver_priv has to be always the last item */
};
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->linecards;
+}
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ mlxsw_core->linecards = linecards;
+}
+
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
static u64 mlxsw_ports_occ_get(void *priv)
@@ -2145,6 +2158,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_fw_rev_validate;
+ err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_linecards_init;
+
err = mlxsw_core_health_init(mlxsw_core);
if (err)
goto err_health_init;
@@ -2158,7 +2175,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
+ err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
if (err)
goto err_env_init;
@@ -2183,6 +2200,8 @@ err_thermal_init:
err_hwmon_init:
mlxsw_core_health_fini(mlxsw_core);
err_health_init:
+ mlxsw_linecards_fini(mlxsw_core);
+err_linecards_init:
err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
@@ -2255,6 +2274,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
mlxsw_core_health_fini(mlxsw_core);
+ mlxsw_linecards_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
mlxsw_emad_fini(mlxsw_core);
@@ -2956,7 +2976,7 @@ EXPORT_SYMBOL(mlxsw_core_res_get);
static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -2979,6 +2999,15 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
attrs.switch_id.id_len = switch_id_len;
mlxsw_core_port->local_port = local_port;
devlink_port_attrs_set(devlink_port, &attrs);
+ if (slot_index) {
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(mlxsw_core->linecards,
+ slot_index);
+ mlxsw_core_port->linecard = linecard;
+ devlink_port_linecard_set(devlink_port,
+ linecard->devlink_linecard);
+ }
err = devl_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -2996,7 +3025,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
}
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -3005,7 +3034,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
int err;
err = __mlxsw_core_port_init(mlxsw_core, local_port,
- DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes,
switch_id, switch_id_len);
@@ -3036,7 +3065,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
DEVLINK_PORT_FLAVOUR_CPU,
- 0, false, 0, false, 0,
+ 0, 0, false, 0, false, 0,
switch_id, switch_id_len);
if (err)
return err;
@@ -3112,6 +3141,16 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+
+ return mlxsw_core_port->linecard;
+}
+
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
{
const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
@@ -3124,6 +3163,15 @@ bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
}
EXPORT_SYMBOL(mlxsw_core_port_is_xm);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
+ return;
+ mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
+}
+
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->env;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 16ee5e90973d..d008282d7f2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -35,6 +35,11 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core);
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecard);
+
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev);
@@ -231,7 +236,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split, u32 split_port_subnumber,
+ u8 slot_index, u32 port_number, bool split,
+ u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
unsigned char switch_id_len);
@@ -252,7 +258,14 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port);
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -326,6 +339,10 @@ struct mlxsw_driver {
unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack);
+ void (*ports_remove_selected)(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -543,4 +560,65 @@ static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
return (struct mlxsw_skb_cb *) skb->cb;
}
+struct mlxsw_linecards;
+
+enum mlxsw_linecard_status_event_type {
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
+};
+
+struct mlxsw_linecard {
+ u8 slot_index;
+ struct mlxsw_linecards *linecards;
+ struct devlink_linecard *devlink_linecard;
+ struct mutex lock; /* Locks accesses to the linecard structure */
+ char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN];
+ char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */
+ enum mlxsw_linecard_status_event_type status_event_type_to;
+ struct delayed_work status_event_to_dw;
+ u8 provisioned:1,
+ ready:1,
+ active:1;
+ u16 hw_revision;
+ u16 ini_version;
+ struct list_head device_list;
+};
+
+struct mlxsw_linecard_types_info;
+
+struct mlxsw_linecards {
+ struct mlxsw_core *mlxsw_core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 count;
+ struct mlxsw_linecard_types_info *types_info;
+ struct list_head event_ops_list;
+ struct mutex event_ops_list_lock; /* Locks accesses to event ops list */
+ struct mlxsw_linecard linecards[];
+};
+
+static inline struct mlxsw_linecard *
+mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
+{
+ return &linecards->linecards[slot_index - 1];
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info);
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
+
+typedef void mlxsw_linecards_event_op_t(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, void *priv);
+
+struct mlxsw_linecards_event_ops {
+ mlxsw_linecards_event_op_t *got_active;
+ mlxsw_linecards_event_op_t *got_inactive;
+};
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+
#endif
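
The additions to core.h define a small observer interface for line-card state: drivers register a mlxsw_linecards_event_ops (got_active/got_inactive callbacks plus a priv cookie), and the core walks the registered list under event_ops_list_lock when a line card changes state. A minimal userspace model of that register/notify shape, with invented names rather than the mlxsw API (build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event_ops {
	void (*got_active)(unsigned char slot, void *priv);
	void (*got_inactive)(unsigned char slot, void *priv);
};

struct listener {
	struct listener *next;
	const struct event_ops *ops;
	void *priv;
};

static struct listener *listeners;
static pthread_mutex_t listeners_lock = PTHREAD_MUTEX_INITIALIZER;

static int event_ops_register(const struct event_ops *ops, void *priv)
{
	struct listener *l = malloc(sizeof(*l));

	if (!l)
		return -1;
	l->ops = ops;
	l->priv = priv;
	pthread_mutex_lock(&listeners_lock);
	l->next = listeners;
	listeners = l;
	pthread_mutex_unlock(&listeners_lock);
	return 0;
}

/* Walk all registered listeners under the lock, as the core does
 * with event_ops_list_lock. */
static void notify_active(unsigned char slot)
{
	pthread_mutex_lock(&listeners_lock);
	for (struct listener *l = listeners; l; l = l->next)
		if (l->ops->got_active)
			l->ops->got_active(slot, l->priv);
	pthread_mutex_unlock(&listeners_lock);
}

static void on_active(unsigned char slot, void *priv)
{
	printf("%s: line card %u became active\n", (const char *)priv, slot);
}

int main(void)
{
	static const struct event_ops ops = { .got_active = on_active };

	event_ops_register(&ops, "demo-driver");
	notify_active(1);
	return 0;
}
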
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 29a74b8bd5b5..34bec9cd572c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -21,19 +21,60 @@ struct mlxsw_env_module_info {
enum mlxsw_reg_pmtm_module_type type;
};
-struct mlxsw_env {
- struct mlxsw_core *core;
+struct mlxsw_env_line_card {
u8 module_count;
- struct mutex module_info_lock; /* Protects 'module_info'. */
+ bool active;
struct mlxsw_env_module_info module_info[];
};
-static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+struct mlxsw_env {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 max_module_count; /* Maximum number of modules per-slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mutex line_cards_lock; /* Protects line cards. */
+ struct mlxsw_env_line_card *line_cards[];
+};
+
+static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ return mlxsw_env->line_cards[slot_index]->active;
+}
+
+static bool mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ bool active;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ active = __mlxsw_env_linecard_is_active(mlxsw_env, slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return active;
+}
+
+static struct
+mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ return &mlxsw_env->line_cards[slot_index]->module_info[module];
+}
+
+static int __mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ struct mlxsw_env_module_info *module_info;
int err;
- switch (mlxsw_env->module_info[module].type) {
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ module_info = mlxsw_env_module_info_get(core, slot_index, module);
+ switch (module_info->type) {
case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR:
err = -EINVAL;
break;
@@ -44,32 +85,34 @@ static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
return err;
}
-static int mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+static int mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
int err;
- mutex_lock(&mlxsw_env->module_info_lock);
- err = __mlxsw_env_validate_module_type(core, module);
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = __mlxsw_env_validate_module_type(core, slot_index, module);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
static int
-mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
- bool *cmis)
+mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id,
+ bool *qsfp, bool *cmis)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 ident;
int err;
- err = mlxsw_env_validate_module_type(core, id);
+ err = mlxsw_env_validate_module_type(core, slot_index, id);
if (err)
return err;
- mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0,
+ MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -99,8 +142,8 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
}
static int
-mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
- u16 offset, u16 size, void *data,
+mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, u16 offset, u16 size, void *data,
bool qsfp, unsigned int *p_read_size)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
@@ -145,7 +188,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
}
}
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size,
+ i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -162,8 +206,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
return 0;
}
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp)
+int
+mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index,
+ int module, int off, int *temp)
{
unsigned int module_temp, module_crit, module_emerg;
union {
@@ -177,8 +222,9 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int page;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
@@ -207,7 +253,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
*/
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(core, slot_index, module, &qsfp,
+ &cmis);
if (err)
return err;
@@ -219,12 +266,12 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
else
page = MLXSW_REG_MCIA_TH_PAGE_NUM;
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page,
MLXSW_REG_MCIA_TH_PAGE_OFF + off,
MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
} else {
- mlxsw_reg_mcia_pack(mcia_pl, module, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0,
MLXSW_REG_MCIA_PAGE0_LO,
off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_HIGH);
@@ -242,24 +289,31 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
}
int mlxsw_env_get_module_info(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
u8 module_rev_id, module_id, diag_mon;
unsigned int read_size;
int err;
- err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev,
"EEPROM is not equipped on port module type");
return err;
}
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
- module_info, false, &read_size);
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, module, 0,
+ offset, module_info, false,
+ &read_size);
if (err)
return err;
@@ -288,9 +342,10 @@ int mlxsw_env_get_module_info(struct net_device *netdev,
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
/* Verify if transceiver provides diagnostic monitoring page */
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
- SFP_DIAGMON, 1, &diag_mon,
- false, &read_size);
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, SFP_DIAGMON, 1,
+ &diag_mon, false,
+ &read_size);
if (err)
return err;
@@ -329,9 +384,11 @@ int mlxsw_env_get_module_info(struct net_device *netdev,
EXPORT_SYMBOL(mlxsw_env_get_module_info);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int offset = ee->offset;
unsigned int read_size;
bool qsfp, cmis;
@@ -341,14 +398,21 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
if (!ee->len)
return -EINVAL;
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
memset(data, 0, ee->len);
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module,
+ &qsfp, &cmis);
if (err)
return err;
while (i < ee->len) {
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, offset,
ee->len - i, data + i,
qsfp, &read_size);
if (err) {
@@ -394,15 +458,23 @@ static int mlxsw_env_mcia_status_process(const char *mcia_pl,
}
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u32 bytes_read = 0;
u16 device_addr;
int err;
- err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot read EEPROM of module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
return err;
@@ -419,7 +491,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
size = min_t(u8, page->length - bytes_read,
MLXSW_REG_MCIA_EEPROM_SIZE);
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page,
device_addr + bytes_read, size,
page->i2c_address);
mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
@@ -443,20 +515,23 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
-static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
}
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
u32 req = *flags;
int err;
@@ -464,28 +539,34 @@ int mlxsw_env_reset_module(struct net_device *netdev,
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
return 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot reset module on an inactive line card\n");
+ return -EIO;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev, "Reset module is not supported on port module type\n");
goto out;
}
- if (mlxsw_env->module_info[module].num_ports_up) {
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->num_ports_up) {
netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
err = -EINVAL;
goto out;
}
- if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+ if (module_info->num_ports_mapped > 1 &&
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
err = -EINVAL;
goto out;
}
- err = mlxsw_env_module_reset(mlxsw_core, module);
+ err = mlxsw_env_module_reset(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev, "Failed to reset module\n");
goto out;
@@ -494,32 +575,39 @@ int mlxsw_env_reset_module(struct net_device *netdev,
*flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_reset_module);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
char mcion_pl[MLXSW_REG_MCION_LEN];
u32 status_bits;
- int err;
+ int err = 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type");
goto out;
}
- params->policy = mlxsw_env->module_info[module].power_mode_policy;
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ params->policy = module_info->power_mode_policy;
+
+ /* Avoid accessing an inactive line card, as it will result in an error. */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out;
- mlxsw_reg_mcion_pack(mcion_pl, module);
+ mlxsw_reg_mcion_pack(mcion_pl, slot_index, module);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
@@ -536,18 +624,18 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool enable)
+ u8 slot_index, u8 module, bool enable)
{
enum mlxsw_reg_pmaos_admin_status admin_status;
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
@@ -557,12 +645,13 @@ static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
}
static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power)
+ u8 slot_index, u8 module,
+ bool low_power)
{
u16 eeprom_override_mask, eeprom_override;
char pmmp_pl[MLXSW_REG_PMMP_LEN];
- mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module);
mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
/* Mask all the bits except low power mode. */
eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
@@ -575,24 +664,34 @@ static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
}
static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power,
+ u8 slot_index, u8 module,
+ bool low_power,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int err;
- err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ /* Avoid accessing an inactive line card, as it will result in an error.
+ * Cached configuration will be applied by mlxsw_env_got_active() when
+	 * the line card becomes active.
+ */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
return err;
}
- err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ err = mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ low_power);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
goto err_module_low_power_set;
}
- err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
goto err_module_enable_set;
@@ -601,67 +700,84 @@ static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
return 0;
err_module_enable_set:
- mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+ mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ !low_power);
err_module_low_power_set:
- mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
return err;
}
-int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
- enum ethtool_module_power_mode_policy policy,
- struct netlink_ext_ack *extack)
+static int
+mlxsw_env_set_module_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
{
- struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
bool low_power;
int err = 0;
- if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
- policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&mlxsw_env->module_info_lock);
-
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Power mode set is not supported on port module type");
goto out;
}
- if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy == policy)
goto out;
/* If any ports are up, we are already in high power mode. */
- if (mlxsw_env->module_info[module].num_ports_up)
+ if (module_info->num_ports_up)
goto out_set_policy;
low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
- extack);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ low_power, extack);
if (err)
goto out;
out_set_policy:
- mlxsw_env->module_info[module].power_mode_policy = policy;
+ module_info->power_mode_policy = policy;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, slot_index,
+ module, policy, extack);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
return err;
}
EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
- u8 module,
+ u8 slot_index, u8 module,
bool *p_has_temp_sensor)
{
char mtbr_pl[MLXSW_REG_MTBR_LEN];
u16 temp;
int err;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ mlxsw_reg_mtbr_pack(mtbr_pl, slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
if (err)
return err;
@@ -681,13 +797,15 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
return 0;
}
-static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
- u16 sensor_index, bool enable)
+static int
+mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u16 sensor_index, bool enable)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
enum mlxsw_reg_mtmp_tee tee;
int err, threshold_hi;
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -695,6 +813,7 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
if (enable) {
err = mlxsw_env_module_temp_thresholds_get(mlxsw_core,
+ slot_index,
sensor_index -
MLXSW_REG_MTMP_MODULE_INDEX_MIN,
SFP_TEMP_HIGH_WARN,
@@ -721,14 +840,16 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
}
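MTMP addresses port modules through a dedicated sensor range: per the mapping comments in this file, module i sits at sensor_index i + MLXSW_REG_MTMP_MODULE_INDEX_MIN (64). A small sketch of that arithmetic, assuming only the 64-based offset:

/* MODULE_INDEX_MIN mirrors MLXSW_REG_MTMP_MODULE_INDEX_MIN (64). */
#define MODULE_INDEX_MIN 64

static inline int module_to_sensor(int module)
{
        return module + MODULE_INDEX_MIN;
}

static inline int sensor_to_module(int sensor_index)
{
        return sensor_index - MODULE_INDEX_MIN;
}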
-static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err, sensor_index;
bool has_temp_sensor;
- for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
- err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
- &has_temp_sensor);
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index,
+ i, &has_temp_sensor);
if (err)
return err;
@@ -736,7 +857,8 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
continue;
sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true);
+ err = mlxsw_env_temp_event_set(mlxsw_core, slot_index,
+ sensor_index, true);
if (err)
return err;
}
@@ -753,6 +875,7 @@ struct mlxsw_env_module_temp_warn_event {
static void mlxsw_env_mtwe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
int i, sensor_warning;
bool is_overheat;
@@ -761,7 +884,7 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- for (i = 0; i < mlxsw_env->module_count; i++) {
+ for (i = 0; i < mlxsw_env->max_module_count; i++) {
/* 64-127 of sensor_index are mapped to the port modules
* sequentially (module 0 is mapped to sensor_index 64,
* module 1 to sensor_index 65 and so on)
@@ -769,9 +892,10 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
sensor_warning =
mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
- mutex_lock(&mlxsw_env->module_info_lock);
- is_overheat =
- mlxsw_env->module_info[i].is_overheat;
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ /* MTWE only supports main board. */
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core, 0, i);
+ is_overheat = module_info->is_overheat;
if ((is_overheat && sensor_warning) ||
(!is_overheat && !sensor_warning)) {
@@ -779,21 +903,21 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
* warning OR current state in "no warning" and MTWE
* does not report warning.
*/
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
continue;
} else if (is_overheat && !sensor_warning) {
/* MTWE reports "no warning", turn is_overheat off.
*/
- mlxsw_env->module_info[i].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
} else {
/* Current state is "no warning" and MTWE reports
* "warning", increase the counter and turn is_overheat
* on.
*/
- mlxsw_env->module_info[i].is_overheat = true;
- mlxsw_env->module_info[i].module_overheat_counter++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = true;
+ module_info->module_overheat_counter++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
}
@@ -837,6 +961,7 @@ static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
struct mlxsw_env_module_plug_unplug_event {
struct mlxsw_env *mlxsw_env;
+ u8 slot_index;
u8 module;
struct work_struct work;
};
@@ -844,6 +969,7 @@ struct mlxsw_env_module_plug_unplug_event {
static void mlxsw_env_pmpe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_plug_unplug_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
bool has_temp_sensor;
u16 sensor_index;
@@ -853,11 +979,16 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[event->module].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core,
+ event->slot_index,
+ event->module);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
- err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core,
+ event->slot_index,
+ event->module,
&has_temp_sensor);
/* Do not disable events on modules without sensors or faulty sensors
* because FW returns errors.
@@ -869,7 +1000,8 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
goto out;
sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true);
+ mlxsw_env_temp_event_set(mlxsw_env->core, event->slot_index,
+ sensor_index, true);
out:
kfree(event);
@@ -879,12 +1011,14 @@ static void
mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
void *priv)
{
+ u8 slot_index = mlxsw_reg_pmpe_slot_index_get(pmpe_pl);
struct mlxsw_env_module_plug_unplug_event *event;
enum mlxsw_reg_pmpe_module_status module_status;
u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl);
struct mlxsw_env *mlxsw_env = priv;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ if (WARN_ON_ONCE(module >= mlxsw_env->max_module_count ||
+ slot_index >= mlxsw_env->num_of_slots))
return;
module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl);
@@ -896,6 +1030,7 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
return;
event->mlxsw_env = mlxsw_env;
+ event->slot_index = slot_index;
event->module = module;
INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
mlxsw_core_schedule_work(&event->work);
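The listener above follows the allocate-an-event-and-defer idiom: the trap handler copies the decoded payload into a heap-allocated event and schedules a work item that consumes and frees it. A generic sketch of the same shape, assuming the listener can run in atomic context (illustrative bar_* names, plain workqueue API instead of the mlxsw wrappers):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct bar_event {
        struct work_struct work;
        u8 slot_index;
        u8 module;
};

static void bar_event_work(struct work_struct *work)
{
        struct bar_event *event = container_of(work, struct bar_event, work);

        /* ... handle event->slot_index / event->module in process context ... */
        kfree(event);   /* The work item owns the event and frees it. */
}

static void bar_listener(u8 slot_index, u8 module)
{
        struct bar_event *event;

        /* Possibly atomic context: no sleeping allocation, defer the work. */
        event = kmalloc(sizeof(*event), GFP_ATOMIC);
        if (!event)
                return;
        event->slot_index = slot_index;
        event->module = module;
        INIT_WORK(&event->work, bar_event_work);
        schedule_work(&event->work);
}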
@@ -923,14 +1058,16 @@ mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
}
static int
-mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err;
- for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, i);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i);
mlxsw_reg_pmaos_e_set(pmaos_pl,
MLXSW_REG_PMAOS_E_GENERATE_EVENT);
mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
@@ -942,146 +1079,330 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
}
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter)
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- *p_counter = mlxsw_env->module_info[module].module_overheat_counter;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ *p_counter = module_info->module_overheat_counter;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return 0;
}
EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_map);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped--;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
int err = 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_inc;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_inc;
/* Transition to high power mode following first port using the module
* being put administratively up.
*/
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
- NULL);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ false, NULL);
if (err)
goto out_unlock;
out_inc:
- mlxsw_env->module_info[module].num_ports_up++;
+ module_info->num_ports_up++;
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_module_port_up);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- mlxsw_env->module_info[module].num_ports_up--;
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_up--;
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_unlock;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_unlock;
/* Transition to low power mode following last port using the module
* being put administratively down.
*/
- __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+ __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, true,
+ NULL);
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_down);
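The two functions above implement a refcount-driven power transition: under the AUTO policy, the first port brought up pushes the module to high power and the last port taken down returns it to low power. A condensed sketch of that bookkeeping, with illustrative types in place of the driver's module_info:

#include <linux/types.h>

enum power_policy { POWER_POLICY_HIGH, POWER_POLICY_AUTO };

struct module_state {
        enum power_policy policy;
        unsigned int num_ports_up;
};

static int set_low_power(struct module_state *m, bool low_power)
{
        return 0;       /* Stand-in for the firmware transition. */
}

static int module_port_up(struct module_state *m)
{
        int err = 0;

        if (m->policy == POWER_POLICY_AUTO && m->num_ports_up == 0)
                err = set_low_power(m, false);  /* First user: go high. */
        if (!err)
                m->num_ports_up++;
        return err;
}

static void module_port_down(struct module_state *m)
{
        if (--m->num_ports_up == 0 && m->policy == POWER_POLICY_AUTO)
                set_low_power(m, true);         /* Last user: go low. */
}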
+static int mlxsw_env_line_cards_alloc(struct mlxsw_env *env)
+{
+ struct mlxsw_env_module_info *module_info;
+ int i, j;
+
+ for (i = 0; i < env->num_of_slots; i++) {
+ env->line_cards[i] = kzalloc(struct_size(env->line_cards[i],
+ module_info,
+ env->max_module_count),
+ GFP_KERNEL);
+ if (!env->line_cards[i])
+ goto kzalloc_err;
+
+ /* Firmware defaults to high power mode policy where modules
+ * are transitioned to high power mode following plug-in.
+ */
+ for (j = 0; j < env->max_module_count; j++) {
+ module_info = &env->line_cards[i]->module_info[j];
+ module_info->power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+ }
+ }
+
+ return 0;
+
+kzalloc_err:
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+ return -ENOMEM;
+}
+
+static void mlxsw_env_line_cards_free(struct mlxsw_env *env)
+{
+ int i = env->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+}
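The allocator above sizes each per-slot object with struct_size() (flexible array member plus header, overflow-checked) and unwinds only the slots already allocated on failure. A self-contained sketch of the same idiom, with illustrative types:

#include <linux/overflow.h>
#include <linux/slab.h>

struct slot {
        u8 module_count;
        struct { u8 type; } module_info[];      /* Flexible array member. */
};

static int slots_alloc(struct slot *slots[], int num_slots, int max_modules)
{
        int i;

        for (i = 0; i < num_slots; i++) {
                slots[i] = kzalloc(struct_size(slots[i], module_info,
                                               max_modules), GFP_KERNEL);
                if (!slots[i])
                        goto err_alloc;
        }
        return 0;

err_alloc:
        for (i--; i >= 0; i--)  /* Free only what was already allocated. */
                kfree(slots[i]);
        return -ENOMEM;
}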
+
+static int
+mlxsw_env_module_event_enable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+ int err;
+
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_env->core,
+ slot_index);
+ if (err)
+ return err;
+
+ err = mlxsw_env_module_temp_event_enable(mlxsw_env->core, slot_index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+mlxsw_env_module_event_disable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+}
+
static int
-mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core)
+mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i;
- for (i = 0; i < mlxsw_env->module_count; i++) {
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ struct mlxsw_env_module_info *module_info;
char pmtm_pl[MLXSW_REG_PMTM_LEN];
int err;
- mlxsw_reg_pmtm_pack(pmtm_pl, 0, i);
+ mlxsw_reg_pmtm_pack(pmtm_pl, slot_index, i);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
if (err)
return err;
- mlxsw_env->module_info[i].type =
- mlxsw_reg_pmtm_module_type_get(pmtm_pl);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index,
+ i);
+ module_info->type = mlxsw_reg_pmtm_module_type_get(pmtm_pl);
}
return 0;
}
-int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
+static void
+mlxsw_env_linecard_modules_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_env *env,
+ u8 slot_index)
{
+ int i;
+
+ for (i = 0; i < env->line_cards[slot_index]->module_count; i++) {
+ enum ethtool_module_power_mode_policy policy;
+ struct mlxsw_env_module_info *module_info;
+ struct netlink_ext_ack extack;
+ int err;
+
+ module_info = &env->line_cards[slot_index]->module_info[i];
+ policy = module_info->power_mode_policy;
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core,
+ slot_index, i,
+ policy, &extack);
+ if (err)
+ dev_err(env->bus_info->dev, "%s\n", extack._msg);
+ }
+}
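A compact sketch of the extack round trip used above: the callee records a human-readable message through the extack and the caller logs it via dev_err() on error. The sketch zero-initializes its on-stack extack so that _msg is always defined; that initialization is an assumption of the sketch, not something taken from the patch:

#include <linux/netlink.h>
#include <linux/device.h>

static int do_apply(struct netlink_ext_ack *extack)
{
        NL_SET_ERR_MSG(extack, "Unsupported power mode policy");
        return -EOPNOTSUPP;
}

static void apply_and_log(struct device *dev)
{
        struct netlink_ext_ack extack = {};
        int err;

        err = do_apply(&extack);
        if (err)
                dev_err(dev, "%s\n", extack._msg);
}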
+
+static void
+mlxsw_env_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int err;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, slot_index);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &mlxsw_env->line_cards[slot_index]->module_count,
+ NULL);
+
+ err = mlxsw_env_module_event_enable(mlxsw_env, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to enable port module events for line card in slot %d\n",
+ slot_index);
+ goto err_mlxsw_env_module_event_enable;
+ }
+ err = mlxsw_env_module_type_set(mlxsw_env->core, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to set modules' type for line card in slot %d\n",
+ slot_index);
+ goto err_type_set;
+ }
+
+ mlxsw_env->line_cards[slot_index]->active = true;
+ /* Apply power mode policy. */
+ mlxsw_env_linecard_modules_power_mode_apply(mlxsw_core, mlxsw_env,
+ slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return;
+
+err_type_set:
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+err_mlxsw_env_module_event_enable:
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static void
+mlxsw_env_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+ mlxsw_env->line_cards[slot_index]->active = false;
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+ mlxsw_env->line_cards[slot_index]->module_count = 0;
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
+ .got_active = mlxsw_env_got_active,
+ .got_inactive = mlxsw_env_got_inactive,
+};
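This ops struct plugs into the line-card notification machinery that the patch adds in core_linecards.c further below. A sketch of how a consumer wires it up, using the register/unregister API introduced by this patch (my_* names are illustrative):

static void my_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
                          void *priv)
{
        /* Bring up per-slot state for slot_index. */
}

static void my_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
                            void *priv)
{
        /* Tear down per-slot state for slot_index. */
}

static struct mlxsw_linecards_event_ops my_event_ops = {
        .got_active = my_got_active,
        .got_inactive = my_got_inactive,
};

/* During init:
 *      mlxsw_linecards_event_ops_register(core, &my_event_ops, priv);
 * During fini:
 *      mlxsw_linecards_event_ops_unregister(core, &my_event_ops, priv);
 */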
+
+int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env)
+{
+ u8 module_count, num_of_slots, max_module_count;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
- u8 module_count;
- int i, err;
+ int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count,
+ &num_of_slots);
+ /* If the system is modular, get the maximum number of modules per-slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ max_module_count = num_of_slots ?
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) :
+ module_count;
- env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL);
+ env = kzalloc(struct_size(env, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!env)
return -ENOMEM;
- /* Firmware defaults to high power mode policy where modules are
- * transitioned to high power mode following plug-in.
- */
- for (i = 0; i < module_count; i++)
- env->module_info[i].power_mode_policy =
- ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
-
- mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
- env->module_count = module_count;
+ env->bus_info = bus_info;
+ env->num_of_slots = num_of_slots + 1;
+ env->max_module_count = max_module_count;
+ err = mlxsw_env_line_cards_alloc(env);
+ if (err)
+ goto err_mlxsw_env_line_cards_alloc;
+
+ mutex_init(&env->line_cards_lock);
*p_env = env;
+ err = mlxsw_linecards_event_ops_register(env->core,
+ &mlxsw_env_event_ops, env);
+ if (err)
+ goto err_linecards_event_ops_register;
+
err = mlxsw_env_temp_warn_event_register(mlxsw_core);
if (err)
goto err_temp_warn_event_register;
@@ -1090,38 +1411,54 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (err)
goto err_module_plug_event_register;
- err = mlxsw_env_module_oper_state_event_enable(mlxsw_core);
- if (err)
- goto err_oper_state_event_enable;
-
- err = mlxsw_env_module_temp_event_enable(mlxsw_core);
+ /* Set 'module_count' only for main board. Actual count for line card
+ * is to be set after line card is activated.
+ */
+ env->line_cards[0]->module_count = num_of_slots ? 0 : module_count;
+ /* Enable events only for main board. Line card events are to be
+ * configured only after line card is activated. Before that, access to
+ * modules on line cards is not allowed.
+ */
+ err = mlxsw_env_module_event_enable(env, 0);
if (err)
- goto err_temp_event_enable;
+ goto err_mlxsw_env_module_event_enable;
- err = mlxsw_env_module_type_set(mlxsw_core);
+ err = mlxsw_env_module_type_set(mlxsw_core, 0);
if (err)
goto err_type_set;
+ env->line_cards[0]->active = true;
+
return 0;
err_type_set:
-err_temp_event_enable:
-err_oper_state_event_enable:
+ mlxsw_env_module_event_disable(env, 0);
+err_mlxsw_env_module_event_enable:
mlxsw_env_module_plug_event_unregister(env);
err_module_plug_event_register:
mlxsw_env_temp_warn_event_unregister(env);
err_temp_warn_event_register:
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+err_linecards_event_ops_register:
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
+err_mlxsw_env_line_cards_alloc:
kfree(env);
return err;
}
void mlxsw_env_fini(struct mlxsw_env *env)
{
+ env->line_cards[0]->active = false;
+ mlxsw_env_module_event_disable(env, 0);
mlxsw_env_module_plug_event_unregister(env);
/* Make sure there is no more event work scheduled. */
mlxsw_core_flush_owq();
mlxsw_env_temp_warn_event_unregister(env);
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
kfree(env);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index ec6564e5d2ee..a197e3ae069c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -9,49 +9,60 @@
struct ethtool_modinfo;
struct ethtool_eeprom;
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp);
+int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core,
+ u8 slot_index, int module, int off,
+ int *temp);
int mlxsw_env_get_module_info(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data);
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module,
- u32 *flags);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack);
int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
enum ethtool_module_power_mode_policy policy,
struct netlink_ext_ack *extack);
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter);
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
+int mlxsw_env_init(struct mlxsw_core *core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env);
void mlxsw_env_fini(struct mlxsw_env *env);
#endif
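Every prototype above gains a slot_index parameter, so callers must now say which line card (or the main board, slot 0) a module belongs to. A sketch of a caller adapting to the widened signatures; struct my_port and its fields are hypothetical stand-ins for a port driver's state:

struct my_port {
        struct mlxsw_core *core;
        u8 slot_index;  /* 0 for the main board. */
        u8 module;
};

static int
my_get_module_power_mode(struct my_port *port,
                         struct ethtool_module_power_mode_params *params,
                         struct netlink_ext_ack *extack)
{
        return mlxsw_env_get_module_power_mode(port->core, port->slot_index,
                                               port->module, params, extack);
}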
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 8b170ad92302..70735068cf29 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -19,6 +19,7 @@
#define MLXSW_HWMON_ATTR_PER_SENSOR 3
#define MLXSW_HWMON_ATTR_PER_MODULE 7
#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+#define MLXSW_HWMON_DEV_NAME_LEN_MAX 16
#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
@@ -27,7 +28,7 @@
struct mlxsw_hwmon_attr {
struct device_attribute dev_attr;
- struct mlxsw_hwmon *hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev;
unsigned int type_index;
char name[32];
};
@@ -40,9 +41,9 @@ static int mlxsw_hwmon_get_attr_index(int index, int count)
return index;
}
-struct mlxsw_hwmon {
- struct mlxsw_core *core;
- const struct mlxsw_bus_info *bus_info;
+struct mlxsw_hwmon_dev {
+ char name[MLXSW_HWMON_DEV_NAME_LEN_MAX];
+ struct mlxsw_hwmon *hwmon;
struct device *hwmon_dev;
struct attribute_group group;
const struct attribute_group *groups[2];
@@ -51,6 +52,14 @@ struct mlxsw_hwmon {
unsigned int attrs_count;
u8 sensor_count;
u8 module_sensor_max;
+ u8 slot_index;
+ bool active;
+};
+
+struct mlxsw_hwmon {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ struct mlxsw_hwmon_dev line_cards[];
};
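With the split into a shared mlxsw_hwmon core and per-slot mlxsw_hwmon_dev objects, every sysfs callback below walks a two-step chain: container_of() from the device_attribute to the wrapper attribute, then backpointers to the per-slot device and the core. A sketch of that chain with illustrative types:

#include <linux/device.h>
#include <linux/sysfs.h>

struct hw;

struct hw_dev {
        struct hw *hwmon;       /* Backpointer to the shared core. */
        u8 slot_index;
};

struct hw_attr {
        struct device_attribute dev_attr;
        struct hw_dev *hw_dev;  /* Per-slot backpointer. */
};

static ssize_t hw_temp_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct hw_attr *hw_attr = container_of(attr, struct hw_attr, dev_attr);
        struct hw_dev *hw_dev = hw_attr->hw_dev;

        /* hw_dev->slot_index selects the slot, hw_dev->hwmon the core. */
        return sysfs_emit(buf, "%u\n", hw_dev->slot_index);
}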
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -59,14 +68,16 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp, index;
int err;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -82,14 +93,16 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp_max, index;
int err;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -105,8 +118,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
- char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned long val;
int index;
int err;
@@ -118,8 +132,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
return -EINVAL;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -140,7 +155,8 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsm_pl[MLXSW_REG_MFSM_LEN];
int err;
@@ -159,7 +175,8 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char fore_pl[MLXSW_REG_FORE_LEN];
bool fault;
int err;
@@ -180,7 +197,8 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
int err;
@@ -200,7 +218,8 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
unsigned long val;
int err;
@@ -226,14 +245,16 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(dev, "Failed to query module temperature\n");
@@ -263,15 +284,16 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
u8 module, fault;
u16 temp;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
if (err) {
dev_err(dev, "Failed to query module temperature sensor\n");
@@ -305,13 +327,16 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_WARN, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_WARN,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -339,13 +364,16 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_ALARM, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_ALARM,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -387,9 +415,9 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
int index = mlxsw_hwmon_attr->type_index -
- mlxsw_hwmon->module_sensor_max + 1;
+ mlxsw_hwmon_dev->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
}
@@ -458,14 +486,15 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
};
-static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
+static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev,
enum mlxsw_hwmon_attr_type attr_type,
- unsigned int type_index, unsigned int num) {
+ unsigned int type_index, unsigned int num)
+{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr;
unsigned int attr_index;
- attr_index = mlxsw_hwmon->attrs_count;
- mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index];
+ attr_index = mlxsw_hwmon_dev->attrs_count;
+ mlxsw_hwmon_attr = &mlxsw_hwmon_dev->hwmon_attrs[attr_index];
switch (attr_type) {
case MLXSW_HWMON_ATTR_TYPE_TEMP:
@@ -565,16 +594,17 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
}
mlxsw_hwmon_attr->type_index = type_index;
- mlxsw_hwmon_attr->hwmon = mlxsw_hwmon;
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev = mlxsw_hwmon_dev;
mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name;
sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr);
- mlxsw_hwmon->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
- mlxsw_hwmon->attrs_count++;
+ mlxsw_hwmon_dev->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
+ mlxsw_hwmon_dev->attrs_count++;
}
-static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
int i;
int err;
@@ -584,10 +614,12 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n");
return err;
}
- mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
- for (i = 0; i < mlxsw_hwmon->sensor_count; i++) {
+ mlxsw_hwmon_dev->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
+ for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) {
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl,
+ mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp),
mtmp_pl);
@@ -602,18 +634,19 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
i);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i);
}
return 0;
}
-static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0};
enum mlxsw_reg_mfcr_pwm_frequency freq;
unsigned int type_index;
@@ -631,10 +664,10 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) {
if (tacho_active & BIT(type_index)) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
type_index, num);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
type_index, num++);
}
@@ -642,54 +675,55 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) {
if (pwm_active & BIT(type_index))
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_PWM,
type_index, num++);
}
return 0;
}
-static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 module_sensor_max;
int i, err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &module_sensor_max);
+ &module_sensor_max, NULL);
/* Add extra attributes for module temperature. Sensor index is
	 * assigned to sensor_count value, while all indexes before
* sensor_count are already utilized by the sensors connected through
* mtmp register by mlxsw_hwmon_temp_init().
*/
- mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count +
- module_sensor_max;
- for (i = mlxsw_hwmon->sensor_count;
- i < mlxsw_hwmon->module_sensor_max; i++) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_dev->module_sensor_max = mlxsw_hwmon_dev->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon_dev->sensor_count;
+ i < mlxsw_hwmon_dev->module_sensor_max; i++) {
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
i, i);
}
@@ -697,8 +731,9 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
-static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
enum mlxsw_reg_mgpir_device_type device_type;
int index, max_index, sensor_index;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
@@ -706,22 +741,24 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
u8 gbox_num;
int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL,
+ NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- index = mlxsw_hwmon->module_sensor_max;
- max_index = mlxsw_hwmon->module_sensor_max + gbox_num;
+ index = mlxsw_hwmon_dev->module_sensor_max;
+ max_index = mlxsw_hwmon_dev->module_sensor_max + gbox_num;
while (index < max_index) {
- sensor_index = index % mlxsw_hwmon->module_sensor_max +
+ sensor_index = index % mlxsw_hwmon_dev->module_sensor_max +
MLXSW_REG_MTMP_GBOX_INDEX_MIN;
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ sensor_index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -729,15 +766,15 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
sensor_index);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP,
- index, index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP, index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
index, index);
index++;
@@ -746,51 +783,144 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
+static void
+mlxsw_hwmon_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+ struct device *dev;
+ int err;
+
+ dev = hwmon->bus_info->dev;
+ linecard = &hwmon->line_cards[slot_index];
+ if (linecard->active)
+ return;
+ /* For the main board, module sensor indexes start from 1, sensor index
+ * 0 is used for the ASIC. Use the same numbering for line cards.
+ */
+ linecard->sensor_count = 1;
+ linecard->slot_index = slot_index;
+ linecard->hwmon = hwmon;
+ err = mlxsw_hwmon_module_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_hwmon_gearbox_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card gearboxes in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->groups[0] = &linecard->group;
+ linecard->group.attrs = linecard->attrs;
+ sprintf(linecard->name, "%s#%02u", "linecard", slot_index);
+ linecard->hwmon_dev =
+ hwmon_device_register_with_groups(dev, linecard->name,
+ linecard, linecard->groups);
+ if (IS_ERR(linecard->hwmon_dev)) {
+ dev_err(dev, "Failed to register hwmon objects for line card in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->active = true;
+}
+
+static void
+mlxsw_hwmon_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+
+ linecard = &hwmon->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ hwmon_device_unregister(linecard->hwmon_dev);
+ /* Reset attributes counter */
+ linecard->attrs_count = 0;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_hwmon_event_ops = {
+ .got_active = mlxsw_hwmon_got_active,
+ .got_inactive = mlxsw_hwmon_got_inactive,
+};
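The got_active handler above registers a dedicated hwmon device per activated line card. A reduced sketch of that registration step, assuming a minimal container in place of struct mlxsw_hwmon_dev:

#include <linux/hwmon.h>
#include <linux/err.h>

struct my_linecard {
        char name[16];
        struct device *hwmon_dev;
        struct attribute_group group;
        const struct attribute_group *groups[2];
        struct attribute *attrs[8];     /* NULL-terminated by kzalloc(). */
};

static int my_linecard_hwmon_register(struct device *dev,
                                      struct my_linecard *lc, u8 slot_index)
{
        lc->groups[0] = &lc->group;
        lc->group.attrs = lc->attrs;
        snprintf(lc->name, sizeof(lc->name), "linecard#%02u", slot_index);
        lc->hwmon_dev = hwmon_device_register_with_groups(dev, lc->name, lc,
                                                          lc->groups);
        return PTR_ERR_OR_ZERO(lc->hwmon_dev);
}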
+
int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct mlxsw_hwmon **p_hwmon)
{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_hwmon *mlxsw_hwmon;
struct device *hwmon_dev;
+ u8 num_of_slots;
int err;
- mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
+ mlxsw_hwmon = kzalloc(struct_size(mlxsw_hwmon, line_cards,
+ num_of_slots + 1), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
+
mlxsw_hwmon->core = mlxsw_core;
mlxsw_hwmon->bus_info = mlxsw_bus_info;
+ mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon;
+ mlxsw_hwmon->line_cards[0].slot_index = 0;
- err = mlxsw_hwmon_temp_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_init;
- err = mlxsw_hwmon_fans_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_fans_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_fans_init;
- err = mlxsw_hwmon_module_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_module_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_module_init;
- err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_gearbox_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_gearbox_init;
- mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
- mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
+ mlxsw_hwmon->line_cards[0].groups[0] = &mlxsw_hwmon->line_cards[0].group;
+ mlxsw_hwmon->line_cards[0].group.attrs = mlxsw_hwmon->line_cards[0].attrs;
hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw", mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ "mlxsw",
+ &mlxsw_hwmon->line_cards[0],
+ mlxsw_hwmon->line_cards[0].groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
}
- mlxsw_hwmon->hwmon_dev = hwmon_dev;
+ err = mlxsw_linecards_event_ops_register(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops,
+ mlxsw_hwmon);
+ if (err)
+ goto err_linecards_event_ops_register;
+
+ mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev;
+ mlxsw_hwmon->line_cards[0].active = true;
*p_hwmon = mlxsw_hwmon;
return 0;
+err_linecards_event_ops_register:
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
err_hwmon_register:
err_temp_gearbox_init:
err_temp_module_init:
@@ -802,6 +932,9 @@ err_temp_init:
void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
{
- hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ mlxsw_hwmon->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops, mlxsw_hwmon);
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
kfree(mlxsw_hwmon);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
new file mode 100644
index 000000000000..2abd31a62776
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -0,0 +1,1373 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+
+struct mlxsw_linecard_ini_file {
+ __le16 size;
+ union {
+ u8 data[0];
+ struct {
+ __be16 hw_revision;
+ __be16 ini_version;
+ u8 __dontcare[3];
+ u8 type;
+ u8 name[20];
+ } format;
+ };
+};
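Note the mixed endianness in the layout above: size is little endian while the format fields are big endian, so each must be converted separately. A sketch of reading the header on the host side:

#include <asm/byteorder.h>
#include <linux/types.h>

static void
ini_header_read(const struct mlxsw_linecard_ini_file *ini_file, u16 *size,
                u16 *hw_revision, u16 *ini_version)
{
        *size = le16_to_cpu(ini_file->size);
        *hw_revision = be16_to_cpu(ini_file->format.hw_revision);
        *ini_version = be16_to_cpu(ini_file->format.ini_version);
}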
+
+struct mlxsw_linecard_types_info {
+ struct mlxsw_linecard_ini_file **ini_files;
+ unsigned int count;
+ size_t data_size;
+ char *data;
+};
+
+#define MLXSW_LINECARD_STATUS_EVENT_TO (10 * MSEC_PER_SEC)
+
+static void
+mlxsw_linecard_status_event_to_schedule(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ linecard->status_event_type_to = status_event_type;
+ mlxsw_core_schedule_dw(&linecard->status_event_to_dw,
+ msecs_to_jiffies(MLXSW_LINECARD_STATUS_EVENT_TO));
+}
+
+static void
+mlxsw_linecard_status_event_done(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ if (linecard->status_event_type_to == status_event_type)
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+}
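The two helpers above implement an arm-timeout / cancel-on-completion pair: arming cancels any pending timer, records which event is awaited, and schedules delayed work; completion cancels only if the event matches. A sketch of the same shape using the plain workqueue API (the driver routes the scheduling through mlxsw_core_schedule_dw()):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct event_waiter {
        struct delayed_work timeout_dw;
        int expected_event;
};

static void event_waiter_arm(struct event_waiter *w, int event,
                             unsigned long timeout_ms)
{
        cancel_delayed_work_sync(&w->timeout_dw);  /* One timeout at a time. */
        w->expected_event = event;
        schedule_delayed_work(&w->timeout_dw, msecs_to_jiffies(timeout_ms));
}

static void event_waiter_done(struct event_waiter *w, int event)
{
        if (w->expected_event == event)   /* Cancel only a matching arm. */
                cancel_delayed_work_sync(&w->timeout_dw);
}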
+
+static const char *
+mlxsw_linecard_types_lookup(struct mlxsw_linecards *linecards, u8 card_type)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ int i;
+
+ types_info = linecards->types_info;
+ if (!types_info)
+ return NULL;
+ for (i = 0; i < types_info->count; i++) {
+ ini_file = linecards->types_info->ini_files[i];
+ if (ini_file->format.type == card_type)
+ return ini_file->format.name;
+ }
+ return NULL;
+}
+
+static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_name_pack(mddq_pl, linecard->slot_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return ERR_PTR(err);
+ mlxsw_reg_mddq_slot_name_unpack(mddq_pl, linecard->name);
+ return linecard->name;
+}
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+};
+
+struct mlxsw_linecard_device {
+ struct list_head list;
+ u8 index;
+ struct mlxsw_linecard *linecard;
+ struct devlink_linecard_device *devlink_device;
+ struct mlxsw_linecard_device_info info;
+};
+
+static struct mlxsw_linecard_device *
+mlxsw_linecard_device_lookup(struct mlxsw_linecard *linecard, u8 index)
+{
+ struct mlxsw_linecard_device *device;
+
+ list_for_each_entry(device, &linecard->device_list, list)
+ if (device->index == index)
+ return device;
+ return NULL;
+}
+
+static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ u8 device_index, bool flash_owner)
+{
+ struct mlxsw_linecard_device *device;
+ int err;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+ device->index = device_index;
+ device->linecard = linecard;
+
+ device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard,
+ device_index, device);
+ if (IS_ERR(device->devlink_device)) {
+ err = PTR_ERR(device->devlink_device);
+ goto err_devlink_linecard_device_attach;
+ }
+
+ list_add_tail(&device->list, &linecard->device_list);
+ return 0;
+
+err_devlink_linecard_device_attach:
+ kfree(device);
+ return err;
+}
+
+static void mlxsw_linecard_device_detach(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct mlxsw_linecard_device *device)
+{
+ list_del(&device->list);
+ devlink_linecard_device_destroy(linecard->devlink_linecard,
+ device->devlink_device);
+ kfree(device);
+}
+
+static void mlxsw_linecard_devices_detach(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &linecard->device_list, list)
+ mlxsw_linecard_device_detach(mlxsw_core, linecard, device);
+}
+
+static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ u8 msg_seq = 0;
+ int err;
+
+ do {
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index, NULL,
+ NULL, NULL);
+ if (!data_valid)
+ break;
+ err = mlxsw_linecard_device_attach(mlxsw_core, linecard,
+ device_index, flash_owner);
+ if (err)
+ goto rollback;
+ } while (msg_seq);
+
+ return 0;
+
+rollback:
+ mlxsw_linecard_devices_detach(linecard);
+ return err;
+}
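Both this walk and mlxsw_linecard_devices_update() below use the same msg_seq pagination protocol: the device returns the next sequence token with each reply, a zero token after the final record ends the loop, and an empty reply (data_valid clear) means there was nothing more to read. A sketch of the loop skeleton, with an illustrative context and record type standing in for the MDDQ query:

#include <linux/types.h>

struct walk_ctx;

struct record {
        u8 device_index;
};

static int
walk_records(struct walk_ctx *ctx,
             int (*query)(struct walk_ctx *ctx, u8 *msg_seq,
                          bool *data_valid, struct record *rec))
{
        u8 msg_seq = 0;

        do {
                struct record rec;
                bool data_valid;
                int err;

                err = query(ctx, &msg_seq, &data_valid, &rec);
                if (err)
                        return err;
                if (!data_valid)        /* Empty reply: nothing more. */
                        break;
                /* ... consume rec ... */
        } while (msg_seq);      /* Non-zero token: more records pending. */

        return 0;
}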
+
+static void mlxsw_linecard_device_update(struct mlxsw_linecard *linecard,
+ u8 device_index,
+ struct mlxsw_linecard_device_info *info)
+{
+ struct mlxsw_linecard_device *device;
+
+ device = mlxsw_linecard_device_lookup(linecard, device_index);
+ if (!device)
+ return;
+ device->info = *info;
+}
+
+static int mlxsw_linecard_devices_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, NULL,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ mlxsw_linecard_device_update(linecard, device_index, &info);
+ } while (msg_seq);
+
+ return 0;
+}
+
+static int
+mlxsw_linecard_device_info_get(struct devlink_linecard_device *devlink_linecard_device,
+ void *priv, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_device *device = priv;
+ struct mlxsw_linecard_device_info *info;
+ struct mlxsw_linecard *linecard;
+ char buf[32];
+
+ linecard = device->linecard;
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ mutex_unlock(&linecard->lock);
+ return 0;
+ }
+
+ info = &device->info;
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ mutex_unlock(&linecard->lock);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
+{
+ linecard->provisioned = false;
+ linecard->ready = false;
+ linecard->active = false;
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_provision_fail(linecard->devlink_linecard);
+}
+
+struct mlxsw_linecards_event_ops_item {
+ struct list_head list;
+ const struct mlxsw_linecards_event_ops *event_ops;
+ void *priv;
+};
+
+static void
+mlxsw_linecard_event_op_call(struct mlxsw_linecard *linecard,
+ mlxsw_linecards_event_op_t *op, void *priv)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+
+ if (!op)
+ return;
+ op(mlxsw_core, linecard->slot_index, priv);
+}
+
+static void
+mlxsw_linecard_active_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecard_inactive_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecards_event_ops_register_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+static void
+mlxsw_linecards_event_ops_unregister_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item;
+
+ if (!linecards)
+ return 0;
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->event_ops = ops;
+ item->priv = priv;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_add_tail(&item->list, &linecards->event_ops_list);
+ mutex_unlock(&linecards->event_ops_list_lock);
+ mlxsw_linecards_event_ops_register_call(linecards, item);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_register);
+
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item, *tmp;
+ bool found = false;
+
+ if (!linecards)
+ return;
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry_safe(item, tmp, &linecards->event_ops_list, list) {
+ if (item->event_ops == ops && item->priv == priv) {
+ list_del(&item->list);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&linecards->event_ops_list_lock);
+
+ if (!found)
+ return;
+ mlxsw_linecards_event_ops_unregister_call(linecards, item);
+ kfree(item);
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
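
The exported pair above lets other mlxsw subsystems subscribe to line card
lifecycle notifications. A minimal hypothetical consumer could look like the
sketch below (illustrative names only; core_thermal.c further down in this
patch is the real in-tree user):

	/* Hypothetical consumer, not part of the patch. */
	static void example_got_active(struct mlxsw_core *mlxsw_core,
				       u8 slot_index, void *priv)
	{
		/* Set up per-line-card objects for this slot. */
	}

	static struct mlxsw_linecards_event_ops example_event_ops = {
		.got_active = example_got_active,
		/* .got_inactive may stay NULL; NULL ops are skipped. */
	};

	static int example_init(struct mlxsw_core *mlxsw_core, void *priv)
	{
		/* Also replays .got_active for already-active cards. */
		return mlxsw_linecards_event_ops_register(mlxsw_core,
							  &example_event_ops,
							  priv);
	}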
+
+static int
+mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
+ u16 hw_revision, u16 ini_version)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ const char *type;
+ int err;
+
+ type = mlxsw_linecard_types_lookup(linecards, card_type);
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ if (!type) {
+ /* It is possible for a line card to be provisioned before
+ * driver initialization. Due to a missing INI bundle file
+ * or an outdated one, the queried card's type might not
+ * be recognized by the driver. In this case, try to query
+ * the card's name from the device.
+ */
+ type = mlxsw_linecard_type_name(linecard);
+ if (IS_ERR(type)) {
+ mlxsw_linecard_provision_fail(linecard);
+ return PTR_ERR(type);
+ }
+ }
+ err = mlxsw_linecard_devices_attach(linecard);
+ if (err) {
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+ linecard->provisioned = true;
+ linecard->hw_revision = hw_revision;
+ linecard->ini_version = ini_version;
+ devlink_linecard_provision_set(linecard->devlink_linecard, type);
+ return 0;
+}
+
+static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ linecard->provisioned = false;
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_provision_clear(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = true;
+ return 0;
+}
+
+static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = false;
+ return 0;
+}
+
+static int mlxsw_linecard_active_set(struct mlxsw_linecard *linecard)
+{
+ int err;
+
+ err = mlxsw_linecard_devices_update(linecard);
+ if (err)
+ return err;
+
+ mlxsw_linecard_active_ops_call(linecard);
+ linecard->active = true;
+ devlink_linecard_activate(linecard->devlink_linecard);
+ return 0;
+}
+
+static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_inactive_ops_call(linecard);
+ linecard->active = false;
+ devlink_linecard_deactivate(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard,
+ const char *mddq_pl)
+{
+ enum mlxsw_reg_mddq_slot_info_ready ready;
+ bool provisioned, sr_valid, active;
+ u16 ini_version, hw_revision;
+ u8 slot_index, card_type;
+ int err = 0;
+
+ mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot_index, &provisioned,
+ &sr_valid, &ready, &active,
+ &hw_revision, &ini_version,
+ &card_type);
+
+ if (linecard) {
+ if (WARN_ON(slot_index != linecard->slot_index))
+ return -EINVAL;
+ } else {
+ if (WARN_ON(slot_index > linecards->count))
+ return -EINVAL;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ }
+
+ mutex_lock(&linecard->lock);
+
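+	/* Transitions are applied in order on the way up (provisioned ->
+	 * ready -> active) and unwound in reverse on the way down, so a
+	 * single MDDQ event may advance the state machine by several
+	 * steps at once.
+	 */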
+ if (provisioned && linecard->provisioned != provisioned) {
+ err = mlxsw_linecard_provision_set(linecard, card_type,
+ hw_revision, ini_version);
+ if (err)
+ goto out;
+ }
+
+ if (ready == MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && !linecard->ready) {
+ err = mlxsw_linecard_ready_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (active && linecard->active != active) {
+ err = mlxsw_linecard_active_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!active && linecard->active != active)
+ mlxsw_linecard_active_clear(linecard);
+
+ if (ready != MLXSW_REG_MDDQ_SLOT_INFO_READY_READY &&
+ linecard->ready) {
+ err = mlxsw_linecard_ready_clear(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!provisioned && linecard->provisioned != provisioned)
+ mlxsw_linecard_provision_clear(linecard);
+
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, false);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+
+ return mlxsw_linecard_status_process(linecards, linecard, mddq_pl);
+}
+
+static const char * const mlxsw_linecard_status_event_type_name[] = {
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision",
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision",
+};
+
+static void mlxsw_linecard_status_event_to_work(struct work_struct *work)
+{
+ struct mlxsw_linecard *linecard =
+ container_of(work, struct mlxsw_linecard,
+ status_event_to_dw.work);
+
+ mutex_lock(&linecard->lock);
+	dev_err(linecard->linecards->bus_info->dev, "linecard %u: Timeout reached waiting on %s status event\n",
+ linecard->slot_index,
+ mlxsw_linecard_status_event_type_name[linecard->status_event_type_to]);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int __mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard)
+{
+	dev_info(linecard->linecards->bus_info->dev, "linecard %u: Clearing FSM state error\n",
+ linecard->slot_index);
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS, false);
+ return mlxsw_reg_write(linecard->linecards->mlxsw_core,
+ MLXSW_REG(mbct), linecard->mbct_pl);
+}
+
+static int mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_fsm_state fsm_state)
+{
+ if (fsm_state != MLXSW_REG_MBCT_FSM_STATE_ERROR)
+ return 0;
+ return __mlxsw_linecard_fix_fsm_state(linecard);
+}
+
+static int
+mlxsw_linecard_query_ini_status(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_status *status,
+ enum mlxsw_reg_mbct_fsm_state *fsm_state,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS, false);
+ err = mlxsw_reg_query(linecard->linecards->mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query linecard INI status");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, status, fsm_state);
+ return err;
+}
+
+static int
+mlxsw_linecard_ini_transfer(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ const struct mlxsw_linecard_ini_file *ini_file,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ size_t size_left;
+ const u8 *data;
+ int err;
+
+ size_left = le16_to_cpu(ini_file->size);
+ data = ini_file->data;
+ while (size_left) {
+ size_t data_size = MLXSW_REG_MBCT_DATA_LEN;
+ bool is_last = false;
+
+ if (size_left <= MLXSW_REG_MBCT_DATA_LEN) {
+ data_size = size_left;
+ is_last = true;
+ }
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
+ mlxsw_reg_mbct_dt_pack(linecard->mbct_pl, data_size,
+ is_last, data);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI data transfer");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL,
+ &status, &fsm_state);
+ if ((!is_last && status != MLXSW_REG_MBCT_STATUS_PART_DATA) ||
+ (is_last && status != MLXSW_REG_MBCT_STATUS_LAST_DATA)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer linecard INI data");
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+ }
+ size_left -= data_size;
+ data += data_size;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_linecard_ini_erase(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI erase");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ switch (status) {
+ case MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE:
+ break;
+ default:
+ /* Should not happen */
+ fallthrough;
+ case MLXSW_REG_MBCT_STATUS_ERASE_FAILED:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI");
+ goto fix_fsm_err_out;
+ case MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI while being used");
+ goto fix_fsm_err_out;
+ }
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+static void mlxsw_linecard_bct_process(struct mlxsw_core *mlxsw_core,
+ const char *mbct_pl)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ struct mlxsw_linecard *linecard;
+ u8 slot_index;
+
+ mlxsw_reg_mbct_unpack(mbct_pl, &slot_index, &status, &fsm_state);
+ if (WARN_ON(slot_index > linecards->count))
+ return;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mutex_lock(&linecard->lock);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+		dev_err(linecards->bus_info->dev, "linecard %u: Failed to activate INI\n",
+ linecard->slot_index);
+ goto fix_fsm_out;
+ }
+ mutex_unlock(&linecard->lock);
+ return;
+
+fix_fsm_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_ini_activate(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ACTIVATE, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI activation");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate linecard INI");
+ goto fix_fsm_err_out;
+ }
+
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+#define MLXSW_LINECARD_INI_WAIT_RETRIES 10
+#define MLXSW_LINECARD_INI_WAIT_MS 500
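+/* Worst case, the INI FSM is polled for roughly 5 seconds
+ * (10 retries x 500 ms busy-wait) before giving up.
+ */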
+
+static int
+mlxsw_linecard_ini_in_use_wait(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ unsigned int ini_wait_retries = 0;
+ int err;
+
+query_ini_status:
+ err = mlxsw_linecard_query_ini_status(linecard, &status,
+ &fsm_state, extack);
+ if (err)
+ return err;
+
+ switch (fsm_state) {
+ case MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE:
+ if (ini_wait_retries++ > MLXSW_LINECARD_INI_WAIT_RETRIES) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to wait for linecard INI to be unused");
+ return -EINVAL;
+ }
+ mdelay(MLXSW_LINECARD_INI_WAIT_MS);
+ goto query_ini_status;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool mlxsw_linecard_port_selector(void *priv, u16 local_port)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+ return linecard == mlxsw_core_port_linecard_get(mlxsw_core, local_port);
+}
+
+static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ err = mlxsw_linecard_ini_transfer(mlxsw_core, linecard,
+ ini_file, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ err = mlxsw_linecard_ini_activate(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard,
+ void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ mlxsw_core_ports_remove_selected(mlxsw_core,
+ mlxsw_linecard_port_selector,
+ linecard);
+
+ err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static bool mlxsw_linecard_same_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ bool ret;
+
+ mutex_lock(&linecard->lock);
+ ret = linecard->hw_revision == be16_to_cpu(ini_file->format.hw_revision) &&
+ linecard->ini_version == be16_to_cpu(ini_file->format.ini_version);
+ mutex_unlock(&linecard->lock);
+ return ret;
+}
+
+static unsigned int
+mlxsw_linecard_types_count(struct devlink_linecard *devlink_linecard,
+ void *priv)
+{
+ struct mlxsw_linecard *linecard = priv;
+
+ return linecard->linecards->types_info ?
+ linecard->linecards->types_info->count : 0;
+}
+
+static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard,
+ void *priv, unsigned int index,
+ const char **type, const void **type_priv)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ struct mlxsw_linecard *linecard = priv;
+
+ types_info = linecard->linecards->types_info;
+ if (WARN_ON_ONCE(!types_info))
+ return;
+ ini_file = types_info->ini_files[index];
+ *type = ini_file->format.name;
+ *type_priv = ini_file;
+}
+
+static int
+mlxsw_linecard_info_get(struct devlink_linecard *devlink_linecard, void *priv,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->provisioned) {
+ err = 0;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static const struct devlink_linecard_ops mlxsw_linecard_ops = {
+ .provision = mlxsw_linecard_provision,
+ .unprovision = mlxsw_linecard_unprovision,
+ .same_provision = mlxsw_linecard_same_provision,
+ .types_count = mlxsw_linecard_types_count,
+ .types_get = mlxsw_linecard_types_get,
+ .info_get = mlxsw_linecard_info_get,
+ .device_info_get = mlxsw_linecard_device_info_get,
+};
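
These ops plug into devlink's generic line card interface; with iproute2,
provisioning is driven roughly as "devlink lc set DEV lc 8 type 16x100G"
(which reaches .provision with the matching INI as type_priv) and undone with
"devlink lc set DEV lc 8 notype" (the device, slot and type strings here are
illustrative).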
+
+struct mlxsw_linecard_status_event {
+ struct mlxsw_core *mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_status_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_linecards *linecards;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_status_event, work);
+ mlxsw_core = event->mlxsw_core;
+ linecards = mlxsw_core_linecards(mlxsw_core);
+ mlxsw_linecard_status_process(linecards, NULL, event->mddq_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_status_listener_func(const struct mlxsw_reg_info *reg,
+ char *mddq_pl, void *priv)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mddq_pl, mddq_pl, sizeof(event->mddq_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_status_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+struct mlxsw_linecard_bct_event {
+ struct mlxsw_core *mlxsw_core;
+ char mbct_pl[MLXSW_REG_MBCT_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_bct_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_bct_event, work);
+ mlxsw_core = event->mlxsw_core;
+ mlxsw_linecard_bct_process(mlxsw_core, event->mbct_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_bct_listener_func(const struct mlxsw_reg_info *reg,
+ char *mbct_pl, void *priv)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mbct_pl, mbct_pl, sizeof(event->mbct_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_bct_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_linecard_listener[] = {
+ MLXSW_CORE_EVENTL(mlxsw_linecard_status_listener_func, DSDSC),
+ MLXSW_CORE_EVENTL(mlxsw_linecard_bct_listener_func, BCTOE),
+};
+
+static int mlxsw_linecard_event_delivery_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ bool enable)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+}
+
+static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct devlink_linecard *devlink_linecard;
+ struct mlxsw_linecard *linecard;
+ int err;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ linecard->slot_index = slot_index;
+ linecard->linecards = linecards;
+ mutex_init(&linecard->lock);
+ INIT_LIST_HEAD(&linecard->device_list);
+
+ devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
+ slot_index, &mlxsw_linecard_ops,
+ linecard);
+ if (IS_ERR(devlink_linecard)) {
+ err = PTR_ERR(devlink_linecard);
+ goto err_devlink_linecard_create;
+ }
+ linecard->devlink_linecard = devlink_linecard;
+ INIT_DELAYED_WORK(&linecard->status_event_to_dw,
+ &mlxsw_linecard_status_event_to_work);
+
+ err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
+ if (err)
+ goto err_event_delivery_set;
+
+ err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ if (err)
+ goto err_status_get_and_process;
+
+ return 0;
+
+err_status_get_and_process:
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+err_event_delivery_set:
+ devlink_linecard_destroy(linecard->devlink_linecard);
+err_devlink_linecard_create:
+ mutex_destroy(&linecard->lock);
+ return err;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ /* Make sure all scheduled events are processed */
+ mlxsw_core_flush_owq();
+ if (linecard->active)
+ mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_destroy(linecard->devlink_linecard);
+ mutex_destroy(&linecard->lock);
+}
+
+/* LINECARDS INI BUNDLE FILE
+ * +----------------------------------+
+ * | MAGIC ("NVLCINI+") |
+ * +----------------------------------+ +--------------------+
+ * | INI 0 +---> | __le16 size |
+ * +----------------------------------+ | __be16 hw_revision |
+ * | INI 1 | | __be16 ini_version |
+ * +----------------------------------+ | u8 __dontcare[3] |
+ * | ... | | u8 type |
+ * +----------------------------------+ | u8 name[20] |
+ * | INI N | | ... |
+ * +----------------------------------+ +--------------------+
+ */
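
The layout the diagram implies can be sketched as the structure below
(illustrative only; the real definition lives in the driver headers, outside
this hunk):

	struct mlxsw_linecard_ini_file {
		__le16 size;	/* size of the INI payload, in bytes */
		union {
			u8 data[0];
			struct {
				__be16 hw_revision;
				__be16 ini_version;
				u8 __dontcare[3];
				u8 type;
				u8 name[20];
			} format;
		};
	};

Note the mixed endianness: the per-INI size is little-endian while the fields
inside the INI itself are big-endian, matching the le16_to_cpu() and
be16_to_cpu() calls used throughout this file.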
+
+#define MLXSW_LINECARDS_INI_BUNDLE_MAGIC "NVLCINI+"
+
+static int
+mlxsw_linecard_types_file_validate(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ struct mlxsw_linecard_ini_file *ini_file;
+ size_t size = types_info->data_size;
+ const u8 *data = types_info->data;
+ unsigned int count = 0;
+ u16 ini_file_size;
+
+ if (size < magic_size) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file size, smaller than magic size\n");
+ return -EINVAL;
+ }
+ if (memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size)) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file magic pattern\n");
+ return -EINVAL;
+ }
+
+ data += magic_size;
+ size -= magic_size;
+
+ while (size > 0) {
+ if (size < sizeof(*ini_file)) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI which is smaller than bare minimum\n");
+ return -EINVAL;
+ }
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ if (ini_file_size + sizeof(__le16) > size) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file appears to be truncated\n");
+ return -EINVAL;
+ }
+ if (ini_file_size % 4) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI with invalid size\n");
+ return -EINVAL;
+ }
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+ if (!count) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file does not contain any INI\n");
+ return -EINVAL;
+ }
+ types_info->count = count;
+ return 0;
+}
+
+static void
+mlxsw_linecard_types_file_parse(struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ size_t size = types_info->data_size - magic_size;
+ const u8 *data = types_info->data + magic_size;
+ struct mlxsw_linecard_ini_file *ini_file;
+ unsigned int count = 0;
+ u16 ini_file_size;
+ int i;
+
+ while (size) {
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ for (i = 0; i < ini_file_size / 4; i++) {
+ u32 *val = &((u32 *) ini_file->data)[i];
+
+ *val = swab32(*val);
+ }
+ types_info->ini_files[count] = ini_file;
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+}
+
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT \
+ "mellanox/lc_ini_bundle_%u_%u.bin"
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN \
+ (sizeof(MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT) + 4)
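
For example, a firmware revision with minor 2010 and subminor 1006 would make
the driver request "mellanox/lc_ini_bundle_2010_1006.bin" (values
illustrative). The +4 headroom works because the two "%u" specifiers already
contribute four characters to sizeof(), covering up to four digits per
component; the snprintf() result is checked with WARN_ON() below in case that
assumption ever breaks.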
+
+static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ const struct mlxsw_fw_rev *rev = &linecards->bus_info->fw_rev;
+ char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN];
+ struct mlxsw_linecard_types_info *types_info;
+ const struct firmware *firmware;
+ int err;
+
+ err = snprintf(filename, sizeof(filename),
+ MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT,
+ rev->minor, rev->subminor);
+ WARN_ON(err >= sizeof(filename));
+
+ err = request_firmware_direct(&firmware, filename,
+ linecards->bus_info->dev);
+ if (err) {
+ dev_warn(linecards->bus_info->dev, "Could not request linecards INI file \"%s\", provisioning will not be possible\n",
+ filename);
+ return 0;
+ }
+
+ types_info = kzalloc(sizeof(*types_info), GFP_KERNEL);
+ if (!types_info) {
+ release_firmware(firmware);
+ return -ENOMEM;
+ }
+ linecards->types_info = types_info;
+
+ types_info->data_size = firmware->size;
+ types_info->data = vmalloc(types_info->data_size);
+ if (!types_info->data) {
+ err = -ENOMEM;
+ release_firmware(firmware);
+ goto err_data_alloc;
+ }
+ memcpy(types_info->data, firmware->data, types_info->data_size);
+ release_firmware(firmware);
+
+ err = mlxsw_linecard_types_file_validate(linecards, types_info);
+ if (err) {
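+		/* An invalid bundle is treated like a missing one: the
+		 * driver still loads, but provisioning will not be
+		 * possible.
+		 */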
+ err = 0;
+		goto err_types_file_validate;
+ }
+
+ types_info->ini_files = kmalloc_array(types_info->count,
+ sizeof(struct mlxsw_linecard_ini_file *),
+ GFP_KERNEL);
+ if (!types_info->ini_files) {
+ err = -ENOMEM;
+ goto err_ini_files_alloc;
+ }
+
+ mlxsw_linecard_types_file_parse(types_info);
+
+ return 0;
+
+err_ini_files_alloc:
+err_types_file_validate:
+ vfree(types_info->data);
+err_data_alloc:
+ kfree(types_info);
+ return err;
+}
+
+static void mlxsw_linecard_types_fini(struct mlxsw_linecards *linecards)
+{
+ struct mlxsw_linecard_types_info *types_info = linecards->types_info;
+
+ if (!types_info)
+ return;
+ kfree(types_info->ini_files);
+ vfree(types_info->data);
+ kfree(types_info);
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_linecards *linecards;
+ u8 slot_count;
+ int err;
+ int i;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ NULL, &slot_count);
+ if (!slot_count)
+ return 0;
+
+ linecards = vzalloc(struct_size(linecards, linecards, slot_count));
+ if (!linecards)
+ return -ENOMEM;
+ linecards->count = slot_count;
+ linecards->mlxsw_core = mlxsw_core;
+ linecards->bus_info = bus_info;
+ INIT_LIST_HEAD(&linecards->event_ops_list);
+ mutex_init(&linecards->event_ops_list_lock);
+
+ err = mlxsw_linecard_types_init(mlxsw_core, linecards);
+ if (err)
+ goto err_types_init;
+
+ err = mlxsw_core_traps_register(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ if (err)
+ goto err_traps_register;
+
+ mlxsw_core_linecards_set(mlxsw_core, linecards);
+
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_init(mlxsw_core, linecards, i + 1);
+ if (err)
+ goto err_linecard_init;
+ }
+
+ return 0;
+
+err_linecard_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+err_traps_register:
+ mlxsw_linecard_types_fini(linecards);
+err_types_init:
+ vfree(linecards);
+ return err;
+}
+
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ if (!linecards)
+ return;
+ for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ mlxsw_linecard_types_fini(linecards);
+ mutex_destroy(&linecards->event_ops_list_lock);
+ WARN_ON(!list_empty(&linecards->event_ops_list));
+ vfree(linecards);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 05f54bd982c0..3548fe1df7c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -21,7 +21,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
-#define MLXSW_THERMAL_ZONE_MAX_NAME 16
#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MIN_STATE 2
@@ -82,6 +81,16 @@ struct mlxsw_thermal_module {
struct thermal_zone_device *tzdev;
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
int module; /* Module or gearbox number */
+ u8 slot_index;
+};
+
+struct mlxsw_thermal_area {
+ struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
+ struct mlxsw_thermal_module *tz_gearbox_arr;
+ u8 tz_gearbox_num;
+ u8 slot_index;
+ bool active;
};
struct mlxsw_thermal {
@@ -92,12 +101,9 @@ struct mlxsw_thermal {
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
- struct mlxsw_thermal_module *tz_module_arr;
- u8 tz_module_num;
- struct mlxsw_thermal_module *tz_gearbox_arr;
- u8 tz_gearbox_num;
unsigned int tz_highest_score;
struct thermal_zone_device *tz_highest_dev;
+ struct mlxsw_thermal_area line_cards[];
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -123,8 +129,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
/* Allow mlxsw thermal zone binding to an external cooling device */
for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
- if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
- strlen(cdev->type)))
+ if (!strcmp(cdev->type, mlxsw_thermal_external_allowed_cdev[i]))
return 0;
}
@@ -150,13 +155,15 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
* EEPROM if we got valid thresholds from MTMP.
*/
if (!emerg_temp || !crit_temp) {
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_WARN,
&crit_temp);
if (err)
return err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_ALARM,
&emerg_temp);
if (err)
@@ -271,7 +278,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
int temp;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, 0, 0, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -423,15 +430,16 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
static void
mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core,
- u16 sensor_index, int *p_temp,
- int *p_crit_temp,
+ u8 slot_index, u16 sensor_index,
+ int *p_temp, int *p_crit_temp,
int *p_emerg_temp)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int err;
/* Read module temperature and thresholds. */
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index,
+ false, false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
/* Set temperature and thresholds to zero to avoid passing
@@ -462,6 +470,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
/* Read module temperature and thresholds. */
mlxsw_thermal_module_temp_and_thresholds_get(thermal->core,
+ tz->slot_index,
sensor_index, &temp,
&crit_temp, &emerg_temp);
*p_temp = temp;
@@ -576,7 +585,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
int err;
index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module;
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, tz->slot_index, index, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -672,11 +681,15 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int err;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
- module_tz->module + 1);
+ if (module_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-module%d",
+ module_tz->slot_index, module_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ module_tz->module + 1);
module_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
@@ -704,25 +717,28 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
static int
mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal, u8 module)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
int dummy_temp, crit_temp, emerg_temp;
u16 sensor_index;
sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module;
- module_tz = &thermal->tz_module_arr[module];
+ module_tz = &area->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
return 0;
module_tz->module = module;
+ module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
memcpy(module_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
/* Initialize all trip point. */
mlxsw_thermal_module_trips_reset(module_tz);
/* Read module temperature and thresholds. */
- mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp,
+ mlxsw_thermal_module_temp_and_thresholds_get(core, area->slot_index,
+ sensor_index, &dummy_temp,
&crit_temp, &emerg_temp);
/* Update trip point according to the module data. */
return mlxsw_thermal_module_trips_update(dev, core, module_tz,
@@ -740,34 +756,39 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
static int
mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &thermal->tz_module_num);
+ &area->tz_module_num, NULL);
+
+	/* For a modular system, the module count can be zero. */
+ if (!area->tz_module_num)
+ return 0;
- thermal->tz_module_arr = kcalloc(thermal->tz_module_num,
- sizeof(*thermal->tz_module_arr),
- GFP_KERNEL);
- if (!thermal->tz_module_arr)
+ area->tz_module_arr = kcalloc(area->tz_module_num,
+ sizeof(*area->tz_module_arr),
+ GFP_KERNEL);
+ if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_module_num; i++) {
- err = mlxsw_thermal_module_init(dev, core, thermal, i);
+ for (i = 0; i < area->tz_module_num; i++) {
+ err = mlxsw_thermal_module_init(dev, core, thermal, area, i);
if (err)
goto err_thermal_module_init;
}
- for (i = 0; i < thermal->tz_module_num; i++) {
- module_tz = &thermal->tz_module_arr[i];
+ for (i = 0; i < area->tz_module_num; i++) {
+ module_tz = &area->tz_module_arr[i];
if (!module_tz->parent)
continue;
err = mlxsw_thermal_module_tz_init(module_tz);
@@ -779,30 +800,35 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
err_thermal_module_tz_init:
err_thermal_module_init:
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
return err;
}
static void
-mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
}
static int
mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int ret;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
- gearbox_tz->module + 1);
+ if (gearbox_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-gearbox%d",
+ gearbox_tz->slot_index, gearbox_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ gearbox_tz->module + 1);
gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
@@ -828,7 +854,8 @@ mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz)
static int
mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
enum mlxsw_reg_mgpir_device_type device_type;
struct mlxsw_thermal_module *gearbox_tz;
@@ -837,30 +864,31 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
int i;
int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
- NULL);
+ NULL, NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- thermal->tz_gearbox_num = gbox_num;
- thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
- sizeof(*thermal->tz_gearbox_arr),
- GFP_KERNEL);
- if (!thermal->tz_gearbox_arr)
+ area->tz_gearbox_num = gbox_num;
+ area->tz_gearbox_arr = kcalloc(area->tz_gearbox_num,
+ sizeof(*area->tz_gearbox_arr),
+ GFP_KERNEL);
+ if (!area->tz_gearbox_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_gearbox_num; i++) {
- gearbox_tz = &thermal->tz_gearbox_arr[i];
+ for (i = 0; i < area->tz_gearbox_num; i++) {
+ gearbox_tz = &area->tz_gearbox_arr[i];
memcpy(gearbox_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
+ gearbox_tz->slot_index = area->slot_index;
err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
if (err)
goto err_thermal_gearbox_tz_init;
@@ -870,21 +898,80 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
err_thermal_gearbox_tz_init:
for (i--; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
return err;
}
static void
-mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- for (i = thermal->tz_gearbox_num - 1; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ for (i = area->tz_gearbox_num - 1; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
+}
+
+static void
+mlxsw_thermal_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+ int err;
+
+ linecard = &thermal->line_cards[slot_index];
+
+ if (linecard->active)
+ return;
+
+ linecard->slot_index = slot_index;
+ err = mlxsw_thermal_modules_init(thermal->bus_info->dev, thermal->core,
+ thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_thermal_gearboxes_init(thermal->bus_info->dev,
+ thermal->core, thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card gearboxes in slot %d\n",
+ slot_index);
+ goto err_thermal_linecard_gearboxes_init;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_thermal_linecard_gearboxes_init:
+ mlxsw_thermal_modules_fini(thermal, linecard);
+}
+
+static void
+mlxsw_thermal_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+
+ linecard = &thermal->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ mlxsw_thermal_gearboxes_fini(thermal, linecard);
+ mlxsw_thermal_modules_fini(thermal, linecard);
}
+static struct mlxsw_linecards_event_ops mlxsw_thermal_event_ops = {
+ .got_active = mlxsw_thermal_got_active,
+ .got_inactive = mlxsw_thermal_got_inactive,
+};
+
int mlxsw_thermal_init(struct mlxsw_core *core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_thermal **p_thermal)
@@ -892,19 +979,29 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 };
enum mlxsw_reg_mfcr_pwm_frequency freq;
struct device *dev = bus_info->dev;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_thermal *thermal;
+ u8 pwm_active, num_of_slots;
u16 tacho_active;
- u8 pwm_active;
int err, i;
- thermal = devm_kzalloc(dev, sizeof(*thermal),
- GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
+ thermal = kzalloc(struct_size(thermal, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!thermal)
return -ENOMEM;
thermal->core = core;
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+ thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
if (err) {
@@ -970,25 +1067,38 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
goto err_thermal_zone_device_register;
}
- err = mlxsw_thermal_modules_init(dev, core, thermal);
+ err = mlxsw_thermal_modules_init(dev, core, thermal,
+ &thermal->line_cards[0]);
if (err)
goto err_thermal_modules_init;
- err = mlxsw_thermal_gearboxes_init(dev, core, thermal);
+ err = mlxsw_thermal_gearboxes_init(dev, core, thermal,
+ &thermal->line_cards[0]);
if (err)
goto err_thermal_gearboxes_init;
+ err = mlxsw_linecards_event_ops_register(core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ if (err)
+ goto err_linecards_event_ops_register;
+
err = thermal_zone_device_enable(thermal->tzdev);
if (err)
goto err_thermal_zone_device_enable;
+ thermal->line_cards[0].active = true;
*p_thermal = thermal;
return 0;
err_thermal_zone_device_enable:
- mlxsw_thermal_gearboxes_fini(thermal);
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+err_linecards_event_ops_register:
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
err_thermal_gearboxes_init:
- mlxsw_thermal_modules_fini(thermal);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
err_thermal_modules_init:
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
@@ -1001,7 +1111,7 @@ err_thermal_cooling_device_register:
thermal_cooling_device_unregister(thermal->cdevs[i]);
err_reg_write:
err_reg_query:
- devm_kfree(dev, thermal);
+ kfree(thermal);
return err;
}
@@ -1009,8 +1119,12 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
{
int i;
- mlxsw_thermal_gearboxes_fini(thermal);
- mlxsw_thermal_modules_fini(thermal);
+ thermal->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
@@ -1023,5 +1137,5 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
}
}
- devm_kfree(thermal->bus_info->dev, thermal);
+ kfree(thermal);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 939b692ffc33..ce843ea91464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
return 0;
errout:
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 3bc012dafd08..d9660d4cce96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -59,7 +59,8 @@ static int mlxsw_m_port_open(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+ return mlxsw_env_module_port_up(mlxsw_m->core, 0,
+ mlxsw_m_port->module);
}
static int mlxsw_m_port_stop(struct net_device *dev)
@@ -67,7 +68,7 @@ static int mlxsw_m_port_stop(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
+ mlxsw_env_module_port_down(mlxsw_m->core, 0, mlxsw_m_port->module);
return 0;
}
@@ -110,7 +111,7 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(netdev, core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_info(netdev, core, 0, mlxsw_m_port->module,
modinfo);
}
@@ -121,8 +122,8 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module,
- ee, data);
+ return mlxsw_env_get_module_eeprom(netdev, core, 0,
+ mlxsw_m_port->module, ee, data);
}
static int
@@ -133,7 +134,8 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_eeprom_by_page(core, 0,
+ mlxsw_m_port->module,
page, extack);
}
@@ -142,7 +144,7 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+ return mlxsw_env_reset_module(netdev, core, 0, mlxsw_m_port->module,
flags);
}
@@ -154,7 +156,7 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_power_mode(core, 0, mlxsw_m_port->module,
params, extack);
}
@@ -166,7 +168,7 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_set_module_power_mode(core, 0, mlxsw_m_port->module,
params->policy, extack);
}
@@ -221,7 +223,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port, 0,
module + 1, false, 0, false,
0, mlxsw_m->base_mac,
sizeof(mlxsw_m->base_mac));
@@ -311,7 +313,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
- mlxsw_env_module_port_map(mlxsw_m->core, module);
+ mlxsw_env_module_port_map(mlxsw_m->core, 0, module);
mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
return 0;
@@ -320,12 +322,13 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
{
mlxsw_m->module_to_port[module] = -1;
- mlxsw_env_module_port_unmap(mlxsw_m->core, module);
+ mlxsw_env_module_port_unmap(mlxsw_m->core, 0, module);
}
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
u8 last_module = max_ports;
int i;
int err;
@@ -354,6 +357,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
}
/* Create port objects for each valid entry */
+ devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0 &&
!mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
@@ -364,6 +368,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
goto err_module_to_port_create;
}
}
+ devl_unlock(devlink);
return 0;
@@ -373,6 +378,7 @@ err_module_to_port_create:
mlxsw_m_port_remove(mlxsw_m,
mlxsw_m->module_to_port[i]);
}
+ devl_unlock(devlink);
i = max_ports;
err_module_to_port_map:
for (i--; i > 0; i--)
@@ -385,8 +391,10 @@ err_module_to_port_alloc:
static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
int i;
+ devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
mlxsw_m_port_remove(mlxsw_m,
@@ -394,6 +402,7 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
mlxsw_m_port_module_unmap(mlxsw_m, i);
}
}
+ devl_unlock(devlink);
kfree(mlxsw_m->module_to_port);
kfree(mlxsw_m->ports);
@@ -422,7 +431,6 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_m->core = mlxsw_core;
@@ -438,9 +446,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
- devl_lock(devlink);
err = mlxsw_m_ports_create(mlxsw_m);
- devl_unlock(devlink);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
return err;
@@ -452,11 +458,8 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devl_lock(devlink);
mlxsw_m_ports_remove(mlxsw_m);
- devl_unlock(devlink);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 67b1a2f8397f..04c4d7a4bd83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4325,6 +4325,15 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
*/
MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
+/* reg_pmlp_slot_index
+ * Slot index.
+ * Slot_index = 0 represents the onboard (motherboard).
+ * In case of a non-modular system, only slot_index = 0 is available.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, slot_index, 0x04, 8, 4, 0x04, 0x00, false);
+
/* reg_pmlp_tx_lane
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
@@ -5769,9 +5778,10 @@ enum mlxsw_reg_pmaos_e {
*/
MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmaos, payload);
+ mlxsw_reg_pmaos_slot_index_set(payload, slot_index);
mlxsw_reg_pmaos_module_set(payload, module);
}
@@ -5874,6 +5884,69 @@ static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
}
+/* PMECR - Ports Mapping Event Configuration Register
+ * --------------------------------------------------
+ * The PMECR register is used to enable/disable event triggering
+ * in case of local port mapping change.
+ */
+#define MLXSW_REG_PMECR_ID 0x501B
+#define MLXSW_REG_PMECR_LEN 0x20
+
+MLXSW_REG_DEFINE(pmecr, MLXSW_REG_PMECR_ID, MLXSW_REG_PMECR_LEN);
+
+/* reg_pmecr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pmecr, 0x00, 16, 0x00, 12);
+
+/* reg_pmecr_ee
+ * Event update enable. If this bit is set, event generation will be updated
+ * based on the e field. Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, ee, 0x04, 30, 1);
+
+/* reg_pmecr_eswi
+ * Software ignore enable bit. If this bit is set, the value of swi is used.
+ * If this bit is clear, the value of swi is ignored.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, eswi, 0x04, 24, 1);
+
+/* reg_pmecr_swi
+ * Software ignore. If this bit is set, the device should not generate
+ * events for a PMLP SET operation, but only upon an actual local port
+ * mapping change (if applicable according to the e configuration). This
+ * is supplementary configuration on top of the e value.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, swi, 0x04, 8, 1);
+
+enum mlxsw_reg_pmecr_e {
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmecr_e
+ * Event generation on local port mapping change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmecr_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_pmecr_e e)
+{
+ MLXSW_REG_ZERO(pmecr, payload);
+ mlxsw_reg_pmecr_local_port_set(payload, local_port);
+ mlxsw_reg_pmecr_e_set(payload, e);
+ mlxsw_reg_pmecr_ee_set(payload, true);
+ mlxsw_reg_pmecr_swi_set(payload, true);
+ mlxsw_reg_pmecr_eswi_set(payload, true);
+}
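
A caller would then write the register in the usual mlxsw way (sketch;
variable names are illustrative):

	char pmecr_pl[MLXSW_REG_PMECR_LEN];
	int err;

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     MLXSW_REG_PMECR_E_GENERATE_EVENT);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmecr), pmecr_pl);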
+
/* PMPE - Port Module Plug/Unplug Event Register
* ---------------------------------------------
* This register reports any operational status change of a module.
@@ -5984,6 +6057,12 @@ MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
*/
MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
+/* reg_pmmp_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmmp, slot_index, 0x00, 24, 4);
+
/* reg_pmmp_sticky
* When set, will keep eeprom_override values after plug-out event.
* Access: OP
@@ -6011,9 +6090,10 @@ enum {
*/
MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
-static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_slot_index_set(payload, slot_index);
mlxsw_reg_pmmp_module_set(payload, module);
}
@@ -9721,6 +9801,12 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
+/* reg_mtmp_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtmp, slot_index, 0x00, 16, 4);
+
#define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64
#define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256
/* reg_mtmp_sensor_index
@@ -9810,11 +9896,12 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
*/
MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
-static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
- bool max_temp_enable,
+static inline void mlxsw_reg_mtmp_pack(char *payload, u8 slot_index,
+ u16 sensor_index, bool max_temp_enable,
bool max_temp_reset)
{
MLXSW_REG_ZERO(mtmp, payload);
+ mlxsw_reg_mtmp_slot_index_set(payload, slot_index);
mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
@@ -9880,6 +9967,12 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
+/* reg_mtbr_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtbr, slot_index, 0x00, 16, 4);
+
/* reg_mtbr_base_sensor_index
* Base sensors index to access (0 - ASIC sensor, 1-63 - ambient sensors,
* 64-127 are mapped to the SFP+/QSFP modules sequentially).
@@ -9912,10 +10005,11 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index,
- u8 num_rec)
+static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index,
+ u16 base_sensor_index, u8 num_rec)
{
MLXSW_REG_ZERO(mtbr, payload);
+ mlxsw_reg_mtbr_slot_index_set(payload, slot_index);
mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
}
@@ -9964,6 +10058,12 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
*/
MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+/* reg_mcia_slot
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, slot, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCIA_STATUS_GOOD = 0,
/* No response from module's EEPROM. */
@@ -10063,11 +10163,13 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
-static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
- u8 page_number, u16 device_addr,
- u8 size, u8 i2c_device_addr)
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
+ u8 lock, u8 page_number,
+ u16 device_addr, u8 size,
+ u8 i2c_device_addr)
{
MLXSW_REG_ZERO(mcia, payload);
+ mlxsw_reg_mcia_slot_set(payload, slot_index);
mlxsw_reg_mcia_module_set(payload, module);
mlxsw_reg_mcia_l_set(payload, lock);
mlxsw_reg_mcia_page_number_set(payload, page_number);
@@ -10499,6 +10601,12 @@ MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
*/
MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+/* reg_mcion_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, slot_index, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
@@ -10510,9 +10618,10 @@ enum {
*/
MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
-static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_slot_index_set(payload, slot_index);
mlxsw_reg_mcion_module_set(payload, module);
}
@@ -11326,6 +11435,12 @@ enum mlxsw_reg_mgpir_device_type {
MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
};
+/* mgpir_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpir, slot_index, 0x00, 28, 4);
+
/* mgpir_device_type
* Access: RO
*/
@@ -11343,21 +11458,35 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+/* mgpir_max_modules_per_slot
+ * Maximum number of modules that can be connected per slot.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, max_modules_per_slot, 0x04, 16, 8);
+
+/* mgpir_num_of_slots
+ * Number of slots in the system.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_slots, 0x04, 8, 8);
+
/* mgpir_num_of_modules
* Number of modules.
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
-static inline void mlxsw_reg_mgpir_pack(char *payload)
+static inline void mlxsw_reg_mgpir_pack(char *payload, u8 slot_index)
{
MLXSW_REG_ZERO(mgpir, payload);
+ mlxsw_reg_mgpir_slot_index_set(payload, slot_index);
}
static inline void
mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
enum mlxsw_reg_mgpir_device_type *device_type,
- u8 *devices_per_flash, u8 *num_of_modules)
+ u8 *devices_per_flash, u8 *num_of_modules,
+ u8 *num_of_slots)
{
if (num_of_devices)
*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -11368,6 +11497,393 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
mlxsw_reg_mgpir_devices_per_flash_get(payload);
if (num_of_modules)
*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
+ if (num_of_slots)
+ *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload);
+}
+
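As a usage note, discovering how many slots and modules the FW exposes is a
single MGPIR query; a minimal sketch, assuming the standard mlxsw_reg_query()
path (the wrapper name is hypothetical):

static int example_mgpir_query(struct mlxsw_core *core, u8 slot_index,
			       u8 *num_of_modules, u8 *num_of_slots)
{
	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	int err;

	mlxsw_reg_mgpir_pack(mgpir_pl, slot_index);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;
	/* Out-parameters that are not needed may be passed as NULL */
	mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
			       num_of_modules, num_of_slots);
	return 0;
}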
+/* MBCT - Management Binary Code Transfer Register
+ * -----------------------------------------------
+ * This register allows transferring binary code from the host to
+ * the management FW in chunks of at most 1KB.
+ */
+#define MLXSW_REG_MBCT_ID 0x9120
+#define MLXSW_REG_MBCT_LEN 0x420
+
+MLXSW_REG_DEFINE(mbct, MLXSW_REG_MBCT_ID, MLXSW_REG_MBCT_LEN);
+
+/* reg_mbct_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mbct, slot_index, 0x00, 0, 4);
+
+/* reg_mbct_data_size
+ * Actual data field size in bytes for the current data transfer.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, data_size, 0x04, 0, 11);
+
+enum mlxsw_reg_mbct_op {
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE = 1,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, /* Download */
+ MLXSW_REG_MBCT_OP_ACTIVATE,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS = 6,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS,
+};
+
+/* reg_mbct_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, op, 0x08, 28, 4);
+
+/* reg_mbct_last
+ * Indicates that the current data field is the last chunk of the INI.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, last, 0x08, 26, 1);
+
+/* reg_mbct_oee
+ * Opcode Event Enable. When set a BCTOE event will be sent once the opcode
+ * was executed and the fsm_state has changed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, oee, 0x08, 25, 1);
+
+enum mlxsw_reg_mbct_status {
+ /* Partial data transfer completed successfully and ready for next
+ * data transfer.
+ */
+ MLXSW_REG_MBCT_STATUS_PART_DATA = 2,
+ MLXSW_REG_MBCT_STATUS_LAST_DATA,
+ MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE,
+ /* Error - trying to erase the INI while it is being used. */
+ MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE,
+ /* Last data transfer completed, applying magic pattern. */
+ MLXSW_REG_MBCT_STATUS_ERASE_FAILED = 7,
+ MLXSW_REG_MBCT_STATUS_INI_ERROR,
+ MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED,
+ MLXSW_REG_MBCT_STATUS_ILLEGAL_OPERATION = 11,
+};
+
+/* reg_mbct_status
+ * Status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, status, 0x0C, 24, 5);
+
+enum mlxsw_reg_mbct_fsm_state {
+ MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE = 5,
+ MLXSW_REG_MBCT_FSM_STATE_ERROR,
+};
+
+/* reg_mbct_fsm_state
+ * FSM state.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, fsm_state, 0x0C, 16, 4);
+
+#define MLXSW_REG_MBCT_DATA_LEN 1024
+
+/* reg_mbct_data
+ * Up to 1KB of data.
+ * Access: WO
+ */
+MLXSW_ITEM_BUF(reg, mbct, data, 0x20, MLXSW_REG_MBCT_DATA_LEN);
+
+static inline void mlxsw_reg_mbct_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mbct_op op, bool oee)
+{
+ MLXSW_REG_ZERO(mbct, payload);
+ mlxsw_reg_mbct_slot_index_set(payload, slot_index);
+ mlxsw_reg_mbct_op_set(payload, op);
+ mlxsw_reg_mbct_oee_set(payload, oee);
+}
+
+static inline void mlxsw_reg_mbct_dt_pack(char *payload,
+ u16 data_size, bool last,
+ const char *data)
+{
+ if (WARN_ON(data_size > MLXSW_REG_MBCT_DATA_LEN))
+ return;
+ mlxsw_reg_mbct_data_size_set(payload, data_size);
+ mlxsw_reg_mbct_last_set(payload, last);
+ mlxsw_reg_mbct_data_memcpy_to(payload, data);
+}
+
+static inline void
+mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
+ enum mlxsw_reg_mbct_status *p_status,
+ enum mlxsw_reg_mbct_fsm_state *p_fsm_state)
+{
+ if (p_slot_index)
+ *p_slot_index = mlxsw_reg_mbct_slot_index_get(payload);
+ *p_status = mlxsw_reg_mbct_status_get(payload);
+ if (p_fsm_state)
+ *p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
+}
+
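Putting the MBCT helpers together, one INI chunk is transferred by packing
the DATA_TRANSFER opcode and appending the data; a minimal sketch (the
wrapper name is hypothetical, and a real caller would wait for the BCTOE
event or issue OP_QUERY_STATUS between chunks):

static int example_mbct_send_chunk(struct mlxsw_core *core, u8 slot_index,
				   const char *chunk, u16 size, bool last)
{
	char mbct_pl[MLXSW_REG_MBCT_LEN];

	/* oee=true requests a BCTOE event once the opcode has executed */
	mlxsw_reg_mbct_pack(mbct_pl, slot_index,
			    MLXSW_REG_MBCT_OP_DATA_TRANSFER, true);
	mlxsw_reg_mbct_dt_pack(mbct_pl, size, last, chunk);
	return mlxsw_reg_write(core, MLXSW_REG(mbct), mbct_pl);
}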
+/* MDDQ - Management DownStream Device Query Register
+ * --------------------------------------------------
+ * This register allows querying the properties of a downstream device. The
+ * desired information is selected via the query_type field and delivered
+ * in 32-byte data blocks.
+ */
+#define MLXSW_REG_MDDQ_ID 0x9161
+#define MLXSW_REG_MDDQ_LEN 0x30
+
+MLXSW_REG_DEFINE(mddq, MLXSW_REG_MDDQ_ID, MLXSW_REG_MDDQ_LEN);
+
+/* reg_mddq_sie
+ * Slot info event enable.
+ * When set to '1', each change in the slot_info.provisioned / sr_valid /
+ * active / ready will generate a DSDSC event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
+
+enum mlxsw_reg_mddq_query_type {
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
+};
+
+/* reg_mddq_query_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
+
+/* reg_mddq_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+
+/* reg_mddq_response_msg_seq
+ * Response message sequential number. For a given request, the response
+ * carries the following sequential number; the last message in a sequence
+ * is indicated by the value 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequential number.
+ * The first message number should be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contains the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
+/* reg_mddq_slot_info_provisioned
+ * If set, the INI file is applied and the card is provisioned.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_provisioned, 0x10, 31, 1);
+
+/* reg_mddq_slot_info_sr_valid
+ * If set, the Shift Register is valid (after being provisioned) and data
+ * can be sent from the switch ASIC to the line-card CPLD over the
+ * Shift Register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_sr_valid, 0x10, 30, 1);
+
+enum mlxsw_reg_mddq_slot_info_ready {
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_NOT_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_ERROR,
+};
+
+/* reg_mddq_slot_info_lc_ready
+ * If set, the line card is powered on, matches the INI version, and a new
+ * FW version can be burnt if necessary.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_lc_ready, 0x10, 28, 2);
+
+/* reg_mddq_slot_info_active
+ * If set, the FW has completed the MDDC.device_enable command.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_active, 0x10, 27, 1);
+
+/* reg_mddq_slot_info_hw_revision
+ * Major user-configured version number of the current INI file.
+ * Valid only when active or lc_ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_hw_revision, 0x14, 16, 16);
+
+/* reg_mddq_slot_info_ini_file_version
+ * User-configured version number of the current INI file.
+ * Valid only when active or lc_ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_ini_file_version, 0x14, 0, 16);
+
+/* reg_mddq_slot_info_card_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_card_type, 0x18, 0, 8);
+
+static inline void
+__mlxsw_reg_mddq_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mddq_query_type query_type)
+{
+ MLXSW_REG_ZERO(mddq, payload);
+ mlxsw_reg_mddq_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddq_query_type_set(payload, query_type);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_pack(char *payload, u8 slot_index, bool sie)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO);
+ mlxsw_reg_mddq_sie_set(payload, sie);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
+ bool *p_provisioned, bool *p_sr_valid,
+ enum mlxsw_reg_mddq_slot_info_ready *p_lc_ready,
+ bool *p_active, u16 *p_hw_revision,
+ u16 *p_ini_file_version,
+ u8 *p_card_type)
+{
+ *p_slot_index = mlxsw_reg_mddq_slot_index_get(payload);
+ *p_provisioned = mlxsw_reg_mddq_slot_info_provisioned_get(payload);
+ *p_sr_valid = mlxsw_reg_mddq_slot_info_sr_valid_get(payload);
+ *p_lc_ready = mlxsw_reg_mddq_slot_info_lc_ready_get(payload);
+ *p_active = mlxsw_reg_mddq_slot_info_active_get(payload);
+ *p_hw_revision = mlxsw_reg_mddq_slot_info_hw_revision_get(payload);
+ *p_ini_file_version = mlxsw_reg_mddq_slot_info_ini_file_version_get(payload);
+ *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
+}
+
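A slot-info query then reduces to pack, query, unpack; a minimal sketch that
only keeps the provisioned bit (the helper name is hypothetical):

static int example_mddq_slot_provisioned(struct mlxsw_core *core,
					 u8 slot_index, bool *provisioned)
{
	enum mlxsw_reg_mddq_slot_info_ready lc_ready;
	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	bool sr_valid, active;
	u16 hw_rev, ini_ver;
	u8 card_type, slot;
	int err;

	mlxsw_reg_mddq_slot_info_pack(mddq_pl, slot_index, false);
	err = mlxsw_reg_query(core, MLXSW_REG(mddq), mddq_pl);
	if (err)
		return err;
	mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot, provisioned,
					&sr_valid, &lc_ready, &active,
					&hw_rev, &ini_ver, &card_type);
	return 0;
}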
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ if (p_flash_owner)
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ if (p_fw_major)
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ if (p_fw_minor)
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ if (p_fw_sub_minor)
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
+
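Because device-info responses are sequenced, a caller walks them by feeding
the returned response_msg_seq back as the next request_msg_seq; a sketch of
the loop only (fragment, with core and slot_index assumed to be in scope):

	u8 req_seq = 0, resp_seq, dev_index;
	u16 fw_major, fw_minor, fw_sub_minor;
	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	bool data_valid;
	int err;

	do {
		mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index, req_seq);
		err = mlxsw_reg_query(core, MLXSW_REG(mddq), mddq_pl);
		if (err)
			return err;
		mlxsw_reg_mddq_device_info_unpack(mddq_pl, &resp_seq,
						  &data_valid, NULL,
						  &dev_index, &fw_major,
						  &fw_minor, &fw_sub_minor);
		req_seq = resp_seq;
	} while (data_valid && resp_seq);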
+#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
+
+/* reg_mddq_slot_ascii_name
+ * Slot's ASCII name.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mddq, slot_ascii_name, 0x10,
+ MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN);
+
+static inline void
+mlxsw_reg_mddq_slot_name_pack(char *payload, u8 slot_index)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name)
+{
+ mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name);
+}
+
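Reading a slot's name is the simplest of the three query types; a minimal
fragment (note the extra byte for NUL termination, since the unpack helper
copies exactly MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN bytes):

	char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN + 1] = {};
	char mddq_pl[MLXSW_REG_MDDQ_LEN];

	mlxsw_reg_mddq_slot_name_pack(mddq_pl, slot_index);
	err = mlxsw_reg_query(core, MLXSW_REG(mddq), mddq_pl);
	if (!err)
		mlxsw_reg_mddq_slot_name_unpack(mddq_pl, name);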
+/* MDDC - Management DownStream Device Control Register
+ * ----------------------------------------------------
+ * This register allows controlling downstream devices and line cards.
+ */
+#define MLXSW_REG_MDDC_ID 0x9163
+#define MLXSW_REG_MDDC_LEN 0x30
+
+MLXSW_REG_DEFINE(mddc, MLXSW_REG_MDDC_ID, MLXSW_REG_MDDC_LEN);
+
+/* reg_mddc_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddc, slot_index, 0x00, 0, 4);
+
+/* reg_mddc_rst
+ * Reset request.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddc, rst, 0x04, 29, 1);
+
+/* reg_mddc_device_enable
+ * When set, FW is the manager and allowed to program the downstream device.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddc, device_enable, 0x04, 28, 1);
+
+static inline void mlxsw_reg_mddc_pack(char *payload, u8 slot_index, bool rst,
+ bool device_enable)
+{
+ MLXSW_REG_ZERO(mddc, payload);
+ mlxsw_reg_mddc_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddc_rst_set(payload, rst);
+ mlxsw_reg_mddc_device_enable_set(payload, device_enable);
}
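With MDDC, enabling or resetting a line card is a single write; a minimal
sketch (the wrapper name is hypothetical):

static int example_mddc_device_enable(struct mlxsw_core *core, u8 slot_index,
				      bool enable)
{
	char mddc_pl[MLXSW_REG_MDDC_LEN];

	mlxsw_reg_mddc_pack(mddc_pl, slot_index, false, enable);
	return mlxsw_reg_write(core, MLXSW_REG(mddc), mddc_pl);
}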
/* MFDE - Monitoring FW Debug Register
@@ -12619,6 +13135,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pmaos),
MLXSW_REG(pplr),
MLXSW_REG(pmtdb),
+ MLXSW_REG(pmecr),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
MLXSW_REG(pmmp),
@@ -12688,6 +13205,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtptpt),
MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
+ MLXSW_REG(mbct),
+ MLXSW_REG(mddq),
+ MLXSW_REG(mddc),
MLXSW_REG(mfde),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 8eb05090ffec..ac6348e2ff1f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -89,6 +89,11 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
"." __stringify(MLXSW_SP_FWREV_MINOR) \
"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
+ "mellanox/lc_ini_bundle_" \
+ __stringify(MLXSW_SP_FWREV_MINOR) "_" \
+ __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
+
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
@@ -481,23 +486,22 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
}
static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- struct mlxsw_sp_port_mapping *port_mapping)
+mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, char *pmlp_pl,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
bool separate_rxtx;
+ u8 first_lane;
+ u8 slot_index;
u8 module;
u8 width;
- int err;
int i;
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
width = mlxsw_reg_pmlp_width_get(pmlp_pl);
separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+ first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
if (width && !is_power_of_2(width)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
@@ -511,6 +515,11 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
+ if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
+ local_port);
+ return -EINVAL;
+ }
if (separate_rxtx &&
mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
@@ -518,7 +527,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
- if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
local_port);
return -EINVAL;
@@ -526,6 +535,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
port_mapping->module = module;
+ port_mapping->slot_index = slot_index;
port_mapping->width = width;
port_mapping->module_width = width;
port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
@@ -533,17 +543,35 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ pmlp_pl, port_mapping);
+}
+
+static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i, err;
- mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
+ port_mapping->slot_index);
mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
@@ -554,19 +582,20 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return 0;
err_pmlp_write:
- mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- u8 module)
+ u8 slot_index, u8 module)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}
static int mlxsw_sp_port_open(struct net_device *dev)
@@ -576,6 +605,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
int err;
err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
if (err)
return err;
@@ -587,6 +617,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
err_port_admin_status_set:
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return err;
}
@@ -599,6 +630,7 @@ static int mlxsw_sp_port_stop(struct net_device *dev)
netif_stop_queue(dev);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return 0;
}
@@ -1445,12 +1477,13 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
u64 overheat_counter;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
- &overheat_counter);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
+ module, &overheat_counter);
if (err)
return err;
@@ -1525,7 +1558,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
splittable = lanes > 1 && !split;
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes, mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
@@ -1775,13 +1808,16 @@ err_port_label_info_get:
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
+ port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
@@ -1804,7 +1840,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1858,21 +1894,148 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
return mlxsw_sp->ports[local_port] != NULL;
}
+static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, bool enable)
+{
+ char pmecr_pl[MLXSW_REG_PMECR_LEN];
+
+ mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
+ enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
+}
+
+struct mlxsw_sp_port_mapping_event {
+ struct list_head list;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+};
+
+static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ LIST_HEAD(event_queue);
+ u16 local_port;
+ int err;
+
+ events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
+ mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
+ devlink = priv_to_devlink(mlxsw_sp->core);
+
+ spin_lock_bh(&events->queue_lock);
+ list_splice_init(&events->queue, &event_queue);
+ spin_unlock_bh(&events->queue_lock);
+
+ list_for_each_entry_safe(event, next_event, &event_queue, list) {
+ local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
+ err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ event->pmlp_pl, &port_mapping);
+ if (err)
+ goto out;
+
+ if (WARN_ON_ONCE(!port_mapping.width))
+ goto out;
+
+ devl_lock(devlink);
+
+ if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, &port_mapping);
+ else
+ WARN_ON_ONCE(1);
+
+ devl_unlock(devlink);
+
+ mlxsw_sp->port_mapping[local_port] = port_mapping;
+
+out:
+ kfree(event);
+ }
+}
+
+static void
+mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
+ char *pmlp_pl, void *priv)
+{
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping_event *event;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u16 local_port;
+
+ local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+
+ events = &mlxsw_sp->port_mapping_events;
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
+ spin_lock(&events->queue_lock);
+ list_add_tail(&event->list, &events->queue);
+ spin_unlock(&events->queue_lock);
+ mlxsw_core_schedule_work(&events->work);
+}
+
+static void
+__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+
+ events = &mlxsw_sp->port_mapping_events;
+
+ /* Caller needs to make sure that no new event is going to appear. */
+ cancel_work_sync(&events->work);
+ list_for_each_entry_safe(event, next_event, &events->queue, list) {
+ list_del(&event->list);
+ kfree(event);
+ }
+}
+
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+ for (i = 1; i < max_ports; i++)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
+
+ devl_lock(devlink);
+ for (i = 1; i < max_ports; i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
+ devl_unlock(devlink);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
}
+static void
+mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
+ int i;
+
+ for (i = 1; i < max_ports; i++)
+ if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+}
+
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping_events *events;
struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
int i;
@@ -1883,26 +2046,46 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
+ events = &mlxsw_sp->port_mapping_events;
+ INIT_LIST_HEAD(&events->queue);
+ spin_lock_init(&events->queue_lock);
+ INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
+ if (err)
+ goto err_event_enable;
+ }
+
+ devl_lock(devlink);
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
for (i = 1; i < max_ports; i++) {
- port_mapping = mlxsw_sp->port_mapping[i];
- if (!port_mapping)
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ if (!port_mapping->width)
continue;
err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
if (err)
goto err_port_create;
}
+ devl_unlock(devlink);
return 0;
err_port_create:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ i = max_ports;
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
+ devl_unlock(devlink);
+err_event_enable:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
return err;
@@ -1911,12 +2094,12 @@ err_cpu_port_create:
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
int i;
int err;
mlxsw_sp->port_mapping = kcalloc(max_ports,
- sizeof(struct mlxsw_sp_port_mapping *),
+ sizeof(struct mlxsw_sp_port_mapping),
GFP_KERNEL);
if (!mlxsw_sp->port_mapping)
return -ENOMEM;
@@ -1925,36 +2108,20 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
continue;
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
if (err)
goto err_port_module_info_get;
- if (!port_mapping.width)
- continue;
-
- mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
- sizeof(port_mapping),
- GFP_KERNEL);
- if (!mlxsw_sp->port_mapping[i]) {
- err = -ENOMEM;
- goto err_port_module_info_dup;
- }
}
return 0;
err_port_module_info_get:
-err_port_module_info_dup:
- for (i--; i >= 1; i--)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
return err;
}
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
- int i;
-
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
}
@@ -2004,8 +2171,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < count; i++) {
u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- port_mapping = mlxsw_sp->port_mapping[local_port];
- if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
+ port_mapping = &mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
continue;
mlxsw_sp_port_create(mlxsw_sp, local_port,
false, port_mapping);
@@ -2045,7 +2212,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
return -EINVAL;
}
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2080,6 +2248,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
err_port_split_create:
mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
+
return err;
}
@@ -2109,7 +2278,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
count = mlxsw_sp_port->mapping.module_width /
mlxsw_sp_port->mapping.width;
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2300,6 +2470,11 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = {
MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
+static const struct mlxsw_listener mlxsw_sp2_listener[] = {
+ /* Events */
+ MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
+};
+
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -2818,7 +2993,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
@@ -2979,9 +3153,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_sample_trigger_init;
}
- devl_lock(devlink);
err = mlxsw_sp_ports_create(mlxsw_sp);
- devl_unlock(devlink);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
goto err_ports_create;
@@ -3094,6 +3266,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3124,6 +3298,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3154,6 +3330,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3162,12 +3340,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devl_lock(devlink);
mlxsw_sp_ports_remove(mlxsw_sp);
- devl_unlock(devlink);
-
rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
@@ -3645,6 +3819,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3682,6 +3857,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3717,6 +3893,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -5024,3 +5201,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 20588e699588..2ad29ae1c640 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -145,11 +145,18 @@ struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
+ u8 slot_index;
u8 width; /* Number of lanes used by the port */
u8 module_width; /* Number of lanes in the module (static) */
u8 lane;
};
+struct mlxsw_sp_port_mapping_events {
+ struct list_head queue;
+ spinlock_t queue_lock; /* protects queue */
+ struct work_struct work;
+};
+
struct mlxsw_sp_parsing {
refcount_t parsing_depth_ref;
u16 parsing_depth;
@@ -164,7 +171,8 @@ struct mlxsw_sp {
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
- struct mlxsw_sp_port_mapping **port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
+ struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 8b5d7f83b9b0..915dffb85a1c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -568,14 +568,14 @@ struct mlxsw_sp_port_stats {
static u64
mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping;
struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
u64 stats;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_core,
- port_mapping.module,
- &stats);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_core, slot_index,
+ module, &stats);
if (err)
return mlxsw_sp_port->module_overheat_initial_val;
@@ -1036,6 +1036,7 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
return mlxsw_env_get_module_info(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module,
modinfo);
}
@@ -1045,10 +1046,11 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
- mlxsw_sp_port->mapping.module, ee,
- data);
+ return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, slot_index,
+ module, ee, data);
}
static int
@@ -1058,10 +1060,11 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page,
- extack);
+ return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
}
static int
@@ -1202,9 +1205,11 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, slot_index,
+ module, flags);
}
static int
@@ -1214,10 +1219,11 @@ mlxsw_sp_get_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
- extack);
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params, extack);
}
static int
@@ -1227,10 +1233,11 @@ mlxsw_sp_set_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
- params->policy, extack);
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params->policy, extack);
}
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 79fd486e29e3..dc820d9f2696 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -21,6 +21,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
@@ -507,7 +508,7 @@ struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
struct fib_info *fi;
u32 tb_id;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
@@ -5559,7 +5560,7 @@ mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- return !fib4_entry->tos;
+ return !fib4_entry->dscp;
}
static bool
@@ -5620,7 +5621,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -5645,7 +5646,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = should_offload;
fri.trap = !should_offload;
@@ -5668,7 +5669,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = false;
fri.trap = false;
@@ -6250,7 +6251,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
- fib4_entry->tos = fen_info->tos;
+ fib4_entry->dscp = fen_info->dscp;
fib_entry->fib_node = fib_node;
@@ -6304,7 +6305,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib4_entry, common);
if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
+ fib4_entry->dscp == fen_info->dscp &&
fib4_entry->type == fen_info->type &&
fib4_entry->fi == fen_info->fi)
return fib4_entry;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index b73466470f75..fe663b0ab708 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 9e070ab3ed76..d888498aed33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -133,6 +133,12 @@ enum mlxsw_event_trap_id {
MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
/* PTP Egress FIFO has a new entry */
MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E,
+ /* Downstream Device Status Change */
+ MLXSW_TRAP_ID_DSDSC = 0x321,
+ /* Binary Code Transfer Operation Executed Event */
+ MLXSW_TRAP_ID_BCTOE = 0x322,
+ /* Port mapping change */
+ MLXSW_TRAP_ID_PMLPE = 0x32E,
};
#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index a9ffc719aa0e..fd2e0ebb2427 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
- lan966x_ptp.o
+ lan966x_ptp.o lan966x_fdma.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
new file mode 100644
index 000000000000..9e2a7323eaf0
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
+}
+
+static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
+ struct lan966x_db *db)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ page = dev_alloc_pages(rx->page_order);
+ if (unlikely(!page))
+ return NULL;
+
+ dma_addr = dma_map_page(lan966x->dev, page, 0,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
+ goto free_page;
+
+ db->dataptr = dma_addr;
+
+ return page;
+
+free_page:
+ __free_pages(page, rx->page_order);
+ return NULL;
+}
+
+static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ int i, j;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ dma_unmap_single(lan966x->dev,
+ (dma_addr_t)db->dataptr,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rx->page[i][j], rx->page_order);
+ }
+ }
+}
+
+static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
+ struct lan966x_rx_dcb *dcb,
+ u64 nextptr)
+{
+ struct lan966x_db *db;
+ int i;
+
+ for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
+ db = &dcb->db[i];
+ db->status = FDMA_DCB_STATUS_INTR;
+ }
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
+
+ rx->last_entry->nextptr = nextptr;
+ rx->last_entry = dcb;
+}
+
+static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ struct page *page;
+ int i, j;
+ int size;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+
+ rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
+ if (!rx->dcbs)
+ return -ENOMEM;
+
+ rx->last_entry = rx->dcbs;
+ rx->db_index = 0;
+ rx->dcb_index = 0;
+
+ /* Now for each dcb allocate the dbs */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+ dcb->info = 0;
+
+ /* For each db allocate a page and map it to the DB dataptr. */
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (!page)
+ return -ENOMEM;
+
+ db->status = 0;
+ rx->page[i][j] = page;
+ }
+
+ lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+ }
+
+ return 0;
+}
+
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 size;
+
+ /* Free the DCBs; use the same lan966x_rx_dcb size that was allocated
+ * in lan966x_fdma_rx_alloc()
+ */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 mask;
+
+ /* When activating a channel, the address of the first DCB must be
+ * written first, and only then may the channel be activated
+ */
+ lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP(rx->channel_id));
+ lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP1(rx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(rx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+ FDMA_PORT_CTRL_XTR_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(rx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(rx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+ struct lan966x_tx_dcb *dcb)
+{
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+ struct lan966x_db *db;
+ int size;
+ int i, j;
+
+ tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ GFP_KERNEL);
+ if (!tx->dcbs_buf)
+ return -ENOMEM;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+ if (!tx->dcbs)
+ goto out;
+
+ /* Now for each dcb allocate the db */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &tx->dcbs[i];
+
+ for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ db->dataptr = 0;
+ db->status = 0;
+ }
+
+ lan966x_fdma_tx_add_dcb(tx, dcb);
+ }
+
+ return 0;
+
+out:
+ kfree(tx->dcbs_buf);
+ return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ int size;
+
+ kfree(tx->dcbs_buf);
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 mask;
+
+ /* When activating a channel, the address of the first DCB must be
+ * written first, and only then may the channel be activated
+ */
+ lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP(tx->channel_id));
+ lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP1(tx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(tx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+ FDMA_PORT_CTRL_INJ_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(tx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(tx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+
+ tx->activated = false;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+
+ /* Write the registers to reload the channel */
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ if (netif_queue_stopped(port->dev))
+ netif_wake_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ netif_stop_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ struct lan966x_db *db;
+ unsigned long flags;
+ bool clear = false;
+ int i;
+
+ spin_lock_irqsave(&lan966x->tx_lock, flags);
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+
+ if (!dcb_buf->used)
+ continue;
+
+ db = &tx->dcbs[i].db[0];
+ if (!(db->status & FDMA_DCB_STATUS_DONE))
+ continue;
+
+ dcb_buf->dev->stats.tx_packets++;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+
+ dcb_buf->used = false;
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->skb->len,
+ DMA_TO_DEVICE);
+ if (!dcb_buf->ptp)
+ dev_kfree_skb_any(dcb_buf->skb);
+
+ clear = true;
+ }
+
+ if (clear)
+ lan966x_fdma_wakeup_netdev(lan966x);
+
+ spin_unlock_irqrestore(&lan966x->tx_lock, flags);
+}
+
+static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
+{
+ struct lan966x_db *db;
+
+ /* Check if there is any data */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
+ return false;
+
+ return true;
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u64 src_port, timestamp;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+
+ /* Get the received frame and unmap it */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+ skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ if (unlikely(!skb))
+ goto unmap_page;
+
+ dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+ skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
+
+ lan966x_ifh_get_src_port(skb->data, &src_port);
+ lan966x_ifh_get_timestamp(skb->data, &timestamp);
+
+ WARN_ON(src_port >= lan966x->num_phys_ports);
+
+ skb->dev = lan966x->ports[src_port]->dev;
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+unmap_page:
+ dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+ __free_pages(page, rx->page_order);
+
+ return NULL;
+}
+
+static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
+ struct lan966x_rx *rx = &lan966x->rx;
+ int dcb_reload = rx->dcb_index;
+ struct lan966x_rx_dcb *old_dcb;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+ int counter = 0;
+ u64 nextptr;
+
+ lan966x_fdma_tx_clear_buf(lan966x, weight);
+
+ /* Get all received skbs */
+ while (counter < weight) {
+ if (!lan966x_fdma_rx_more_frames(rx))
+ break;
+
+ skb = lan966x_fdma_rx_get_frame(rx);
+
+ rx->page[rx->dcb_index][rx->db_index] = NULL;
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+
+ if (!skb)
+ break;
+
+ napi_gro_receive(&lan966x->napi, skb);
+ counter++;
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != rx->dcb_index) {
+ db = &rx->dcbs[dcb_reload].db[rx->db_index];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (unlikely(!page))
+ break;
+ rx->page[dcb_reload][rx->db_index] = page;
+
+ old_dcb = &rx->dcbs[dcb_reload];
+ dcb_reload++;
+ dcb_reload &= FDMA_DCB_MAX - 1;
+
+ nextptr = rx->dma + ((unsigned long)old_dcb -
+ (unsigned long)rx->dcbs);
+ lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
+ lan966x_fdma_rx_reload(rx);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
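
The RX ring above relies on FDMA_DCB_MAX being a power of two, so the dcb
index can wrap with a mask instead of a modulo. A minimal userspace sketch of
that index arithmetic (the constant matches the driver's FDMA_DCB_MAX;
everything else is illustrative):

#include <stdio.h>

#define FDMA_DCB_MAX 512 /* must be a power of two for the mask trick */

int main(void)
{
	unsigned int idx = FDMA_DCB_MAX - 2;
	int i;

	/* Advance the index a few times; it wraps back to 0 at the end */
	for (i = 0; i < 4; i++) {
		printf("idx=%u\n", idx);
		idx++;
		idx &= FDMA_DCB_MAX - 1; /* same wrap as rx->dcb_index above */
	}
	return 0;
}
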
+
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ u32 db, err, err_type;
+
+ db = lan_rd(lan966x, FDMA_INTR_DB);
+ err = lan_rd(lan966x, FDMA_INTR_ERR);
+
+ if (db) {
+ lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+ lan_wr(db, lan966x, FDMA_INTR_DB);
+
+ napi_schedule(&lan966x->napi);
+ }
+
+ if (err) {
+ err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+ WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+ lan_wr(err, lan966x, FDMA_INTR_ERR);
+ lan_wr(err_type, lan966x, FDMA_ERRORS);
+ }
+
+ return IRQ_HANDLED;
+}
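
The handler above follows the usual NAPI interrupt-mitigation handshake: mask
the doorbell interrupt, ack the pending bits, schedule the poll loop, and let
the poll loop re-enable the interrupt once the budget is no longer exhausted.
A hedged userspace model of that handshake (the registers are simulated, and
write-1-to-clear acking is modeled as a plain clear):

#include <stdint.h>
#include <stdio.h>

static uint32_t intr_db;            /* pending doorbell bits */
static uint32_t intr_db_ena = 0xff; /* doorbell interrupt enable mask */

static void irq_handler(void)
{
	uint32_t db = intr_db;

	if (db) {
		intr_db_ena = 0;   /* mask further doorbell interrupts */
		intr_db &= ~db;    /* ack; write-1-to-clear in real HW */
		printf("napi scheduled\n");
	}
}

static void napi_poll(int work_done, int budget)
{
	if (work_done < budget)
		intr_db_ena = 0xff; /* re-enable, as in the poll loop above */
}

int main(void)
{
	intr_db = 1u << 6; /* pretend the extraction channel raised a doorbell */
	irq_handler();
	napi_poll(3, 64);
	printf("ena=0x%02x\n", intr_db_ena);
	return 0;
}
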
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ int i;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+ if (!dcb_buf->used && i != tx->last_in_use)
+ return i;
+ }
+
+ return -1;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx_dcb *next_dcb, *dcb;
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_db *next_db;
+ int needed_headroom;
+ int needed_tailroom;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ int err;
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* skb processing */
+ needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+ skb_push(skb, IFH_LEN * sizeof(u32));
+ memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
+ skb_put(skb, 4);
+
+ dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+
+ /* Set up the next dcb */
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len);
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->skb = skb;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = dev;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ next_dcb_buf->ptp = true;
+
+ if (likely(lan966x->tx.activated)) {
+ /* Connect the current dcb to the next dcb */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+ /* First frame on this channel, so just activate it */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+ /* Move to the next dcb because this one is now the last in use */
+ tx->last_in_use = next_to_use;
+
+ return NETDEV_TX_OK;
+
+release:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ dev_kfree_skb_any(skb);
+ return err;
+}
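
Before mapping, the xmit path lays out the DMA buffer as the IFH words, then
the padded Ethernet frame, then four bytes of room for the FCS. A hedged
sketch of that layout, assuming IFH_LEN is 7 32-bit words as in the lan966x
IFH definition (the helper and its names are illustrative, not driver code):

#include <stdint.h>
#include <string.h>

#define IFH_LEN     7 /* 32-bit words; assumed from the lan966x IFH layout */
#define ETH_FCS_LEN 4

/* Mimics the buffer lan966x_fdma_xmit() builds: IFH, frame, FCS room */
static size_t build_tx_buffer(uint8_t *buf, const uint32_t *ifh,
			      const uint8_t *frame, size_t frame_len)
{
	size_t off = 0;

	memcpy(buf + off, ifh, IFH_LEN * sizeof(uint32_t)); /* prepended IFH */
	off += IFH_LEN * sizeof(uint32_t);
	memcpy(buf + off, frame, frame_len); /* padded Ethernet frame */
	off += frame_len;
	memset(buf + off, 0, ETH_FCS_LEN); /* room for the FCS */
	return off + ETH_FCS_LEN; /* total length handed to dma_map_single() */
}

int main(void)
{
	uint8_t buf[1600];
	uint32_t ifh[IFH_LEN] = { 0 };
	uint8_t frame[64] = { 0 };

	return build_tx_buffer(buf, ifh, frame, sizeof(frame)) ? 0 : 1;
}
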
+
+static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
+{
+ int max_mtu = 0;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ int mtu;
+
+ if (!lan966x->ports[i])
+ continue;
+
+ mtu = lan966x->ports[i]->dev->mtu;
+ if (mtu > max_mtu)
+ max_mtu = mtu;
+ }
+
+ return max_mtu;
+}
+
+static int lan966x_qsys_sw_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
+}
+
+static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
+{
+ void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
+ dma_addr_t rx_dma, tx_dma;
+ u32 size;
+ int err;
+
+ /* Store these so they can be freed later */
+ rx_dma = lan966x->rx.dma;
+ tx_dma = lan966x->tx.dma;
+ rx_dcbs = lan966x->rx.dcbs;
+ tx_dcbs = lan966x->tx.dcbs;
+ tx_dcbs_buf = lan966x->tx.dcbs_buf;
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+ lan966x_fdma_stop_netdev(lan966x);
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ goto restore;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+
+ lan966x_fdma_tx_disable(&lan966x->tx);
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err)
+ goto restore_tx;
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);
+
+ kfree(tx_dcbs_buf);
+
+ lan966x_fdma_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+restore:
+ lan966x->rx.dma = rx_dma;
+ lan966x->tx.dma = tx_dma;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+restore_tx:
+ lan966x->rx.dcbs = rx_dcbs;
+ lan966x->tx.dcbs = tx_dcbs;
+ lan966x->tx.dcbs_buf = tx_dcbs_buf;
+
+ return err;
+}
+
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+ int err;
+ u32 val;
+
+ max_mtu = lan966x_fdma_get_max_mtu(lan966x);
+ max_mtu += IFH_LEN * sizeof(u32);
+
+ if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
+ lan966x->rx.page_order)
+ return 0;
+
+ /* Disable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Flush the CPU queues */
+ readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
+ val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ /* Sleep briefly in case there are still frames in flight between the
+ * queues and the CPU port
+ */
+ usleep_range(1000, 2000);
+
+ err = lan966x_fdma_reload(lan966x, max_mtu);
+
+ /* Re-enable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ return err;
+}
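
The page_order comparison above can be checked with a few lines of
arithmetic: round_up(mtu, PAGE_SIZE) / PAGE_SIZE - 1 yields (pages needed - 1),
and since 2^(n-1) >= n for n >= 1, using that value as an allocation order
always gives a buffer at least as large as needed. A small userspace sketch,
assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long page_order(unsigned long mtu)
{
	/* Same math as the driver: round up to whole pages, minus one */
	return (mtu + PAGE_SIZE - 1) / PAGE_SIZE - 1;
}

int main(void)
{
	unsigned long mtus[] = { 1500 + 28, 9000 + 28 }; /* MTU + IFH bytes */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("mtu=%lu order=%lu buffer=%lu\n", mtus[i],
		       page_order(mtus[i]),
		       PAGE_SIZE << page_order(mtus[i]));
	return 0;
}
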
+
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev)
+ return;
+
+ lan966x->fdma_ndev = dev;
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&lan966x->napi);
+}
+
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev == dev) {
+ netif_napi_del(&lan966x->napi);
+ lan966x->fdma_ndev = NULL;
+ }
+}
+
+int lan966x_fdma_init(struct lan966x *lan966x)
+{
+ int err;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ lan966x->rx.lan966x = lan966x;
+ lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->tx.lan966x = lan966x;
+ lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.last_in_use = -1;
+
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ return err;
+
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err) {
+ lan966x_fdma_rx_free(&lan966x->rx);
+ return err;
+ }
+
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return 0;
+}
+
+void lan966x_fdma_deinit(struct lan966x *lan966x)
+{
+ if (!lan966x->fdma)
+ return;
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_tx_disable(&lan966x->tx);
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x_fdma_rx_free(&lan966x->rx);
+ lan966x_fdma_tx_free(&lan966x->tx);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index ce5970bdcc6a..2679111ef669 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+ continue;
/* If the entry in SW is found, then there is nothing
* to do
@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+ continue;
mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
if (!mac_entry)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1f8c67f0261b..6579c7062aa5 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -24,9 +24,6 @@
#define XTR_NOT_READY 0x07000080U
#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
-#define READL_SLEEP_US 10
-#define READL_TIMEOUT_US 100000000
-
#define IO_RANGES 2
static const struct of_device_id lan966x_match[] = {
@@ -43,6 +40,7 @@ struct lan966x_main_io_resource {
static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
{ TARGET_ORG, 0, 1 }, /* 0xe2000000 */
{ TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
{ TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
@@ -343,7 +341,10 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock(&lan966x->tx_lock);
- err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ if (port->lan966x->fdma)
+ err = lan966x_fdma_xmit(skb, ifh, dev);
+ else
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
spin_unlock(&lan966x->tx_lock);
return err;
@@ -353,12 +354,24 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
+ int old_mtu = dev->mtu;
+ int err;
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
dev->mtu = new_mtu;
- return 0;
+ if (!lan966x->fdma)
+ return 0;
+
+ err = lan966x_fdma_change_mtu(lan966x);
+ if (err) {
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = old_mtu;
+ }
+
+ return err;
}
static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
@@ -432,8 +445,7 @@ bool lan966x_netdevice_check(const struct net_device *dev)
return dev->netdev_ops == &lan966x_port_netdev_ops;
}
-static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
- struct sk_buff *skb)
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
{
u32 val;
@@ -446,6 +458,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
return true;
+ if (eth_type_vlan(skb->protocol)) {
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
if (skb->protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_IGMP)
return false;
@@ -514,7 +532,7 @@ static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
}
}
-static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
@@ -526,7 +544,7 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len)
IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
-static void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
{
packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
@@ -646,6 +664,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (port->dev)
unregister_netdev(port->dev);
+ if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
+ lan966x_fdma_netdev_deinit(lan966x, port->dev);
+
if (port->phylink) {
rtnl_lock();
lan966x_port_stop(port->dev);
@@ -665,6 +686,15 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
+
+ if (lan966x->fdma)
+ devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
+
+ if (lan966x->ptp_irq)
+ devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
+
+ if (lan966x->ptp_ext_irq)
+ devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
@@ -793,12 +823,12 @@ static void lan966x_init(struct lan966x *lan966x)
/* Do byte-swap and expect status after last data word
* Extraction: Mode: manual extraction) | Byte_swap
*/
- lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_XTR_GRP_CFG(0));
/* Injection: Mode: manual injection | Byte_swap */
- lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_INJ_GRP_CFG(0));
@@ -1020,6 +1050,31 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x->ptp = 1;
}
+ lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
+ if (lan966x->fdma_irq > 0) {
+ err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
+ lan966x_fdma_irq_handler, 0,
+ "fdma irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");
+
+ lan966x->fdma = true;
+ }
+
+ if (lan966x->ptp) {
+ lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
+ if (lan966x->ptp_ext_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev,
+ lan966x->ptp_ext_irq, NULL,
+ lan966x_ptp_ext_irq_handler,
+ IRQF_ONESHOT,
+ "ptp-ext irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Unable to use ptp-ext irq");
+ }
+ }
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1058,8 +1113,15 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
goto cleanup_fdb;
+ err = lan966x_fdma_init(lan966x);
+ if (err)
+ goto cleanup_ptp;
+
return 0;
+cleanup_ptp:
+ lan966x_ptp_deinit(lan966x);
+
cleanup_fdb:
lan966x_fdb_deinit(lan966x);
@@ -1079,6 +1141,7 @@ static int lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
+ lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
cancel_delayed_work_sync(&lan966x->stats_work);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index ae282da1da74..3b86ddddc756 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -17,6 +17,9 @@
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
#define LAN966X_BUFFER_CELL_SZ 64
#define LAN966X_BUFFER_MEMORY (160 * 1024)
#define LAN966X_BUFFER_MIN_SZ 60
@@ -53,11 +56,28 @@
#define LAN966X_PHC_COUNT 3
#define LAN966X_PHC_PORT 0
+#define LAN966X_PHC_PINS_NUM 7
#define IFH_REW_OP_NOOP 0x0
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define FDMA_RX_DCB_MAX_DBS 1
+#define FDMA_TX_DCB_MAX_DBS 1
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
+#define FDMA_DCB_MAX 512
+
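A hedged userspace sketch that composes and decodes a TX status word with
plain shifts standing in for the kernel's BIT()/GENMASK() helpers; the field
positions match the defines above (in the driver the status field is a 64-bit
word with these bits in its low half):

#include <stdint.h>
#include <stdio.h>

#define STATUS_BLOCKL(x) ((x) & 0xffffu)             /* bits 15:0, length */
#define STATUS_SOF       (1u << 16)
#define STATUS_EOF       (1u << 17)
#define STATUS_INTR      (1u << 18)
#define STATUS_DONE      (1u << 19)
#define STATUS_BLOCKO(x) (((x) << 20) & 0xfff00000u) /* bits 31:20, offset */

int main(void)
{
	/* Compose a status word the way lan966x_fdma_xmit() does */
	uint32_t status = STATUS_SOF | STATUS_EOF | STATUS_INTR |
			  STATUS_BLOCKO(0) | STATUS_BLOCKL(64);

	printf("status=0x%08x done=%u len=%u\n", status,
	       !!(status & STATUS_DONE), STATUS_BLOCKL(status));
	return 0;
}
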
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -73,6 +93,83 @@ enum macaccess_entry_type {
struct lan966x_port;
+struct lan966x_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct lan966x_rx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
+};
+
+struct lan966x_tx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
+};
+
+struct lan966x_rx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the array of hardware dcbs. */
+ struct lan966x_rx_dcb *dcbs;
+
+ /* Pointer to the last entry in the dcbs array. */
+ struct lan966x_rx_dcb *last_entry;
+
+ /* For each DB, there is a page */
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+
+ /* Represents the db_index; it can have a value between 0 and
+ * FDMA_RX_DCB_MAX_DBS. Once it reaches FDMA_RX_DCB_MAX_DBS, the DCB can
+ * be reused.
+ */
+ int db_index;
+
+ /* Represents the index into the dcbs array; it has a value between 0
+ * and FDMA_DCB_MAX
+ */
+ int dcb_index;
+
+ /* Represents the dma address of the dcbs array */
+ dma_addr_t dma;
+
+ /* Represents the page order that is used to allocate the pages for the
+ * RX buffers. This value is calculated based on the maximum MTU of the ports.
+ */
+ u8 page_order;
+
+ u8 channel_id;
+};
+
+struct lan966x_tx_dcb_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
+};
+
+struct lan966x_tx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the dcb list */
+ struct lan966x_tx_dcb *dcbs;
+ u16 last_in_use;
+
+ /* Represents the DMA address of the first dcb entry. */
+ dma_addr_t dma;
+
+ /* Software bookkeeping for the dcbs given to the HW */
+ struct lan966x_tx_dcb_buf *dcbs_buf;
+
+ u8 channel_id;
+
+ bool activated;
+};
+
struct lan966x_stat_layout {
u32 offset;
char name[ETH_GSTRING_LEN];
@@ -81,6 +178,7 @@ struct lan966x_stat_layout {
struct lan966x_phc {
struct ptp_clock *clock;
struct ptp_clock_info info;
+ struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM];
struct hwtstamp_config hwtstamp_config;
struct lan966x *lan966x;
u8 index;
@@ -134,6 +232,8 @@ struct lan966x {
int xtr_irq;
int ana_irq;
int ptp_irq;
+ int fdma_irq;
+ int ptp_ext_irq;
/* worqueue for fdb */
struct workqueue_struct *fdb_work;
@@ -150,6 +250,13 @@ struct lan966x {
spinlock_t ptp_ts_id_lock; /* lock for ts_id */
struct mutex ptp_lock; /* lock for ptp interface state */
u16 ptp_skbs;
+
+ /* fdma */
+ bool fdma;
+ struct net_device *fdma_ndev;
+ struct lan966x_rx rx;
+ struct lan966x_tx tx;
+ struct napi_struct napi;
};
struct lan966x_port_config {
@@ -195,6 +302,11 @@ bool lan966x_netdevice_check(const struct net_device *dev);
void lan966x_register_notifier_blocks(void);
void lan966x_unregister_notifier_blocks(void);
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
int lan966x_stats_init(struct lan966x *lan966x);
@@ -283,6 +395,15 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
struct sk_buff *skb);
irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_change_mtu(struct lan966x *lan966x);
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
+int lan966x_fdma_init(struct lan966x *lan966x);
+void lan966x_fdma_deinit(struct lan966x *lan966x);
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 237555845a52..f141644e4372 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -393,6 +393,9 @@ void lan966x_port_init(struct lan966x_port *port)
lan966x_port_config_down(port);
+ if (lan966x->fdma)
+ lan966x_fdma_netdev_init(lan966x, port->dev);
+
if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
return;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index ae782778d6dd..3a621c5165bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -16,7 +16,7 @@
*/
#define LAN966X_1PPB_FORMAT 3480517749LL
-#define TOD_ACC_PIN 0x5
+#define TOD_ACC_PIN 0x7
enum {
PTP_PIN_ACTION_IDLE = 0,
@@ -29,10 +29,10 @@ enum {
static u64 lan966x_ptp_get_nominal_value(void)
{
- u64 res = 0x304d2df1;
-
- res <<= 32;
- return res;
+ /* This is the default value by which the time of day is increased for
+ * each system clock cycle. It is in 5.59 fixed-point nanosecond format.
+ */
+ return 0x304d4873ecade305;
}
int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
@@ -321,6 +321,63 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
return IRQ_HANDLED;
}
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ struct lan966x_phc *phc;
+ unsigned long flags;
+ u64 time = 0;
+ time64_t s;
+ int pin, i;
+ s64 ns;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR)))
+ return IRQ_NONE;
+
+ /* Go through all domains and see which pin generated the interrupt */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ struct ptp_clock_event ptp_event = {0};
+
+ phc = &lan966x->phc[i];
+ pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0);
+ if (pin == -1)
+ continue;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin)))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Ack the interrupt so a new one can be raised.
+ * Writing 1 clears the bit
+ */
+ lan_wr(BIT(pin), lan966x, PTP_PIN_INTR);
+
+ /* Get current time */
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(pin));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+ time = ktime_set(s, ns);
+
+ ptp_event.index = pin;
+ ptp_event.timestamp = time;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(phc->clock, &ptp_event);
+ }
+
+ return IRQ_HANDLED;
+}
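
The nanosecond correction above handles the case where the TOD nanosecond
register reports a value in the 0x3FFFFFF0..0x3FFFFFFF range, which (a
reading inferred from the constants, not stated in the patch) encodes the
last 16 ns before the second rolls over. A worked userspace example of the
rebasing:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the wrap correction in the handler above: the seconds count is
 * decremented and the nanoseconds rebased near the end of the second.
 */
static void fixup(int64_t *s, int64_t *ns)
{
	if ((*ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
		(*s)--;
		*ns &= 0xf;
		*ns += 999999984;
	}
}

int main(void)
{
	int64_t s = 100, ns = 0x3FFFFFF5;

	fixup(&s, &ns);
	/* Prints "s=99 ns=999999989" */
	printf("s=%lld ns=%lld\n", (long long)s, (long long)ns);
	return 0;
}
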
+
static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
@@ -493,6 +550,207 @@ static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
+static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct ptp_clock_info *info;
+ int i;
+
+ /* Currently only one channel is supported */
+ if (chan != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ case PTP_PF_EXTTS:
+ break;
+ default:
+ return -1;
+ }
+
+ /* The PTP pins are shared by all the PHCs, so it is required to check
+ * whether the pin is connected to another PHC. The pin is connected to
+ * another PHC if it already has a function assigned on that PHC.
+ */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ info = &lan966x->phc[i].info;
+
+ /* Skip the check against ourselves */
+ if (ptp == info)
+ continue;
+
+ if (info->pin_config[pin].func == PTP_PF_PEROUT ||
+ info->pin_config[pin].func == PTP_PF_EXTTS)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct timespec64 ts_phase, ts_period;
+ unsigned long flags;
+ s64 wf_high, wf_low;
+ bool pps = false;
+ int pin;
+
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ if (!on) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ if (rq->perout.period.sec == 1 &&
+ rq->perout.period.nsec == 0)
+ pps = true;
+
+ if (rq->perout.flags & PTP_PEROUT_PHASE) {
+ ts_phase.tv_sec = rq->perout.phase.sec;
+ ts_phase.tv_nsec = rq->perout.phase.nsec;
+ } else {
+ ts_phase.tv_sec = rq->perout.start.sec;
+ ts_phase.tv_nsec = rq->perout.start.nsec;
+ }
+
+ if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+ dev_warn(lan966x->dev,
+ "Absolute time not supported!\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+
+ wf_high = timespec64_to_ns(&ts_on);
+ } else {
+ wf_high = 5000;
+ }
+
+ if (pps) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(3),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ wf_low = timespec64_to_ns(&ts_period);
+ wf_low -= wf_high;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
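
The waveform registers are programmed as a high time and a low time whose sum
is the period: the high time comes from the duty cycle (or a 5 us default),
and the low time is whatever remains. A trivial worked example with
illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t period_ns = 10 * 1000 * 1000; /* 10 ms period */
	int64_t wf_high = 2 * 1000 * 1000;    /* 2 ms duty cycle */
	int64_t wf_low = period_ns - wf_high; /* remainder: 8 ms */

	printf("high=%lld ns low=%lld ns\n",
	       (long long)wf_high, (long long)wf_low);
	return 0;
}
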
+
+static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ int pin;
+ u32 val;
+
+ if (lan966x->ptp_ext_irq <= 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SELECT_SET(pin),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_SYNC |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SELECT,
+ lan966x, PTP_PIN_CFG(pin));
+
+ val = lan_rd(lan966x, PTP_PIN_INTR_ENA);
+ if (on)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+ lan_wr(val, lan966x, PTP_PIN_INTR_ENA);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan966x_ptp_perout(ptp, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan966x_ptp_extts(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct ptp_clock_info lan966x_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "lan966x ptp",
@@ -501,6 +759,11 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.settime64 = lan966x_ptp_settime64,
.adjtime = lan966x_ptp_adjtime,
.adjfine = lan966x_ptp_adjfine,
+ .verify = lan966x_ptp_verify,
+ .enable = lan966x_ptp_enable,
+ .n_per_out = LAN966X_PHC_PINS_NUM,
+ .n_ext_ts = LAN966X_PHC_PINS_NUM,
+ .n_pins = LAN966X_PHC_PINS_NUM,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
@@ -508,8 +771,19 @@ static int lan966x_ptp_phc_init(struct lan966x *lan966x,
struct ptp_clock_info *clock_info)
{
struct lan966x_phc *phc = &lan966x->phc[index];
+ struct ptp_pin_desc *p;
+ int i;
+
+ for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) {
+ p = &phc->pins[i];
+
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
phc->info = *clock_info;
+ phc->info.pin_config = &phc->pins[0];
phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
if (IS_ERR(phc->clock))
return PTR_ERR(phc->clock);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 0c0b3e173d53..8265ad89f0bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -17,6 +17,7 @@ enum lan966x_target {
TARGET_CHIP_TOP = 5,
TARGET_CPU = 6,
TARGET_DEV = 13,
+ TARGET_FDMA = 21,
TARGET_GCB = 27,
TARGET_ORG = 36,
TARGET_PTP = 41,
@@ -578,6 +579,129 @@ enum lan966x_target {
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_CH_DB_DISCARD */
+#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4)
+
+#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\
+ FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\
+ FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVE */
+#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(3)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0)
+#define PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_INTR_PTP, x)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0)
+#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x)
+#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x)
+
/* PTP:PTP_CFG:PTP_DOM_CFG */
#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
@@ -611,6 +735,12 @@ enum lan966x_target {
#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21)
+#define PTP_PIN_CFG_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x)
+#define PTP_PIN_CFG_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x)
+
#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
#define PTP_PIN_CFG_PIN_DOM_SET(x)\
FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
@@ -638,6 +768,22 @@ enum lan966x_target {
#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
+/* PTP:PTP_PINS:WF_HIGH_PERIOD */
+#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 24, 0, 1, 4)
+
+#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0)
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0))
+
+/* PTP:PTP_PINS:WF_LOW_PERIOD */
+#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 28, 0, 1, 4)
+
+#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0)
+#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0))
+
/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index e3555c94294d..df2bee678559 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
switchdev_bridge_port_unoffload(port->dev, port,
- &lan966x_switchdev_nb,
- &lan966x_switchdev_blocking_nb);
+ NULL, NULL);
return NOTIFY_DONE;
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index e443bd8b2d09..ee9c607d62a7 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -2859,6 +2859,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
}
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index c8701ac955a8..1e74bdb215ec 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -672,12 +672,10 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix / 2;
u32 type;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@ -813,11 +811,9 @@ static void es0_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 21d2645885ce..fe5e77330f5f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -579,7 +579,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
int status;
unsigned i;
- if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
+ if (request_firmware(&fw, mgp->fw_name, dev) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
mgp->fw_name);
status = -EINVAL;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 82a22711ce45..50bca486a244 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -989,8 +989,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) readl(ee_addr)
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 84d66d138c3d..78368e71ce83 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -289,7 +289,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
req_sz = sizeof(struct nfp_crypto_req_add_v6);
ipv6 = true;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 3fdaaf8ed2ba..4627715a5e32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -95,15 +95,17 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
+ u16 update = NFP_NET_VF_CFG_MB_UPD_VLAN;
+ bool is_proto_sup = true;
unsigned int vf_offset;
- u16 vlan_tci;
+ u32 vlan_tag;
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
if (err)
return err;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (!eth_type_vlan(vlan_proto))
return -EOPNOTSUPP;
if (vlan > 4095 || qos > 7) {
@@ -112,14 +114,32 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
return -EINVAL;
}
+ /* Check whether the fw supports configuring the VLAN protocol */
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto");
+ if (err)
+ is_proto_sup = false;
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ if (!is_proto_sup)
+ return -EOPNOTSUPP;
+ update |= NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO;
+ }
+
/* Write VLAN tag to VF entry in VF config symbol */
- vlan_tci = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
+ vlan_tag = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos);
+
+ /* A vlan_tag of 0 means that the configuration should be cleared, in
+ * which case setting the TPID has no meaning when configuring the
+ * firmware.
+ */
+ if (vlan_tag && is_proto_sup)
+ vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto));
+
vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ;
- writew(vlan_tci, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
- return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_VLAN,
- "vlan");
+ return nfp_net_sriov_update(app, vf, update, "vlan");
}
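
The widened register now carries the TPID in its upper half alongside the VID
and QOS fields. A hedged userspace sketch of the packing, using the
NFP_NET_VF_CFG_VLAN_* masks from the header change below with plain shifts in
place of FIELD_PREP():

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK   0x0fffu /* bits 11:0  */
#define VLAN_QOS_SHIFT  13      /* QOS mask is 0xe000, bits 15:13 */
#define VLAN_PROT_SHIFT 16      /* PROT mask is 0xffff0000 */

int main(void)
{
	uint16_t vid = 100, qos = 3, tpid = 0x88a8; /* 802.1ad, illustrative */
	uint32_t vlan_tag = (vid & VLAN_VID_MASK) |
			    ((uint32_t)(qos & 0x7) << VLAN_QOS_SHIFT) |
			    ((uint32_t)tpid << VLAN_PROT_SHIFT);

	printf("vlan_tag=0x%08x\n", vlan_tag); /* 0x88a86064 */
	return 0;
}
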
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
@@ -209,7 +229,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
unsigned int vf_offset;
- u16 vlan_tci;
+ u32 vlan_tag;
u32 mac_hi;
u16 mac_lo;
u8 flags;
@@ -225,7 +245,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO);
flags = readb(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_CTRL);
- vlan_tci = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ vlan_tag = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vf;
@@ -233,9 +253,10 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
put_unaligned_be32(mac_hi, &ivi->mac[0]);
put_unaligned_be16(mac_lo, &ivi->mac[4]);
- ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tci);
- ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci);
-
+ ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
+ ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
+ if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"))
+ ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index 786be58a907e..7b72cc083476 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -19,6 +19,7 @@
#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
@@ -26,6 +27,7 @@
#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -43,6 +45,7 @@
#define NFP_NET_VF_CFG_LS_MODE_ENABLE 1
#define NFP_NET_VF_CFG_LS_MODE_DISABLE 2
#define NFP_NET_VF_CFG_VLAN 0x8
+#define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000
#define NFP_NET_VF_CFG_VLAN_QOS 0xe000
#define NFP_NET_VF_CFG_VLAN_VID 0x0fff
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 3d379e937184..ddb34bfb9bef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -13,22 +13,36 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/sizes.h>
+#include <linux/stringify.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
#endif
-#define nfp_err(cpp, fmt, args...) \
+#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x
+
+#define __nfp_err(cpp, fmt, args...) \
dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_warn(cpp, fmt, args...) \
+#define __nfp_warn(cpp, fmt, args...) \
dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_info(cpp, fmt, args...) \
+#define __nfp_info(cpp, fmt, args...) \
dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_dbg(cpp, fmt, args...) \
+#define __nfp_dbg(cpp, fmt, args...) \
dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define __nfp_printk(level, cpp, fmt, args...) \
+ dev_printk(level, nfp_cpp_device(cpp)->parent, \
+ NFP_SUBSYS ": " fmt, ## args)
+
+#define nfp_err(cpp, fmt, args...) \
+ __nfp_err(cpp, string_format(fmt), ## args)
+#define nfp_warn(cpp, fmt, args...) \
+ __nfp_warn(cpp, string_format(fmt), ## args)
+#define nfp_info(cpp, fmt, args...) \
+ __nfp_info(cpp, string_format(fmt), ## args)
+#define nfp_dbg(cpp, fmt, args...) \
+ __nfp_dbg(cpp, string_format(fmt), ## args)
#define nfp_printk(level, cpp, fmt, args...) \
- dev_printk(level, nfp_cpp_device(cpp)->parent, \
- NFP_SUBSYS ": " fmt, ## args)
+ __nfp_printk(level, cpp, string_format(fmt), ## args)
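
With this change every nfp log line is prefixed with the file and line of the
call site, using the standard two-level stringification trick so __LINE__ is
expanded where the logging macro is used. A minimal userspace reproduction:

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)
#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x

#define log_info(fmt, ...) printf(string_format(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints something like "demo.c:12: probed 4 ports" */
	log_info("probed %d ports\n", 4);
	return 0;
}
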
#define PCI_64BIT_BAR_COUNT 3
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 0d9c2fe0245d..3d2098f21bb7 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -30,8 +30,7 @@ qed-$(CONFIG_QED_OOO) += qed_ooo.o
qed-$(CONFIG_QED_NVMETCP) += \
qed_nvmetcp.o \
- qed_nvmetcp_fw_funcs.o \
- qed_nvmetcp_ip_services.o
+ qed_nvmetcp_fw_funcs.o
qed-$(CONFIG_QED_RDMA) += \
qed_iwarp.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
deleted file mode 100644
index 7e286cddbedb..000000000000
--- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-/*
- * Copyright 2021 Marvell. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/param.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/errno.h>
-
-#include <net/tcp.h>
-
-#include <linux/qed/qed_nvmetcp_ip_services_if.h>
-
-#define QED_IP_RESOL_TIMEOUT 4
-
-int qed_route_ipv4(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- __be32 *loc_ip, *rem_ip;
- struct rtable *rt;
- int rc = -ENXIO;
- int retry;
-
- loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr;
- rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr;
- *ndev = NULL;
- rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/);
- if (IS_ERR(rt)) {
- pr_err("lookup route failed\n");
- rc = PTR_ERR(rt);
- goto return_err;
- }
-
- neigh = dst_neigh_lookup(&rt->dst, rem_ip);
- if (!neigh) {
- rc = -ENOMEM;
- ip_rt_put(rt);
- goto return_err;
- }
-
- *ndev = rt->dst.dev;
- ip_rt_put(rt);
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- /* copy resolved MAC address */
- neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
- if (!(*loc_ip)) {
- *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE);
- local_addr->ss_family = AF_INET;
- }
-
-return_err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv4);
-
-int qed_route_ipv6(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- struct dst_entry *dst;
- struct flowi6 fl6;
- int rc = -ENXIO;
- int retry;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr;
- fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst || dst->error) {
- if (dst) {
- dst_release(dst);
- pr_err("lookup route failed %d\n", dst->error);
- }
-
- goto out;
- }
-
- neigh = dst_neigh_lookup(dst, &fl6.daddr);
- if (neigh) {
- *ndev = ip6_dst_idev(dst)->dev;
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- neigh_ha_snapshot((u8 *)hardware_address->sa_data,
- neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
-
- if (ipv6_addr_any(&fl6.saddr)) {
- if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev,
- &fl6.daddr, 0, &fl6.saddr)) {
- pr_err("Unable to find source IP address\n");
- goto out;
- }
-
- local_addr->ss_family = AF_INET6;
- ((struct sockaddr_in6 *)local_addr)->sin6_addr =
- fl6.saddr;
- }
- }
-
- dst_release(dst);
-
-out:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv6);
-
-void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id)
-{
- if (is_vlan_dev(*ndev)) {
- *vlan_id = vlan_dev_vlan_id(*ndev);
- *ndev = vlan_dev_real_dev(*ndev);
- }
-}
-EXPORT_SYMBOL(qed_vlan_get_ndev);
-
-struct pci_dev *qed_validate_ndev(struct net_device *ndev)
-{
- struct net_device *upper;
- struct pci_dev *pdev;
-
- for_each_pci_dev(pdev) {
- if (pdev->driver &&
- !strcmp(pdev->driver->name, "qede")) {
- upper = pci_get_drvdata(pdev);
- if (upper->ifindex == ndev->ifindex)
- return pdev;
- }
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(qed_validate_ndev);
-
-__be16 qed_get_in_port(struct sockaddr_storage *sa)
-{
- return sa->ss_family == AF_INET
- ? ((struct sockaddr_in *)sa)->sin_port
- : ((struct sockaddr_in6 *)sa)->sin6_port;
-}
-EXPORT_SYMBOL(qed_get_in_port);
-
-int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
- struct socket **sock, u16 *port)
-{
- struct sockaddr_storage sa;
- int rc = 0;
-
- rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
- sock);
- if (rc) {
- pr_warn("failed to create socket: %d\n", rc);
- goto err;
- }
-
- (*sock)->sk->sk_allocation = GFP_KERNEL;
- sk_set_memalloc((*sock)->sk);
-
- rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr,
- sizeof(local_ip_addr));
-
- if (rc) {
- pr_warn("failed to bind socket: %d\n", rc);
- goto err_sock;
- }
-
- rc = kernel_getsockname(*sock, (struct sockaddr *)&sa);
- if (rc < 0) {
- pr_warn("getsockname() failed: %d\n", rc);
- goto err_sock;
- }
-
- *port = ntohs(qed_get_in_port(&sa));
-
- return 0;
-
-err_sock:
- sock_release(*sock);
- sock = NULL;
-err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_fetch_tcp_port);
-
-void qed_return_tcp_port(struct socket *sock)
-{
- if (sock && sock->sk) {
- tcp_set_state(sock->sk, TCP_CLOSE);
- sock_release(sock);
- }
-}
-EXPORT_SYMBOL(qed_return_tcp_port);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 63f0d2d0e87b..b202184eddd4 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -255,10 +255,6 @@ static inline void write_word_mode0(short ioaddr, unsigned short value)
#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
-/* Delay between EEPROM clock transitions. */
-#define eeprom_delay(ticks) \
-do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0)
-
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50d535981a35..c9ee5011803f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2256,7 +2256,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
* guaranteed to satisfy the second as we only attempt TSO if
* inner_network_header <= 208.
*/
- ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
(tcp->doff << 2u) > ip_tot_len);
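
The replaced expression matters because -EFX_TSO2_MAX_HDRLEN only equals the
intended value modulo 2^16: if ip_tot_len is wider than 16 bits, the negation
fills the upper bits, while 0x10000 - EFX_TSO2_MAX_HDRLEN states the 16-bit
wrap explicitly (this reading is inferred from the change itself, not from a
commit message). A small demonstration with a stand-in header length:

#include <stdint.h>
#include <stdio.h>

#define MAX_HDRLEN 208 /* stand-in for EFX_TSO2_MAX_HDRLEN; value assumed */

int main(void)
{
	uint32_t as_u32 = -MAX_HDRLEN;            /* 0xffffff30: upper bits set */
	uint32_t explicit = 0x10000 - MAX_HDRLEN; /* 0x0000ff30: intended value */
	uint16_t as_u16 = -MAX_HDRLEN;            /* 0xff30: same mod 2^16 */

	printf("0x%08x 0x%08x 0x%04x\n", as_u32, explicit, as_u16);
	return 0;
}
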
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index daf0c00c1242..c05a83da9e44 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -28,7 +28,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct
ef100_enqueue_skb, __efx_enqueue_skb,
tx_queue, skb);
}
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 377df8b7f015..eec80b024195 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -51,28 +51,7 @@ MODULE_PARM_DESC(irq_adapt_high_thresh,
*/
static int napi_weight = 64;
-/***************
- * Housekeeping
- ***************/
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
+static const struct efx_channel_type efx_default_channel_type;
/*************
* INTERRUPTS
@@ -619,6 +598,7 @@ void efx_fini_channels(struct efx_nic *efx)
/* Allocate and initialise a channel structure, copying parameters
* (but not resources) from an old channel structure.
*/
+static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
struct efx_rx_queue *rx_queue;
@@ -696,7 +676,8 @@ fail:
return rc;
}
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+static void efx_get_channel_name(struct efx_channel *channel, char *buf,
+ size_t len)
{
struct efx_nic *efx = channel->efx;
const char *type;
@@ -1004,7 +985,7 @@ int efx_set_channels(struct efx_nic *efx)
return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}
-bool efx_default_channel_want_txqs(struct efx_channel *channel)
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
return channel->channel - channel->efx->tx_channel_offset <
channel->efx->n_tx_channels;
@@ -1362,3 +1343,26 @@ void efx_fini_napi(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
+
+/***************
+ * Housekeeping
+ ***************/
+
+static int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
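
The restructuring above leans on a C idiom worth spelling out: a tentative file-scope declaration of the const ops struct lets every handler it references become static, with the full initializer supplied at the bottom of the file once those handlers are defined. A stripped-down sketch of the same pattern, with hypothetical names:

	struct ops {
		int (*probe)(void);
	};

	static const struct ops default_ops;	/* tentative definition */

	static int do_probe(void)		/* may now be static */
	{
		return 0;
	}

	static const struct ops default_ops = {	/* completed after its users */
		.probe = do_probe,
	};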
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index d77ec1f77fb1..64abb99a56b8 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -32,16 +32,13 @@ void efx_fini_eventq(struct efx_channel *channel);
void efx_remove_eventq(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
int efx_init_channels(struct efx_nic *efx);
int efx_probe_channels(struct efx_nic *efx);
int efx_set_channels(struct efx_nic *efx);
-bool efx_default_channel_want_txqs(struct efx_channel *channel);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx);
-struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
@@ -50,7 +47,6 @@ void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
void efx_fini_napi(struct efx_nic *efx);
-int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);
#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index af37c990217e..f6577e74d6e6 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -51,8 +51,8 @@ static unsigned int efx_monitor_interval = 1 * HZ;
/* Default stats update time */
#define STATS_PERIOD_MS_DEFAULT 1000
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
+static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+static const char *const efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 148dcd48b58d..9599123bc28d 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -16,6 +16,7 @@
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index d3fcbf930dba..ff617b1b38d3 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -73,8 +73,8 @@
* \------------------------------ Resync (always set)
*
 * The client writes its request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writting
- * back into shared memory, or by writting out an event.
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c75dc75e2857..318db906a154 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -612,11 +612,6 @@ extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
-extern const char *const efx_reset_type_names[];
-extern const unsigned int efx_reset_type_max;
-#define RESET_TYPE(type) \
- STRING_TABLE_LOOKUP(type, efx_reset_type)
-
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index cd478d2cd871..00f6d347eaf7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -57,10 +57,6 @@
#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define TSE_PCS_IF_USE_SGMII 0x03
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-#define SGMII_ADAPTER_ENABLE 0x0000
-
#define AUTONEGO_LINK_TIMER 20
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
unsigned int speed)
{
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
- void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
u32 val;
- writew(SGMII_ADAPTER_ENABLE,
- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
pcs->autoneg = phy_dev->autoneg;
if (phy_dev->autoneg == AUTONEG_ENABLE) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
index 442812c0a4bd..694ac25ef426 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -10,6 +10,10 @@
#include <linux/phy.h>
#include <linux/timer.h>
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_ENABLE 0x0000
+#define SGMII_ADAPTER_DISABLE 0x0001
+
struct tse_pcs {
struct device *dev;
void __iomem *tse_pcs_base;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 84651207a1de..bd52fb7cf486 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -197,9 +197,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp")) {
- /* Binding doc describes the propety:
+ /* Binding doc describes the property:
is required by i.MX8MP.
- is optinoal for i.MX8DXL.
+ is optional for i.MX8DXL.
*/
dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
if (IS_ERR(dwmac->intf_regmap))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index b7c2579c963b..ac9e6c7a33b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -18,9 +18,6 @@
#include "altr_tse_pcs.h"
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
- void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
struct device *dev = dwmac->dev;
struct net_device *ndev = dev_get_drvdata(dev);
struct phy_device *phy_dev = ndev->phydev;
u32 val;
- if ((tse_pcs_base) && (sgmii_adapter_base))
- writew(SGMII_ADAPTER_DISABLE,
- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ writew(SGMII_ADAPTER_DISABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
if (splitter_base) {
val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
@@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (tse_pcs_base && sgmii_adapter_base)
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ if (phy_dev)
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 22fea0f67245..92d32940aff0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
writel(value, ioaddr + PTP_TCR);
/* wait for present system time initialize to complete */
- return readl_poll_timeout(ioaddr + PTP_TCR, value,
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
!(value & PTP_TCR_TSINIT),
- 10000, 100000);
+ 10, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)
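
Two things change in the hunk above: the sleeping poll becomes a busy-wait poll, and the interval drops from 10ms to 10us. readl_poll_timeout() sleeps between reads (via usleep_range()), which is not allowed if the caller can run in atomic context; readl_poll_timeout_atomic() spins with udelay() instead. The resulting contract, restated as a sketch (argument order per include/linux/iopoll.h: delay in microseconds, then total timeout in microseconds):

	/* Safe under a spinlock: udelay(10) between reads, up to 100ms total. */
	return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
					 !(value & PTP_TCR_TSINIT),
					 10, 100000);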
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c9e077b2a56e..7c834c02e084 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3643,11 +3643,9 @@ static int stmmac_open(struct net_device *dev)
u32 chan;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
@@ -5886,11 +5884,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
ret = eth_mac_addr(ndev, addr);
if (ret)
@@ -6220,11 +6216,9 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
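
The conversions in this file, and the many like them below, all replace the same error-handling dance. For reference, pm_runtime_resume_and_get() (include/linux/pm_runtime.h, since v5.10) is exactly that dance folded into one call, with the added property that it never returns a positive value:

	static inline int pm_runtime_resume_and_get(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		return 0;
	}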
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a5d150c5f3d8..9bc625fccca0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -88,11 +88,9 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -156,11 +154,9 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -229,11 +225,9 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
int data = 0;
u32 v;
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
+ data = pm_runtime_resume_and_get(priv->device);
+ if (data < 0)
return data;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
@@ -297,11 +291,9 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
u32 value = MII_BUSY;
u32 v;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index affcf92cd3aa..fb30bc5d56cb 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -94,6 +94,7 @@ config TI_K3_AM65_CPSW_NUSS
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select NET_DEVLINK
select TI_DAVINCI_MDIO
+ select PHYLINK
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 72acdf802258..abc1e4276cf0 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -380,11 +380,9 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
int ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
dev_err(common->dev, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(common->dev);
- }
return ret;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index d2747e9db286..b7ebd741f284 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -173,11 +173,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
if (!vid)
@@ -203,11 +201,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(common->ale, vid,
@@ -557,11 +553,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret, i;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
@@ -1214,11 +1208,9 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
if (ret < 0)
return ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
HOST_PORT_NUM, 0, 0);
@@ -2692,9 +2684,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->bus_freq = clk_get_rate(clk);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2789,11 +2780,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
common = dev_get_drvdata(dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpsw_unregister_devlink(common);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index ebcc6386cc34..aa32dd905e2b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -8,10 +8,12 @@
#include <linux/pm_runtime.h>
#include <linux/time.h>
+#include <net/pkt_cls.h>
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
+#include "cpsw_ale.h"
#define AM65_CPSW_REG_CTL 0x004
#define AM65_CPSW_PN_REG_CTL 0x004
@@ -588,12 +590,190 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
return am65_cpsw_set_taprio(ndev, type_data);
}
+static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct am65_cpsw_qos *qos = &port->qos;
+ struct flow_match_eth_addrs match;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_bc_ratelimit.cookie = cls->cookie;
+ qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_mc_ratelimit.cookie = cls->cookie;
+ qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
+{
+ struct am65_cpsw_qos *qos = &port->qos;
+
+ if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
+ qos->ale_bc_ratelimit.cookie = 0;
+ qos->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
+ }
+
+ if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
+ qos->ale_mc_ratelimit.cookie = 0;
+ qos->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return am65_cpsw_qos_configure_clsflower(port, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return am65_cpsw_qos_delete_clsflower(port, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct am65_cpsw_port *port = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(am65_cpsw_qos_block_cb_list);
+
+static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
+ am65_cpsw_qos_setup_tc_block_cb,
+ port, port, true);
+}
+
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return am65_cpsw_qos_setup_tc_block(ndev, type_data);
default:
return -EOPNOTSUPP;
}
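
The block callback above accepts only a packet-rate policer on a broadcast or multicast destination MAC, with exceed = drop and conform = pipe/ok. A hypothetical user-space invocation that satisfies those constraints (interface name and rate are assumptions; pkts_rate/pkts_burst require a reasonably recent iproute2):

	tc qdisc add dev eth0 clsact
	tc filter add dev eth0 ingress flower skip_sw \
		dst_mac ff:ff:ff:ff:ff:ff \
		action police pkts_rate 2000 pkts_burst 1 conform-exceed drop/pipe

Rates are rounded down to a multiple of 1000 packets per second by the ALE code further below.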
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index e8f1b6b59e93..fb223b43b196 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -14,11 +14,19 @@ struct am65_cpsw_est {
struct tc_taprio_qopt_offload taprio;
};
+struct am65_cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct am65_cpsw_qos {
struct am65_cpsw_est *est_admin;
struct am65_cpsw_est *est_oper;
ktime_t link_down_time;
int link_speed;
+
+ struct am65_cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct am65_cpsw_ale_ratelimit ale_mc_ratelimit;
};
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 03575c017500..e6ad2e53f1cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -335,7 +335,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -756,11 +756,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
int ret;
u32 reg;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
netif_carrier_off(ndev);
@@ -968,11 +966,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
vid = cpsw->slaves[priv->emac_port].port_vlan;
@@ -1052,11 +1048,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
@@ -1090,11 +1084,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
int i;
@@ -1567,11 +1559,9 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto clean_runtime_disable_ret;
- }
ret = cpsw_probe_dt(&cpsw->data, pdev);
if (ret)
@@ -1734,11 +1724,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int i, ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 1ef0aaef5c61..231370e9a801 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -50,6 +50,8 @@
/* ALE_AGING_TIMER */
#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+#define ALE_RATE_LIMIT_MIN_PPS 1000
+
/**
* struct ale_entry_fld - The ALE tbl entry field description
* @start_bit: field start bit
@@ -1136,6 +1138,50 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
return tmp & BITMASK(info->bits);
}
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE MC port:%d ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
static void cpsw_ale_timer(struct timer_list *t)
{
struct cpsw_ale *ale = from_timer(ale, t, timer);
@@ -1199,6 +1245,26 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ unsigned long ale_prescale;
+
+ /* configure Broadcast and Multicast Rate Limit
+ * number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT
+ * ALE_PRESCALE width is 19bit and min value 0x10
+ * port.BCAST/MCAST_LIMIT is 8bit
+ *
+	 * To support multi-port configurations, ALE_PRESCALE is configured to a 1ms interval,
+	 * which allows port.BCAST/MCAST_LIMIT to be configured per port and achieves:
+ * min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1
+ * max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF
+ */
+ ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS;
+ writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE);
+
+ /* Allow MC/BC rate limiting globally.
+	 * The actual rate limit is enabled per port by port.BCAST/MCAST_LIMIT
+ */
+ cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1);
+
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
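
To make the prescale arithmetic concrete: with a hypothetical 125 MHz ALE functional clock, ALE_PRESCALE = 125000000 / 1000 = 125000, i.e. one rate-limit window per millisecond, and a per-port limit register value of N then admits N * 1000 packets per second (255000 pps at the 8-bit maximum). A sketch of the same computation, with the clock value assumed for illustration:

	#include <stdio.h>

	#define ALE_RATE_LIMIT_MIN_PPS	1000

	int main(void)
	{
		unsigned long bus_freq = 125000000;	/* assumed ALE clock (Hz) */
		unsigned long prescale = bus_freq / ALE_RATE_LIMIT_MIN_PPS;
		unsigned int limit = 0xff;		/* 8-bit per-port maximum */

		/* pps = (Fclk / ALE_PRESCALE) * limit = 1000 * limit */
		printf("prescale=%lu max_pps=%u\n",
		       prescale, ALE_RATE_LIMIT_MIN_PPS * limit);
		return 0;
	}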
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 13fe47687fde..aba4572cfa3b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -120,6 +120,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index bd4b1528cf99..0f31cb4168bb 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -273,7 +273,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -449,11 +449,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
@@ -498,6 +496,8 @@ static void cpsw_restore(struct cpsw_priv *priv)
/* restore CBS offload */
cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ cpsw_qos_clsflower_resume(priv);
}
static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
@@ -829,11 +829,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
dev_info(priv->dev, "starting ndev. mode: %s\n",
cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
@@ -985,11 +983,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
vid = cpsw->slaves[slave_no].port_vlan;
flags = ALE_VLAN | ALE_SECURE;
@@ -1024,11 +1020,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* reset the return code as pm_runtime_get_sync() can return
	 * non-zero values as well.
@@ -1407,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
cpsw->slaves[i].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1918,9 +1912,8 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2045,11 +2038,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
cpsw_unregister_notifiers(cpsw);
cpsw_unregister_devlink(cpsw);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 8f6817f346ba..887285c57db8 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -502,6 +502,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.ale_ageout = ale_ageout;
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
ale_params.dev_id = "cpsw";
+ ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;
cpsw->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(cpsw->ale)) {
@@ -754,11 +755,9 @@ int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
return -EINVAL;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
pm_runtime_put(cpsw->dev);
@@ -970,11 +969,9 @@ static int cpsw_set_cbs(struct net_device *ndev,
return -1;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
bw = qopt->enable ? qopt->idleslope : 0;
ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
@@ -1008,11 +1005,9 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
if (mqprio->mode != TC_MQPRIO_MODE_DCB)
return -EINVAL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (num_tc) {
for (i = 0; i < 8; i++) {
@@ -1048,6 +1043,8 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);
+
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
@@ -1058,6 +1055,9 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
case TC_SETUP_QDISC_MQPRIO:
return cpsw_set_mqprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return cpsw_qos_setup_tc_block(ndev, type_data);
+
default:
return -EOPNOTSUPP;
}
@@ -1381,3 +1381,202 @@ drop:
page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
+
+static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+ u32 port_id;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_bc_ratelimit.cookie = cls->cookie;
+ priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_mc_ratelimit.cookie = cls->cookie;
+ priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return cpsw_qos_clsflower_add_policer(priv, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
+ priv->ale_bc_ratelimit.cookie = 0;
+ priv->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
+ }
+
+ if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
+ priv->ale_mc_ratelimit.cookie = 0;
+ priv->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return cpsw_qos_configure_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return cpsw_qos_delete_clsflower(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct cpsw_priv *priv = cb_priv;
+ int ret;
+
+ if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ pm_runtime_put(priv->dev);
+ return ret;
+}
+
+static LIST_HEAD(cpsw_qos_block_cb_list);
+
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
+ cpsw_qos_setup_tc_block_cb,
+ priv, priv, true);
+}
+
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (priv->ale_bc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
+ priv->ale_bc_ratelimit.rate_packet_ps);
+
+ if (priv->ale_mc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
+ priv->ale_mc_ratelimit.rate_packet_ps);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 74555970730c..fc591f5ebe18 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -364,6 +364,11 @@ struct cpsw_common {
u8 base_mac[ETH_ALEN];
};
+struct cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct cpsw_priv {
struct net_device *ndev;
struct device *dev;
@@ -384,6 +389,8 @@ struct cpsw_priv {
struct cpsw_common *cpsw;
int offload_fwd_mark;
u32 tx_packet_min;
+ struct cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct cpsw_ale_ratelimit ale_mc_ratelimit;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
@@ -411,7 +418,6 @@ struct __aligned(sizeof(long)) cpsw_meta_xdp {
/* The buf includes headroom compatible with both skb and xdpf */
#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
-#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
static inline int cpsw_is_xdpf_handle(void *handle)
{
@@ -462,6 +468,7 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4b6aed78d392..9d1e98db308b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1422,9 +1422,8 @@ static int emac_dev_open(struct net_device *ndev)
struct phy_device *phydev = NULL;
struct device *phy = NULL;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_resume_and_get(&priv->pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, ret);
return ret;
@@ -1661,9 +1660,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
u32 stats_clear_mask;
int err;
- err = pm_runtime_get_sync(&priv->pdev->dev);
+ err = pm_runtime_resume_and_get(&priv->pdev->dev);
if (err < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, err);
return &ndev->stats;
@@ -1954,9 +1952,8 @@ static int davinci_emac_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
pm_runtime_enable(&pdev->dev);
- rc = pm_runtime_get_sync(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
if (rc < 0) {
- pm_runtime_put_noidle(&pdev->dev);
dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, rc);
goto err_napi_del;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index fce2626e34fa..ea3772618043 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -134,11 +134,9 @@ static int davinci_mdio_reset(struct mii_bus *bus)
u32 phy_mask, ver;
int ret;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -232,11 +230,9 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
(phy_id << 16));
@@ -276,11 +272,9 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
(phy_id << 16) | (phy_data & USERACCESS_DATA));
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 57a24f62e353..7a86ae82fcc1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
+/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@xilinx.com>.
*
- * 2007 - 2013 (c) Xilinx, Inc.
+ * Copyright (c) 2007 - 2013 Xilinx, Inc.
*/
#include <linux/module.h>
@@ -91,13 +90,7 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
-#define ALIGNMENT 4
-
-/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -124,7 +117,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +125,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +136,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -207,7 +198,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
const u16 *from_u16_ptr;
u32 align_buffer;
@@ -265,7 +256,7 @@ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -330,7 +321,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -346,8 +336,9 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -423,7 +414,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -433,23 +423,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -599,11 +591,10 @@ static void xemaclite_rx_handler(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
- unsigned int align;
u32 len;
len = ETH_FRAME_LEN + ETH_FCS_LEN;
- skb = netdev_alloc_skb(dev, len + ALIGNMENT);
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
if (!skb) {
/* Couldn't get memory. */
dev->stats.rx_dropped++;
@@ -611,16 +602,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /* A new skb should have the data halfword aligned, but this code is
- * here just in case that isn't true. Calculate how many
- * bytes we should reserve to get the data to start on a word
- * boundary
- */
- align = BUFFER_ALIGN(skb->data);
- if (align)
- skb_reserve(skb, align);
-
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
@@ -671,8 +653,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -682,8 +663,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the second buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -840,6 +820,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
of_address_to_resource(npp, 0, &res);
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
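
On the receive-path change above: NET_IP_ALIGN (2 on most architectures, 0 where unaligned loads are cheap) pads the buffer head so that the IP header, which follows the 14-byte Ethernet header, lands on a 4-byte boundary. Since netdev_alloc_skb() data is already suitably aligned, the old BUFFER_ALIGN() computation plus a hard-coded skb_reserve(skb, 2) reduced to the two lines kept by the patch:

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	skb_reserve(skb, NET_IP_ALIGN);	/* 14 + 2 = 16: IP header word-aligned */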
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 1f382777aa5a..9abbdb71e629 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -271,7 +271,7 @@ static int ptp_ixp_probe(struct platform_device *pdev)
ixp_clock.master_irq = platform_get_irq(pdev, 0);
ixp_clock.slave_irq = platform_get_irq(pdev, 1);
if (IS_ERR(ixp_clock.regs) ||
- !ixp_clock.master_irq || !ixp_clock.slave_irq)
+ ixp_clock.master_irq < 0 || ixp_clock.slave_irq < 0)
return -ENXIO;
ixp_clock.caps = ptp_ixp_caps;
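
The corrected test above follows the platform_get_irq() contract: it returns the (positive) IRQ number on success and a negative errno on failure, so `!irq` mistook every real error for success. The canonical shape:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)	/* failure is a negative errno, never 0 */
		return irq;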
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7db6c135ac6c..2495a5719e1c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -533,14 +533,16 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
}
}
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
type = gh->proto_type;
+ if (likely(type == htons(ETH_P_TEB)))
+ return call_gro_receive(eth_gro_receive, head, skb);
ptype = gro_find_receive_by_type(type);
if (!ptype)
goto out;
- skb_gro_pull(skb, gh_len);
- skb_gro_postpull_rcsum(skb, gh, gh_len);
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
@@ -563,6 +565,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
gh_len = geneve_hlen(gh);
type = gh->proto_type;
+ /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */
+ if (likely(type == htons(ETH_P_TEB)))
+ return eth_gro_complete(skb, nhoff + gh_len);
+
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
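
The reordering above serves the fast path: almost all Geneve traffic carries inner Ethernet (proto_type == ETH_P_TEB), so dispatching straight to eth_gro_receive()/eth_gro_complete() skips the gro_find_*_by_type() lookup; pulling the Geneve header before the type check keeps both branches consistent. The resulting dispatch shape, condensed from the hunks above:

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	if (likely(gh->proto_type == htons(ETH_P_TEB)))
		return call_gro_receive(eth_gro_receive, head, skb);

	ptype = gro_find_receive_by_type(gh->proto_type);	/* slow path */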
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 441da03c23ee..a9c44f08199d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -45,40 +45,6 @@ config BPQETHER
useful if some other computer on your local network has a direct
amateur radio connection.
-config DMASCC
- tristate "High-speed (DMA) SCC driver for AX.25"
- depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
- depends on VIRT_TO_BUS
- help
- This is a driver for high-speed SCC boards, i.e. those supporting
- DMA on one port. You usually use those boards to connect your
- computer to an amateur radio modem (such as the WA4DSY 56kbps
- modem), in order to send and receive AX.25 packet radio network
- traffic.
-
- Currently, this driver supports Ottawa PI/PI2, Paccomm/Gracilis
- PackeTwin, and S5SCC/DMA boards. They are detected automatically.
- If you have one of these cards, say Y here and read the AX25-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- This driver can operate multiple boards simultaneously. If you
- compile it as a module (by saying M instead of Y), it will be called
- dmascc. If you don't pass any parameter to the driver, all
- possible I/O addresses are probed. This could irritate other devices
- that are currently not in use. You may specify the list of addresses
- to be probed by "dmascc.io=addr1,addr2,..." (when compiled into the
- kernel image) or "io=addr1,addr2,..." (when loaded as a module). The
- network interfaces will be called dmascc0 and dmascc1 for the board
- detected first, dmascc2 and dmascc3 for the second one, and so on.
-
- Before you configure each interface with ifconfig, you MUST set
- certain parameters, such as channel access timing, clock mode, and
- DMA channel. This is accomplished with a small utility program,
- dmascc_cfg, available at
- <http://www.linux-ax25.org/wiki/Ax25-tools>. Please be sure to
- get at least version 1.27 of dmascc_cfg, as older versions will not
- work with the current driver.
-
config SCC
tristate "Z8530 SCC driver"
depends on ISA && AX25 && ISA_DMA_API
diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile
index 7a1518d763e3..25fc400369ba 100644
--- a/drivers/net/hamradio/Makefile
+++ b/drivers/net/hamradio/Makefile
@@ -11,7 +11,6 @@
# Christoph Hellwig <hch@infradead.org>
#
-obj-$(CONFIG_DMASCC) += dmascc.o
obj-$(CONFIG_SCC) += scc.o
obj-$(CONFIG_MKISS) += mkiss.o
obj-$(CONFIG_6PACK) += 6pack.o
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
deleted file mode 100644
index a2a12208e3ad..000000000000
--- a/drivers/net/hamradio/dmascc.c
+++ /dev/null
@@ -1,1450 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for high-speed SCC boards (those with DMA support)
- * Copyright (C) 1997-2000 Klaus Kudielka
- *
- * S5SCC/DMA support by Janko Koleznik S52HI
- */
-
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/rtnetlink.h>
-#include <linux/sockios.h>
-#include <linux/workqueue.h>
-#include <linux/atomic.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <linux/jiffies.h>
-#include <net/ax25.h>
-#include "z8530.h"
-
-
-/* Number of buffers per channel */
-
-#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
-#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
-#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
-
-
-/* Cards supported */
-
-#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
- 0, 8, 1843200, 3686400 }
-#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
- 0, 8, 3686400, 7372800 }
-#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
- 0, 4, 6144000, 6144000 }
-#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
- 0, 8, 4915200, 9830400 }
-
-#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
-
-#define TMR_0_HZ 25600 /* Frequency of timer 0 */
-
-#define TYPE_PI 0
-#define TYPE_PI2 1
-#define TYPE_TWIN 2
-#define TYPE_S5 3
-#define NUM_TYPES 4
-
-#define MAX_NUM_DEVS 32
-
-
-/* SCC chips supported */
-
-#define Z8530 0
-#define Z85C30 1
-#define Z85230 2
-
-#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
-
-
-/* I/O registers */
-
-/* 8530 registers relative to card base */
-#define SCCB_CMD 0x00
-#define SCCB_DATA 0x01
-#define SCCA_CMD 0x02
-#define SCCA_DATA 0x03
-
-/* 8253/8254 registers relative to card base */
-#define TMR_CNT0 0x00
-#define TMR_CNT1 0x01
-#define TMR_CNT2 0x02
-#define TMR_CTRL 0x03
-
-/* Additional PI/PI2 registers relative to card base */
-#define PI_DREQ_MASK 0x04
-
-/* Additional PackeTwin registers relative to card base */
-#define TWIN_INT_REG 0x08
-#define TWIN_CLR_TMR1 0x09
-#define TWIN_CLR_TMR2 0x0a
-#define TWIN_SPARE_1 0x0b
-#define TWIN_DMA_CFG 0x08
-#define TWIN_SERIAL_CFG 0x09
-#define TWIN_DMA_CLR_FF 0x0a
-#define TWIN_SPARE_2 0x0b
-
-
-/* PackeTwin I/O register values */
-
-/* INT_REG */
-#define TWIN_SCC_MSK 0x01
-#define TWIN_TMR1_MSK 0x02
-#define TWIN_TMR2_MSK 0x04
-#define TWIN_INT_MSK 0x07
-
-/* SERIAL_CFG */
-#define TWIN_DTRA_ON 0x01
-#define TWIN_DTRB_ON 0x02
-#define TWIN_EXTCLKA 0x04
-#define TWIN_EXTCLKB 0x08
-#define TWIN_LOOPA_ON 0x10
-#define TWIN_LOOPB_ON 0x20
-#define TWIN_EI 0x80
-
-/* DMA_CFG */
-#define TWIN_DMA_HDX_T1 0x08
-#define TWIN_DMA_HDX_R1 0x0a
-#define TWIN_DMA_HDX_T3 0x14
-#define TWIN_DMA_HDX_R3 0x16
-#define TWIN_DMA_FDX_T3R1 0x1b
-#define TWIN_DMA_FDX_T1R3 0x1d
-
-
-/* Status values */
-
-#define IDLE 0
-#define TX_HEAD 1
-#define TX_DATA 2
-#define TX_PAUSE 3
-#define TX_TAIL 4
-#define RTS_OFF 5
-#define WAIT 6
-#define DCD_ON 7
-#define RX_ON 8
-#define DCD_OFF 9
-
-
-/* Ioctls */
-
-#define SIOCGSCCPARAM SIOCDEVPRIVATE
-#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
-
-
-/* Data types */
-
-struct scc_param {
- int pclk_hz; /* frequency of BRG input (don't change) */
- int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
- int nrzi; /* 0 (nrz), 1 (nrzi) */
- int clocks; /* see dmascc_cfg documentation */
- int txdelay; /* [1/TMR_0_HZ] */
- int txtimeout; /* [1/HZ] */
- int txtail; /* [1/TMR_0_HZ] */
- int waittime; /* [1/TMR_0_HZ] */
- int slottime; /* [1/TMR_0_HZ] */
- int persist; /* 1 ... 256 */
- int dma; /* -1 (disable), 0, 1, 3 */
- int txpause; /* [1/TMR_0_HZ] */
- int rtsoff; /* [1/TMR_0_HZ] */
- int dcdon; /* [1/TMR_0_HZ] */
- int dcdoff; /* [1/TMR_0_HZ] */
-};
-
-struct scc_hardware {
- char *name;
- int io_region;
- int io_delta;
- int io_size;
- int num_devs;
- int scc_offset;
- int tmr_offset;
- int tmr_hz;
- int pclk_hz;
-};
-
-struct scc_priv {
- int type;
- int chip;
- struct net_device *dev;
- struct scc_info *info;
-
- int channel;
- int card_base, scc_cmd, scc_data;
- int tmr_cnt, tmr_ctrl, tmr_mode;
- struct scc_param param;
- char rx_buf[NUM_RX_BUF][BUF_SIZE];
- int rx_len[NUM_RX_BUF];
- int rx_ptr;
- struct work_struct rx_work;
- int rx_head, rx_tail, rx_count;
- int rx_over;
- char tx_buf[NUM_TX_BUF][BUF_SIZE];
- int tx_len[NUM_TX_BUF];
- int tx_ptr;
- int tx_head, tx_tail, tx_count;
- int state;
- unsigned long tx_start;
- int rr0;
- spinlock_t *register_lock; /* Per scc_info */
- spinlock_t ring_lock;
-};
-
-struct scc_info {
- int irq_used;
- int twin_serial_cfg;
- struct net_device *dev[2];
- struct scc_priv priv[2];
- struct scc_info *next;
- spinlock_t register_lock; /* Per device register lock */
-};
-
-
-/* Function declarations */
-static int setup_adapter(int card_base, int type, int n) __init;
-
-static void write_scc(struct scc_priv *priv, int reg, int val);
-static void write_scc_data(struct scc_priv *priv, int val, int fast);
-static int read_scc(struct scc_priv *priv, int reg);
-static int read_scc_data(struct scc_priv *priv);
-
-static int scc_open(struct net_device *dev);
-static int scc_close(struct net_device *dev);
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
-static int scc_set_mac_address(struct net_device *dev, void *sa);
-
-static inline void tx_on(struct scc_priv *priv);
-static inline void rx_on(struct scc_priv *priv);
-static inline void rx_off(struct scc_priv *priv);
-static void start_timer(struct scc_priv *priv, int t, int r15);
-static inline unsigned char random(void);
-
-static inline void z8530_isr(struct scc_info *info);
-static irqreturn_t scc_isr(int irq, void *dev_id);
-static void rx_isr(struct scc_priv *priv);
-static void special_condition(struct scc_priv *priv, int rc);
-static void rx_bh(struct work_struct *);
-static void tx_isr(struct scc_priv *priv);
-static void es_isr(struct scc_priv *priv);
-static void tm_isr(struct scc_priv *priv);
-
-
-/* Initialization variables */
-
-static int io[MAX_NUM_DEVS] __initdata = { 0, };
-
-/* Beware! hw[] is also used in dmascc_exit(). */
-static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
-
-
-/* Global variables */
-
-static struct scc_info *first;
-static unsigned long rand;
-
-
-MODULE_AUTHOR("Klaus Kudielka");
-MODULE_DESCRIPTION("Driver for high-speed SCC boards");
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_LICENSE("GPL");
-
-static void __exit dmascc_exit(void)
-{
- int i;
- struct scc_info *info;
-
- while (first) {
- info = first;
-
- /* Unregister devices */
- for (i = 0; i < 2; i++)
- unregister_netdev(info->dev[i]);
-
- /* Reset board */
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- release_region(info->dev[0]->base_addr,
- hw[info->priv[0].type].io_size);
-
- for (i = 0; i < 2; i++)
- free_netdev(info->dev[i]);
-
- /* Free memory */
- first = info->next;
- kfree(info);
- }
-}
-
-static int __init dmascc_init(void)
-{
- int h, i, j, n;
- int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
- t1[MAX_NUM_DEVS];
- unsigned t_val;
- unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
- counting[MAX_NUM_DEVS];
-
- /* Initialize random number generator */
- rand = jiffies;
- /* Cards found = 0 */
- n = 0;
- /* Warning message */
- if (!io[0])
- printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
-
- /* Run autodetection for each card type */
- for (h = 0; h < NUM_TYPES; h++) {
-
- if (io[0]) {
- /* User-specified I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- base[i] = 0;
- for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
- j = (io[i] -
- hw[h].io_region) / hw[h].io_delta;
- if (j >= 0 && j < hw[h].num_devs &&
- hw[h].io_region +
- j * hw[h].io_delta == io[i]) {
- base[j] = io[i];
- }
- }
- } else {
- /* Default I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++) {
- base[i] =
- hw[h].io_region + i * hw[h].io_delta;
- }
- }
-
- /* Check valid I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if (!request_region
- (base[i], hw[h].io_size, "dmascc"))
- base[i] = 0;
- else {
- tcmd[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CTRL;
- t0[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT0;
- t1[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT1;
- }
- }
-
- /* Start timers */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
- outb(0x36, tcmd[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
- t0[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
- t0[i]);
- /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
- outb(0x70, tcmd[i]);
- outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
- outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
- start[i] = jiffies;
- delay[i] = 0;
- counting[i] = 1;
- /* Timer 2: LSB+MSB, Mode 0 */
- outb(0xb0, tcmd[i]);
- }
- time = jiffies;
- /* Wait until counter registers are loaded */
- udelay(2000000 / TMR_0_HZ);
-
- /* Timing loop */
- while (time_is_after_jiffies(time + 13)) {
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i] && counting[i]) {
- /* Read back Timer 1: latch; read LSB; read MSB */
- outb(0x40, tcmd[i]);
- t_val =
- inb(t1[i]) + (inb(t1[i]) << 8);
- /* Also check whether counter did wrap */
- if (t_val == 0 ||
- t_val > TMR_0_HZ / HZ * 10)
- counting[i] = 0;
- delay[i] = jiffies - start[i];
- }
- }
-
- /* Evaluate measurements */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if ((delay[i] >= 9 && delay[i] <= 11) &&
- /* Ok, we have found an adapter */
- (setup_adapter(base[i], h, n) == 0))
- n++;
- else
- release_region(base[i],
- hw[h].io_size);
- }
-
- } /* NUM_TYPES */
-
- /* If any adapter was successfully initialized, return ok */
- if (n)
- return 0;
-
- /* If no adapter found, return error */
- printk(KERN_INFO "dmascc: no adapters found\n");
- return -EIO;
-}
-
-module_init(dmascc_init);
-module_exit(dmascc_exit);
-
-static void __init dev_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_AX25;
- dev->hard_header_len = AX25_MAX_HEADER_LEN;
- dev->mtu = 1500;
- dev->addr_len = AX25_ADDR_LEN;
- dev->tx_queue_len = 64;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- dev_addr_set(dev, (u8 *)&ax25_defaddr);
-}
-
-static const struct net_device_ops scc_netdev_ops = {
- .ndo_open = scc_open,
- .ndo_stop = scc_close,
- .ndo_start_xmit = scc_send_packet,
- .ndo_siocdevprivate = scc_siocdevprivate,
- .ndo_set_mac_address = scc_set_mac_address,
-};
-
-static int __init setup_adapter(int card_base, int type, int n)
-{
- int i, irq, chip, err;
- struct scc_info *info;
- struct net_device *dev;
- struct scc_priv *priv;
- unsigned long time;
- unsigned int irqs;
- int tmr_base = card_base + hw[type].tmr_offset;
- int scc_base = card_base + hw[type].scc_offset;
- char *chipnames[] = CHIPNAMES;
-
- /* Initialize what is necessary for write_scc and write_scc_data */
- info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[0]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out1;
- }
-
- info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[1]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out2;
- }
- spin_lock_init(&info->register_lock);
-
- priv = &info->priv[0];
- priv->type = type;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + SCCA_CMD;
- priv->scc_data = scc_base + SCCA_DATA;
- priv->register_lock = &info->register_lock;
-
- /* Reset SCC */
- write_scc(priv, R9, FHWRES | MIE | NV);
-
- /* Determine type of chip by enabling SDLC/HDLC enhancements */
- write_scc(priv, R15, SHDLCE);
- if (!read_scc(priv, R15)) {
- /* WR7' not present. This is an ordinary Z8530 SCC. */
- chip = Z8530;
- } else {
- /* Put one character in TX FIFO */
- write_scc_data(priv, 0, 0);
- if (read_scc(priv, R0) & Tx_BUF_EMP) {
- /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
- chip = Z85230;
- } else {
- /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
- chip = Z85C30;
- }
- }
- write_scc(priv, R15, 0);
-
- /* Start IRQ auto-detection */
- irqs = probe_irq_on();
-
- /* Enable interrupts */
- if (type == TYPE_TWIN) {
- outb(0, card_base + TWIN_DMA_CFG);
- inb(card_base + TWIN_CLR_TMR1);
- inb(card_base + TWIN_CLR_TMR2);
- info->twin_serial_cfg = TWIN_EI;
- outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
- } else {
- write_scc(priv, R15, CTSIE);
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R1, EXT_INT_ENAB);
- }
-
- /* Start timer */
- outb(1, tmr_base + TMR_CNT1);
- outb(0, tmr_base + TMR_CNT1);
-
- /* Wait and detect IRQ */
- time = jiffies;
- while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ));
- irq = probe_irq_off(irqs);
-
- /* Clear pending interrupt, disable interrupts */
- if (type == TYPE_TWIN) {
- inb(card_base + TWIN_CLR_TMR1);
- } else {
- write_scc(priv, R1, 0);
- write_scc(priv, R15, 0);
- write_scc(priv, R0, RES_EXT_INT);
- }
-
- if (irq <= 0) {
- printk(KERN_ERR
- "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
- hw[type].name, card_base, irq);
- err = -ENODEV;
- goto out3;
- }
-
- /* Set up data structures */
- for (i = 0; i < 2; i++) {
- dev = info->dev[i];
- priv = &info->priv[i];
- priv->type = type;
- priv->chip = chip;
- priv->dev = dev;
- priv->info = info;
- priv->channel = i;
- spin_lock_init(&priv->ring_lock);
- priv->register_lock = &info->register_lock;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
- priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
- priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
- priv->tmr_ctrl = tmr_base + TMR_CTRL;
- priv->tmr_mode = i ? 0xb0 : 0x70;
- priv->param.pclk_hz = hw[type].pclk_hz;
- priv->param.brg_tc = -1;
- priv->param.clocks = TCTRxCP | RCRTxCP;
- priv->param.persist = 256;
- priv->param.dma = -1;
- INIT_WORK(&priv->rx_work, rx_bh);
- dev->ml_priv = priv;
- snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
- dev->base_addr = card_base;
- dev->irq = irq;
- dev->netdev_ops = &scc_netdev_ops;
- dev->header_ops = &ax25_header_ops;
- }
- if (register_netdev(info->dev[0])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[0]->name);
- err = -ENODEV;
- goto out3;
- }
- if (register_netdev(info->dev[1])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[1]->name);
- err = -ENODEV;
- goto out4;
- }
-
-
- info->next = first;
- first = info;
- printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
- hw[type].name, chipnames[chip], card_base, irq);
- return 0;
-
- out4:
- unregister_netdev(info->dev[0]);
- out3:
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- free_netdev(info->dev[1]);
- out2:
- free_netdev(info->dev[0]);
- out1:
- kfree(info);
- out:
- return err;
-}
-
-
-/* Driver functions */
-
-static void write_scc(struct scc_priv *priv, int reg, int val)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- outb(val, priv->scc_cmd);
- return;
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- return;
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return;
- }
-}
-
-
-static void write_scc_data(struct scc_priv *priv, int val, int fast)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- outb(val, priv->scc_data);
- return;
- case TYPE_TWIN:
- outb_p(val, priv->scc_data);
- return;
- default:
- if (fast)
- outb_p(val, priv->scc_data);
- else {
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- outb_p(val, priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- }
- return;
- }
-}
-
-
-static int read_scc(struct scc_priv *priv, int reg)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- return inb(priv->scc_cmd);
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- return inb_p(priv->scc_cmd);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- rc = inb_p(priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int read_scc_data(struct scc_priv *priv)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- return inb(priv->scc_data);
- case TYPE_TWIN:
- return inb_p(priv->scc_data);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- rc = inb_p(priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int scc_open(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- /* Request IRQ if not already used by other channel */
- if (!info->irq_used) {
- if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
- return -EAGAIN;
- }
- }
- info->irq_used++;
-
- /* Request DMA if required */
- if (priv->param.dma >= 0) {
- if (request_dma(priv->param.dma, "dmascc")) {
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
- return -EAGAIN;
- } else {
- unsigned long flags = claim_dma_lock();
- clear_dma_ff(priv->param.dma);
- release_dma_lock(flags);
- }
- }
-
- /* Initialize local variables */
- priv->rx_ptr = 0;
- priv->rx_over = 0;
- priv->rx_head = priv->rx_tail = priv->rx_count = 0;
- priv->state = IDLE;
- priv->tx_head = priv->tx_tail = priv->tx_count = 0;
- priv->tx_ptr = 0;
-
- /* Reset channel */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- /* X1 clock, SDLC mode */
- write_scc(priv, R4, SDLC | X1CLK);
- /* DMA */
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* 8 bit RX char, RX disable */
- write_scc(priv, R3, Rx8);
- /* 8 bit TX char, TX disable */
- write_scc(priv, R5, Tx8);
- /* SDLC address field */
- write_scc(priv, R6, 0);
- /* SDLC flag */
- write_scc(priv, R7, FLAG);
- switch (priv->chip) {
- case Z85C30:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* Auto EOM reset */
- write_scc(priv, R7, AUTOEOM);
- write_scc(priv, R15, 0);
- break;
- case Z85230:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* The following bits are set (see 2.5.2.1):
- - Automatic EOM reset
- - Interrupt request if RX FIFO is half full
- This bit should be ignored in DMA mode (according to the
- documentation), but actually isn't. The receiver doesn't work if
- it is set. Thus, we have to clear it in DMA mode.
- - Interrupt/DMA request if TX FIFO is completely empty
- a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
- compatibility).
- b) If cleared, DMA requests may follow each other very quickly,
- filling up the TX FIFO.
- Advantage: TX works even in case of high bus latency.
- Disadvantage: Edge-triggered DMA request circuitry may miss
- a request. No more data is delivered, resulting
- in a TX FIFO underrun.
- Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
- The PackeTwin doesn't. I don't know about the PI, but let's
- assume it behaves like the PI2.
- */
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- write_scc(priv, R7, AUTOEOM | TXFIFOE);
- else
- write_scc(priv, R7, AUTOEOM);
- } else {
- write_scc(priv, R7, AUTOEOM | RXFIFOH);
- }
- write_scc(priv, R15, 0);
- break;
- }
- /* Preset CRC, NRZ(I) encoding */
- write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
-
- /* Configure baud rate generator */
- if (priv->param.brg_tc >= 0) {
- /* Program BR generator */
- write_scc(priv, R12, priv->param.brg_tc & 0xFF);
- write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
- /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
- PackeTwin, not connected on the PI2); set DPLL source to BRG */
- write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
- /* Enable DPLL */
- write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
- } else {
- /* Disable BR generator */
- write_scc(priv, R14, DTRREQ | BRSRC);
- }
-
- /* Configure clocks */
- if (priv->type == TYPE_TWIN) {
- /* Disable external TX clock receiver */
- outb((info->twin_serial_cfg &=
- ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
- write_scc(priv, R11, priv->param.clocks);
- if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
- /* Enable external TX clock receiver */
- outb((info->twin_serial_cfg |=
- (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Configure PackeTwin */
- if (priv->type == TYPE_TWIN) {
- /* Assert DTR, enable interrupts */
- outb((info->twin_serial_cfg |= TWIN_EI |
- (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Read current status */
- priv->rr0 = read_scc(priv, R0);
- /* Enable DCD interrupt */
- write_scc(priv, R15, DCDIE);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-
-static int scc_close(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- netif_stop_queue(dev);
-
- if (priv->type == TYPE_TWIN) {
- /* Drop DTR */
- outb((info->twin_serial_cfg &=
- (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Reset channel, free DMA and IRQ */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- outb(0, card_base + TWIN_DMA_CFG);
- free_dma(priv->param.dma);
- }
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
-
- return 0;
-}
-
-
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
-{
- struct scc_priv *priv = dev->ml_priv;
-
- switch (cmd) {
- case SIOCGSCCPARAM:
- if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- case SIOCSSCCPARAM:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (netif_running(dev))
- return -EAGAIN;
- if (copy_from_user(&priv->param, data,
- sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- unsigned long flags;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP))
- return ax25_ip_xmit(skb);
-
- /* Temporarily stop the scheduler feeding us packets */
- netif_stop_queue(dev);
-
- /* Transfer data to DMA buffer */
- i = priv->tx_head;
- skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
- priv->tx_len[i] = skb->len - 1;
-
- /* Clear interrupts while we touch our circular buffers */
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move the ring buffer's head */
- priv->tx_head = (i + 1) % NUM_TX_BUF;
- priv->tx_count++;
-
- /* If we just filled up the last buffer, leave queue stopped.
- The higher layers must wait until we have a DMA buffer
- to accept the data. */
- if (priv->tx_count < NUM_TX_BUF)
- netif_wake_queue(dev);
-
- /* Set new TX state */
- if (priv->state == IDLE) {
- /* Assert RTS, start timer */
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- }
-
- /* Turn interrupts back on and free buffer */
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-
-static int scc_set_mac_address(struct net_device *dev, void *sa)
-{
- dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
- return 0;
-}
-
-
-static inline void tx_on(struct scc_priv *priv)
-{
- int i, n;
- unsigned long flags;
-
- if (priv->param.dma >= 0) {
- n = (priv->chip == Z85230) ? 3 : 1;
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
- set_dma_count(priv->param.dma,
- priv->tx_len[priv->tx_tail] - n);
- release_dma_lock(flags);
- /* Enable TX underrun interrupt */
- write_scc(priv, R15, TxUIE);
- /* Configure DREQ */
- if (priv->type == TYPE_TWIN)
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
- priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN |
- WT_RDY_ENAB);
- /* Write first byte(s) */
- spin_lock_irqsave(priv->register_lock, flags);
- for (i = 0; i < n; i++)
- write_scc_data(priv,
- priv->tx_buf[priv->tx_tail][i], 1);
- enable_dma(priv->param.dma);
- spin_unlock_irqrestore(priv->register_lock, flags);
- } else {
- write_scc(priv, R15, TxUIE);
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
- tx_isr(priv);
- }
- /* Reset EOM latch if we do not have the AUTOEOM feature */
- if (priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-}
-
-
-static inline void rx_on(struct scc_priv *priv)
-{
- unsigned long flags;
-
- /* Clear RX FIFO */
- while (read_scc(priv, R0) & Rx_CH_AV)
- read_scc_data(priv);
- priv->rx_over = 0;
- if (priv->param.dma >= 0) {
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_READ);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- enable_dma(priv->param.dma);
- /* Configure PackeTwin DMA */
- if (priv->type == TYPE_TWIN) {
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
- priv->card_base + TWIN_DMA_CFG);
- }
- /* Sp. cond. intr. only, ext int enable, RX DMA enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
- WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
- } else {
- /* Reset current frame */
- priv->rx_ptr = 0;
- /* Intr. on all Rx characters and Sp. cond., ext int enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
- WT_FN_RDYFN);
- }
- write_scc(priv, R0, ERR_RES);
- write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
-}
-
-
-static inline void rx_off(struct scc_priv *priv)
-{
- /* Disable receiver */
- write_scc(priv, R3, Rx8);
- /* Disable DREQ / RX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* Disable DMA */
- if (priv->param.dma >= 0)
- disable_dma(priv->param.dma);
-}
-
-
-static void start_timer(struct scc_priv *priv, int t, int r15)
-{
- outb(priv->tmr_mode, priv->tmr_ctrl);
- if (t == 0) {
- tm_isr(priv);
- } else if (t > 0) {
- outb(t & 0xFF, priv->tmr_cnt);
- outb((t >> 8) & 0xFF, priv->tmr_cnt);
- if (priv->type != TYPE_TWIN) {
- write_scc(priv, R15, r15 | CTSIE);
- priv->rr0 |= CTS;
- }
- }
-}
-
-
-static inline unsigned char random(void)
-{
- /* See "Numerical Recipes in C", second edition, p. 284 */
- rand = rand * 1664525L + 1013904223L;
- return (unsigned char) (rand >> 24);
-}
-
-static inline void z8530_isr(struct scc_info *info)
-{
- int is, i = 100;
-
- while ((is = read_scc(&info->priv[0], R3)) && i--) {
- if (is & CHARxIP) {
- rx_isr(&info->priv[0]);
- } else if (is & CHATxIP) {
- tx_isr(&info->priv[0]);
- } else if (is & CHAEXT) {
- es_isr(&info->priv[0]);
- } else if (is & CHBRxIP) {
- rx_isr(&info->priv[1]);
- } else if (is & CHBTxIP) {
- tx_isr(&info->priv[1]);
- } else {
- es_isr(&info->priv[1]);
- }
- write_scc(&info->priv[0], R0, RES_H_IUS);
- i++;
- }
- if (i < 0) {
- printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
- is);
- }
- /* Ok, no interrupts pending from this 8530. The INT line should
- be inactive now. */
-}
-
-
-static irqreturn_t scc_isr(int irq, void *dev_id)
-{
- struct scc_info *info = dev_id;
-
- spin_lock(info->priv[0].register_lock);
- /* At this point interrupts are enabled, and the interrupt under service
- is already acknowledged, but masked off.
-
- Interrupt processing: We loop until we know that the IRQ line is
- low. If another positive edge occurs afterwards during the ISR,
- another interrupt will be triggered by the interrupt controller
- as soon as the IRQ level is enabled again (see asm/irq.h).
-
- Bottom-half handlers will be processed after scc_isr(). This is
- important, since we only have small ringbuffers and want new data
- to be fetched/delivered immediately. */
-
- if (info->priv[0].type == TYPE_TWIN) {
- int is, card_base = info->priv[0].card_base;
- while ((is = ~inb(card_base + TWIN_INT_REG)) &
- TWIN_INT_MSK) {
- if (is & TWIN_SCC_MSK) {
- z8530_isr(info);
- } else if (is & TWIN_TMR1_MSK) {
- inb(card_base + TWIN_CLR_TMR1);
- tm_isr(&info->priv[0]);
- } else {
- inb(card_base + TWIN_CLR_TMR2);
- tm_isr(&info->priv[1]);
- }
- }
- } else
- z8530_isr(info);
- spin_unlock(info->priv[0].register_lock);
- return IRQ_HANDLED;
-}
-
-
-static void rx_isr(struct scc_priv *priv)
-{
- if (priv->param.dma >= 0) {
- /* Check special condition and perform error reset. See 2.4.7.5. */
- special_condition(priv, read_scc(priv, R1));
- write_scc(priv, R0, ERR_RES);
- } else {
- /* Check special condition for each character. Error reset not necessary.
- Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
- int rc;
- while (read_scc(priv, R0) & Rx_CH_AV) {
- rc = read_scc(priv, R1);
- if (priv->rx_ptr < BUF_SIZE)
- priv->rx_buf[priv->rx_head][priv->
- rx_ptr++] =
- read_scc_data(priv);
- else {
- priv->rx_over = 2;
- read_scc_data(priv);
- }
- special_condition(priv, rc);
- }
- }
-}
-
-
-static void special_condition(struct scc_priv *priv, int rc)
-{
- int cb;
- unsigned long flags;
-
- /* See Figure 2-15. Only overrun and EOF need to be checked. */
-
- if (rc & Rx_OVR) {
- /* Receiver overrun */
- priv->rx_over = 1;
- if (priv->param.dma < 0)
- write_scc(priv, R0, ERR_RES);
- } else if (rc & END_FR) {
- /* End of frame. Get byte count */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
- cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
- 2;
- release_dma_lock(flags);
- } else {
- cb = priv->rx_ptr - 2;
- }
- if (priv->rx_over) {
- /* We had an overrun */
- priv->dev->stats.rx_errors++;
- if (priv->rx_over == 2)
- priv->dev->stats.rx_length_errors++;
- else
- priv->dev->stats.rx_fifo_errors++;
- priv->rx_over = 0;
- } else if (rc & CRC_ERR) {
- /* Count invalid CRC only if packet length >= minimum */
- if (cb >= 15) {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_crc_errors++;
- }
- } else {
- if (cb >= 15) {
- if (priv->rx_count < NUM_RX_BUF - 1) {
- /* Put good frame in FIFO */
- priv->rx_len[priv->rx_head] = cb;
- priv->rx_head =
- (priv->rx_head +
- 1) % NUM_RX_BUF;
- priv->rx_count++;
- schedule_work(&priv->rx_work);
- } else {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_over_errors++;
- }
- }
- }
- /* Get ready for new frame */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- } else {
- priv->rx_ptr = 0;
- }
- }
-}
-
-
-static void rx_bh(struct work_struct *ugli_api)
-{
- struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
- int i = priv->rx_tail;
- int cb;
- unsigned long flags;
- struct sk_buff *skb;
- unsigned char *data;
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- while (priv->rx_count) {
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- cb = priv->rx_len[i];
- /* Allocate buffer */
- skb = dev_alloc_skb(cb + 1);
- if (skb == NULL) {
- /* Drop packet */
- priv->dev->stats.rx_dropped++;
- } else {
- /* Fill buffer */
- data = skb_put(skb, cb + 1);
- data[0] = 0;
- memcpy(&data[1], priv->rx_buf[i], cb);
- skb->protocol = ax25_type_trans(skb, priv->dev);
- netif_rx(skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += cb;
- }
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move tail */
- priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
- priv->rx_count--;
- }
- spin_unlock_irqrestore(&priv->ring_lock, flags);
-}
-
-
-static void tx_isr(struct scc_priv *priv)
-{
- int i = priv->tx_tail, p = priv->tx_ptr;
-
- /* Suspend TX interrupts if we don't want to send anything.
- See Figure 2-22. */
- if (p == priv->tx_len[i]) {
- write_scc(priv, R0, RES_Tx_P);
- return;
- }
-
- /* Write characters */
- while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
- write_scc_data(priv, priv->tx_buf[i][p++], 0);
- }
-
- /* Reset EOM latch of Z8530 */
- if (!priv->tx_ptr && p && priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-
- priv->tx_ptr = p;
-}
-
-
-static void es_isr(struct scc_priv *priv)
-{
- int i, rr0, drr0, res;
- unsigned long flags;
-
- /* Read status, reset interrupt bit (open latches) */
- rr0 = read_scc(priv, R0);
- write_scc(priv, R0, RES_EXT_INT);
- drr0 = priv->rr0 ^ rr0;
- priv->rr0 = rr0;
-
- /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
- it might have already been cleared again by AUTOEOM. */
- if (priv->state == TX_DATA) {
- /* Get remaining bytes */
- i = priv->tx_tail;
- if (priv->param.dma >= 0) {
- disable_dma(priv->param.dma);
- flags = claim_dma_lock();
- res = get_dma_residue(priv->param.dma);
- release_dma_lock(flags);
- } else {
- res = priv->tx_len[i] - priv->tx_ptr;
- priv->tx_ptr = 0;
- }
- /* Disable DREQ / TX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- if (res) {
- /* Update packet statistics */
- priv->dev->stats.tx_errors++;
- priv->dev->stats.tx_fifo_errors++;
- /* Other underrun interrupts may already be waiting */
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R0, RES_EXT_INT);
- } else {
- /* Update packet statistics */
- priv->dev->stats.tx_packets++;
- priv->dev->stats.tx_bytes += priv->tx_len[i];
- /* Remove frame from FIFO */
- priv->tx_tail = (i + 1) % NUM_TX_BUF;
- priv->tx_count--;
- /* Inform upper layers */
- netif_wake_queue(priv->dev);
- }
- /* Switch state */
- write_scc(priv, R15, 0);
- if (priv->tx_count &&
- time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) {
- priv->state = TX_PAUSE;
- start_timer(priv, priv->param.txpause, 0);
- } else {
- priv->state = TX_TAIL;
- start_timer(priv, priv->param.txtail, 0);
- }
- }
-
- /* DCD transition */
- if (drr0 & DCD) {
- if (rr0 & DCD) {
- switch (priv->state) {
- case IDLE:
- case WAIT:
- priv->state = DCD_ON;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdon, 0);
- }
- } else {
- switch (priv->state) {
- case RX_ON:
- rx_off(priv);
- priv->state = DCD_OFF;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdoff, 0);
- }
- }
- }
-
- /* CTS transition */
- if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
- tm_isr(priv);
-
-}
-
-
-static void tm_isr(struct scc_priv *priv)
-{
- switch (priv->state) {
- case TX_HEAD:
- case TX_PAUSE:
- tx_on(priv);
- priv->state = TX_DATA;
- break;
- case TX_TAIL:
- write_scc(priv, R5, TxCRC_ENAB | Tx8);
- priv->state = RTS_OFF;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.rtsoff, 0);
- break;
- case RTS_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- priv->dev->stats.collisions++;
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv, priv->param.waittime, DCDIE);
- }
- break;
- case WAIT:
- if (priv->tx_count) {
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5,
- TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- } else {
- priv->state = IDLE;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, DCDIE);
- }
- break;
- case DCD_ON:
- case DCD_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv,
- random() / priv->param.persist *
- priv->param.slottime, DCDIE);
- }
- break;
- }
-}
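
For context on the timing logic removed above: the driver's tm_isr() implemented p-persistent CSMA, waiting random() / persist * slottime timer ticks after DCD drops before keying the transmitter, with random() being the "Numerical Recipes" linear congruential generator seeded from jiffies. A minimal user-space C sketch of that backoff computation follows; the parameter values are hypothetical, not taken from the driver:

	#include <stdio.h>

	/* The LCG the driver used ("Numerical Recipes in C", 2nd ed., p. 284). */
	static unsigned long rand_state = 12345;	/* the driver seeded from jiffies */

	static unsigned char prng(void)
	{
		rand_state = rand_state * 1664525L + 1013904223L;
		return (unsigned char)(rand_state >> 24);
	}

	int main(void)
	{
		/* Hypothetical channel-access parameters: persist is 1..256 and
		 * slottime is in 1/TMR_0_HZ ticks, as in struct scc_param above.
		 */
		int persist = 64, slottime = 10;
		int i;

		for (i = 0; i < 5; i++)
			printf("backoff = %d ticks\n", prng() / persist * slottime);
		return 0;
	}

Smaller persist values make a station back off longer on average, which is the knob shared-channel AX.25 operators tune against collisions.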
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 16105292b140..74e845fa2e07 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1355,7 +1355,9 @@ static int rr_close(struct net_device *dev)
rrpriv->fw_running = 0;
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
del_timer_sync(&rrpriv->timer);
+ spin_lock_irqsave(&rrpriv->lock, flags);
writel(0, &regs->TxPi);
writel(0, &regs->IpRxPi);
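
The rrunner fix above addresses a classic self-deadlock: del_timer_sync() spins until any running timer callback has returned, so calling it while holding a lock that the callback itself takes can never complete. Dropping the lock around the cancellation is the usual shape of the fix. A minimal sketch with assumed names, not the rrunner code itself:

	#include <linux/spinlock.h>
	#include <linux/timer.h>

	struct demo_priv {
		spinlock_t lock;
		struct timer_list timer;	/* handler takes demo_priv.lock */
	};

	static void demo_teardown(struct demo_priv *p)
	{
		unsigned long flags;

		spin_lock_irqsave(&p->lock, flags);
		/* ... mark the device as stopping under the lock ... */
		spin_unlock_irqrestore(&p->lock, flags);

		/* Cancel with the lock dropped: a concurrently running handler
		 * may be waiting for that same lock while we wait for it here.
		 */
		del_timer_sync(&p->timer);

		spin_lock_irqsave(&p->lock, flags);
		/* ... finish teardown under the lock ... */
		spin_unlock_irqrestore(&p->lock, flags);
	}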
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index cf69da0e296c..25b38a374e3c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/hyperv.h>
#include <linux/rndis.h>
+#include <linux/jhash.h>
/* RSS related */
#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
@@ -237,6 +238,7 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev);
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp);
unsigned int netvsc_xdp_fraglen(unsigned int len);
@@ -246,6 +248,8 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netvsc_device *nvdev);
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags);
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
@@ -942,12 +946,21 @@ struct nvsc_rsc {
#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
-struct netvsc_stats {
+struct netvsc_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ struct u64_stats_sync syncp;
+};
+
+struct netvsc_stats_rx {
u64 packets;
u64 bytes;
u64 broadcast;
u64 multicast;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
struct u64_stats_sync syncp;
};
@@ -1046,6 +1059,55 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+ * packets. We can use ethtool to change UDP hash level when necessary.
+ */
+static inline u32 netvsc_get_hash(struct sk_buff *skb,
+ const struct net_device_context *ndc)
+{
+ struct flow_keys flow;
+ u32 hash, pkt_proto = 0;
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ return 0;
+
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
+ return skb_get_hash(skb);
+ } else {
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+ return 0;
+
+ __skb_set_sw_hash(skb, hash, false);
+ }
+
+ return hash;
+}
+
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
@@ -1060,9 +1122,10 @@ struct netvsc_channel {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ bool xdp_flush;
- struct netvsc_stats tx_stats;
- struct netvsc_stats rx_stats;
+ struct netvsc_stats_tx tx_stats;
+ struct netvsc_stats_rx rx_stats;
};
/* Per netvsc device */
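
The split of struct netvsc_stats into separate TX and RX variants above keeps the u64_stats_sync discipline intact: writers bracket counter updates, and readers retry until they see a consistent snapshot (the brackets compile away on 64-bit, where the loads are atomic anyway). A sketch of both sides against the new RX struct; the helper names are illustrative, not part of the patch:

	#include <linux/u64_stats_sync.h>

	static void demo_rx_account(struct netvsc_stats_rx *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	static void demo_rx_snapshot(const struct netvsc_stats_rx *s,
				     u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {	/* retry if a writer raced with us (32-bit SMP only) */
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*pkts = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}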
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4061af5baaea..6e42cb03e226 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
+#include <linux/filter.h>
#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>
@@ -805,7 +806,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
- struct netvsc_stats *tx_stats;
+ struct netvsc_stats_tx *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
@@ -1670,12 +1671,17 @@ int netvsc_poll(struct napi_struct *napi, int budget)
if (!nvchan->desc)
nvchan->desc = hv_pkt_iter_first(channel);
+ nvchan->xdp_flush = false;
+
while (nvchan->desc && work_done < budget) {
work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
ndev, nvchan->desc, budget);
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
+ if (nvchan->xdp_flush)
+ xdp_do_flush();
+
/* Send any pending receive completions */
ret = send_recv_completions(ndev, net_device, nvchan);
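
The xdp_flush bookkeeping added above follows the standard XDP batching contract: xdp_do_redirect() only queues frames into per-CPU bulk queues, and a single xdp_do_flush() after the NAPI loop actually transmits them. Flushing once per poll rather than per packet is what makes redirect fast. A sketch of the shape, with the ring helpers left hypothetical:

	#include <linux/filter.h>	/* xdp_do_flush() */
	#include <linux/netdevice.h>

	static bool demo_have_desc(void);	/* hypothetical ring helpers */
	static int demo_process_one(void);	/* returns the XDP verdict */

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		bool need_flush = false;
		int work = 0;

		while (work < budget && demo_have_desc()) {
			if (demo_process_one() == XDP_REDIRECT)
				need_flush = true;
			work++;
		}

		if (need_flush)	/* one flush drains every queued redirect */
			xdp_do_flush();

		return work;
	}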
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 232c4a0efd7b..4a9522689fa4 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
@@ -23,11 +24,13 @@
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp)
{
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
void *data = nvchan->rsc.data[0];
u32 len = nvchan->rsc.len[0];
struct page *page = NULL;
struct bpf_prog *prog;
u32 act = XDP_PASS;
+ bool drop = true;
xdp->data_hard_start = NULL;
@@ -60,9 +63,34 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
switch (act) {
case XDP_PASS:
case XDP_TX:
+ drop = false;
+ break;
+
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ if (!xdp_do_redirect(ndev, xdp, prog)) {
+ nvchan->xdp_flush = true;
+ drop = false;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+
+ rx_stats->xdp_redirect++;
+ rx_stats->packets++;
+ rx_stats->bytes += nvchan->rsc.pktlen;
+
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ } else {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
@@ -74,7 +102,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
out:
rcu_read_unlock();
- if (page && act != XDP_PASS && act != XDP_TX) {
+ if (page && drop) {
__free_page(page);
xdp->data_hard_start = NULL;
}
@@ -197,3 +225,68 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
return -EINVAL;
}
}
+
+static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
+ struct xdp_frame *frame, u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ netvsc_get_hash(skb, netdev_priv(ndev));
+
+ skb_record_rx_queue(skb, q_idx);
+
+ netvsc_xdp_xmit(skb, ndev);
+
+ return 0;
+}
+
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ const struct net_device_ops *vf_ops;
+ struct netvsc_stats_tx *tx_stats;
+ struct netvsc_device *nvsc_dev;
+ struct net_device *vf_netdev;
+ int i, count = 0;
+ u16 q_idx;
+
+ /* Don't transmit if netvsc_device is gone */
+ nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
+ if (unlikely(!nvsc_dev || nvsc_dev->destroy))
+ return 0;
+
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+ * If netpoll is in use, then the VF cannot be used either.
+ */
+ vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
+ vf_netdev->netdev_ops->ndo_xdp_xmit &&
+ ndev_ctx->data_path_is_vf) {
+ vf_ops = vf_netdev->netdev_ops;
+ return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
+ }
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
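
With .ndo_xdp_xmit wired up (see the netvsc_drv.c hunk below), other interfaces can now redirect frames into the synthetic NIC; since vmbus has no native XDP frame transmit, each frame is rebuilt as an skb via xdp_build_skb_from_frame() and sent down the normal path. A hypothetical XDP program on a peer NIC that would exercise this entry point through a DEVMAP; the map layout and names are illustrative:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);	/* ifindex of the netvsc device */
	} tx_port SEC(".maps");

	SEC("xdp")
	int redirect_to_netvsc(struct xdp_md *ctx)
	{
		__u32 key = 0;

		/* Lower flag bits select the fallback verdict on lookup failure. */
		return bpf_redirect_map(&tx_port, key, XDP_PASS);
	}

	char _license[] SEC("license") = "GPL";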
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fde1c492ca02..27f6bbca6619 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -242,56 +242,6 @@ static inline void *init_ppi_data(struct rndis_message *msg,
return ppi + 1;
}
-/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
- * packets. We can use ethtool to change UDP hash level when necessary.
- */
-static inline u32 netvsc_get_hash(
- struct sk_buff *skb,
- const struct net_device_context *ndc)
-{
- struct flow_keys flow;
- u32 hash, pkt_proto = 0;
- static u32 hashrnd __read_mostly;
-
- net_get_random_once(&hashrnd, sizeof(hashrnd));
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
- return 0;
-
- switch (flow.basic.ip_proto) {
- case IPPROTO_TCP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_TCP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_TCP6_L4HASH;
-
- break;
-
- case IPPROTO_UDP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_UDP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_UDP6_L4HASH;
-
- break;
- }
-
- if (pkt_proto & ndc->l4_hash) {
- return skb_get_hash(skb);
- } else {
- if (flow.basic.n_proto == htons(ETH_P_IP))
- hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
- else
- return 0;
-
- __skb_set_sw_hash(skb, hash, false);
- }
-
- return hash;
-}
-
static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sk_buff *skb, int old_idx)
{
@@ -804,7 +754,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
}
/* This function should only be called after skb_record_rx_queue() */
-static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -925,7 +875,7 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
struct xdp_buff xdp;
u32 act;
@@ -934,6 +884,9 @@ int netvsc_recv_callback(struct net_device *net,
act = netvsc_run_xdp(net, nvchan, &xdp);
+ if (act == XDP_REDIRECT)
+ return NVSP_STAT_SUCCESS;
+
if (act != XDP_PASS && act != XDP_TX) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_drop++;
@@ -958,6 +911,9 @@ int netvsc_recv_callback(struct net_device *net,
* statistics will not work correctly.
*/
u64_stats_update_begin(&rx_stats->syncp);
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -1353,28 +1309,29 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
/* fetch percpu stats of netvsc */
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_ethtool_pcpu_stats *this_tot =
&pcpu_tot[nvchan->channel->target_cpu];
u64 packets, bytes;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;
@@ -1406,27 +1363,28 @@ static void netvsc_get_stats64(struct net_device *net,
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
u64 packets, bytes, multicast;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- multicast = stats->multicast + stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ multicast = rx_stats->multicast + rx_stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
@@ -1515,8 +1473,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
+/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1543,12 +1501,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
- const struct netvsc_stats *qstats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_vf_pcpu_stats sum;
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
+ u64 xdp_xmit;
int i, j, cpu;
if (!nvdev)
@@ -1562,26 +1524,32 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
for (j = 0; j < nvdev->num_chn; j++) {
- qstats = &nvdev->chan_table[j].tx_stats;
+ tx_stats = &nvdev->chan_table[j].tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
- qstats = &nvdev->chan_table[j].rx_stats;
+ rx_stats = &nvdev->chan_table[j].rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- xdp_drop = qstats->xdp_drop;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_redirect = rx_stats->xdp_redirect;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
+ data[i++] = xdp_redirect;
+ data[i++] = xdp_tx;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
@@ -1622,9 +1590,12 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
for (i = 0; i < nvdev->num_chn; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
ethtool_sprintf(&p, "rx_queue_%u_packets", i);
ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
}
for_each_present_cpu(cpu) {
@@ -2057,6 +2028,7 @@ static const struct net_device_ops device_ops = {
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
.ndo_bpf = netvsc_bpf,
+ .ndo_xdp_xmit = netvsc_ndoxdp_xmit,
};
/*
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 888e94278a84..e133eb2bebcf 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -130,9 +130,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
*/
if (data->endpoint.config.aggregation) {
limit += SZ_1K * aggr_byte_limit_max(ipa->version);
- if (buffer_size > limit) {
+ if (buffer_size - NET_SKB_PAD > limit) {
dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
- data->endpoint_id, buffer_size, limit);
+ data->endpoint_id,
+ buffer_size - NET_SKB_PAD, limit);
return false;
}
@@ -739,6 +740,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx_data *rx_data;
+ u32 buffer_size;
bool close_eof;
u32 limit;
@@ -746,7 +748,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- limit = ipa_aggr_size_kb(rx_data->buffer_size);
+ buffer_size = rx_data->buffer_size;
+ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
val |= aggr_byte_limit_encoded(version, limit);
limit = IPA_AGGR_TIME_LIMIT;
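
Both ipa hunks correct the same accounting detail: rx_data->buffer_size now includes the NET_SKB_PAD headroom that skb construction reserves, so the space the hardware may actually aggregate into is buffer_size - NET_SKB_PAD, and both the validation check and the programmed byte limit must use that value. A trivial sketch of the adjusted arithmetic:

	#include <linux/skbuff.h>	/* NET_SKB_PAD */
	#include <linux/types.h>

	/* Usable RX payload once the skb headroom is carved off the buffer. */
	static u32 demo_aggr_payload(u32 buffer_size)
	{
		return buffer_size - NET_SKB_PAD;
	}

	static bool demo_buffer_fits(u32 buffer_size, u32 limit)
	{
		return demo_aggr_payload(buffer_size) <= limit;
	}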
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 069e8824c264..b00bc8173abe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
*pskb = skb;
eth = eth_hdr(skb);
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
src = macvlan_hash_lookup(port, eth->h_source);
if (src && src->mode != MACVLAN_MODE_VEPA &&
src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
if (macvlan_passthru(port))
vlan = list_first_or_null_rcu(&port->vlans,
struct macvlan_dev, list);
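
The macvlan fix above plugs an skb leak rooted in the rx_handler ownership contract: returning RX_HANDLER_CONSUMED tells the stack the handler has taken the packet, so the handler must free (or forward) it itself; macvlan_forward_source() only delivers clones, leaving the original unaccounted for. A minimal sketch of the contract, with a hypothetical predicate:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static bool demo_should_steal(const struct sk_buff *skb);	/* hypothetical */

	static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;

		if (demo_should_steal(skb)) {
			kfree_skb(skb);		/* consumed: we must dispose of it */
			return RX_HANDLER_CONSUMED;
		}

		return RX_HANDLER_PASS;		/* the stack keeps ownership */
	}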
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 1becb1a731f6..1c1584fca632 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
int rc;
rc = fwnode_irq_get(child, 0);
+ /* Don't wait forever if the IRQ provider doesn't become available;
+ * just fall back to poll mode.
+ */
+ if (rc == -EPROBE_DEFER)
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
if (rc == -EPROBE_DEFER)
return rc;
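
The fwnode_mdio change keys off driver_deferred_probe_check_state(), which keeps returning -EPROBE_DEFER only while deferral can still succeed and typically reports -ENODEV or -ETIMEDOUT once deferred probe has given up; at that point the PHY can simply run in polling mode rather than never probing. A hedged sketch of the full decision, with the polling fallback spelled out:

	#include <linux/device.h>
	#include <linux/phy.h>
	#include <linux/property.h>

	static int demo_phy_irq(struct fwnode_handle *child, struct phy_device *phy)
	{
		int rc = fwnode_irq_get(child, 0);

		if (rc == -EPROBE_DEFER)
			rc = driver_deferred_probe_check_state(&phy->mdio.dev);
		if (rc == -EPROBE_DEFER)
			return rc;		/* provider may still show up */

		if (rc > 0)
			phy->irq = rc;		/* valid IRQ number */
		else
			phy->irq = PHY_POLL;	/* give up on the IRQ, poll instead */

		return 0;
	}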
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 85b4b097da05..08541007b18a 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -272,6 +272,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
{
struct regmap *mii_regmap, *phy_regmap = NULL;
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
void __iomem *regs, *phy_regs;
struct mscc_miim_dev *miim;
struct resource *res;
@@ -280,57 +281,55 @@ static int mscc_miim_probe(struct platform_device *pdev)
regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(regs)) {
- dev_err(&pdev->dev, "Unable to map MIIM registers\n");
+ dev_err(dev, "Unable to map MIIM registers\n");
return PTR_ERR(regs);
}
- mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs,
- &mscc_miim_regmap_config);
+ mii_regmap = devm_regmap_init_mmio(dev, regs, &mscc_miim_regmap_config);
if (IS_ERR(mii_regmap)) {
- dev_err(&pdev->dev, "Unable to create MIIM regmap\n");
+ dev_err(dev, "Unable to create MIIM regmap\n");
return PTR_ERR(mii_regmap);
}
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
- phy_regs = devm_ioremap_resource(&pdev->dev, res);
+ phy_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(phy_regs)) {
- dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+ dev_err(dev, "Unable to map internal phy registers\n");
return PTR_ERR(phy_regs);
}
- phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
+ phy_regmap = devm_regmap_init_mmio(dev, phy_regs,
&mscc_miim_phy_regmap_config);
if (IS_ERR(phy_regmap)) {
- dev_err(&pdev->dev, "Unable to create phy register regmap\n");
+ dev_err(dev, "Unable to create phy register regmap\n");
return PTR_ERR(phy_regmap);
}
}
- ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0);
+ ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "Unable to setup the MDIO bus\n");
+ dev_err(dev, "Unable to setup the MDIO bus\n");
return ret;
}
miim = bus->priv;
miim->phy_regs = phy_regmap;
- miim->info = device_get_match_data(&pdev->dev);
+ miim->info = device_get_match_data(dev);
if (!miim->info)
return -EINVAL;
- miim->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ miim->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(miim->clk))
return PTR_ERR(miim->clk);
of_property_read_u32(np, "clock-frequency", &miim->bus_freq);
if (miim->bus_freq && !miim->clk) {
- dev_err(&pdev->dev,
- "cannot use clock-frequency without a clock\n");
+ dev_err(dev, "cannot use clock-frequency without a clock\n");
return -EINVAL;
}
@@ -344,7 +343,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
ret = of_mdiobus_register(bus, np);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ dev_err(dev, "Cannot register MDIO bus (%d)\n", ret);
goto out_disable_clk;
}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 378ee779061c..c8f398f5bc5b 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -22,6 +22,7 @@
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <net/fib_notifier.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
@@ -78,7 +79,7 @@ struct nsim_fib_rt {
struct nsim_fib4_rt {
struct nsim_fib_rt common;
struct fib_info *fi;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
@@ -283,7 +284,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data,
fib4_rt->fi = fen_info->fi;
fib_info_hold(fib4_rt->fi);
- fib4_rt->tos = fen_info->tos;
+ fib4_rt->dscp = fen_info->dscp;
fib4_rt->type = fen_info->type;
return fib4_rt;
@@ -322,7 +323,7 @@ nsim_fib4_rt_offload_failed_flag_set(struct net *net,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -342,7 +343,7 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net,
fri.tb_id = fib4_rt->common.key.tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_rt->tos;
+ fri.dscp = fib4_rt->dscp;
fri.type = fib4_rt->type;
fri.offload = false;
fri.trap = trap;
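dscp_t, from <net/inet_dscp.h>, is a __bitwise-annotated type, so sparse flags any place a raw TOS byte is mixed with a DSCP value; conversions must go through the dedicated helpers. A minimal sketch:

#include <net/inet_dscp.h>

static void example_dscp_roundtrip(u8 tos)
{
	/* Strips the ECN bits and returns a type-safe DSCP value. */
	dscp_t dscp = inet_dsfield_to_dscp(tos);

	/* Back to a plain DS-field byte for the wire. */
	u8 dsfield = inet_dscp_to_dsfield(dscp);

	(void)dsfield;
}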
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 389df3f4293c..d4c93d59bc53 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -68,7 +68,12 @@
#define T1_POST_LCK_MUFACT_CFG_REG 0x1C
#define T1_TX_RX_FIFO_CFG_REG 0x02
#define T1_TX_LPF_FIR_CFG_REG 0x55
+#define T1_COEF_CLK_PWR_DN_CFG 0x04
+#define T1_COEF_RW_CTL_CFG 0x0D
#define T1_SQI_CONFIG_REG 0x2E
+#define T1_SQI_CONFIG2_REG 0x4A
+#define T1_DCQ_SQI_REG 0xC3
+#define T1_DCQ_SQI_MSK GENMASK(3, 1)
#define T1_MDIO_CONTROL2_REG 0x10
#define T1_INTERRUPT_SOURCE_REG 0x18
#define T1_INTERRUPT2_SOURCE_REG 0x08
@@ -82,6 +87,9 @@
#define T1_MODE_STAT_REG 0x11
#define T1_LINK_UP_MSK BIT(0)
+/* SQI defines */
+#define LAN87XX_MAX_SQI 0x07
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
@@ -346,9 +354,20 @@ static int lan87xx_phy_init(struct phy_device *phydev)
T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 },
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ /* Setup SQI measurement */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_CLK_PWR_DN_CFG, 0x16d6, 0 },
/* SQI enable */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_SQI_CONFIG_REG, 0x9572, 0 },
+ /* SQI select mode 5 */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SQI_CONFIG2_REG, 0x0001, 0 },
+ /* Throw away the first SQI reading */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_RW_CTL_CFG, 0x0301, 0 },
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP,
+ T1_DCQ_SQI_REG, 0, 0 },
/* Flag LPS and WUR as idle errors */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
T1_MDIO_CONTROL2_REG, 0x0014, 0 },
@@ -706,7 +725,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
static int lan87xx_config_aneg(struct phy_device *phydev)
{
u16 ctl = 0;
- int rc;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -722,11 +740,32 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return -EOPNOTSUPP;
}
- rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
- if (rc == 1)
- rc = genphy_soft_reset(phydev);
+ return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+}
+
+static int lan87xx_get_sqi(struct phy_device *phydev)
+{
+ u8 sqi_value = 0;
+ int rc;
- return rc;
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_DSP, T1_COEF_RW_CTL_CFG, 0x0301);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, T1_DCQ_SQI_REG, 0x0);
+ if (rc < 0)
+ return rc;
+
+ sqi_value = FIELD_GET(T1_DCQ_SQI_MSK, rc);
+
+ return sqi_value;
+}
+
+static int lan87xx_get_sqi_max(struct phy_device *phydev)
+{
+ return LAN87XX_MAX_SQI;
}
static struct phy_driver microchip_t1_phy_driver[] = {
@@ -742,18 +781,25 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.resume = genphy_resume,
.config_aneg = lan87xx_config_aneg,
.read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
.name = "Microchip LAN937x T1",
+ .flags = PHY_POLL_CABLE_TEST,
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
+ .config_intr = lan87xx_phy_config_intr,
+ .handle_interrupt = lan87xx_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = lan87xx_config_aneg,
.read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
}
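How the SQI value read above is decoded: T1_DCQ_SQI_MSK is GENMASK(3, 1), and FIELD_GET() masks and right-shifts in one step, yielding 0..7 to match LAN87XX_MAX_SQI. A standalone sketch of the extraction (the register value in the comment is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_SQI_MSK	GENMASK(3, 1)	/* mirrors T1_DCQ_SQI_MSK */

static u8 example_extract_sqi(u16 reg)
{
	/* For reg = 0x000e (bits 3..1 all set) this returns 7. */
	return FIELD_GET(EXAMPLE_SQI_MSK, reg);
}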
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 06943889d747..33c285252584 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2778,34 +2778,6 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
/* Helpers for MAC drivers */
-/**
- * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper
- * @state: a pointer to a &struct phylink_link_state
- *
- * Inspect the interface mode, advertising mask or forced speed and
- * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching
- * the interface mode to suit. @state->interface is appropriately
- * updated, and the advertising mask has the "other" baseX_Full flag
- * cleared.
- */
-void phylink_helper_basex_speed(struct phylink_link_state *state)
-{
- if (phy_interface_mode_is_8023z(state->interface)) {
- bool want_2500 = state->an_enabled ?
- phylink_test(state->advertising, 2500baseX_Full) :
- state->speed == SPEED_2500;
-
- if (want_2500) {
- phylink_clear(state->advertising, 1000baseX_Full);
- state->interface = PHY_INTERFACE_MODE_2500BASEX;
- } else {
- phylink_clear(state->advertising, 2500baseX_Full);
- state->interface = PHY_INTERFACE_MODE_1000BASEX;
- }
- }
-}
-EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
-
static void phylink_decode_c37_word(struct phylink_link_state *state,
uint16_t config_reg, int speed)
{
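With phylink_helper_basex_speed() removed, MAC drivers declare up front which serdes interface modes they support and phylink selects between 1000BASE-X and 2500BASE-X itself. A minimal sketch of the replacement setup, assuming a 2.5G-capable MAC (the capability values are illustrative):

#include <linux/phylink.h>

static void example_phylink_caps(struct phylink_config *config)
{
	config->mac_capabilities = MAC_1000FD | MAC_2500FD;

	/* phylink picks the interface from this mask instead of the
	 * removed helper rewriting state->interface after the fact.
	 */
	__set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_2500BASEX, config->supported_interfaces);
}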
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 276a0e42ca8e..dbe4c0a4be2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* NETIF_F_LLTX requires us to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
- queue->trans_start = jiffies;
+ txq_trans_cond_update(queue);
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
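txq_trans_cond_update() replaces the unconditional store: it writes trans_start only when the jiffies value has actually changed, which keeps an LLTX fast path from dirtying a shared cache line on every packet. Roughly, the helper behaves like this sketch (not a verbatim copy of the kernel's inline):

#include <linux/netdevice.h>

static inline void example_trans_cond_update(struct netdev_queue *txq)
{
	unsigned long now = jiffies;

	/* Skip the store when nothing changed. */
	if (READ_ONCE(txq->trans_start) != now)
		WRITE_ONCE(txq->trans_start, now);
}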
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9b4dfa3001d6..2de09ad5bac0 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -479,7 +479,7 @@ static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf)
* device MAC address has been updated). Always set MAC address to that of the
* device.
*/
-static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02))
return 1;
@@ -489,6 +489,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
+EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup);
/* Ensure correct link state
*
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 15f91d691bba..cdca00c0dc1f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1492,19 +1492,19 @@ static void cdc_ncm_txpath_bh(struct tasklet_struct *t)
struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh);
struct usbnet *dev = ctx->dev;
- spin_lock_bh(&ctx->mtx);
+ spin_lock(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
} else if (dev->net != NULL) {
ctx->tx_reason_timeout++; /* count reason for transmitting */
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
netif_tx_unlock_bh(dev->net);
} else {
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
}
}
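The locking change above is purely about context: a tasklet runs in softirq context with bottom halves already disabled, so the _bh lock variants only re-disable what is already off and the plain ones suffice. Process-context code sharing the same lock still needs spin_lock_bh(). A minimal sketch:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_tasklet_fn(struct tasklet_struct *t)
{
	/* Softirq context: BHs are off, plain spin_lock() is enough. */
	spin_lock(&example_lock);
	/* ... touch state shared with process context ... */
	spin_unlock(&example_lock);
}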
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 17cf0d10fef9..79f8bd849b1a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1350,6 +1350,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0xc081, 8)}, /* Sierra Wireless EM7590 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1357,6 +1358,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 247f58cb0f84..4e70dec30e5a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,10 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
goto halt_fail_and_release;
}
- if (bp[0] & 0x02)
- eth_hw_addr_random(net);
- else
- eth_hw_addr_set(net, bp);
+ eth_hw_addr_set(net, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
@@ -463,6 +460,16 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS);
}
+static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status = rndis_bind(dev, intf);
+
+ if (!status && (dev->net->dev_addr[0] & 0x02))
+ eth_hw_addr_random(dev->net);
+
+ return status;
+}
+
void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct rndis_halt *halt;
@@ -485,10 +492,14 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ bool dst_mac_fixup;
+
/* This check is no longer done by usbnet */
if (skb->len < dev->net->hard_header_len)
return 0;
+ dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP);
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
@@ -523,10 +534,17 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
skb_pull(skb, msg_len - sizeof *hdr);
skb_trim(skb2, data_len);
+
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb2);
+
usbnet_skb_return(dev, skb2);
}
/* caller will usbnet_skb_return the remaining packet */
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb);
+
return 1;
}
EXPORT_SYMBOL_GPL(rndis_rx_fixup);
@@ -600,6 +618,17 @@ static const struct driver_info rndis_poll_status_info = {
.tx_fixup = rndis_tx_fixup,
};
+static const struct driver_info zte_rndis_info = {
+ .description = "ZTE RNDIS device",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+ .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP,
+ .bind = zte_rndis_bind,
+ .unbind = rndis_unbind,
+ .status = rndis_status,
+ .rx_fixup = rndis_rx_fixup,
+ .tx_fixup = rndis_tx_fixup,
+};
+
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -614,6 +643,16 @@ static const struct usb_device_id products [] = {
USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long)&rndis_info,
}, {
+ /* ZTE WWAN modules */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
+ /* ZTE WWAN modules, ACM flavour */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_info,
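The dev_addr[0] & 0x02 test used in zte_rndis_bind() checks the locally-administered bit of the first MAC octet; <linux/etherdevice.h> has a named predicate for it. A sketch of the same logic (not the driver's code):

#include <linux/etherdevice.h>

static void example_randomize_if_local(struct net_device *net)
{
	/* Some firmware reports a locally administered address as its
	 * burned-in MAC; replace such addresses with a random one.
	 */
	if (is_local_ether_addr(net->dev_addr))
		eth_hw_addr_random(net);
}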
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
index 18f670251275..952e6f7c0321 100644
--- a/drivers/net/usb/sr9800.h
+++ b/drivers/net/usb/sr9800.h
@@ -163,7 +163,7 @@
#define SR9800_MAX_BULKIN_24K 6
#define SR9800_MAX_BULKIN_32K 7
-struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+static const struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
/* 2k */
{2048, 0x8000, 0x8001},
/* 4k */
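The sr9800.h change above matters because the table lives in a header: without static, every translation unit that includes the header emits its own external definition of SR9800_BULKIN_SIZE and the link can fail on duplicate symbols; static const gives each includer a private read-only copy instead. The same pattern in isolation:

/* In a shared header: each includer gets its own read-only copy,
 * and unused copies can be discarded by the compiler.
 */
static const struct { unsigned short size, byte_cnt, threshold; } example_tbl[] = {
	{ 2048, 0x8000, 0x8001 },	/* 2k */
};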
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 1b5714926d81..eb0121a64d6d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
+ if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
kfree_skb(skb);
goto drop;
}
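The added pskb_may_pull() guard guarantees the first ETH_HLEN bytes are in the skb's linear area before anything dereferences the Ethernet header, dropping runt or oddly fragmented frames instead. A minimal sketch of the guard:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static const struct ethhdr *example_eth_header(struct sk_buff *skb)
{
	/* Fails on frames shorter than an Ethernet header or with the
	 * header split across non-linear fragments it cannot pull in.
	 */
	if (!pskb_may_pull(skb, ETH_HLEN))
		return NULL;

	return (const struct ethhdr *)skb->data;
}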
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index de97ff98d36e..8a5e3a6d32d7 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
- return -ENOBUFS;
+ return -ENOMEM;
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
kfree(rd);
- return -ENOBUFS;
+ return -ENOMEM;
}
rd->remote_ip = *ip;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 588b2333cdb8..dcb069dde66b 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -23,50 +23,6 @@ menuconfig WAN
if WAN
-# There is no way to detect a Comtrol SV-11 - force it modular for now.
-config HOSTESS_SV11
- tristate "Comtrol Hostess SV-11 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- Driver for Comtrol Hostess SV-11 network card which
- operates on low speed synchronous serial links at up to
- 256Kbps, supporting PPP and Cisco HDLC.
-
- The driver will be compiled as a module: the
- module will be called hostess_sv11.
-
-# The COSA/SRP driver has not been tested as non-modular yet.
-config COSA
- tristate "COSA/SRP sync serial boards support"
- depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
- help
- Driver for COSA and SRP synchronous serial boards.
-
- These boards allow you to connect synchronous serial devices (for
- example base-band modems, or any other device with an X.21, V.24,
- V.35 or V.36 interface) to your Linux box. The cards can work as a
- character device, a synchronous PPP network device, or a Cisco HDLC
- network device.
-
- You will need the COSA or SRP user-space utilities to download the
- firmware to the cards and to set them up. See
- <http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
- read the comment at the top of the <file:drivers/net/wan/cosa.c> for
- details about the cards and the driver itself.
-
- The driver will be compiled as a module: the
- module will be called cosa.
-
-# There is no way to detect a Sealevel board. Force it modular
-config SEALEVEL_4021
- tristate "Sealevel Systems 4021 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
-
- The driver will be compiled as a module: the
- module will be called sealevel.
-
# Generic HDLC
config HDLC
tristate "Generic HDLC layer"
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 1cd42147b34f..5bec8fae47f8 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,9 +14,6 @@ obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
-obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
-obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
-obj-$(CONFIG_COSA) += cosa.o
obj-$(CONFIG_FARSYNC) += farsync.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
deleted file mode 100644
index 23d2954d9747..000000000000
--- a/drivers/net/wan/cosa.c
+++ /dev/null
@@ -1,2052 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
-
-/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-/* The driver for the SRP and COSA synchronous serial cards.
- *
- * HARDWARE INFO
- *
- * Both cards are developed at the Institute of Computer Science,
- * Masaryk University (https://www.ics.muni.cz/). The hardware is
- * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
- * and the photo of both cards is available at
- * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
- * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/.
- * For Linux-specific utilities, see below in the "Software info" section.
- * If you want to order the card, contact Jiri Novotny.
- *
- * The SRP (serial port?, the Czech word "srp" means "sickle") card
- * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card
- * with V.24 interfaces up to 80kb/s each.
- *
- * The COSA (communication serial adapter?, the Czech word "kosa" means
- * "scythe") is a next-generation sync/async board with two interfaces
- * - currently any of V.24, X.21, V.35 and V.36 can be selected.
- * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel.
- * The 8-channels version is in development.
- *
- * Both types have downloadable firmware and communicate via ISA DMA.
- * COSA can be also a bus-mastering device.
- *
- * SOFTWARE INFO
- *
- * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/.
- * The CVS tree of Linux driver can be viewed there, as well as the
- * firmware binaries and user-space utilities for downloading the firmware
- * into the card and setting up the card.
- *
- * The Linux driver (unlike the present *BSD drivers :-) can work even
- * for the COSA and SRP in one computer and allows each channel to work
- * in one of the two modes (character or network device).
- *
- * AUTHOR
- *
- * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>.
- *
- * You can mail me bugfixes and even success reports. I am especially
- * interested in the SMP and/or multi-channel success/failure reports
- * (I wonder if I did the locking properly :-).
- *
- * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER
- *
- * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek
- * The skeleton.c by Donald Becker
- * The SDL Riscom/N2 driver by Mike Natale
- * The Comtrol Hostess SV11 driver by Alan Cox
- * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-
-#undef COSA_SLOW_IO /* for testing purposes only */
-
-#include "cosa.h"
-
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Maximum length of the channel name */
-#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1)
-
-/* Per-channel data structure */
-
-struct channel_data {
- int usage; /* Usage count; >0 for chrdev, -1 for netdev */
- int num; /* Number of the channel */
- struct cosa_data *cosa; /* Pointer to the per-card structure */
- int txsize; /* Size of transmitted data */
- char *txbuf; /* Transmit buffer */
- char name[COSA_MAX_NAME]; /* channel name */
-
- /* The HW layer interface */
- /* routine called from the RX interrupt */
- char *(*setup_rx)(struct channel_data *channel, int size);
- /* routine called when the RX is done (from the EOT interrupt) */
- int (*rx_done)(struct channel_data *channel);
- /* routine called when the TX is done (from the EOT interrupt) */
- int (*tx_done)(struct channel_data *channel, int size);
-
- /* Character device parts */
- struct mutex rlock;
- struct semaphore wsem;
- char *rxdata;
- int rxsize;
- wait_queue_head_t txwaitq, rxwaitq;
- int tx_status, rx_status;
-
- /* generic HDLC device parts */
- struct net_device *netdev;
- struct sk_buff *rx_skb, *tx_skb;
-};
-
-/* cosa->firmware_status bits */
-#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? */
-#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */
-#define COSA_FW_START BIT(2) /* Is the microcode running? */
-
-struct cosa_data {
- int num; /* Card number */
- char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */
- unsigned int datareg, statusreg; /* I/O ports */
- unsigned short irq, dma; /* IRQ and DMA number */
- unsigned short startaddr; /* Firmware start address */
- unsigned short busmaster; /* Use busmastering? */
- int nchannels; /* # of channels on this card */
- int driver_status; /* For communicating with firmware */
- int firmware_status; /* Downloaded, reset, etc. */
- unsigned long rxbitmap, txbitmap;/* Bitmap of channels who are willing to send/receive data */
- unsigned long rxtx; /* RX or TX in progress? */
- int enabled;
- int usage; /* usage count */
- int txchan, txsize, rxsize;
- struct channel_data *rxchan;
- char *bouncebuf;
- char *txbuf, *rxbuf;
- struct channel_data *chan;
- spinlock_t lock; /* For exclusive operations on this structure */
- char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */
- char *type; /* card type */
-};
-
-/* Define this if you want all the possible ports to be autoprobed.
- * It is here but it probably is not a good idea to use this.
- */
-/* #define COSA_ISA_AUTOPROBE 1*/
-
-/* Character device major number. 117 was allocated for us.
- * The value of 0 means to allocate a first free one.
- */
-static DEFINE_MUTEX(cosa_chardev_mutex);
-static int cosa_major = 117;
-
-/* Encoding of the minor numbers:
- * The lowest CARD_MINOR_BITS bits means the channel on the single card,
- * the highest bits means the card number.
- */
-#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
- * for the single card
- */
-/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
- * macro doesn't like anything other than the raw number as an argument :-(
- */
-#define MAX_CARDS 16
-/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */
-
-#define DRIVER_RX_READY 0x0001
-#define DRIVER_TX_READY 0x0002
-#define DRIVER_TXMAP_SHIFT 2
-#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
-
-/* for cosa->rxtx - indicates whether either transmit or receive is
- * in progress. These values are the bit numbers.
- */
-#define TXBIT 0
-#define RXBIT 1
-#define IRQBIT 2
-
-#define COSA_MTU 2000 /* FIXME: I don't know this exactly */
-
-#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */
-#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
-#undef DEBUG_IO //1 /* Dump the I/O traffic */
-
-#define TX_TIMEOUT (5 * HZ)
-
-/* Maybe the following should be allocated dynamically */
-static struct cosa_data cosa_cards[MAX_CARDS];
-static int nr_cards;
-
-#ifdef COSA_ISA_AUTOPROBE
-static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,};
-/* NOTE: DMA is not autoprobed!!! */
-static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,};
-#else
-static int io[MAX_CARDS + 1];
-static int dma[MAX_CARDS + 1];
-#endif
-/* IRQ can be safely autoprobed */
-static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,};
-
-/* for class stuff*/
-static struct class *cosa_class;
-
-#ifdef MODULE
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards");
-module_param_hw_array(irq, int, irq, NULL, 0);
-MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards");
-module_param_hw_array(dma, int, dma, NULL, 0);
-MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards");
-
-MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>");
-MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card");
-MODULE_LICENSE("GPL");
-#endif
-
-/* I use this mainly for testing purposes */
-#ifdef COSA_SLOW_IO
-#define cosa_outb outb_p
-#define cosa_outw outw_p
-#define cosa_inb inb_p
-#define cosa_inw inw_p
-#else
-#define cosa_outb outb
-#define cosa_outw outw
-#define cosa_inb inb
-#define cosa_inw inw
-#endif
-
-#define is_8bit(cosa) (!((cosa)->datareg & 0x08))
-
-#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg))
-#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg))
-#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg))
-#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg))
-#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg))
-#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg))
-
-/* Initialization stuff */
-static int cosa_probe(int ioaddr, int irq, int dma);
-
-/* HW interface */
-static void cosa_enable_rx(struct channel_data *chan);
-static void cosa_disable_rx(struct channel_data *chan);
-static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
-static void cosa_kick(struct cosa_data *cosa);
-static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
-
-/* Network device stuff */
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity);
-static int cosa_net_open(struct net_device *d);
-static int cosa_net_close(struct net_device *d);
-static void cosa_net_timeout(struct net_device *d, unsigned int txqueue);
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
-static char *cosa_net_setup_rx(struct channel_data *channel, int size);
-static int cosa_net_rx_done(struct channel_data *channel);
-static int cosa_net_tx_done(struct channel_data *channel, int size);
-
-/* Character device */
-static char *chrdev_setup_rx(struct channel_data *channel, int size);
-static int chrdev_rx_done(struct channel_data *channel);
-static int chrdev_tx_done(struct channel_data *channel, int size);
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos);
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static unsigned int cosa_poll(struct file *file, poll_table *poll);
-static int cosa_open(struct inode *inode, struct file *file);
-static int cosa_release(struct inode *inode, struct file *file);
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-#ifdef COSA_FASYNC_WORKING
-static int cosa_fasync(struct inode *inode, struct file *file, int on);
-#endif
-
-static const struct file_operations cosa_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = cosa_read,
- .write = cosa_write,
- .poll = cosa_poll,
- .unlocked_ioctl = cosa_chardev_ioctl,
- .open = cosa_open,
- .release = cosa_release,
-#ifdef COSA_FASYNC_WORKING
- .fasync = cosa_fasync,
-#endif
-};
-
-/* Ioctls */
-static int cosa_start(struct cosa_data *cosa, int address);
-static int cosa_reset(struct cosa_data *cosa);
-static int cosa_download(struct cosa_data *cosa, void __user *a);
-static int cosa_readmem(struct cosa_data *cosa, void __user *a);
-
-/* COSA/SRP ROM monitor */
-static int download(struct cosa_data *cosa, const char __user *data, int addr, int len);
-static int startmicrocode(struct cosa_data *cosa, int address);
-static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len);
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
-
-/* Auxiliary functions */
-static int get_wait_data(struct cosa_data *cosa);
-static int put_wait_data(struct cosa_data *cosa, int data);
-static int puthexnumber(struct cosa_data *cosa, int number);
-static void put_driver_status(struct cosa_data *cosa);
-static void put_driver_status_nolock(struct cosa_data *cosa);
-
-/* Interrupt handling */
-static irqreturn_t cosa_interrupt(int irq, void *cosa);
-
-/* I/O ops debugging */
-#ifdef DEBUG_IO
-static void debug_data_in(struct cosa_data *cosa, int data);
-static void debug_data_out(struct cosa_data *cosa, int data);
-static void debug_data_cmd(struct cosa_data *cosa, int data);
-static void debug_status_in(struct cosa_data *cosa, int status);
-static void debug_status_out(struct cosa_data *cosa, int status);
-#endif
-
-static inline struct channel_data *dev_to_chan(struct net_device *dev)
-{
- return (struct channel_data *)dev_to_hdlc(dev)->priv;
-}
-
-/* ---------- Initialization stuff ---------- */
-
-static int __init cosa_init(void)
-{
- int i, err = 0;
-
- if (cosa_major > 0) {
- if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
- pr_warn("unable to get major %d\n", cosa_major);
- err = -EIO;
- goto out;
- }
- } else {
- cosa_major = register_chrdev(0, "cosa", &cosa_fops);
- if (!cosa_major) {
- pr_warn("unable to register chardev\n");
- err = -EIO;
- goto out;
- }
- }
- for (i = 0; i < MAX_CARDS; i++)
- cosa_cards[i].num = -1;
- for (i = 0; io[i] != 0 && i < MAX_CARDS; i++)
- cosa_probe(io[i], irq[i], dma[i]);
- if (!nr_cards) {
- pr_warn("no devices found\n");
- unregister_chrdev(cosa_major, "cosa");
- err = -ENODEV;
- goto out;
- }
- cosa_class = class_create(THIS_MODULE, "cosa");
- if (IS_ERR(cosa_class)) {
- err = PTR_ERR(cosa_class);
- goto out_chrdev;
- }
- for (i = 0; i < nr_cards; i++)
- device_create(cosa_class, NULL, MKDEV(cosa_major, i), NULL,
- "cosa%d", i);
- err = 0;
- goto out;
-
-out_chrdev:
- unregister_chrdev(cosa_major, "cosa");
-out:
- return err;
-}
-module_init(cosa_init);
-
-static void __exit cosa_exit(void)
-{
- struct cosa_data *cosa;
- int i;
-
- for (i = 0; i < nr_cards; i++)
- device_destroy(cosa_class, MKDEV(cosa_major, i));
- class_destroy(cosa_class);
-
- for (cosa = cosa_cards; nr_cards--; cosa++) {
- /* Clean up the per-channel data */
- for (i = 0; i < cosa->nchannels; i++) {
- /* Chardev driver has no alloc'd per-channel data */
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- /* Clean up the per-card data */
- kfree(cosa->chan);
- kfree(cosa->bouncebuf);
- free_irq(cosa->irq, cosa);
- free_dma(cosa->dma);
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- }
- unregister_chrdev(cosa_major, "cosa");
-}
-module_exit(cosa_exit);
-
-static const struct net_device_ops cosa_ops = {
- .ndo_open = cosa_net_open,
- .ndo_stop = cosa_net_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
- .ndo_tx_timeout = cosa_net_timeout,
-};
-
-static int cosa_probe(int base, int irq, int dma)
-{
- struct cosa_data *cosa = cosa_cards + nr_cards;
- int i, err = 0;
-
- memset(cosa, 0, sizeof(struct cosa_data));
-
- /* Checking validity of parameters: */
- /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
- if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
- pr_info("invalid IRQ %d\n", irq);
- return -1;
- }
- /* I/O address should be between 0x100 and 0x3ff and should be
- * multiple of 8.
- */
- if (base < 0x100 || base > 0x3ff || base & 0x7) {
- pr_info("invalid I/O address 0x%x\n", base);
- return -1;
- }
- /* DMA should be 0,1 or 3-7 */
- if (dma < 0 || dma == 4 || dma > 7) {
- pr_info("invalid DMA %d\n", dma);
- return -1;
- }
- /* and finally, on 16-bit COSA DMA should be 4-7 and
- * I/O base should not be multiple of 0x10
- */
- if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
- pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
- base, dma);
- return -1;
- }
-
- cosa->dma = dma;
- cosa->datareg = base;
- cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2;
- spin_lock_init(&cosa->lock);
-
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa"))
- return -1;
-
- if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
- printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
- err = -1;
- goto err_out;
- }
-
- /* Test the validity of identification string */
- if (!strncmp(cosa->id_string, "SRP", 3)) {
- cosa->type = "srp";
- } else if (!strncmp(cosa->id_string, "COSA", 4)) {
- cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16";
- } else {
-/* Print a warning only if we are not autoprobing */
-#ifndef COSA_ISA_AUTOPROBE
- pr_info("valid signature not found at 0x%x\n", base);
-#endif
- err = -1;
- goto err_out;
- }
- /* Update the name of the region now we know the type of card */
- release_region(base, is_8bit(cosa) ? 2 : 4);
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, cosa->type)) {
- printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
- return -1;
- }
-
- /* Now do IRQ autoprobe */
- if (irq < 0) {
- unsigned long irqs;
-/* pr_info("IRQ autoprobe\n"); */
- irqs = probe_irq_on();
- /* Enable interrupt on tx buffer empty (it sure is)
- * really sure ?
- * FIXME: When this code is not used as module, we should
- * probably call udelay() instead of the interruptible sleep.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- schedule_timeout(msecs_to_jiffies(300));
- irq = probe_irq_off(irqs);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
- /* Empty the received data register */
- cosa_getdata8(cosa);
-
- if (irq < 0) {
- pr_info("multiple interrupts obtained (%d, board at 0x%x)\n",
- irq, cosa->datareg);
- err = -1;
- goto err_out;
- }
- if (irq == 0) {
- pr_info("no interrupt obtained (board at 0x%x)\n",
- cosa->datareg);
- /* return -1; */
- }
- }
-
- cosa->irq = irq;
- cosa->num = nr_cards;
- cosa->usage = 0;
- cosa->nchannels = 2; /* FIXME: how to determine this? */
-
- if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
- err = -1;
- goto err_out;
- }
- if (request_dma(cosa->dma, cosa->type)) {
- err = -1;
- goto err_out1;
- }
-
- cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA);
- if (!cosa->bouncebuf) {
- err = -ENOMEM;
- goto err_out2;
- }
- sprintf(cosa->name, "cosa%d", cosa->num);
-
- /* Initialize the per-channel data */
- cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
- if (!cosa->chan) {
- err = -ENOMEM;
- goto err_out3;
- }
-
- for (i = 0; i < cosa->nchannels; i++) {
- struct channel_data *chan = &cosa->chan[i];
-
- chan->cosa = cosa;
- chan->num = i;
- sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
-
- /* Initialize the chardev data structures */
- mutex_init(&chan->rlock);
- sema_init(&chan->wsem, 1);
-
- /* Register the network interface */
- chan->netdev = alloc_hdlcdev(chan);
- if (!chan->netdev) {
- pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
- err = -ENOMEM;
- goto err_hdlcdev;
- }
- dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
- dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
- chan->netdev->netdev_ops = &cosa_ops;
- chan->netdev->watchdog_timeo = TX_TIMEOUT;
- chan->netdev->base_addr = chan->cosa->datareg;
- chan->netdev->irq = chan->cosa->irq;
- chan->netdev->dma = chan->cosa->dma;
- err = register_hdlc_device(chan->netdev);
- if (err) {
- netdev_warn(chan->netdev,
- "register_hdlc_device() failed\n");
- free_netdev(chan->netdev);
- goto err_hdlcdev;
- }
- }
-
- pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
- cosa->num, cosa->id_string, cosa->type,
- cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
-
- return nr_cards++;
-
-err_hdlcdev:
- while (i-- > 0) {
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- kfree(cosa->chan);
-err_out3:
- kfree(cosa->bouncebuf);
-err_out2:
- free_dma(cosa->dma);
-err_out1:
- free_irq(cosa->irq, cosa);
-err_out:
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- pr_notice("cosa%d: allocating resources failed\n", cosa->num);
- return err;
-}
-
-/*---------- network device ---------- */
-
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static int cosa_net_open(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- int err;
- unsigned long flags;
-
- if (!(chan->cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- chan->cosa->name, chan->cosa->firmware_status);
- return -EPERM;
- }
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->usage != 0) {
- pr_warn("%s: cosa_net_open called with usage count %d\n",
- chan->name, chan->usage);
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return -EBUSY;
- }
- chan->setup_rx = cosa_net_setup_rx;
- chan->tx_done = cosa_net_tx_done;
- chan->rx_done = cosa_net_rx_done;
- chan->usage = -1;
- chan->cosa->usage++;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
-
- err = hdlc_open(dev);
- if (err) {
- spin_lock_irqsave(&chan->cosa->lock, flags);
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return err;
- }
-
- netif_start_queue(dev);
- cosa_enable_rx(chan);
- return 0;
-}
-
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- netif_stop_queue(dev);
-
- chan->tx_skb = skb;
- cosa_start_tx(chan, skb->data, skb->len);
- return NETDEV_TX_OK;
-}
-
-static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- if (test_bit(RXBIT, &chan->cosa->rxtx)) {
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_missed_errors++;
- } else {
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- }
- cosa_kick(chan->cosa);
- if (chan->tx_skb) {
- dev_kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- netif_wake_queue(dev);
-}
-
-static int cosa_net_close(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- unsigned long flags;
-
- netif_stop_queue(dev);
- hdlc_close(dev);
- cosa_disable_rx(chan);
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->rx_skb) {
- kfree_skb(chan->rx_skb);
- chan->rx_skb = NULL;
- }
- if (chan->tx_skb) {
- kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return 0;
-}
-
-static char *cosa_net_setup_rx(struct channel_data *chan, int size)
-{
- /* We can safely fall back to non-dma-able memory, because we have
- * the cosa->bouncebuf pre-allocated.
- */
- kfree_skb(chan->rx_skb);
- chan->rx_skb = dev_alloc_skb(size);
- if (!chan->rx_skb) {
- pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
- chan->netdev->stats.rx_dropped++;
- return NULL;
- }
- netif_trans_update(chan->netdev);
- return skb_put(chan->rx_skb, size);
-}
-
-static int cosa_net_rx_done(struct channel_data *chan)
-{
- if (!chan->rx_skb) {
- pr_warn("%s: rx_done with empty skb!\n", chan->name);
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_frame_errors++;
- return 0;
- }
- chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
- chan->rx_skb->dev = chan->netdev;
- skb_reset_mac_header(chan->rx_skb);
- chan->netdev->stats.rx_packets++;
- chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
- netif_rx(chan->rx_skb);
- chan->rx_skb = NULL;
- return 0;
-}
-
-/* ARGSUSED */
-static int cosa_net_tx_done(struct channel_data *chan, int size)
-{
- if (!chan->tx_skb) {
- pr_warn("%s: tx_done with empty skb!\n", chan->name);
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- return 1;
- }
- dev_consume_skb_irq(chan->tx_skb);
- chan->tx_skb = NULL;
- chan->netdev->stats.tx_packets++;
- chan->netdev->stats.tx_bytes += size;
- netif_wake_queue(chan->netdev);
- return 1;
-}
-
-/*---------- Character device ---------- */
-
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (mutex_lock_interruptible(&chan->rlock))
- return -ERESTARTSYS;
-
- chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL);
- if (!chan->rxdata) {
- mutex_unlock(&chan->rlock);
- return -ENOMEM;
- }
-
- chan->rx_status = 0;
- cosa_enable_rx(chan);
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->rxwaitq, &wait);
- while (!chan->rx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->rx_status == 0) {
- chan->rx_status = 1;
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- kbuf = chan->rxdata;
- count = chan->rxsize;
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
-
- if (copy_to_user(buf, kbuf, count)) {
- kfree(kbuf);
- return -EFAULT;
- }
- kfree(kbuf);
- return count;
-}
-
-static char *chrdev_setup_rx(struct channel_data *chan, int size)
-{
- /* Expect size <= COSA_MTU */
- chan->rxsize = size;
- return chan->rxdata;
-}
-
-static int chrdev_rx_done(struct channel_data *chan)
-{
- if (chan->rx_status) { /* Reader has died */
- kfree(chan->rxdata);
- up(&chan->wsem);
- }
- chan->rx_status = 1;
- wake_up_interruptible(&chan->rxwaitq);
- return 1;
-}
-
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (down_interruptible(&chan->wsem))
- return -ERESTARTSYS;
-
- if (count > COSA_MTU)
- count = COSA_MTU;
-
- /* Allocate the buffer */
- kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA);
- if (!kbuf) {
- up(&chan->wsem);
- return -ENOMEM;
- }
- if (copy_from_user(kbuf, buf, count)) {
- up(&chan->wsem);
- kfree(kbuf);
- return -EFAULT;
- }
- chan->tx_status = 0;
- cosa_start_tx(chan, kbuf, count);
-
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->txwaitq, &wait);
- while (!chan->tx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->tx_status == 0) {
- chan->tx_status = 1;
- remove_wait_queue(&chan->txwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- chan->tx_status = 1;
- spin_unlock_irqrestore(&cosa->lock, flags);
- up(&chan->wsem);
- kfree(kbuf);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->txwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- up(&chan->wsem);
- spin_unlock_irqrestore(&cosa->lock, flags);
- kfree(kbuf);
- return count;
-}
-
-static int chrdev_tx_done(struct channel_data *chan, int size)
-{
- if (chan->tx_status) { /* Writer was interrupted */
- kfree(chan->txbuf);
- up(&chan->wsem);
- }
- chan->tx_status = 1;
- wake_up_interruptible(&chan->txwaitq);
- return 1;
-}
-
-static __poll_t cosa_poll(struct file *file, poll_table *poll)
-{
- pr_info("cosa_poll is here\n");
- return 0;
-}
-
-static int cosa_open(struct inode *inode, struct file *file)
-{
- struct cosa_data *cosa;
- struct channel_data *chan;
- unsigned long flags;
- int n;
- int ret = 0;
-
- mutex_lock(&cosa_chardev_mutex);
- n = iminor(file_inode(file)) >> CARD_MINOR_BITS;
- if (n >= nr_cards) {
- ret = -ENODEV;
- goto out;
- }
- cosa = cosa_cards + n;
-
- n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1);
- if (n >= cosa->nchannels) {
- ret = -ENODEV;
- goto out;
- }
- chan = cosa->chan + n;
-
- file->private_data = chan;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- if (chan->usage < 0) { /* in netdev mode */
- spin_unlock_irqrestore(&cosa->lock, flags);
- ret = -EBUSY;
- goto out;
- }
- cosa->usage++;
- chan->usage++;
-
- chan->tx_done = chrdev_tx_done;
- chan->setup_rx = chrdev_setup_rx;
- chan->rx_done = chrdev_rx_done;
- spin_unlock_irqrestore(&cosa->lock, flags);
-out:
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
-
-static int cosa_release(struct inode *inode, struct file *file)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- unsigned long flags;
-
- cosa = channel->cosa;
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->usage--;
- channel->usage--;
- spin_unlock_irqrestore(&cosa->lock, flags);
- return 0;
-}
-
-#ifdef COSA_FASYNC_WORKING
-static struct fasync_struct *fasync[256] = { NULL, };
-
-/* To be done ... */
-static int cosa_fasync(struct inode *inode, struct file *file, int on)
-{
- int port = iminor(inode);
-
- return fasync_helper(inode, file, on, &fasync[port]);
-}
-#endif
-
-/* ---------- Ioctls ---------- */
-
-/* Ioctl subroutines can safely be made inline, because they are called
- * only from cosa_ioctl().
- */
-static inline int cosa_reset(struct cosa_data *cosa)
-{
- char idstring[COSA_MAX_ID_STRING];
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START);
- if (cosa_reset_and_read_id(cosa, idstring) < 0) {
- pr_notice("cosa%d: reset failed\n", cosa->num);
- return -EIO;
- }
- pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to download data into COSA memory. Calls download() */
-static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->name, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
- if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD);
-
- i = download(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: microcode download failed: %d\n",
- cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD;
- return 0;
-}
-
-/* High-level function to read COSA memory. Calls readmem() */
-static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~COSA_FW_RESET;
-
- i = readmem(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to start microcode. Calls startmicrocode(). */
-static inline int cosa_start(struct cosa_data *cosa, int address)
-{
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
-
- if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD))
- != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) {
- pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- cosa->firmware_status &= ~COSA_FW_RESET;
- i = startmicrocode(cosa, address);
- if (i < 0) {
- pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
- cosa->num, address, i);
- return -EIO;
- }
- pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address);
- cosa->startaddr = address;
- cosa->firmware_status |= COSA_FW_START;
- return 0;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->id_string) + 1;
-
- if (copy_to_user(string, cosa->id_string, l))
- return -EFAULT;
- return l;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->type) + 1;
-
- if (copy_to_user(string, cosa->type, l))
- return -EFAULT;
- return l;
-}
-
-static int cosa_ioctl_common(struct cosa_data *cosa,
- struct channel_data *channel, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case COSAIORSET: /* Reset the device */
- if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return cosa_reset(cosa);
- case COSAIOSTRT: /* Start the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_start(cosa, arg);
- case COSAIODOWNLD: /* Download the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
-
- return cosa_download(cosa, argp);
- case COSAIORMEM:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_readmem(cosa, argp);
- case COSAIORTYPE:
- return cosa_gettype(cosa, argp);
- case COSAIORIDSTR:
- return cosa_getidstr(cosa, argp);
- case COSAIONRCARDS:
- return nr_cards;
- case COSAIONRCHANS:
- return cosa->nchannels;
- case COSAIOBMSET:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- if (is_8bit(cosa))
- return -EINVAL;
- if (arg != COSA_BM_OFF && arg != COSA_BM_ON)
- return -EINVAL;
- cosa->busmaster = arg;
- return 0;
- case COSAIOBMGET:
- return cosa->busmaster;
- }
- return -ENOIOCTLCMD;
-}
-
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- long ret;
-
- mutex_lock(&cosa_chardev_mutex);
- cosa = channel->cosa;
- ret = cosa_ioctl_common(cosa, channel, cmd, arg);
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
-
-/*---------- HW layer interface ---------- */
-
-/* The higher layer can bind itself to the HW layer by setting the callbacks
- * in the channel_data structure and by using these routines.
- */
-static void cosa_enable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (!test_and_set_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-static void cosa_disable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (test_and_clear_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-/* FIXME: This routine probably should check for cosa_start_tx() called when
- * the previous transmit is still unfinished. In this case the non-zero
- * return value should indicate to the caller that queueing up the
- * transmit has failed.
- */
-static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
-{
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
-#ifdef DEBUG_DATA
- int i;
-
- pr_info("cosa%dc%d: starting tx(0x%x)",
- chan->cosa->num, chan->num, len);
- for (i = 0; i < len; i++)
- pr_cont(" %02x", buf[i]&0xff);
- pr_cont("\n");
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- chan->txbuf = buf;
- chan->txsize = len;
- if (len > COSA_MTU)
- chan->txsize = COSA_MTU;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- /* Tell the firmware we are ready */
- set_bit(chan->num, &cosa->txbitmap);
- put_driver_status(cosa);
-
- return 0;
-}
-
-static void put_driver_status(struct cosa_data *cosa)
-{
- unsigned long flags;
- int status;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
- if (!cosa->rxtx) {
- if (cosa->rxbitmap | cosa->txbitmap) {
- if (!cosa->enabled) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- }
- } else if (cosa->enabled) {
- cosa->enabled = 0;
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
- }
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static void put_driver_status_nolock(struct cosa_data *cosa)
-{
- int status;
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
-
- if (cosa->rxbitmap | cosa->txbitmap) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- } else {
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- cosa->enabled = 0;
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
-}
-
-/* The "kickme" function: When the DMA times out, this is called to
- * clean up the driver status.
- * FIXME: Preliminary support, the interface is probably wrong.
- */
-static void cosa_kick(struct cosa_data *cosa)
-{
- unsigned long flags, flags1;
- char *s = "(probably) IRQ";
-
- if (test_bit(RXBIT, &cosa->rxtx))
- s = "RX DMA";
- if (test_bit(TXBIT, &cosa->rxtx))
- s = "TX DMA";
-
- pr_info("%s: %s timeout - restarting\n", cosa->name, s);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->rxtx = 0;
-
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
-
- /* FIXME: Anything else? */
- udelay(100);
- cosa_putstatus(cosa, 0);
- udelay(100);
- (void)cosa_getdata8(cosa);
- udelay(100);
- cosa_putdata8(cosa, 0);
- udelay(100);
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-/* Check if the whole buffer is DMA-able. That means it is below the 16MB
- * physical memory limit and doesn't span a 64k boundary. For now it seems
- * SKBs never do this, but we check it anyway.
- */
-static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
-{
- static int count;
- unsigned long b = (unsigned long)buf;
-
- if (b + len >= MAX_DMA_ADDRESS)
- return 0;
- if ((b ^ (b + len)) & 0x10000) {
- if (count++ < 5)
- pr_info("%s: packet spanning a 64k boundary\n",
- chan->name);
- return 0;
- }
- return 1;
-}
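-
-/* Worked example of the check above: a 100-byte buffer at 0x1fff0 gives
- * (0x1fff0 ^ 0x20054) & 0x10000 != 0, so it crosses a 64k boundary and
- * is rejected; the same buffer at 0x20000 stays within one 64k page
- * and passes.
- */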
-
-/* ---------- The SRP/COSA ROM monitor functions ---------- */
-
-/* Downloading SRP microcode: say "w" to the SRP monitor; it answers with
- * "w=". The driver then sends a 4-digit hex number (the start address of
- * the microcode) followed by a single space. The monitor replies with " =".
- * The driver then writes a 4-digit hex number (the address of the last
- * byte) followed by a single space, and the monitor replies with a space.
- * Now the download begins. After the download the monitor replies with
- * "\r\n." (CR LF dot).
- */
-static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address)
-{
- int i;
-
- if (put_wait_data(cosa, 'w') == -1)
- return -1;
-	i = get_wait_data(cosa);
-	if (i != 'w') {
-		printk("dnld: 0x%04x\n", i);
-		return -2;
-	}
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -10;
- if (get_wait_data(cosa) != ' ')
- return -11;
- if (get_wait_data(cosa) != '=')
- return -12;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -13;
- if (put_wait_data(cosa, ' ') == -1)
- return -18;
- if (get_wait_data(cosa) != ' ')
- return -19;
-
- while (length--) {
- char c;
-#ifndef SRP_DOWNLOAD_AT_BOOT
- if (get_user(c, microcode))
- return -23; /* ??? */
-#else
- c = *microcode;
-#endif
- if (put_wait_data(cosa, c) == -1)
- return -20;
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
-#endif
- return 0;
-}
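-
-/* Illustrative sketch only (not part of the original driver): user space
- * can drive the download protocol above through the chardev ioctls
- * declared in cosa.h. The device path and the exact ioctl argument
- * conventions are assumptions.
- */
-#if 0
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/ioctl.h>
-#include "cosa.h"
-
-static int user_download(const char *path, char *code, int len)
-{
-	struct cosa_download d = { COSA_LOAD_ADDR, len, code };
-	int fd = open(path, O_RDWR);	/* e.g. the card's char device */
-	int err;
-
-	if (fd < 0)
-		return -1;
-	err = ioctl(fd, COSAIODOWNLD, &d);	/* write the microcode */
-	if (!err)
-		err = ioctl(fd, COSAIOSTRT, COSA_LOAD_ADDR); /* and start it */
-	close(fd);
-	return err;
-}
-#endif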
-
-/* Starting microcode is done via the "g" command of the SRP monitor.
- * The chat should be the following: "g" "g=" "<addr><CR>"
- * "<CR><CR><LF><CR><LF>".
- */
-static int startmicrocode(struct cosa_data *cosa, int address)
-{
- if (put_wait_data(cosa, 'g') == -1)
- return -1;
- if (get_wait_data(cosa) != 'g')
- return -2;
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, '\r') == -1)
- return -5;
-
- if (get_wait_data(cosa) != '\r')
- return -6;
- if (get_wait_data(cosa) != '\r')
- return -7;
- if (get_wait_data(cosa) != '\n')
- return -8;
- if (get_wait_data(cosa) != '\r')
- return -9;
- if (get_wait_data(cosa) != '\n')
- return -10;
-#if 0
- printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
-#endif
- return 0;
-}
-
-/* Reading memory is done via the "r" command of the SRP monitor.
- * The chat is the following: "r" "r=" "<addr> " " =" "<last_byte> " " ".
- * Then the driver can read the data, and the conversation is finished
- * by the SRP monitor sending "<CR><LF>." (dot at the end).
- *
- * This routine is not needed during normal operation and serves
- * for debugging purposes only.
- */
-static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
-{
- if (put_wait_data(cosa, 'r') == -1)
- return -1;
- if ((get_wait_data(cosa)) != 'r')
- return -2;
- if ((get_wait_data(cosa)) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -5;
- if (get_wait_data(cosa) != ' ')
- return -6;
- if (get_wait_data(cosa) != '=')
- return -7;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -8;
- if (put_wait_data(cosa, ' ') == -1)
- return -9;
- if (get_wait_data(cosa) != ' ')
- return -10;
-
- while (length--) {
- char c;
- int i;
-
- i = get_wait_data(cosa);
- if (i == -1) {
- pr_info("0x%04x bytes remaining\n", length);
- return -11;
- }
- c = i;
-#if 1
- if (put_user(c, microcode))
- return -23; /* ??? */
-#else
- *microcode = c;
-#endif
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
-#endif
- return 0;
-}
-
-/* This function resets the device and reads the initial prompt
- * of the device's ROM monitor.
- */
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
-{
- int i = 0, id = 0, prev = 0, curr = 0;
-
- /* Reset the card ... */
- cosa_putstatus(cosa, 0);
- cosa_getdata8(cosa);
- cosa_putstatus(cosa, SR_RST);
- msleep(500);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
-
- /* Try to read the ID string. The card then prints out the
- * identification string ended by the "\n\x2e".
- *
- * The following loop is indexed through i (instead of id)
- * to avoid looping forever when for any reason
- * the port returns '\r', '\n' or '\x2e' permanently.
- */
- for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) {
- curr = get_wait_data(cosa);
- if (curr == -1)
- return -1;
-
- curr &= 0xff;
- if (curr != '\r' && curr != '\n' && curr != 0x2e)
- idstring[id++] = curr;
- if (curr == 0x2e && prev == '\n')
- break;
- }
- /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */
- idstring[id] = '\0';
- return id;
-}
-
-/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
-
-/* This routine gets a data byte from the card, waiting in a loop for the
- * SR_RX_RDY bit to be set. It should be used in exceptional cases only
- * (for example when resetting the card or downloading the firmware).
- */
-static int get_wait_data(struct cosa_data *cosa)
-{
- int retries = 1000;
-
- while (--retries) {
- /* read data and return them */
- if (cosa_getstatus(cosa) & SR_RX_RDY) {
- short r;
-
- r = cosa_getdata8(cosa);
-#if 0
- pr_info("get_wait_data returning after %d retries\n",
- 999 - retries);
-#endif
- return r;
- }
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
- }
- pr_info("timeout in get_wait_data (status 0x%x)\n",
- cosa_getstatus(cosa));
- return -1;
-}
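-
-/* A timing note (assuming HZ == 100; HZ is configurable): each retry
- * above sleeps one jiffy, i.e. ~10ms, so the 1000-retry loop gives the
- * card up to ~10 seconds to respond before timing out.
- */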
-
-/* This routine puts a data byte to the card, waiting in a loop for the
- * SR_TX_RDY bit to be set. It should be used in exceptional cases only
- * (for example when resetting the card or downloading the firmware).
- */
-static int put_wait_data(struct cosa_data *cosa, int data)
-{
- int retries = 1000;
-
- while (--retries) {
- /* read data and return them */
- if (cosa_getstatus(cosa) & SR_TX_RDY) {
- cosa_putdata8(cosa, data);
-#if 0
- pr_info("Putdata: %d retries\n", 999 - retries);
-#endif
- return 0;
- }
-#if 0
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
-#endif
- }
- pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n",
- cosa->num, cosa_getstatus(cosa));
- return -1;
-}
-
-/* The following routine puts the hexadecimal number into the SRP monitor
- * and verifies the proper echo of the sent bytes. Returns 0 on success,
- * negative number on failure (-1,-3,-5,-7) means that put_wait_data() failed,
- * (-2,-4,-6,-8) means that reading echo failed.
- */
-static int puthexnumber(struct cosa_data *cosa, int number)
-{
- char temp[5];
- int i;
-
- /* Well, I should probably replace this by something faster. */
- sprintf(temp, "%04X", number);
- for (i = 0; i < 4; i++) {
- if (put_wait_data(cosa, temp[i]) == -1) {
- pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
- cosa->num, i);
- return -1 - 2 * i;
- }
- if (get_wait_data(cosa) != temp[i]) {
-			pr_notice("cosa%d: puthexnumber failed to read echo of byte %d\n",
- cosa->num, i);
- return -2 - 2 * i;
- }
- }
- return 0;
-}
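-
-/* Example of the error mapping above: a failed write of the first digit
- * returns -1, a failed echo of the first digit returns -2, and a failed
- * echo of the third digit (i == 2) returns -2 - 2 * 2 == -6.
- */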
-
-/* ---------- Interrupt routines ---------- */
-
-/* There are three types of interrupt:
- * at the beginning of transmit - handled in tx_interrupt(),
- * at the beginning of receive - handled in rx_interrupt(), and
- * at the end of transmit/receive - handled in eot_interrupt().
- * These functions are multiplexed by cosa_interrupt() according to the
- * COSA status byte. I have moved the rx/tx/eot interrupt handling into
- * separate functions to make it more readable. These functions are inline,
- * so there should be no function-call overhead.
- *
- * In the COSA bus-master mode, we need to tell the card the address of a
- * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
- * It's time to use the bottom half :-(
- */
-
-/* Transmit interrupt routine - called when COSA is willing to obtain
- * data from the OS. The trickiest part of the routine is selecting the
- * channel we (the OS) want to send a packet for. For SRP we should
- * probably use a round-robin approach. The newer COSA firmwares have
- * simple flow control - bits 2 and 3 set in the status word mean that
- * channel 0 or 1, respectively, does not want to receive data.
- *
- * There seems to be a bug in the COSA firmware (it needs further tracing):
- * when the driver status says the kernel has no more data to transmit
- * (e.g. at the end of TX DMA) and then the kernel changes its mind
- * (e.g. a new packet is queued via hard_start_xmit()), the card issues
- * the TX interrupt but does not mark the channel as ready-to-transmit.
- * The fix seems to be to push the packet to COSA despite its request.
- * We first try to obey the card's opinion, and then fall back to forced TX.
- */
-static inline void tx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status);
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(TXBIT, &cosa->rxtx);
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- /* flow control, see the comment above */
- int i = 0;
-
- if (!cosa->txbitmap) {
- pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
- cosa->name);
- put_driver_status_nolock(cosa);
- clear_bit(TXBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- }
- while (1) {
- cosa->txchan++;
- i++;
- if (cosa->txchan >= cosa->nchannels)
- cosa->txchan = 0;
- if (!(cosa->txbitmap & (1 << cosa->txchan)))
- continue;
- if (~status &
- (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT)))
- break;
- /* in second pass, accept first ready-to-TX channel */
- if (i > cosa->nchannels) {
- /* Can be safely ignored */
-#ifdef DEBUG_IRQS
-				printk(KERN_DEBUG "%s: Forcing TX to not-ready channel %d\n",
-				       cosa->name, cosa->txchan);
-#endif
- break;
- }
- }
-
- cosa->txsize = cosa->chan[cosa->txchan].txsize;
- if (cosa_dma_able(cosa->chan + cosa->txchan,
- cosa->chan[cosa->txchan].txbuf,
- cosa->txsize)) {
- cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
- } else {
- memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
- cosa->txsize);
- cosa->txbuf = cosa->bouncebuf;
- }
- }
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
- debug_data_in(cosa, cosa_getdata8(cosa));
-#else
- cosa_getdata8(cosa);
-#endif
- set_bit(IRQBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa_putstatus(cosa, 0);
- cosa_putdata8(cosa, cosa->txsize & 0xff);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
- debug_data_out(cosa, cosa->txsize & 0xff);
-#endif
- }
- } else {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000)
- | (cosa->txsize & 0x1fff));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) |
- (cosa->txsize & 0x1fff));
- debug_data_in(cosa, cosa_getdata8(cosa));
- debug_status_out(cosa, 0);
-#else
- cosa_getdata8(cosa);
-#endif
- cosa_putstatus(cosa, 0);
- }
-
- if (cosa->busmaster) {
- unsigned long addr = virt_to_bus(cosa->txbuf);
- int count = 0;
-
- pr_info("busmaster IRQ\n");
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- udelay(10);
- if (count > 1000)
- break;
- }
- pr_info("status %x\n", cosa_getstatus(cosa));
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, (addr >> 16) & 0xffff);
-
- count = 0;
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- if (count > 1000)
- break;
- udelay(10);
- }
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, addr & 0xffff);
- flags1 = claim_dma_lock();
- set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- } else {
- /* start the DMA */
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_WRITE);
- set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf));
- set_dma_count(cosa->dma, cosa->txsize);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- }
- cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
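-
-/* Worked example of the handshake above, for channel 1 and a 500-byte
- * (0x1f4) frame: an 8-bit card is given two bytes, ((1 << 5) & 0xe0) |
- * ((0x1f4 >> 8) & 0x1f) == 0x21 followed by 0x1f4 & 0xff == 0xf4, while
- * a 16-bit card gets the single word ((1 << 13) & 0xe000) |
- * (0x1f4 & 0x1fff) == 0x21f4.
- */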
-
-static inline void rx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num);
-#endif
-
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(RXBIT, &cosa->rxtx);
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- set_bit(IRQBIT, &cosa->rxtx);
- put_driver_status_nolock(cosa);
- cosa->rxsize = cosa_getdata8(cosa) << 8;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize >> 8);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa->rxsize |= cosa_getdata8(cosa) & 0xff;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize & 0xff);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- } else {
- cosa->rxsize = cosa_getdata16(cosa);
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
- pr_warn("%s: rx for unknown channel (0x%04x)\n",
- cosa->name, cosa->rxsize);
- spin_unlock_irqrestore(&cosa->lock, flags);
- goto reject;
- }
- cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13);
- cosa->rxsize &= 0x1fff;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- cosa->rxbuf = NULL;
- if (cosa->rxchan->setup_rx)
- cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize);
-
- if (!cosa->rxbuf) {
-reject: /* Reject the packet */
- pr_info("cosa%d: rejecting packet on channel %d\n",
- cosa->num, cosa->rxchan->num);
- cosa->rxbuf = cosa->bouncebuf;
- }
-
- /* start the DMA */
- flags = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_READ);
- if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff))
- set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
- else
- set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
-
- set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff));
- enable_dma(cosa->dma);
- release_dma_lock(flags);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- cosa_putdata8(cosa, DRIVER_RX_READY);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- debug_data_cmd(cosa, DRIVER_RX_READY);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
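-
-/* Worked example of the RX size decoding above: rxsize == 0x6005 means
- * channel (0x6005 & 0xe000) >> 13 == 3 and frame length
- * 0x6005 & 0x1fff == 5.
- */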
-
-static inline void eot_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-
- spin_lock_irqsave(&cosa->lock, flags);
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
- if (test_bit(TXBIT, &cosa->rxtx)) {
- struct channel_data *chan = cosa->chan + cosa->txchan;
-
- if (chan->tx_done)
- if (chan->tx_done(chan, cosa->txsize))
- clear_bit(chan->num, &cosa->txbitmap);
- } else if (test_bit(RXBIT, &cosa->rxtx)) {
-#ifdef DEBUG_DATA
- {
- int i;
-
- pr_info("cosa%dc%d: done rx(0x%x)",
- cosa->num, cosa->rxchan->num, cosa->rxsize);
- for (i = 0; i < cosa->rxsize; i++)
- pr_cont(" %02x", cosa->rxbuf[i]&0xff);
- pr_cont("\n");
- }
-#endif
- /* Packet for unknown channel? */
- if (cosa->rxbuf == cosa->bouncebuf)
- goto out;
- if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize))
- memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize);
- if (cosa->rxchan->rx_done)
- if (cosa->rxchan->rx_done(cosa->rxchan))
- clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
- } else {
- pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
- }
- /* Clear the RXBIT, TXBIT and IRQBIT (the latest should be
- * cleared anyway). We should do it as soon as possible
- * so that we can tell the COSA we are done and to give it a time
- * for recovery.
- */
-out:
- cosa->rxtx = 0;
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static irqreturn_t cosa_interrupt(int irq, void *cosa_)
-{
- unsigned status;
- int count = 0;
- struct cosa_data *cosa = cosa_;
-again:
- status = cosa_getstatus(cosa);
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff);
-#endif
-#ifdef DEBUG_IO
- debug_status_in(cosa, status);
-#endif
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_DOWN_REQUEST:
- tx_interrupt(cosa, status);
- break;
- case SR_UP_REQUEST:
- rx_interrupt(cosa, status);
- break;
- case SR_END_OF_TRANSFER:
- eot_interrupt(cosa, status);
- break;
- default:
- /* We may be too fast for SRP. Try to wait a bit more. */
- if (count++ < 100) {
- udelay(100);
- goto again;
- }
- pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
- cosa->num, status & 0xff, count);
- }
-#ifdef DEBUG_IRQS
- if (count)
- pr_info("%s: %d-times got unknown status in IRQ\n",
- cosa->name, count);
- else
- pr_info("%s: returning from IRQ\n", cosa->name);
-#endif
- return IRQ_HANDLED;
-}
-
-/* ---------- I/O debugging routines ---------- */
-/* These routines can be used to monitor COSA/SRP I/O and to printk()
- * the data being transferred on the data and status I/O port in a
- * readable way.
- */
-
-#ifdef DEBUG_IO
-static void debug_status_in(struct cosa_data *cosa, int status)
-{
- char *s;
-
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_UP_REQUEST:
- s = "RX_REQ";
- break;
- case SR_DOWN_REQUEST:
- s = "TX_REQ";
- break;
- case SR_END_OF_TRANSFER:
- s = "ET_REQ";
- break;
- default:
- s = "NO_REQ";
- break;
- }
- pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_USR_RQ ? "USR_RQ|" : "",
- status & SR_TX_RDY ? "TX_RDY|" : "",
- status & SR_RX_RDY ? "RX_RDY|" : "",
- s);
-}
-
-static void debug_status_out(struct cosa_data *cosa, int status)
-{
- pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_RX_DMA_ENA ? "RXDMA|" : "!rxdma|",
- status & SR_TX_DMA_ENA ? "TXDMA|" : "!txdma|",
- status & SR_RST ? "RESET|" : "",
- status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|",
- status & SR_TX_INT_ENA ? "TXINT|" : "!txint|",
- status & SR_RX_INT_ENA ? "RXINT" : "!rxint");
-}
-
-static void debug_data_in(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_out(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_cmd(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x (%s|%s)\n",
- cosa->name, data,
- data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
- data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
-}
-#endif
-
-/* EOF -- this file has not been truncated */
diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h
deleted file mode 100644
index f57e0af9d56a..000000000000
--- a/drivers/net/wan/cosa.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */
-
-/*
- * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- */
-
-#ifndef COSA_H__
-#define COSA_H__
-
-#include <linux/ioctl.h>
-
-#ifdef __KERNEL__
-/* status register - output bits */
-#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */
-#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */
-#define SR_RST 0x10 /* SRP reset */
-#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */
-#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */
-#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */
-
-/* status register - input bits */
-#define SR_USR_RQ 0x20 /* user interrupt request pending */
-#define SR_TX_RDY 0x40 /* transmitter empty (ready) */
-#define SR_RX_RDY 0x80 /* receiver data ready */
-
-#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data
- up to PC */
-#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down
- from PC to SRP */
-#define SR_END_OF_TRANSFER	0x03	/* SRP signals the end of a
-					   transfer (up or down) */
-
-#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */
-
-/* bits in driver status byte definitions : */
-#define SR_RDY_RCV 0x01 /* ready to receive packet */
-#define SR_RDY_SND 0x02 /* ready to send packet */
-#define SR_CMD_PND 0x04 /* command pending */ /* not currently used */
-
-/* ???? */
-#define SR_PKT_UP 0x01 /* transfer of packet up in progress */
-#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */
-
-#endif /* __KERNEL__ */
-
-#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */
-#define SR_START_ADDR 0x4400 /* SRP microcode start address */
-
-#define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */
-#define COSA_MAX_FIRMWARE_SIZE 0x10000
-
-/* ioctls */
-struct cosa_download {
- int addr, len;
- char __user *code;
-};
-
-/* Reset the device */
-#define COSAIORSET _IO('C',0xf0)
-
-/* Start microcode at given address */
-#define COSAIOSTRT _IOW('C',0xf1, int)
-
-/* Read the block from the device memory */
-#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *)
-	/* actually the struct cosa_download itself; this is to keep
-	 * the ioctl number the same as in 2.4 in order to keep the
-	 * user-space utils compatible. */
-
-/* Write the block to the device memory (i.e. download the microcode) */
-#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *)
-	/* actually the struct cosa_download itself; this is to keep
-	 * the ioctl number the same as in 2.4 in order to keep the
-	 * user-space utils compatible. */
-
-/* Read the device type (one of "srp", "cosa", and "cosa8" for now) */
-#define COSAIORTYPE _IOR('C',0xf3, char *)
-
-/* Read the device identification string */
-#define COSAIORIDSTR _IOR('C',0xf4, char *)
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Increment/decrement the module usage count :-) */
-/* #define COSAIOMINC _IO('C',0xf5) */
-/* #define COSAIOMDEC _IO('C',0xf6) */
-
-/* Get the total number of cards installed */
-#define COSAIONRCARDS _IO('C',0xf7)
-
-/* Get the number of channels on this card */
-#define COSAIONRCHANS _IO('C',0xf8)
-
-/* Set the driver for the bus-master operations */
-#define COSAIOBMSET _IOW('C', 0xf9, unsigned short)
-
-#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */
-#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */
-
-/* Gets the busmaster status */
-#define COSAIOBMGET _IO('C', 0xfa)
-
-#endif /* !COSA_H__ */
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
deleted file mode 100644
index e985e54ba75d..000000000000
--- a/drivers/net/wan/hostess_sv11.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Comtrol SV11 card driver
- *
- * This is a slightly odd Z85230 synchronous driver. All you basically
- * need to know is:
- *
- * It's a genuine Z85230.
- *
- * It supports DMA using two DMA channels in SYNC mode. The driver doesn't
- * use these facilities.
- *
- * The control port is at io+1, the data at io+3, and turning off the DMA
- * is done by writing 0 to io+4.
- *
- * The hardware does the bus handling to avoid the need for delays between
- * touching control registers.
- *
- * Port B isn't wired (why - beats me).
- *
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-static int dma;
-
-/* Network driver support routines
- */
-
-static inline struct z8530_dev *dev_to_sv(struct net_device *dev)
-{
- return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- /* Send it to the PPP layer. We don't have time to process
- * it right now.
- */
- netif_rx(skb);
-}
-
-/* We've been placed in the UP state
- */
-
-static int hostess_open(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- int err = -1;
-
- /* Link layer up
- */
- switch (dma) {
- case 0:
- err = z8530_sync_open(d, &sv11->chanA);
- break;
- case 1:
- err = z8530_sync_dma_open(d, &sv11->chanA);
- break;
- case 2:
- err = z8530_sync_txdma_open(d, &sv11->chanA);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return err;
- }
- sv11->chanA.rx_function = hostess_input;
-
- /*
- * Go go go
- */
-
- netif_start_queue(d);
- return 0;
-}
-
-static int hostess_close(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- /* Discard new frames
- */
- sv11->chanA.rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind.
- */
-
-static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
-}
-
-static int hostess_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-/* Description block for a Comtrol Hostess SV11 card
- */
-
-static const struct net_device_ops hostess_ops = {
- .ndo_open = hostess_open,
- .ndo_stop = hostess_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static struct z8530_dev *sv11_init(int iobase, int irq)
-{
- struct z8530_dev *sv;
- struct net_device *netdev;
- /* Get the needed I/O space
- */
-
- if (!request_region(iobase, 8, "Comtrol SV11")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
- if (!sv)
- goto err_kzalloc;
-
- /* Stuff in the I/O addressing
- */
-
- sv->active = 0;
-
- sv->chanA.ctrlio = iobase + 1;
- sv->chanA.dataio = iobase + 3;
- sv->chanB.ctrlio = -1;
- sv->chanB.dataio = -1;
- sv->chanA.irqs = &z8530_nop;
- sv->chanB.irqs = &z8530_nop;
-
- outb(0, iobase + 4); /* DMA off */
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "Hostess SV11", sv) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_irq;
- }
-
- sv->irq = irq;
- sv->chanA.private = sv;
- sv->chanA.dev = sv;
- sv->chanB.dev = sv;
-
- if (dma) {
-		/* You can have DMA off, or DMA channels 1 and 3 - that's
-		 * the lot on the Comtrol.
-		 */
- sv->chanA.txdma = 3;
- sv->chanA.rxdma = 1;
- outb(0x03 | 0x08, iobase + 4); /* DMA on */
- if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
- goto err_txdma;
-
- if (dma == 1)
- if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
- goto err_rxdma;
- }
-
-	/* Kill our private IRQ line - the hostess can end up chattering
-	 * until the configuration is set.
-	 */
- disable_irq(irq);
-
- /* Begin normal initialise
- */
-
- if (z8530_init(sv)) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_dma;
- }
- z8530_channel_load(&sv->chanB, z8530_dead_port);
- if (sv->type == Z85C30)
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
- else
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
-
- enable_irq(irq);
-
- /* Now we can take the IRQ
- */
-
- sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
- if (!netdev)
- goto free_dma;
-
- dev_to_hdlc(netdev)->attach = hostess_attach;
- dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
- netdev->netdev_ops = &hostess_ops;
- netdev->base_addr = iobase;
- netdev->irq = irq;
-
- if (register_hdlc_device(netdev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(netdev);
- goto free_dma;
- }
-
- z8530_describe(sv, "I/O", iobase);
- sv->active = 1;
- return sv;
-
-free_dma:
- if (dma == 1)
- free_dma(sv->chanA.rxdma);
-err_rxdma:
- if (dma)
- free_dma(sv->chanA.txdma);
-err_txdma:
- free_irq(irq, sv);
-err_irq:
- kfree(sv);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void sv11_shutdown(struct z8530_dev *dev)
-{
- unregister_hdlc_device(dev->chanA.netdevice);
- z8530_shutdown(dev);
- free_irq(dev->irq, dev);
- if (dma) {
- if (dma == 1)
- free_dma(dev->chanA.rxdma);
- free_dma(dev->chanA.txdma);
- }
- release_region(dev->chanA.ctrlio - 1, 8);
- free_netdev(dev->chanA.netdevice);
- kfree(dev);
-}
-
-static int io = 0x200;
-static int irq = 9;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
-
-static struct z8530_dev *sv11_unit;
-
-static int sv11_module_init(void)
-{
- sv11_unit = sv11_init(io, irq);
- if (!sv11_unit)
- return -ENODEV;
- return 0;
-}
-module_init(sv11_module_init);
-
-static void sv11_module_cleanup(void)
-{
- if (sv11_unit)
- sv11_shutdown(sv11_unit);
-}
-module_exit(sv11_module_cleanup);
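-
-/* Illustrative usage of the module parameters above (module name and
- * parameter values assumed):
- *	modprobe hostess_sv11 io=0x200 irq=9 dma=1
- */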
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
deleted file mode 100644
index eddd20aab691..000000000000
--- a/drivers/net/wan/sealevel.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Sealevel Systems 4021 driver.
- *
- * (c) Copyright 1999, 2001 Alan Cox
- * (c) Copyright 2001 Red Hat Inc.
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-struct slvl_device {
- struct z8530_channel *chan;
- int channel;
-};
-
-struct slvl_board {
- struct slvl_device dev[2];
- struct z8530_dev board;
- int iobase;
-};
-
- /* Network driver support routines */
-
-static inline struct slvl_device *dev_to_chan(struct net_device *dev)
-{
- return (struct slvl_device *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- netif_rx(skb);
-}
-
- /* We've been placed in the UP state */
-
-static int sealevel_open(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int err = -1;
- int unit = slvl->channel;
-
- /* Link layer up. */
-
- switch (unit) {
- case 0:
- err = z8530_sync_dma_open(d, slvl->chan);
- break;
- case 1:
- err = z8530_sync_open(d, slvl->chan);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return err;
- }
-
- slvl->chan->rx_function = sealevel_input;
-
- netif_start_queue(d);
- return 0;
-}
-
-static int sealevel_close(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int unit = slvl->channel;
-
- /* Discard new frames */
-
- slvl->chan->rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind. */
-
-static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
-}
-
-static int sealevel_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static const struct net_device_ops sealevel_ops = {
- .ndo_open = sealevel_open,
- .ndo_stop = sealevel_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
-{
- struct net_device *dev = alloc_hdlcdev(sv);
-
- if (!dev)
- return -1;
-
- dev_to_hdlc(dev)->attach = sealevel_attach;
- dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
- dev->netdev_ops = &sealevel_ops;
- dev->base_addr = iobase;
- dev->irq = irq;
-
- if (register_hdlc_device(dev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(dev);
- return -1;
- }
-
- sv->chan->netdevice = dev;
- return 0;
-}
-
-/* Allocate and setup Sealevel board. */
-
-static __init struct slvl_board *slvl_init(int iobase, int irq,
- int txdma, int rxdma, int slow)
-{
- struct z8530_dev *dev;
- struct slvl_board *b;
-
- /* Get the needed I/O space */
-
- if (!request_region(iobase, 8, "Sealevel 4021")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
- if (!b)
- goto err_kzalloc;
-
- b->dev[0].chan = &b->board.chanA;
- b->dev[0].channel = 0;
-
- b->dev[1].chan = &b->board.chanB;
- b->dev[1].channel = 1;
-
- dev = &b->board;
-
- /* Stuff in the I/O addressing */
-
- dev->active = 0;
-
- b->iobase = iobase;
-
- /* Select 8530 delays for the old board */
-
- if (slow)
- iobase |= Z8530_PORT_SLEEP;
-
- dev->chanA.ctrlio = iobase + 1;
- dev->chanA.dataio = iobase;
- dev->chanB.ctrlio = iobase + 3;
- dev->chanB.dataio = iobase + 2;
-
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
-
-	/* Assert DTR and enable DMA */
-
- outb(3 | (1 << 7), b->iobase + 4);
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "SeaLevel", dev) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_request_irq;
- }
-
- dev->irq = irq;
- dev->chanA.private = &b->dev[0];
- dev->chanB.private = &b->dev[1];
- dev->chanA.dev = dev;
- dev->chanB.dev = dev;
-
- dev->chanA.txdma = 3;
- dev->chanA.rxdma = 1;
- if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
- goto err_dma_tx;
-
- if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
- goto err_dma_rx;
-
- disable_irq(irq);
-
- /* Begin normal initialise */
-
- if (z8530_init(dev) != 0) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_hw;
- }
- if (dev->type == Z85C30) {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
- } else {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
- }
-
- /* Now we can take the IRQ */
-
- enable_irq(irq);
-
- if (slvl_setup(&b->dev[0], iobase, irq))
- goto free_hw;
- if (slvl_setup(&b->dev[1], iobase, irq))
- goto free_netdev0;
-
- z8530_describe(dev, "I/O", iobase);
- dev->active = 1;
- return b;
-
-free_netdev0:
- unregister_hdlc_device(b->dev[0].chan->netdevice);
- free_netdev(b->dev[0].chan->netdevice);
-free_hw:
- free_dma(dev->chanA.rxdma);
-err_dma_rx:
- free_dma(dev->chanA.txdma);
-err_dma_tx:
- free_irq(irq, dev);
-err_request_irq:
- kfree(b);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void __exit slvl_shutdown(struct slvl_board *b)
-{
- int u;
-
- z8530_shutdown(&b->board);
-
- for (u = 0; u < 2; u++) {
- struct net_device *d = b->dev[u].chan->netdevice;
-
- unregister_hdlc_device(d);
- free_netdev(d);
- }
-
- free_irq(b->board.irq, &b->board);
- free_dma(b->board.chanA.rxdma);
- free_dma(b->board.chanA.txdma);
- /* DMA off on the card, drop DTR */
- outb(0, b->iobase);
- release_region(b->iobase, 8);
- kfree(b);
-}
-
-static int io = 0x238;
-static int txdma = 1;
-static int rxdma = 3;
-static int irq = 5;
-static bool slow;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
-module_param_hw(txdma, int, dma, 0);
-MODULE_PARM_DESC(txdma, "Transmit DMA channel");
-module_param_hw(rxdma, int, dma, 0);
-MODULE_PARM_DESC(rxdma, "Receive DMA channel");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
-module_param(slow, bool, 0);
-MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
-
-static struct slvl_board *slvl_unit;
-
-static int __init slvl_init_module(void)
-{
- slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
-
- return slvl_unit ? 0 : -ENODEV;
-}
-
-static void __exit slvl_cleanup_module(void)
-{
- if (slvl_unit)
- slvl_shutdown(slvl_unit);
-}
-
-module_init(slvl_init_module);
-module_exit(slvl_cleanup_module);
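-
-/* Illustrative usage of the module parameters above (module name and
- * parameter values assumed):
- *	modprobe sealevel io=0x238 irq=5 txdma=1 rxdma=3
- */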
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
deleted file mode 100644
index 982a03488a00..000000000000
--- a/drivers/net/wan/z85230.c
+++ /dev/null
@@ -1,1641 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- * (c) Copyright 2000, 2001 Red Hat Inc
- *
- * Development of this driver was funded by Equiinet Ltd
- * http://www.equiinet.com
- *
- * ChangeLog:
- *
- * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
- * unification of all the Z85x30 asynchronous drivers for real.
- *
- * DMA now uses get_free_page as kmalloc buffers may span a 64K
- * boundary.
- *
- * Modified for SMP safety and SMP locking by Alan Cox
- * <alan@lxorguk.ukuu.org.uk>
- *
- * Performance
- *
- * Z85230:
- * Without DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
- * X.25 is not unrealistic on all machines. DMA mode can in theory
- * handle T1/E1 quite nicely. In practice the limit seems to be about
- * 512Kbit->1Mbit depending on motherboard.
- *
- * Z85C30:
- * 64K will take DMA, 9600 baud X.25 should be ok.
- *
- * Z8530:
- * Synchronous mode without DMA is unlikely to pass about 2400 baud.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#define RT_LOCK
-#define RT_UNLOCK
-#include <linux/spinlock.h>
-
-#include "z85230.h"
-
-/**
- * z8530_read_port - Architecture specific interface function
- * @p: port to read
- *
- * Provided port access methods. The Comtrol SV11 requires no delays
- * between accesses and uses PC I/O. Some drivers may need a 5uS delay
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dread 5uS sanity delay.
- *
- * The caller must hold sufficient locks to avoid violating the horrible
- * 5uS delay rule.
- */
-
-static inline int z8530_read_port(unsigned long p)
-{
- u8 r = inb(Z8530_PORT_OF(p));
-
- if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
- udelay(5);
- return r;
-}
-
-/**
- * z8530_write_port - Architecture specific interface function
- * @p: port to write
- * @d: value to write
- *
- * Write a value to a port with delays if need be. Note that the
- * caller must hold locks to avoid read/writes from other contexts
- * violating the 5uS rule
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dread 5uS sanity delay.
- */
-
-static inline void z8530_write_port(unsigned long p, u8 d)
-{
- outb(d, Z8530_PORT_OF(p));
- if (p & Z8530_PORT_SLEEP)
- udelay(5);
-}
-
-static void z8530_rx_done(struct z8530_channel *c);
-static void z8530_tx_done(struct z8530_channel *c);
-
-/**
- * read_zsreg - Read a register from a Z85230
- * @c: Z8530 channel to read from (2 per chip)
- * @reg: Register to read
- * FIXME: Use a spinlock.
- *
- * Most of the Z8530 registers are indexed off the control registers.
- * A read is done by writing to the control register and reading the
- * register back. The caller must hold the lock
- */
-
-static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- return z8530_read_port(c->ctrlio);
-}
-
-/**
- * read_zsdata - Read the data port of a Z8530 channel
- * @c: The Z8530 channel to read the data port from
- *
- * The data port provides fast access to some things. We still
- * have all the 5uS delays to worry about.
- */
-
-static inline u8 read_zsdata(struct z8530_channel *c)
-{
- u8 r;
-
- r = z8530_read_port(c->dataio);
- return r;
-}
-
-/**
- * write_zsreg - Write to a Z8530 channel register
- * @c: The Z8530 channel
- * @reg: Register number
- * @val: Value to write
- *
- * Write a value to an indexed register. The caller must hold the lock
- * to honour the irritating delay rules. We know about register 0
- * being fast to access.
- *
- * Assumes c->lock is held.
- */
-static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- z8530_write_port(c->ctrlio, val);
-}
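-
-/* For example, write_zsreg(c, R5, c->regs[5]) first writes 5 to the
- * control port to select WR5 and then writes the value; register 0
- * is the fast path and needs no select cycle.
- */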
-
-/**
- * write_zsctrl - Write to a Z8530 control register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the control register on the Z8530
- */
-
-static inline void write_zsctrl(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->ctrlio, val);
-}
-
-/**
- * write_zsdata - Write to a Z8530 data register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the data register on the Z8530
- */
-static inline void write_zsdata(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->dataio, val);
-}
-
-/* Register loading parameters for a dead port
- */
-
-u8 z8530_dead_port[] = {
- 255
-};
-EXPORT_SYMBOL(z8530_dead_port);
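-
-/* The tables below are lists of (register, value) pairs terminated by
- * the sentinel 255; z8530_channel_load() walks such a table and writes
- * each value to the named register, so e.g. the pair "4, SYNC_ENAB |
- * SDLC | X1CLK" loads WR4 with those mode bits.
- */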
-
-/* Register loading parameters for currently supported circuit types
- */
-
-/* Data clocked by telco end. This is the correct data for the UK
- * "kilostream" service, and most other similar services.
- */
-
-u8 z8530_hdlc_kilostream[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream);
-
-/* As above but for enhanced chips.
- */
-
-u8 z8530_hdlc_kilostream_85230[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 23, 3, /* Extended mode AUTO TX and EOM*/
-
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
-
-/**
- * z8530_flush_fifo - Flush on chip RX FIFO
- * @c: Channel to flush
- *
- * Flush the receive FIFO. There is no specific option for this, we
- * blindly read bytes and discard them. Reading when there is no data
- * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
- *
- * All locking is handled for the caller. On return data may still be
- * present if it arrived during the flush.
- */
-
-static void z8530_flush_fifo(struct z8530_channel *c)
-{
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- if (c->dev->type == Z85230) {
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- }
-}
-
-/**
- * z8530_rtsdtr - Control the outgoing DTR/RTS line
- * @c: The Z8530 channel to control
- * @set: 1 to set, 0 to clear
- *
- * Sets or clears DTR/RTS on the requested line. All locking is handled
- * by the caller. For now we assume all boards use the actual RTS/DTR
- * on the chip. Apparently one or two don't. We'll scream about them
- * later.
- */
-
-static void z8530_rtsdtr(struct z8530_channel *c, int set)
-{
- if (set)
- c->regs[5] |= (RTS | DTR);
- else
- c->regs[5] &= ~(RTS | DTR);
- write_zsreg(c, R5, c->regs[5]);
-}
-
-/**
- * z8530_rx - Handle a PIO receive event
- * @c: Z8530 channel to process
- *
- * Receive handler for receiving in PIO mode. This is much like the
- * async one but not quite the same or as complex
- *
- * Note: It's intended that this handler can easily be separated from
- * the main code to run realtime. That'll be needed for some machines
- * (eg to ever clock 64kbits on a sparc ;)).
- *
- * The RT_LOCK macros don't do anything now. Keep the code covered
- * by them as short as possible in all circumstances - clocks cost
- * baud. The interrupt handler is assumed to be atomic w.r.t. other
- * code - this is true in the RT case too.
- *
- * We only cover the sync cases for this. If you want 2Mbit async
- * do it yourself but consider medical assistance first. This non DMA
- * synchronous mode is portable code. The DMA mode assumes PCI like
- * ISA DMA
- *
- * Called with the device lock held
- */
-
-static void z8530_rx(struct z8530_channel *c)
-{
- u8 ch, stat;
-
- while (1) {
- /* FIFO empty ? */
- if (!(read_zsreg(c, R0) & 1))
- break;
- ch = read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- /* Overrun ?
- */
- if (c->count < c->max) {
- *c->dptr++ = ch;
- c->count++;
- }
-
- if (stat & END_FR) {
- /* Error ?
- */
- if (stat & (Rx_OVR | CRC_ERR)) {
- /* Rewind the buffer and return */
- if (c->skb)
- c->dptr = c->skb->data;
- c->count = 0;
- if (stat & Rx_OVR) {
- pr_warn("%s: overrun\n", c->dev->name);
- c->rx_overrun++;
- }
- if (stat & CRC_ERR) {
- c->rx_crc_err++;
- /* printk("crc error\n"); */
- }
- /* Shove the frame upstream */
- } else {
- /* Drop the lock for RX processing, or
- * there are deadlocks
- */
- z8530_rx_done(c);
- write_zsctrl(c, RES_Rx_CRC);
- }
- }
- }
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx - Handle a PIO transmit event
- * @c: Z8530 channel to process
- *
- * Z8530 transmit interrupt handler for the PIO mode. The basic
- * idea is to attempt to keep the FIFO fed. We fill in as many bytes
- * as possible; it's quite possible that we won't keep up with the
- * data rate otherwise.
- */
-
-static void z8530_tx(struct z8530_channel *c)
-{
- while (c->txcount) {
- /* FIFO full ? */
- if (!(read_zsreg(c, R0) & 4))
- return;
- c->txcount--;
- /* Shovel out the byte
- */
- write_zsreg(c, R8, *c->tx_ptr++);
- write_zsctrl(c, RES_H_IUS);
- /* We are about to underflow */
- if (c->txcount == 0) {
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- }
- }
-
- /* End of frame TX - fire another one
- */
-
- write_zsctrl(c, RES_Tx_P);
-
- z8530_tx_done(c);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status - Handle a PIO status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred in PIO synchronous mode. There are several
- * reasons the chip will bother us here. A transmit underrun means we
- * failed to feed the chip fast enough and just broke a packet. A DCD
- * change is a line up or down.
- */
-
-static void z8530_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (status & TxEOM) {
-/* printk("%s: Tx underrun.\n", chan->dev->name); */
- chan->netdevice->stats.tx_fifo_errors++;
- write_zsctrl(chan, ERR_RES);
- z8530_tx_done(chan);
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_sync = {
- .rx = z8530_rx,
- .tx = z8530_tx,
- .status = z8530_status,
-};
-EXPORT_SYMBOL(z8530_sync);
-
-/**
- * z8530_dma_rx - Handle a DMA RX event
- * @chan: Channel to handle
- *
- * Non bus mastering DMA interfaces for the Z8x30 devices. This
- * is really pretty PC specific. The DMA mode means that most receive
- * events are handled by the DMA hardware. We get a kick here only if
- * a frame ended.
- */
-
-static void z8530_dma_rx(struct z8530_channel *chan)
-{
- if (chan->rxdma_on) {
- /* Special condition check only */
- u8 status;
-
- read_zsreg(chan, R7);
- read_zsreg(chan, R6);
-
- status = read_zsreg(chan, R1);
-
- if (status & END_FR)
- z8530_rx_done(chan); /* Fire up the next one */
-
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_H_IUS);
- } else {
- /* DMA is off right now, drain the slow way */
- z8530_rx(chan);
- }
-}
-
-/**
- * z8530_dma_tx - Handle a DMA TX event
- * @chan: The Z8530 channel to handle
- *
- * We have received an interrupt while doing DMA transmissions. It
- * shouldn't happen. Scream loudly if it does.
- */
-static void z8530_dma_tx(struct z8530_channel *chan)
-{
- if (!chan->dma_tx) {
- pr_warn("Hey who turned the DMA off?\n");
- z8530_tx(chan);
- return;
- }
- /* This shouldn't occur in DMA mode */
- pr_err("DMA tx - bogus event!\n");
- z8530_tx(chan);
-}
-
-/**
- * z8530_dma_status - Handle a DMA status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred on the Z8530. We receive these for two reasons
- * when in DMA mode. Firstly if we finished a packet transfer we get one
- * and kick the next packet out. Secondly we may see a DCD change.
- *
- */
-static void z8530_dma_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (chan->dma_tx) {
- if (status & TxEOM) {
- unsigned long flags;
-
- flags = claim_dma_lock();
- disable_dma(chan->txdma);
- clear_dma_ff(chan->txdma);
- chan->txdma_on = 0;
- release_dma_lock(flags);
- z8530_tx_done(chan);
- }
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
-
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-static struct z8530_irqhandler z8530_dma_sync = {
- .rx = z8530_dma_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-static struct z8530_irqhandler z8530_txdma_sync = {
- .rx = z8530_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-/**
- * z8530_rx_clear - Handle RX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_rx_clear(struct z8530_channel *c)
-{
- /* Data and status bytes
- */
- u8 stat;
-
- read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- if (stat & END_FR)
- write_zsctrl(c, RES_Rx_CRC);
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx_clear - Handle TX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_tx_clear(struct z8530_channel *c)
-{
- write_zsctrl(c, RES_Tx_P);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status_clear - Handle status events from a stopped chip
- * @chan: Z8530 channel to shut up
- *
- * Status interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_status_clear(struct z8530_channel *chan)
-{
- u8 status = read_zsreg(chan, R0);
-
- if (status & TxEOM)
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_nop = {
- .rx = z8530_rx_clear,
- .tx = z8530_tx_clear,
- .status = z8530_status_clear,
-};
-EXPORT_SYMBOL(z8530_nop);
-
-/**
- * z8530_interrupt - Handle an interrupt from a Z8530
- * @irq: Interrupt number
- * @dev_id: The Z8530 device that is interrupting.
- *
- * A Z85[2]30 device has stuck its hand in the air for attention.
- * We scan both the channels on the chip for events and then call
- * the channel specific call backs for each channel that has events.
- * We have to use callback functions because the two channels can be
- * in different modes.
- *
- * Locking for the handlers is done at the chip level (the 5uS
- * delay issue is per chip, not per channel); c->lock for both
- * channels points to dev->lock.
- */
-
-irqreturn_t z8530_interrupt(int irq, void *dev_id)
-{
- struct z8530_dev *dev = dev_id;
- u8 intr;
- static volatile int locker = 0;
- int work = 0;
- struct z8530_irqhandler *irqs;
-
- if (locker) {
- pr_err("IRQ re-enter\n");
- return IRQ_NONE;
- }
- locker = 1;
-
- spin_lock(&dev->lock);
-
- while (++work < 5000) {
- intr = read_zsreg(&dev->chanA, R3);
- if (!(intr &
- (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
- break;
-
- /* This holds the IRQ status. On the 8530 you must read it
- * from chan A even though it applies to the whole chip
- */
-
- /* Now walk the chip and see what it is wanting - it may be
- * an IRQ for someone else remember
- */
-
- irqs = dev->chanA.irqs;
-
- if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
- if (intr & CHARxIP)
- irqs->rx(&dev->chanA);
- if (intr & CHATxIP)
- irqs->tx(&dev->chanA);
- if (intr & CHAEXT)
- irqs->status(&dev->chanA);
- }
-
- irqs = dev->chanB.irqs;
-
- if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
- if (intr & CHBRxIP)
- irqs->rx(&dev->chanB);
- if (intr & CHBTxIP)
- irqs->tx(&dev->chanB);
- if (intr & CHBEXT)
- irqs->status(&dev->chanB);
- }
- }
- spin_unlock(&dev->lock);
- if (work == 5000)
- pr_err("%s: interrupt jammed - abort(0x%X)!\n",
- dev->name, intr);
- /* Ok all done */
- locker = 0;
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(z8530_interrupt);
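A board driver wires this up with request_irq(); a minimal sketch, assuming the z8530_dev has already been initialised (the "example-z85230" name is illustrative):

	/* Hypothetical hookup: the z8530_dev is the dev_id cookie, so the
	 * handler can scan both channels; IRQF_SHARED suits a shared line.
	 */
	ret = request_irq(dev->irq, z8530_interrupt, IRQF_SHARED,
			  "example-z85230", dev);
	if (ret)
		return ret;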
-
-static const u8 reg_init[16] = {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0x55, 0, 0, 0
-};
-
-/**
- * z8530_sync_open - Open a Z8530 channel for PIO
- * @dev: The network interface we are using
- * @c: The Z8530 channel to open in synchronous PIO mode
- *
- * Switch a Z8530 into synchronous mode without DMA assist. We
- * raise the RTS/DTR and commence network operation.
- */
-int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
- c->irqs = &z8530_sync;
-
- /* This loads the double buffer up */
- z8530_rx_done(c); /* Load the frame ring */
- z8530_rx_done(c); /* Load the backup frame */
- z8530_rtsdtr(c, 1);
- c->dma_tx = 0;
- c->regs[R1] |= TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_open);
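A usage sketch for the open path (hedged: dev_to_chan() is a hypothetical helper mapping a net_device to its channel, not part of this API):

	static int example_open(struct net_device *d)
	{
		struct z8530_channel *chan = dev_to_chan(d); /* hypothetical */
		int err = z8530_sync_open(d, chan);

		if (err)
			return err;
		netif_start_queue(d);	/* accept packets once the port is up */
		return 0;
	}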
-
-/**
- * z8530_sync_close - Close a PIO Z8530 channel
- * @dev: Network device to close
- * @c: Z8530 channel to disassociate and move to idle
- *
- * Close down a Z8530 interface and switch its interrupt handlers
- * to discard future events.
- */
-int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_close);
-
-/**
- * z8530_sync_dma_open - Open a Z8530 for DMA I/O
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA in both directions. Two
- * ISA DMA channels must be available for this to work. We assume ISA
- * DMA driven I/O and PC limits on access.
- */
-int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Load the DMA interfaces up
- */
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs 1500 MTU or less on WAN links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->rx_buf[0])
- return -ENOBUFS;
- c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- return -ENOBUFS;
- }
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_used = 0;
- c->dma_tx = 1;
- c->dma_num = 0;
- c->dma_ready = 1;
-
- /* Enable DMA control mode
- */
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* TX DMA via DIR/REQ
- */
-
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* RX DMA via W/Req
- */
-
- c->regs[R1] |= WT_FN_RDYFN;
- c->regs[R1] |= WT_RDY_RT;
- c->regs[R1] |= INT_ERR_Rx;
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] |= WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* DMA interrupts
- */
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
- set_dma_count(c->rxdma, c->mtu);
- enable_dma(c->rxdma);
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 1;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_dma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_open);
-
-/**
- * z8530_sync_dma_close - Close down DMA I/O
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA mode synchronous interface. Halt the DMA, and
- * free the buffers.
- */
-int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- flags = claim_dma_lock();
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
-
- c->rxdma_on = 0;
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- release_dma_lock(flags);
-
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- spin_lock_irqsave(c->lock, flags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->rx_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- }
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_close);
-
-/**
- * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA transmission. One
- * ISA DMA channel must be available for this to work. The receive
- * side is run in PIO mode, but then it has the bigger FIFO.
- */
-
-int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- printk("Opening sync interface for TX-DMA\n");
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs 1500 MTU or less on WAN links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0])
- return -ENOBUFS;
-
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* Load the PIO receive ring
- */
-
- z8530_rx_done(c);
- z8530_rx_done(c);
-
- /* Load the DMA interfaces up
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- c->tx_dma_used = 0;
- c->dma_num = 0;
- c->dma_ready = 1;
- c->dma_tx = 1;
-
- /* Enable DMA control mode
- */
-
- /* TX DMA via DIR/REQ
- */
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_txdma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_open);
-
-/**
- * z8530_sync_txdma_close - Close down a TX driven DMA channel
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
- * and free the buffers.
- */
-
-int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long dflags, cflags;
- u8 chk;
-
- spin_lock_irqsave(c->lock, cflags);
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- release_dma_lock(dflags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, cflags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_close);
-
-/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
- * it exists...
- */
-static const char * const z8530_type_name[] = {
- "Z8530",
- "Z85C30",
- "Z85230"
-};
-
-/**
- * z8530_describe - Uniformly describe a Z8530 port
- * @dev: Z8530 device to describe
- * @mapping: string holding mapping type (e.g. "I/O" or "Mem")
- * @io: the port value in question
- *
- * Describe a Z8530 in a standard format. We must pass the I/O as
- * the port offset isn't predictable. The main reason for this function
- * is to get a common report format.
- */
-
-void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
-{
- pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
- dev->name,
- z8530_type_name[dev->type],
- mapping,
- Z8530_PORT_OF(io),
- dev->irq);
-}
-EXPORT_SYMBOL(z8530_describe);
-
-/* Locked operation part of the z8530 init code
- */
-static inline int do_z8530_init(struct z8530_dev *dev)
-{
- /* NOP the interrupt handlers first - we might get a
- * floating IRQ transition when we reset the chip
- */
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- dev->chanA.dcdcheck = DCD;
- dev->chanB.dcdcheck = DCD;
-
- /* Reset the chip */
- write_zsreg(&dev->chanA, R9, 0xC0);
- udelay(200);
- /* Now check it's valid */
- write_zsreg(&dev->chanA, R12, 0xAA);
- if (read_zsreg(&dev->chanA, R12) != 0xAA)
- return -ENODEV;
- write_zsreg(&dev->chanA, R12, 0x55);
- if (read_zsreg(&dev->chanA, R12) != 0x55)
- return -ENODEV;
-
- dev->type = Z8530;
-
- /* See the application note.
- */
-
- write_zsreg(&dev->chanA, R15, 0x01);
-
- /* If we can set the low bit of R15 then
- * the chip is enhanced.
- */
-
- if (read_zsreg(&dev->chanA, R15) == 0x01) {
- /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
- /* Put a char in the fifo */
- write_zsreg(&dev->chanA, R8, 0);
- if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
- dev->type = Z85230; /* Has a FIFO */
- else
- dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
- }
-
- /* The code assumes R7' and friends are
- * off. Use write_zsext() for these and keep
- * this bit clear.
- */
-
- write_zsreg(&dev->chanA, R15, 0);
-
- /* At this point it looks like the chip is behaving
- */
-
- memcpy(dev->chanA.regs, reg_init, 16);
- memcpy(dev->chanB.regs, reg_init, 16);
-
- return 0;
-}
-
-/**
- * z8530_init - Initialise a Z8530 device
- * @dev: Z8530 device to initialise.
- *
- * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
- * is present, identify the type and then program it to hopefully
- * keep quiet and behave. This matters a lot: a Z8530 in the wrong
- * state will sometimes get into stupid modes, generating 10kHz
- * interrupt streams and the like.
- *
- * We set the interrupt handler up to discard any events, in case
- * we get them during reset or setup.
- *
- * Return 0 for success, or a negative value indicating the problem
- * in errno form.
- */
-
-int z8530_init(struct z8530_dev *dev)
-{
- unsigned long flags;
- int ret;
-
- /* Set up the chip level lock */
- spin_lock_init(&dev->lock);
- dev->chanA.lock = &dev->lock;
- dev->chanB.lock = &dev->lock;
-
- spin_lock_irqsave(&dev->lock, flags);
- ret = do_z8530_init(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(z8530_init);
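A minimal probe sketch, assuming the board driver has already filled in name, irq and the per-channel ctrlio/dataio ports (example_probe itself is illustrative):

	static int example_probe(struct z8530_dev *dev, unsigned long io)
	{
		int ret = z8530_init(dev);	/* reset and identify the chip */

		if (ret)
			return ret;		/* -ENODEV: nothing answered */
		z8530_describe(dev, "I/O", io);
		return 0;
	}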
-
-/**
- * z8530_shutdown - Shutdown a Z8530 device
- * @dev: The Z8530 chip to shutdown
- *
- * We set the interrupt handlers to silence any interrupts. We then
- * reset the chip and wait 100uS to be sure the reset completed, just
- * in case the caller then tries to do stuff.
- *
- * This is called without the lock held
- */
-int z8530_shutdown(struct z8530_dev *dev)
-{
- unsigned long flags;
- /* Reset the chip */
-
- spin_lock_irqsave(&dev->lock, flags);
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- write_zsreg(&dev->chanA, R9, 0xC0);
- /* We must lock the udelay, the chip is off-limits here */
- udelay(100);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_shutdown);
-
-/**
- * z8530_channel_load - Load channel data
- * @c: Z8530 channel to configure
- * @rtable: table of register, value pairs
- * FIXME: ioctl to allow user uploaded tables
- *
- * Load a Z8530 channel up from the system data. We use +16 to
- * indicate the "prime" registers. The value 255 terminates the
- * table.
- */
-
-int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- while (*rtable != 255) {
- int reg = *rtable++;
-
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] | 1);
- write_zsreg(c, reg & 0x0F, *rtable);
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] & ~1);
- c->regs[reg] = *rtable++;
- }
- c->rx_function = z8530_null_rx;
- c->skb = NULL;
- c->tx_skb = NULL;
- c->tx_next_skb = NULL;
- c->mtu = 1500;
- c->max = 0;
- c->count = 0;
- c->status = read_zsreg(c, R0);
- c->sync = 1;
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_channel_load);
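To illustrate the table format (a hedged sketch; the register values below are illustrative, not a tested line discipline), each entry is a <register, value> pair and 255 ends the table:

	static u8 example_table[] = {
		R4, SYNC_ENAB | SDLC | X1CLK,	/* SDLC mode, x1 clock */
		R3, Rx8,			/* 8 bits/char on receive */
		R5, Tx8 | DTR | TxCRC_ENAB,	/* 8 bits/char on transmit */
		R9, MIE | NV,			/* master interrupt enable */
		255				/* terminator */
	};

	/* then, from the board driver: */
	z8530_channel_load(&dev->chanA, example_table);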
-
-/**
- * z8530_tx_begin - Begin packet transmission
- * @c: The Z8530 channel to kick
- *
- * This is the speed sensitive side of transmission. If we are called
- * and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
- *
- * Note: We are handling this code path in the interrupt path, keep it
- * fast or bad things will happen.
- *
- * Called with the lock held.
- */
-
-static void z8530_tx_begin(struct z8530_channel *c)
-{
- unsigned long flags;
-
- if (c->tx_skb)
- return;
-
- c->tx_skb = c->tx_next_skb;
- c->tx_next_skb = NULL;
- c->tx_ptr = c->tx_next_ptr;
-
- if (!c->tx_skb) {
- /* Idle on */
- if (c->dma_tx) {
- flags = claim_dma_lock();
- disable_dma(c->txdma);
- /* Check if we crapped out.
- */
- if (get_dma_residue(c->txdma)) {
- c->netdevice->stats.tx_dropped++;
- c->netdevice->stats.tx_fifo_errors++;
- }
- release_dma_lock(flags);
- }
- c->txcount = 0;
- } else {
- c->txcount = c->tx_skb->len;
-
- if (c->dma_tx) {
- /* FIXME. DMA is broken for the original 8530,
- * on the older parts we need to set a flag and
- * wait for a further TX interrupt to fire this
- * stage off
- */
-
- flags = claim_dma_lock();
- disable_dma(c->txdma);
-
- /* These two are needed by the 8530/85C30
- * and must be issued when idling.
- */
- if (c->dev->type != Z85230) {
- write_zsctrl(c, RES_Tx_CRC);
- write_zsctrl(c, RES_EOM_L);
- }
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- clear_dma_ff(c->txdma);
- set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
- set_dma_count(c->txdma, c->txcount);
- enable_dma(c->txdma);
- release_dma_lock(flags);
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R5, c->regs[R5] | TxENAB);
- } else {
- /* ABUNDER off */
- write_zsreg(c, R10, c->regs[10]);
- write_zsctrl(c, RES_Tx_CRC);
-
- while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
- write_zsreg(c, R8, *c->tx_ptr++);
- c->txcount--;
- }
- }
- }
- /* Since we emptied tx_skb we can ask for more
- */
- netif_wake_queue(c->netdevice);
-}
-
-/**
- * z8530_tx_done - TX complete callback
- * @c: The channel that completed a transmit.
- *
- * This is called when we complete a packet send. We wake the queue,
- * start the next packet going and then free the buffer of the existing
- * packet. This code is fairly timing sensitive.
- *
- * Called with the register lock held.
- */
-
-static void z8530_tx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
-
- /* Actually this can happen. */
- if (!c->tx_skb)
- return;
-
- skb = c->tx_skb;
- c->tx_skb = NULL;
- z8530_tx_begin(c);
- c->netdevice->stats.tx_packets++;
- c->netdevice->stats.tx_bytes += skb->len;
- dev_consume_skb_irq(skb);
-}
-
-/**
- * z8530_null_rx - Discard a packet
- * @c: The channel the packet arrived on
- * @skb: The buffer
- *
- * We point the receive handler at this function when idle. Instead
- * of processing the frames we get to throw them away.
- */
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
-{
- dev_kfree_skb_any(skb);
-}
-EXPORT_SYMBOL(z8530_null_rx);
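Once a link is up the protocol driver repoints c->rx_function at its own handler; a hedged sketch (the HDLC protocol value is illustrative only):

	static void example_rx(struct z8530_channel *c, struct sk_buff *skb)
	{
		skb->dev = c->netdevice;
		skb->protocol = htons(ETH_P_HDLC);	/* illustrative */
		skb_reset_mac_header(skb);
		netif_rx(skb);
	}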
-
-/**
- * z8530_rx_done - Receive completion callback
- * @c: The channel that completed a receive
- *
- * A new packet is complete. Our goal here is to get back into receive
- * mode as fast as possible. On the Z85230 we could change to using
- * ESCC mode, but on the older chips we have no choice. We flip to the
- * new buffer immediately in DMA mode so that the DMA of the next
- * frame can occur while we are copying the previous buffer to an sk_buff
- *
- * Called with the lock held
- */
-static void z8530_rx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
- int ct;
-
- /* Is our receive engine in DMA mode
- */
- if (c->rxdma_on) {
- /* Save the ready state and the buffer currently
- * being used as the DMA target
- */
- int ready = c->dma_ready;
- unsigned char *rxb = c->rx_buf[c->dma_num];
- unsigned long flags;
-
- /* Complete this DMA. Necessary to find the length
- */
- flags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- c->rxdma_on = 0;
- ct = c->mtu - get_dma_residue(c->rxdma);
- if (ct < 0)
- ct = 2; /* Shit happens.. */
- c->dma_ready = 0;
-
- /* Normal case: the other slot is free, start the next DMA
- * into it immediately.
- */
-
- if (ready) {
- c->dma_num ^= 1;
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
- set_dma_count(c->rxdma, c->mtu);
- c->rxdma_on = 1;
- enable_dma(c->rxdma);
- /* Stop any frames that we missed the head of
- * from passing
- */
- write_zsreg(c, R0, RES_Rx_CRC);
- } else {
- /* Can't occur as we don't re-enable the DMA irq until
- * after the flip is done
- */
- netdev_warn(c->netdevice, "DMA flip overrun!\n");
- }
-
- release_dma_lock(flags);
-
- /* Shove the old buffer into an sk_buff. We can't DMA
- * directly into one on a PC - it might be above the 16Mb
- * boundary. Optimisation - we could check to see if we
- * can avoid the copy. Optimisation 2 - make the memcpy
- * a copychecksum.
- */
-
- skb = dev_alloc_skb(ct);
- if (!skb) {
- c->netdevice->stats.rx_dropped++;
- netdev_warn(c->netdevice, "Memory squeeze\n");
- } else {
- skb_put(skb, ct);
- skb_copy_to_linear_data(skb, rxb, ct);
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- c->dma_ready = 1;
- } else {
- RT_LOCK;
- skb = c->skb;
-
- /* The game we play for non-DMA is similar. We want to
- * get the controller set up for the next packet as fast
- * as possible. We potentially only have one byte + the
- * fifo length for this. Thus we want to flip to the new
- * buffer and then mess around copying and allocating
- * things. For the current case it doesn't matter but
- * if you build a system where the sync irq isn't blocked
- * by the kernel IRQ disable then you need only block the
- * sync IRQ for the RT_LOCK area.
- *
- */
- ct = c->count;
-
- c->skb = c->skb2;
- c->count = 0;
- c->max = c->mtu;
- if (c->skb) {
- c->dptr = c->skb->data;
- c->max = c->mtu;
- } else {
- c->count = 0;
- c->max = 0;
- }
- RT_UNLOCK;
-
- c->skb2 = dev_alloc_skb(c->mtu);
- if (c->skb2)
- skb_put(c->skb2, c->mtu);
-
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- /* If we received a frame we must now process it.
- */
- if (skb) {
- skb_trim(skb, ct);
- c->rx_function(c, skb);
- } else {
- c->netdevice->stats.rx_dropped++;
- netdev_err(c->netdevice, "Lost a frame\n");
- }
-}
-
-/**
- * spans_boundary - Check a packet can be ISA DMA'd
- * @skb: The buffer to check
- *
- * Returns true if the buffer crosses a DMA boundary on a PC. The poor
- * thing can only DMA within a 64K block, not across the edges of it.
- */
-
-static inline int spans_boundary(struct sk_buff *skb)
-{
- unsigned long a = (unsigned long)skb->data;
-
- a ^= (a + skb->len);
- if (a & 0x00010000) /* If the 64K bit is different.. */
- return 1;
- return 0;
-}
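Worked example: a buffer at bus address 0x1FFF0 with len 0x20 gives a ^ (a + len) = 0x1FFF0 ^ 0x20010 = 0x3FFE0; bit 0x00010000 is set, so the buffer straddles a 64K edge and must be bounced through a flip buffer.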
-
-/**
- * z8530_queue_xmit - Queue a packet
- * @c: The channel to use
- * @skb: The packet to kick down the channel
- *
- * Queue a packet for transmission. Because we have rather
- * hard to hit interrupt latencies for the Z85230 per packet
- * even in DMA mode we do the flip to DMA buffer if needed here
- * not in the IRQ.
- *
- * Called from the network code. The lock is not held at this
- * point.
- */
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
-{
- unsigned long flags;
-
- netif_stop_queue(c->netdevice);
- if (c->tx_next_skb)
- return NETDEV_TX_BUSY;
-
- /* PC SPECIFIC - DMA limits */
- /* If we will DMA the transmit and its gone over the ISA bus
- * limit, then copy to the flip buffer
- */
-
- if (c->dma_tx &&
- ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
- 16 * 1024 * 1024 || spans_boundary(skb))) {
- /* Send the flip buffer, and flip the flippy bit.
- * We don't care which is used when just so long as
- * we never use the same buffer twice in a row. Since
- * only one buffer can be going out at a time the other
- * has to be safe.
- */
- c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
- c->tx_dma_used ^= 1; /* Flip temp buffer */
- skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
- } else {
- c->tx_next_ptr = skb->data;
- }
- RT_LOCK;
- c->tx_next_skb = skb;
- RT_UNLOCK;
-
- spin_lock_irqsave(c->lock, flags);
- z8530_tx_begin(c);
- spin_unlock_irqrestore(c->lock, flags);
-
- return NETDEV_TX_OK;
-}
-EXPORT_SYMBOL(z8530_queue_xmit);
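The transmit hook of a board driver is then a thin wrapper; a sketch assuming the same hypothetical dev_to_chan() helper as above:

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *d)
	{
		return z8530_queue_xmit(dev_to_chan(d), skb);
	}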
-
-/* Module support
- */
-static const char banner[] __initconst =
- KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
-
-static int __init z85230_init_driver(void)
-{
- printk(banner);
- return 0;
-}
-module_init(z85230_init_driver);
-
-static void __exit z85230_cleanup_driver(void)
-{
-}
-module_exit(z85230_cleanup_driver);
-
-MODULE_AUTHOR("Red Hat Inc.");
-MODULE_DESCRIPTION("Z85x30 synchronous driver core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
deleted file mode 100644
index 462cb620bc5d..000000000000
--- a/drivers/net/wan/z85230.h
+++ /dev/null
@@ -1,407 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Description of Z8530 Z85C30 and Z85230 communications chips
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- */
-
-#ifndef _Z8530_H
-#define _Z8530_H
-
-#include <linux/tty.h>
-#include <linux/interrupt.h>
-
-/* Conversion routines between BRG time constants and bits
- * per second.
- */
-#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
-#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
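A worked example for the macros above (the 3.6864MHz clock is only an illustrative value):

	/*
	 * BPS_TO_BRG(9600, 3686400) = ((3686400 + 9600) / (2 * 9600)) - 2 = 190
	 * BRG_TO_BPS(190, 3686400)  = 3686400 / 2 / (190 + 2)            = 9600
	 */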
-
-/* The Zilog register set */
-
-#define FLAG 0x7e
-
-/* Write Register 0 */
-#define R0 0 /* Register selects */
-#define R1 1
-#define R2 2
-#define R3 3
-#define R4 4
-#define R5 5
-#define R6 6
-#define R7 7
-#define R8 8
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-
-#define RPRIME 16 /* Indicate a prime register access on 230 */
-
-#define NULLCODE 0 /* Null Code */
-#define POINT_HIGH 0x8 /* Select upper half of registers */
-#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
-#define SEND_ABORT 0x18 /* HDLC Abort */
-#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
-#define RES_Tx_P 0x28 /* Reset TxINT Pending */
-#define ERR_RES 0x30 /* Error Reset */
-#define RES_H_IUS 0x38 /* Reset highest IUS */
-
-#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
-#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
-#define RES_EOM_L 0xC0 /* Reset EOM latch */
-
-/* Write Register 1 */
-
-#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
-#define TxINT_ENAB 0x2 /* Tx Int Enable */
-#define PAR_SPEC 0x4 /* Parity is special condition */
-
-#define RxINT_DISAB 0 /* Rx Int Disable */
-#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
-#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
-#define INT_ERR_Rx 0x18 /* Int on error only */
-
-#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
-#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
-#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
-
-/* Write Register #2 (Interrupt Vector) */
-
-/* Write Register 3 */
-
-#define RxENABLE 0x1 /* Rx Enable */
-#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
-#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
-#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
-#define ENT_HM 0x10 /* Enter Hunt Mode */
-#define AUTO_ENAB 0x20 /* Auto Enables */
-#define Rx5 0x0 /* Rx 5 Bits/Character */
-#define Rx7 0x40 /* Rx 7 Bits/Character */
-#define Rx6 0x80 /* Rx 6 Bits/Character */
-#define Rx8 0xc0 /* Rx 8 Bits/Character */
-
-/* Write Register 4 */
-
-#define PAR_ENA 0x1 /* Parity Enable */
-#define PAR_EVEN 0x2 /* Parity Even/Odd* */
-
-#define SYNC_ENAB 0 /* Sync Modes Enable */
-#define SB1 0x4 /* 1 stop bit/char */
-#define SB15 0x8 /* 1.5 stop bits/char */
-#define SB2 0xc /* 2 stop bits/char */
-
-#define MONSYNC 0 /* 8 Bit Sync character */
-#define BISYNC 0x10 /* 16 bit sync character */
-#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
-#define EXTSYNC 0x30 /* External Sync Mode */
-
-#define X1CLK 0x0 /* x1 clock mode */
-#define X16CLK 0x40 /* x16 clock mode */
-#define X32CLK 0x80 /* x32 clock mode */
-#define X64CLK 0xC0 /* x64 clock mode */
-
-/* Write Register 5 */
-
-#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
-#define RTS 0x2 /* RTS */
-#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
-#define TxENAB 0x8 /* Tx Enable */
-#define SND_BRK 0x10 /* Send Break */
-#define Tx5 0x0 /* Tx 5 bits (or less)/character */
-#define Tx7 0x20 /* Tx 7 bits/character */
-#define Tx6 0x40 /* Tx 6 bits/character */
-#define Tx8 0x60 /* Tx 8 bits/character */
-#define DTR 0x80 /* DTR */
-
-/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
-
-/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
-
-/* Write Register 8 (transmit buffer) */
-
-/* Write Register 9 (Master interrupt control) */
-#define VIS 1 /* Vector Includes Status */
-#define NV 2 /* No Vector */
-#define DLC 4 /* Disable Lower Chain */
-#define MIE 8 /* Master Interrupt Enable */
-#define STATHI 0x10 /* Status high */
-#define NORESET 0 /* No reset on write to R9 */
-#define CHRB 0x40 /* Reset channel B */
-#define CHRA 0x80 /* Reset channel A */
-#define FHWRES 0xc0 /* Force hardware reset */
-
-/* Write Register 10 (misc control bits) */
-#define BIT6 1 /* 6 bit/8bit sync */
-#define LOOPMODE 2 /* SDLC Loop mode */
-#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
-#define MARKIDLE 8 /* Mark/flag on idle */
-#define GAOP 0x10 /* Go active on poll */
-#define NRZ 0 /* NRZ mode */
-#define NRZI 0x20 /* NRZI mode */
-#define FM1 0x40 /* FM1 (transition = 1) */
-#define FM0 0x60 /* FM0 (transition = 0) */
-#define CRCPS 0x80 /* CRC Preset I/O */
-
-/* Write Register 11 (Clock Mode control) */
-#define TRxCXT 0 /* TRxC = Xtal output */
-#define TRxCTC 1 /* TRxC = Transmit clock */
-#define TRxCBR 2 /* TRxC = BR Generator Output */
-#define TRxCDP 3 /* TRxC = DPLL output */
-#define TRxCOI 4 /* TRxC O/I */
-#define TCRTxCP 0 /* Transmit clock = RTxC pin */
-#define TCTRxCP 8 /* Transmit clock = TRxC pin */
-#define TCBR 0x10 /* Transmit clock = BR Generator output */
-#define TCDPLL 0x18 /* Transmit clock = DPLL output */
-#define RCRTxCP 0 /* Receive clock = RTxC pin */
-#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
-#define RCBR 0x40 /* Receive clock = BR Generator output */
-#define RCDPLL 0x60 /* Receive clock = DPLL output */
-#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
-
-/* Write Register 12 (lower byte of baud rate generator time constant) */
-
-/* Write Register 13 (upper byte of baud rate generator time constant) */
-
-/* Write Register 14 (Misc control bits) */
-#define BRENABL 1 /* Baud rate generator enable */
-#define BRSRC 2 /* Baud rate generator source */
-#define DTRREQ 4 /* DTR/Request function */
-#define AUTOECHO 8 /* Auto Echo */
-#define LOOPBAK 0x10 /* Local loopback */
-#define SEARCH 0x20 /* Enter search mode */
-#define RMC 0x40 /* Reset missing clock */
-#define DISDPLL 0x60 /* Disable DPLL */
-#define SSBR 0x80 /* Set DPLL source = BR generator */
-#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
-#define SFMM 0xc0 /* Set FM mode */
-#define SNRZI 0xe0 /* Set NRZI mode */
-
-/* Write Register 15 (external/status interrupt control) */
-#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */
-#define ZCIE 2 /* Zero count IE */
-#define FIFOE 4 /* Z85230 only */
-#define DCDIE 8 /* DCD IE */
-#define SYNCIE 0x10 /* Sync/hunt IE */
-#define CTSIE 0x20 /* CTS IE */
-#define TxUIE 0x40 /* Tx Underrun/EOM IE */
-#define BRKIE 0x80 /* Break/Abort IE */
-
-
-/* Read Register 0 */
-#define Rx_CH_AV 0x1 /* Rx Character Available */
-#define ZCOUNT 0x2 /* Zero count */
-#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
-#define DCD 0x8 /* DCD */
-#define SYNC_HUNT 0x10 /* Sync/hunt */
-#define CTS 0x20 /* CTS */
-#define TxEOM 0x40 /* Tx underrun */
-#define BRK_ABRT 0x80 /* Break/Abort */
-
-/* Read Register 1 */
-#define ALL_SNT 0x1 /* All sent */
-/* Residue Data for 8 Rx bits/char programmed */
-#define RES3 0x8 /* 0/3 */
-#define RES4 0x4 /* 0/4 */
-#define RES5 0xc /* 0/5 */
-#define RES6 0x2 /* 0/6 */
-#define RES7 0xa /* 0/7 */
-#define RES8 0x6 /* 0/8 */
-#define RES18 0xe /* 1/8 */
-#define RES28 0x0 /* 2/8 */
-/* Special Rx Condition Interrupts */
-#define PAR_ERR 0x10 /* Parity error */
-#define Rx_OVR 0x20 /* Rx Overrun Error */
-#define CRC_ERR 0x40 /* CRC/Framing Error */
-#define END_FR 0x80 /* End of Frame (SDLC) */
-
-/* Read Register 2 (channel b only) - Interrupt vector */
-
-/* Read Register 3 (interrupt pending register) ch a only */
-#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
-#define CHBTxIP 0x2 /* Channel B Tx IP */
-#define CHBRxIP 0x4 /* Channel B Rx IP */
-#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
-#define CHATxIP 0x10 /* Channel A Tx IP */
-#define CHARxIP 0x20 /* Channel A Rx IP */
-
-/* Read Register 8 (receive data register) */
-
-/* Read Register 10 (misc status bits) */
-#define ONLOOP 2 /* On loop */
-#define LOOPSEND 0x10 /* Loop sending */
-#define CLK2MIS 0x40 /* Two clocks missing */
-#define CLK1MIS 0x80 /* One clock missing */
-
-/* Read Register 12 (lower byte of baud rate generator constant) */
-
-/* Read Register 13 (upper byte of baud rate generator constant) */
-
-/* Read Register 15 (value of WR 15) */
-
-
-/*
- * Interrupt handling functions for this SCC
- */
-
-struct z8530_channel;
-
-struct z8530_irqhandler
-{
- void (*rx)(struct z8530_channel *);
- void (*tx)(struct z8530_channel *);
- void (*status)(struct z8530_channel *);
-};
-
-/*
- * A channel of the Z8530
- */
-
-struct z8530_channel
-{
- struct z8530_irqhandler *irqs; /* IRQ handlers */
- /*
- * Synchronous
- */
- u16 count; /* Bytes received */
- u16 max; /* Most we can receive this frame */
- u16 mtu; /* MTU of the device */
- u8 *dptr; /* Pointer into rx buffer */
- struct sk_buff *skb; /* Buffer dptr points into */
- struct sk_buff *skb2; /* Pending buffer */
- u8 status; /* Current DCD */
- u8 dcdcheck; /* which bit to check for line */
- u8 sync; /* Set if in sync mode */
-
- u8 regs[32]; /* Register map for the chip */
- u8 pendregs[32]; /* Pending register values */
-
- struct sk_buff *tx_skb; /* Buffer being transmitted */
- struct sk_buff *tx_next_skb; /* Next transmit buffer */
- u8 *tx_ptr; /* Byte pointer into the buffer */
- u8 *tx_next_ptr; /* Next pointer to use */
- u8 *tx_dma_buf[2]; /* TX flip buffers for DMA */
- u8 tx_dma_used; /* Flip buffer usage toggler */
- u16 txcount; /* Count of bytes to transmit */
-
- void (*rx_function)(struct z8530_channel *, struct sk_buff *);
-
- /*
- * Sync DMA
- */
-
- u8 rxdma; /* DMA channels */
- u8 txdma;
- u8 rxdma_on; /* DMA active if flag set */
- u8 txdma_on;
- u8 dma_num; /* Buffer we are DMAing into */
- u8 dma_ready; /* Is the other buffer free */
- u8 dma_tx; /* TX is to use DMA */
- u8 *rx_buf[2]; /* The flip buffers */
-
- /*
- * System
- */
-
- struct z8530_dev *dev; /* Z85230 chip instance we are from */
- unsigned long ctrlio; /* I/O ports */
- unsigned long dataio;
-
- /*
- * For PC we encode this way.
- */
-#define Z8530_PORT_SLEEP 0x80000000
-#define Z8530_PORT_OF(x) ((x)&0xFFFF)
-
- u32 rx_overrun; /* Overruns - not done yet */
- u32 rx_crc_err;
-
- /*
- * Bound device pointers
- */
-
- void *private; /* For our owner */
- struct net_device *netdevice; /* Network layer device */
-
- spinlock_t *lock; /* Device lock */
-};
-
-/*
- * Each Z853x0 device.
- */
-
-struct z8530_dev
-{
- char *name; /* Device instance name */
- struct z8530_channel chanA; /* SCC channel A */
- struct z8530_channel chanB; /* SCC channel B */
- int type;
-#define Z8530 0 /* NMOS dinosaur */
-#define Z85C30 1 /* CMOS - better */
-#define Z85230 2 /* CMOS with real FIFO */
- int irq; /* Interrupt for the device */
- int active; /* Soft interrupt enable - the Mac doesn't
- always have a hard disable on its 8530s... */
- spinlock_t lock;
-};
-
-
-/*
- * Functions
- */
-
-extern u8 z8530_dead_port[];
-extern u8 z8530_hdlc_kilostream_85230[];
-extern u8 z8530_hdlc_kilostream[];
-irqreturn_t z8530_interrupt(int, void *);
-void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-int z8530_init(struct z8530_dev *);
-int z8530_shutdown(struct z8530_dev *);
-int z8530_sync_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-int z8530_channel_load(struct z8530_channel *, u8 *);
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
-
-
-/*
- * Standard interrupt vector sets
- */
-
-extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
-
-/*
- * Asynchronous Interfacing
- */
-
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-
-#define SERIAL_XMIT_SIZE 4096
-#define WAKEUP_CHARS 256
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define RS_EVENT_WRITE_WAKEUP 0
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */
-#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
-#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
-#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
-#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */
-#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-
-#endif /* !(_Z8530_H) */
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 9cabd342d156..7527a382327f 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1500,7 +1500,7 @@ static int ar5523_load_firmware(struct usb_device *dev)
return -ENOENT;
}
- txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+ txblock = kzalloc(sizeof(*txblock), GFP_KERNEL);
if (!txblock)
goto out;
@@ -1512,7 +1512,6 @@ static int ar5523_load_firmware(struct usb_device *dev)
if (!fwbuf)
goto out_free_rxblock;
- memset(txblock, 0, sizeof(struct ar5523_fwblock));
txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
txblock->total = cpu_to_be32(fw->size);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 63e1c2d783c5..73693c66cef1 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
return;
}
- ret = mmc_hw_reset(ar_sdio->func->card->host);
+ ret = mmc_hw_reset(ar_sdio->func->card);
if (ret)
ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index d5b83f90d27a..e6b34b0d61bd 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
arvif->do_not_send_tmpl = true;
else
arvif->do_not_send_tmpl = false;
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (arvif->is_up && vif->bss_conf.he_support &&
vif->bss_conf.he_oper.params) {
- ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_BA_MODE,
- WMI_BA_MODE_BUFFER_SIZE_256);
- if (ret)
- ath11k_warn(ar->ab,
- "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
- arvif->vdev_id);
-
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
param_value = vif->bss_conf.he_oper.params;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 98090e40e1cf..e2791d45f5f5 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
- fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+ fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
if (fi->keyix == keyix)
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d0caf1de2bde..db83cc4ba810 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
- sizeof(tx_info->rate_driver_data));
- return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+ sizeof(tx_info->status.status_driver_data));
+ return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2542,6 +2542,16 @@ skip_tx_complete:
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+ void *ptr = &tx_info->status;
+
+ memset(ptr + sizeof(tx_info->status.rates), 0,
+ sizeof(tx_info->status) -
+ sizeof(tx_info->status.rates) -
+ sizeof(tx_info->status.status_driver_data));
+}
+
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok)
@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
+ ath_clear_tx_status(tx_info);
+
if (txok)
tx_info->status.ack_signal = ts->ts_rssi;
@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.ampdu_len = nframes;
tx_info->status.ampdu_ack_len = nframes - nbad;
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+ for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+ tx_info->status.rates[i].count = 0;
+ tx_info->status.rates[i].idx = -1;
+ }
+
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
}
-
- for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
- tx_info->status.rates[i].count = 0;
- tx_info->status.rates[i].idx = -1;
- }
-
- tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
- /* we report airtime in ath_tx_count_airtime(), don't report twice */
- tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index ba3c159111d3..212fbbe1cd7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
@@ -4165,7 +4165,7 @@ static int brcmf_sdio_bus_reset(struct device *dev)
/* reset the adapter */
sdio_claim_host(sdiodev->func1);
- mmc_hw_reset(sdiodev->func1->card->host);
+ mmc_hw_reset(sdiodev->func1->card);
sdio_release_host(sdiodev->func1);
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index bde9e4bbfffe..4f3238d2a171 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -2639,7 +2639,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
/* Run a HW reset of the SDIO interface. */
sdio_claim_host(func);
- ret = mmc_hw_reset(func->card->host);
+ ret = mmc_hw_reset(func->card);
sdio_release_host(func);
switch (ret) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 8a22ee581674..df85ebc6e1df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 72fc41ac83c0..9140b0163474 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -146,7 +146,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
* To guarantee that the SDIO card is power cycled, as required for
* the FW programming to succeed, let's do a brute force HW reset.
*/
- mmc_hw_reset(card->host);
+ mmc_hw_reset(card);
sdio_enable_func(func);
sdio_release_host(func);
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index 5b62cf3b3c42..fad642f9ffd8 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -33,6 +33,7 @@ static struct dentry *wwan_hwsim_debugfs_devcreate;
static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
static LIST_HEAD(wwan_hwsim_devs);
static unsigned int wwan_hwsim_dev_idx;
+static struct workqueue_struct *wwan_wq;
struct wwan_hwsim_dev {
struct list_head list;
@@ -371,7 +372,7 @@ static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
* waiting for this callback to finish in the debugfs_remove() call.
* So, use a workqueue.
*/
- schedule_work(&port->del_work);
+ queue_work(wwan_wq, &port->del_work);
return count;
}
@@ -416,7 +417,7 @@ static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
* waiting for this callback to finish in the debugfs_remove() call.
* So, use a workqueue.
*/
- schedule_work(&dev->del_work);
+ queue_work(wwan_wq, &dev->del_work);
return count;
}
@@ -506,9 +507,15 @@ static int __init wwan_hwsim_init(void)
if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
return -EINVAL;
+ wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+ if (!wwan_wq)
+ return -ENOMEM;
+
wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class))
- return PTR_ERR(wwan_hwsim_class);
+ if (IS_ERR(wwan_hwsim_class)) {
+ err = PTR_ERR(wwan_hwsim_class);
+ goto err_wq_destroy;
+ }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -523,9 +530,13 @@ static int __init wwan_hwsim_init(void)
return 0;
err_clean_devs:
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
+ flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+err_wq_destroy:
+ destroy_workqueue(wwan_wq);
return err;
}
@@ -534,9 +545,10 @@ static void __exit wwan_hwsim_exit(void)
{
debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
- flush_scheduled_work(); /* Wait deletion works completion */
+ flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+ destroy_workqueue(wwan_wq);
}
module_init(wwan_hwsim_init);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index efb85c6d8e2d..e1846d04817f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
nvme_log_error(req);
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
@@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
+ req->rq_flags |= RQF_QUIET;
ret = nvme_execute_rq(req, at_head);
if (result && ret >= 0)
*result = nvme_req(req)->result;
@@ -1287,6 +1288,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_EUI64_LEN;
memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
return NVME_NIDT_EUI64_LEN;
case NVME_NIDT_NGUID:
@@ -1295,6 +1298,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_NGUID_LEN;
memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
return NVME_NIDT_NGUID_LEN;
case NVME_NIDT_UUID:
@@ -1303,6 +1308,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_UUID_LEN;
uuid_copy(&ids->uuid, data + sizeof(*cur));
return NVME_NIDT_UUID_LEN;
case NVME_NIDT_CSI:
@@ -1399,12 +1406,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
- !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
- if (ctrl->vs >= NVME_VS(1, 2, 0) &&
- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+ dev_info(ctrl->device,
+ "Ignoring bogus Namespace Identifiers\n");
+ } else {
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ }
return 0;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1393bbf82d71..a2b53ca63335 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -144,6 +144,11 @@ enum nvme_quirks {
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+
+ /*
+ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+ */
+ NVME_QUIRK_BOGUS_NID = (1 << 18),
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d817ca17463e..3aacf1c0d5a5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3409,7 +3409,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
@@ -3447,6 +3450,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index afdcb91601d2..1e2d69453771 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -187,7 +187,7 @@ source "drivers/perf/hisilicon/Kconfig"
config MARVELL_CN10K_DDR_PMU
tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support"
- depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
help
Enable perf support for Marvell DDR Performance monitoring
event on CN10K platform.
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 94ebc1ecace7..b1b2a55de77f 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -29,7 +29,7 @@
#define CNTL_OVER_MASK 0xFFFFFFFE
#define CNTL_CSV_SHIFT 24
-#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
+#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 7640491aab12..30234c261b05 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -736,7 +736,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
{
u64 mpidr;
int cpu_cluster_id;
- struct cluster_pmu *cluster = NULL;
+ struct cluster_pmu *cluster;
/*
* This assumes that the cluster_id is in MPIDR[aff1] for
@@ -758,10 +758,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
cluster->cluster_id);
cpumask_set_cpu(cpu, &cluster->cluster_cpus);
*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
- break;
+ return cluster;
}
- return cluster;
+ return NULL;
}
static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 6b8b3ab8db48..3463629f8764 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -584,21 +584,6 @@ static struct platform_driver acerhdf_driver = {
.remove = acerhdf_remove,
};
-/* checks if str begins with start */
-static int str_starts_with(const char *str, const char *start)
-{
- unsigned long str_len = 0, start_len = 0;
-
- str_len = strlen(str);
- start_len = strlen(start);
-
- if (str_len >= start_len &&
- !strncmp(str, start, start_len))
- return 1;
-
- return 0;
-}
-
/* check hardware */
static int __init acerhdf_check_hardware(void)
{
@@ -651,9 +636,9 @@ static int __init acerhdf_check_hardware(void)
* check if actual hardware BIOS vendor, product and version
* IDs start with the strings of BIOS table entry
*/
- if (str_starts_with(vendor, bt->vendor) &&
- str_starts_with(product, bt->product) &&
- str_starts_with(version, bt->version)) {
+ if (strstarts(vendor, bt->vendor) &&
+ strstarts(product, bt->product) &&
+ strstarts(version, bt->version)) {
found = 1;
break;
}
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index e9d0dbbb2887..fa4123dbdf7f 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -160,8 +160,10 @@ MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+#endif
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -325,6 +327,7 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
return 0;
}
+#ifdef CONFIG_SUSPEND
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
{
struct smu_metrics table;
@@ -338,6 +341,7 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
table.timein_s0i3_lastcapture);
}
+#endif
#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
@@ -569,6 +573,7 @@ out_unlock:
return rc;
}
+#ifdef CONFIG_SUSPEND
static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -694,6 +699,7 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
.prepare = amd_pmc_s2idle_prepare,
.restore = amd_pmc_s2idle_restore,
};
+#endif
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
@@ -733,6 +739,7 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
return 0;
}
+#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
int err;
@@ -753,6 +760,7 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
return 0;
}
+#endif
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
@@ -859,9 +867,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
+#ifdef CONFIG_SUSPEND
err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
if (err)
dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+#endif
amd_pmc_dbgfs_register(dev);
return 0;
@@ -875,7 +885,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+#ifdef CONFIG_SUSPEND
acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+#endif
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
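Editor's note: every function and call on the s2idle path is now fenced with #ifdef CONFIG_SUSPEND, so kernels built without suspend support neither emit "defined but not used" warnings for the helpers nor reference acpi_register_lps0_dev(), which is only available when suspend support is configured. The shape of the pattern, with a hypothetical my_dev type:

#ifdef CONFIG_SUSPEND
static void capture_suspend_state(struct my_dev *d)
{
	/* work that is meaningful only across a suspend/resume cycle */
}
#endif

static int my_probe(struct my_dev *d)
{
#ifdef CONFIG_SUSPEND
	capture_suspend_state(d);	/* compiled out when suspend is off */
#endif
	return 0;
}

Where the guarded symbols exist in all configurations, if (IS_ENABLED(CONFIG_SUSPEND)) is usually preferred because it keeps both branches visible to the compiler; here the ACPI LPS0 API itself is conditional, so the preprocessor guard is the safer choice.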
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
index f5c72e33f9ae..05534287bc26 100644
--- a/drivers/platform/x86/barco-p50-gpio.c
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/err.h>
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index c1d9ed9b7b67..19f6b456234f 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
if (value > samsung->kbd_led.max_brightness)
value = samsung->kbd_led.max_brightness;
- else if (value < 0)
- value = 0;
samsung->kbd_led_wk = value;
queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
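Editor's note: the deleted else-branch was dead code. enum led_brightness has no negative enumerators, so on compilers that pick an unsigned underlying type the value < 0 test can never be true (and -Wtype-limits says so). A tiny illustration of the tautology, with invented names:

/* Illustrative only: an enum with no negative enumerators. */
enum level { LEVEL_OFF = 0, LEVEL_FULL = 255 };

static enum level clamp_level(enum level value, enum level max)
{
	if (value > max)
		value = max;
	/* 'if (value < 0)' here is always false when the enum's underlying
	 * type is unsigned; compilers flag it as a tautological compare. */
	return value;
}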
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index bce17ca97947..a01a92769c1a 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -740,16 +740,8 @@ static ssize_t certificate_store(struct kobject *kobj,
if (!tlmi_priv.certificate_support)
return -EOPNOTSUPP;
- new_cert = kstrdup(buf, GFP_KERNEL);
- if (!new_cert)
- return -ENOMEM;
- /* Strip out CR if one is present */
- strip_cr(new_cert);
-
/* If empty then clear installed certificate */
- if (new_cert[0] == '\0') { /* Clear installed certificate */
- kfree(new_cert);
-
+ if ((buf[0] == '\0') || (buf[0] == '\n')) { /* Clear installed certificate */
/* Check that signature is set */
if (!setting->signature || !setting->signature[0])
return -EACCES;
@@ -763,14 +755,16 @@ static ssize_t certificate_store(struct kobject *kobj,
ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
kfree(auth_str);
- if (ret)
- return ret;
- kfree(setting->certificate);
- setting->certificate = NULL;
- return count;
+ return ret ?: count;
}
+ new_cert = kstrdup(buf, GFP_KERNEL);
+ if (!new_cert)
+ return -ENOMEM;
+ /* Strip out CR if one is present */
+ strip_cr(new_cert);
+
if (setting->cert_installed) {
/* Certificate is installed so this is an update */
if (!setting->signature || !setting->signature[0]) {
@@ -792,21 +786,14 @@ static ssize_t certificate_store(struct kobject *kobj,
auth_str = kasprintf(GFP_KERNEL, "%s,%s",
new_cert, setting->password);
}
- if (!auth_str) {
- kfree(new_cert);
+ kfree(new_cert);
+ if (!auth_str)
return -ENOMEM;
- }
ret = tlmi_simple_call(guid, auth_str);
kfree(auth_str);
- if (ret) {
- kfree(new_cert);
- return ret;
- }
- kfree(setting->certificate);
- setting->certificate = new_cert;
- return count;
+ return ret ?: count;
}
static struct kobj_attribute auth_certificate = __ATTR_WO(certificate);
@@ -1194,6 +1181,10 @@ static void tlmi_release_attr(void)
kset_unregister(tlmi_priv.attribute_kset);
+ /* Free up any saved signatures */
+ kfree(tlmi_priv.pwd_admin->signature);
+ kfree(tlmi_priv.pwd_admin->save_signature);
+
/* Authentication structures */
sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_admin->kobj);
@@ -1210,11 +1201,6 @@ static void tlmi_release_attr(void)
}
kset_unregister(tlmi_priv.authentication_kset);
-
- /* Free up any saved certificates/signatures */
- kfree(tlmi_priv.pwd_admin->certificate);
- kfree(tlmi_priv.pwd_admin->signature);
- kfree(tlmi_priv.pwd_admin->save_signature);
}
static int tlmi_sysfs_init(void)
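Editor's note: return ret ?: count; uses the GNU a ?: b extension, equivalent to a ? a : b but evaluating a only once — propagate a non-zero error code, otherwise report the whole buffer as consumed, which is the usual sysfs store contract. A small sketch with a hypothetical do_update():

/* Sketch of the sysfs store return idiom. */
static ssize_t example_store(const char *buf, size_t count)
{
	int ret = do_update(buf);	/* hypothetical helper: 0 or -errno */

	return ret ?: count;		/* error code, or bytes consumed */
}

The restructuring above also narrows the kstrdup() lifetime: the clear-certificate path no longer allocates a copy at all, and on the update path new_cert is freed in a single place before the error checks rather than on each exit branch.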
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index 4f69df6eed07..4daba6151cd6 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -63,7 +63,6 @@ struct tlmi_pwd_setting {
int index; /*Used for HDD and NVME auth */
enum level_option level;
bool cert_installed;
- char *certificate;
char *signature;
char *save_signature;
};
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index ea02c8dcd748..d925cb137e12 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -604,6 +604,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = samsung_sdi_battery_get_info(&psy->dev, value, &info);
if (!err)
goto out_ret_pointer;
+ else if (err == -ENODEV)
+ /*
+ * Device does not have a static battery.
+ * Proceed to look for a simple battery.
+ */
+ err = 0;
if (strcmp("simple-battery", value)) {
err = -ENODEV;
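Editor's note: samsung_sdi_battery_get_info() returns -ENODEV when the compatible string names no static Samsung battery; the new branch clears that specific error so the lookup falls through to the generic simple-battery parsing, while any other failure still aborts. The fallback chain, sketched with hypothetical helpers:

/* Sketch: vendor-specific lookup first, generic lookup as fallback. */
static int get_battery_info_sketch(const char *compatible, struct info *out)
{
	int err = vendor_lookup(compatible, out);	/* hypothetical */

	if (!err)
		return 0;		/* vendor table matched */
	if (err != -ENODEV)
		return err;		/* real error: propagate */

	return generic_lookup(compatible, out);	/* hypothetical fallback */
}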
diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c
index 9d59f277f519..b33daab798b9 100644
--- a/drivers/power/supply/samsung-sdi-battery.c
+++ b/drivers/power/supply/samsung-sdi-battery.c
@@ -824,6 +824,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
.constant_charge_current_max_ua = 900000,
.constant_charge_voltage_max_uv = 4200000,
.charge_term_current_ua = 200000,
+ .charge_restart_voltage_uv = 4170000,
.maintenance_charge = samsung_maint_charge_table,
.maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
.alert_low_temp_charge_current_ua = 300000,
@@ -867,6 +868,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
.constant_charge_current_max_ua = 1500000,
.constant_charge_voltage_max_uv = 4350000,
.charge_term_current_ua = 120000,
+ .charge_restart_voltage_uv = 4300000,
.maintenance_charge = samsung_maint_charge_table,
.maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
.alert_low_temp_charge_current_ua = 300000,
diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
index 05147d2c3842..485e58b264c0 100644
--- a/drivers/regulator/atc260x-regulator.c
+++ b/drivers/regulator/atc260x-regulator.c
@@ -292,6 +292,7 @@ enum atc2603c_reg_ids {
.bypass_mask = BIT(5), \
.active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \
.active_discharge_mask = BIT(1), \
+ .active_discharge_on = BIT(1), \
.owner = THIS_MODULE, \
}
diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
index f21e3f8b21f2..8e13dea354a2 100644
--- a/drivers/regulator/rtq2134-regulator.c
+++ b/drivers/regulator/rtq2134-regulator.c
@@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = {
.enable_mask = RTQ2134_VOUTEN_MASK, \
.active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
.active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
+ .active_discharge_on = RTQ2134_ACTDISCHG_MASK, \
.ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
.ramp_mask = RTQ2134_RSPUP_MASK, \
.ramp_delay_table = rtq2134_buck_ramp_delay_table, \
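Editor's note: both regulator fixes above add .active_discharge_on to descriptors that already set the register and mask. The regulator core's regmap helper writes active_discharge_on into the masked field when enabling and active_discharge_off when disabling; with the on-value left at its zero default, "enable" and "disable" wrote the same 0, so active discharge could never actually be switched on. A simplified sketch of the core helper's logic (mirroring regulator_set_active_discharge_regmap(), not its verbatim source):

/* Sketch of the core helper these descriptor fields feed into. */
static int set_active_discharge_sketch(struct regulator_dev *rdev, bool enable)
{
	unsigned int val = enable ? rdev->desc->active_discharge_on :
				    rdev->desc->active_discharge_off;

	return regmap_update_bits(rdev->regmap,
				  rdev->desc->active_discharge_reg,
				  rdev->desc->active_discharge_mask, val);
}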
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index cadea0344486..40befdd9dfa9 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -82,6 +82,35 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
.min_uV = 2400000,
.uV_step = 100000,
.enable_time = 3000,
+ .off_on_delay = 36000,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO2",
+ .id = 2,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+ .vsel_reg = WM8994_LDO_2,
+ .vsel_mask = WM8994_LDO2_VSEL_MASK,
+ .ops = &wm8994_ldo2_ops,
+ .enable_time = 3000,
+ .off_on_delay = 36000,
+ .owner = THIS_MODULE,
+ },
+};
+
+static const struct regulator_desc wm8958_ldo_desc[] = {
+ {
+ .name = "LDO1",
+ .id = 1,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+ .vsel_reg = WM8994_LDO_1,
+ .vsel_mask = WM8994_LDO1_VSEL_MASK,
+ .ops = &wm8994_ldo1_ops,
+ .min_uV = 2400000,
+ .uV_step = 100000,
+ .enable_time = 3000,
.owner = THIS_MODULE,
},
{
@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
* regulator core and we need not worry about it on the
* error path.
*/
- ldo->regulator = devm_regulator_register(&pdev->dev,
- &wm8994_ldo_desc[id],
- &config);
+ if (ldo->wm8994->type == WM8994) {
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8994_ldo_desc[id],
+ &config);
+ } else {
+ ldo->regulator = devm_regulator_register(&pdev->dev,
+ &wm8958_ldo_desc[id],
+ &config);
+ }
+
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
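Editor's note: the WM8958 variant gets its own descriptor table — identical except that it omits the new .off_on_delay = 36000 (documented in microseconds) that the WM8994 LDOs need between disable and re-enable — and probe registers from whichever table matches the chip. A condensed, equivalent form of the if/else in the hunk (not the patch's literal text):

/* Sketch: pick the chip-specific descriptor table at probe time. */
const struct regulator_desc *desc = (ldo->wm8994->type == WM8994) ?
		&wm8994_ldo_desc[id] : &wm8958_ldo_desc[id];

ldo->regulator = devm_regulator_register(&pdev->dev, desc, &config);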
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 1e8315038850..a8dde4606360 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->rstc),
"failed to get reset\n");
- reset_control_deassert(priv->rstc);
+ error = reset_control_deassert(priv->rstc);
+ if (error)
+ return error;
priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
priv->rcdev.of_reset_n_cells = 1;
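Editor's note: reset_control_deassert() returns an error code — for instance when the underlying reset controller is not ready — and the probe now propagates it instead of assuming success:

/* Fragment: treat a failed deassert as a probe failure. */
error = reset_control_deassert(priv->rstc);
if (error)
	return error;

This matters especially in probe paths: continuing past a still-asserted reset line tends to produce bus hangs that are much harder to diagnose than a clean -errno.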
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 24d3395964cc..4c5bba52b105 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
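Editor's note: tegra_bpmp_transfer() only reports whether the message made it to and from the BPMP firmware; the firmware's verdict on the request itself travels back in msg.rx.ret. Mapping a non-zero firmware status to -EINVAL, as above, keeps both failure classes visible to callers. The general two-level check for this style of RPC mailbox, with a hypothetical transport call:

/* Fragment: RPC-style transports need two error checks. */
err = transport_send(dev, &msg);	/* hypothetical transport call */
if (err)
	return err;			/* the IPC itself failed */
if (msg.rx.ret)
	return -EINVAL;			/* firmware rejected the request */
return 0;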
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 5f554a3a0f62..caeebfb67149 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -317,14 +317,18 @@ enum {
};
struct aha152x_cmd_priv {
- struct scsi_pointer scsi_pointer;
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int sent_command;
+ int phase;
};
-static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd)
+static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd)
{
- struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd);
-
- return &acmd->scsi_pointer;
+ return scsi_cmd_priv(cmd);
}
MODULE_AUTHOR("Jürgen Fischer");
@@ -890,17 +894,16 @@ void aha152x_release(struct Scsi_Host *shpnt)
static int setup_expected_interrupts(struct Scsi_Host *shpnt)
{
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
- scsi_pointer->phase |= 1 << 16;
+ acp->phase |= 1 << 16;
- if (scsi_pointer->phase & selecting) {
+ if (acp->phase & selecting) {
SETPORT(SSTAT1, SELTO);
SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
SETPORT(SIMODE1, ENSELTIMO);
} else {
- SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0);
+ SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0);
SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
}
} else if(STATE==seldi) {
@@ -924,17 +927,16 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
struct completion *complete, int phase)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt);
+ struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
- scsi_pointer->phase = not_issued | phase;
- scsi_pointer->Status = 0x1; /* Ilegal status by SCSI standard */
- scsi_pointer->Message = 0;
- scsi_pointer->have_data_in = 0;
- scsi_pointer->sent_command = 0;
+ acp->phase = not_issued | phase;
+ acp->status = 0x1; /* Illegal status by SCSI standard */
+ acp->message = 0;
+ acp->sent_command = 0;
- if (scsi_pointer->phase & (resetting | check_condition)) {
+ if (acp->phase & (resetting | check_condition)) {
if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
return FAILED;
@@ -957,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
SCp.phase : current state of the command */
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
- scsi_pointer->ptr = NULL;
- scsi_pointer->this_residual = 0;
+ acp->ptr = NULL;
+ acp->this_residual = 0;
scsi_set_resid(SCpnt, 0);
- scsi_pointer->buffer = NULL;
+ acp->buffer = NULL;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- scsi_pointer->buffer = scsi_sglist(SCpnt);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = scsi_sglist(SCpnt);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
DO_LOCK(flags);
@@ -1015,7 +1017,7 @@ static void reset_done(struct scsi_cmnd *SCpnt)
static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
{
- if (aha152x_scsi_pointer(SCpnt)->phase & resetting)
+ if (aha152x_priv(SCpnt)->phase & resetting)
reset_done(SCpnt);
else
scsi_done(SCpnt);
@@ -1101,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
DO_LOCK(flags);
- if (aha152x_scsi_pointer(SCpnt)->phase & resetted) {
+ if (aha152x_priv(SCpnt)->phase & resetted) {
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
@@ -1395,31 +1397,30 @@ static void busfree_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRBUSFREE);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
#if defined(AHA152X_STAT)
action++;
#endif
- scsi_pointer->phase &= ~syncneg;
+ acp->phase &= ~syncneg;
- if (scsi_pointer->phase & completed) {
+ if (acp->phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, scsi_pointer->Status, DID_OK);
+ done(shpnt, acp->status, DID_OK);
- } else if (scsi_pointer->phase & aborted) {
- done(shpnt, scsi_pointer->Status, DID_ABORT);
+ } else if (acp->phase & aborted) {
+ done(shpnt, acp->status, DID_ABORT);
- } else if (scsi_pointer->phase & resetted) {
- done(shpnt, scsi_pointer->Status, DID_RESET);
+ } else if (acp->phase & resetted) {
+ done(shpnt, acp->status, DID_RESET);
- } else if (scsi_pointer->phase & disconnected) {
+ } else if (acp->phase & disconnected) {
/* target sent DISCONNECT */
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->disconnections++;
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
- scsi_pointer->phase |= 1 << 16;
+ acp->phase |= 1 << 16;
CURRENT_SC = NULL;
} else {
@@ -1438,24 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
action++;
#endif
- if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) {
+ if (aha152x_priv(DONE_SC)->phase & check_condition) {
struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
struct aha152x_scdata *sc = SCDATA(cmd);
scsi_eh_restore_cmnd(cmd, &sc->ses);
- aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION;
+ aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- } else if (aha152x_scsi_pointer(DONE_SC)->Status ==
- SAM_STAT_CHECK_CONDITION) {
+ } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) {
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->busfree_with_check_condition++;
#endif
- if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) {
+ if (!(aha152x_priv(DONE_SC)->phase & not_issued)) {
struct aha152x_scdata *sc;
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -1480,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) {
+ if (!(aha152x_priv(ptr)->phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
@@ -1503,13 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
DO_UNLOCK(flags);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
#if defined(AHA152X_STAT)
action++;
#endif
- scsi_pointer->phase |= selecting;
+ acp->phase |= selecting;
/* clear selection timeout */
SETPORT(SSTAT1, SELTO);
@@ -1537,13 +1536,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
*/
static void seldo_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
SETPORT(SCSISIG, 0);
SETPORT(SSTAT1, CLRBUSFREE);
SETPORT(SSTAT1, CLRPHASECHG);
- scsi_pointer->phase &= ~(selecting | not_issued);
+ acp->phase &= ~(selecting | not_issued);
SETPORT(SCSISEQ, 0);
@@ -1558,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
- if (scsi_pointer->phase & aborting) {
+ if (acp->phase & aborting) {
ADDMSGO(ABORT);
- } else if (scsi_pointer->phase & resetting) {
+ } else if (acp->phase & resetting) {
ADDMSGO(BUS_DEVICE_RESET);
} else if (SYNCNEG==0 && SYNCHRONOUS) {
- scsi_pointer->phase |= syncneg;
+ acp->phase |= syncneg;
MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
SYNCNEG=1; /* negotiation in progress */
}
@@ -1578,7 +1577,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
*/
static void selto_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp;
SETPORT(SCSISEQ, 0);
SETPORT(SSTAT1, CLRSELTIMO);
@@ -1586,9 +1585,10 @@ static void selto_run(struct Scsi_Host *shpnt)
if (!CURRENT_SC)
return;
- scsi_pointer->phase &= ~selecting;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->phase &= ~selecting;
- if (scsi_pointer->phase & aborted)
+ if (acp->phase & aborted)
done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
@@ -1616,10 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRPHASECHG);
if(CURRENT_SC) {
- struct scsi_pointer *scsi_pointer =
- aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
- if (!(scsi_pointer->phase & not_issued))
+ if (!(acp->phase & not_issued))
scmd_printk(KERN_ERR, CURRENT_SC,
"command should not have been issued yet\n");
@@ -1676,7 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
static void msgi_run(struct Scsi_Host *shpnt)
{
for(;;) {
- struct scsi_pointer *scsi_pointer;
+ struct aha152x_cmd_priv *acp;
int sstat1 = GETPORT(SSTAT1);
if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
@@ -1714,9 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- scsi_pointer->Message = MSGI(0);
- scsi_pointer->phase &= ~disconnected;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
+ acp->phase &= ~disconnected;
MSGILEN=0;
@@ -1724,8 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- scsi_pointer->Message = MSGI(0);
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
switch (MSGI(0)) {
case DISCONNECT:
@@ -1733,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
scmd_printk(KERN_WARNING, CURRENT_SC,
"target was not allowed to disconnect\n");
- scsi_pointer->phase |= disconnected;
+ acp->phase |= disconnected;
break;
case COMMAND_COMPLETE:
- scsi_pointer->phase |= completed;
+ acp->phase |= completed;
break;
case MESSAGE_REJECT:
@@ -1867,11 +1866,9 @@ static void msgi_end(struct Scsi_Host *shpnt)
*/
static void msgo_init(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-
if(MSGOLEN==0) {
- if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 &&
- SYNCRATE==0) {
+ if ((aha152x_priv(CURRENT_SC)->phase & syncneg) &&
+ SYNCNEG == 2 && SYNCRATE == 0) {
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
} else {
scmd_printk(KERN_INFO, CURRENT_SC,
@@ -1888,7 +1885,7 @@ static void msgo_init(struct Scsi_Host *shpnt)
*/
static void msgo_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
while(MSGO_I<MSGOLEN) {
if (TESTLO(SSTAT0, SPIORDY))
@@ -1901,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
if (MSGO(MSGO_I) & IDENTIFY_BASE)
- scsi_pointer->phase |= identified;
+ acp->phase |= identified;
if (MSGO(MSGO_I)==ABORT)
- scsi_pointer->phase |= aborted;
+ acp->phase |= aborted;
if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
- scsi_pointer->phase |= resetted;
+ acp->phase |= resetted;
SETPORT(SCSIDAT, MSGO(MSGO_I++));
}
@@ -1936,7 +1933,7 @@ static void msgo_end(struct Scsi_Host *shpnt)
*/
static void cmd_init(struct Scsi_Host *shpnt)
{
- if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) {
+ if (aha152x_priv(CURRENT_SC)->sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
@@ -1967,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
"command sent incompletely (%d/%d)\n",
CMD_I, CURRENT_SC->cmd_len);
else
- aha152x_scsi_pointer(CURRENT_SC)->sent_command++;
+ aha152x_priv(CURRENT_SC)->sent_command++;
}
/*
@@ -1979,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SPIORDY))
return;
- aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT);
+ aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT);
}
@@ -2003,7 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt)
static void datai_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer;
+ struct aha152x_cmd_priv *acp;
unsigned long the_time;
int fifodata, data_count;
@@ -2041,36 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt)
fifodata = GETPORT(FIFOSTAT);
}
- scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
- if (scsi_pointer->this_residual > 0) {
- while (fifodata > 0 && scsi_pointer->this_residual > 0) {
- data_count = fifodata > scsi_pointer->this_residual ?
- scsi_pointer->this_residual :
- fifodata;
+ acp = aha152x_priv(CURRENT_SC);
+ if (acp->this_residual > 0) {
+ while (fifodata > 0 && acp->this_residual > 0) {
+ data_count = fifodata > acp->this_residual ?
+ acp->this_residual : fifodata;
fifodata -= data_count;
if (data_count & 1) {
SETPORT(DMACNTRL0, ENDMA|_8BIT);
- *scsi_pointer->ptr++ = GETPORT(DATAPORT);
- scsi_pointer->this_residual--;
+ *acp->ptr++ = GETPORT(DATAPORT);
+ acp->this_residual--;
DATA_LEN++;
SETPORT(DMACNTRL0, ENDMA);
}
if (data_count > 1) {
data_count >>= 1;
- insw(DATAPORT, scsi_pointer->ptr, data_count);
- scsi_pointer->ptr += 2 * data_count;
- scsi_pointer->this_residual -= 2 * data_count;
+ insw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
DATA_LEN += 2 * data_count;
}
- if (scsi_pointer->this_residual == 0 &&
- !sg_is_last(scsi_pointer->buffer)) {
+ if (acp->this_residual == 0 &&
+ !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
}
} else if (fifodata > 0) {
@@ -2138,15 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt)
static void datao_run(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
unsigned long the_time;
int data_count;
/* until phase changes or all data sent */
- while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) {
+ while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) {
data_count = 128;
- if (data_count > scsi_pointer->this_residual)
- data_count = scsi_pointer->this_residual;
+ if (data_count > acp->this_residual)
+ data_count = acp->this_residual;
if(TESTLO(DMASTAT, DFIFOEMP)) {
scmd_printk(KERN_ERR, CURRENT_SC,
@@ -2157,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt)
if(data_count & 1) {
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
- SETPORT(DATAPORT, *scsi_pointer->ptr++);
- scsi_pointer->this_residual--;
+ SETPORT(DATAPORT, *acp->ptr++);
+ acp->this_residual--;
CMD_INC_RESID(CURRENT_SC, -1);
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
}
if(data_count > 1) {
data_count >>= 1;
- outsw(DATAPORT, scsi_pointer->ptr, data_count);
- scsi_pointer->ptr += 2 * data_count;
- scsi_pointer->this_residual -= 2 * data_count;
+ outsw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
- if (scsi_pointer->this_residual == 0 &&
- !sg_is_last(scsi_pointer->buffer)) {
+ if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer);
- scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
the_time=jiffies + 100*HZ;
@@ -2192,7 +2187,7 @@ static void datao_run(struct Scsi_Host *shpnt)
static void datao_end(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
if(TESTLO(DMASTAT, DFIFOEMP)) {
u32 datao_cnt = GETSTCNT();
@@ -2211,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt)
sg = sg_next(sg);
}
- scsi_pointer->buffer = sg;
- scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done;
- scsi_pointer->this_residual = scsi_pointer->buffer->length -
- done;
+ acp->buffer = sg;
+ acp->ptr = SG_ADDRESS(acp->buffer) + done;
+ acp->this_residual = acp->buffer->length - done;
}
SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2229,7 +2223,6 @@ static void datao_end(struct Scsi_Host *shpnt)
*/
static int update_state(struct Scsi_Host *shpnt)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
int dataphase=0;
unsigned int stat0 = GETPORT(SSTAT0);
unsigned int stat1 = GETPORT(SSTAT1);
@@ -2244,7 +2237,7 @@ static int update_state(struct Scsi_Host *shpnt)
} else if (stat0 & SELDI && PREVSTATE == busfree) {
STATE=seldi;
} else if (stat0 & SELDO && CURRENT_SC &&
- (scsi_pointer->phase & selecting)) {
+ (aha152x_priv(CURRENT_SC)->phase & selecting)) {
STATE=seldo;
} else if(stat1 & SELTO) {
STATE=selto;
@@ -2376,8 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(SXFRCTL0, CH1);
SETPORT(DMACNTRL0, 0);
if(CURRENT_SC)
- aha152x_scsi_pointer(CURRENT_SC)->phase &=
- ~spiordy;
+ aha152x_priv(CURRENT_SC)->phase &= ~spiordy;
}
/*
@@ -2399,8 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(DMACNTRL0, 0);
SETPORT(SXFRCTL0, CH1|SPIOEN);
if(CURRENT_SC)
- aha152x_scsi_pointer(CURRENT_SC)->phase |=
- spiordy;
+ aha152x_priv(CURRENT_SC)->phase |= spiordy;
}
/*
@@ -2490,7 +2481,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
*/
static void show_command(struct scsi_cmnd *ptr)
{
- const int phase = aha152x_scsi_pointer(ptr)->phase;
+ const int phase = aha152x_priv(ptr)->phase;
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
@@ -2538,8 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt)
static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
- struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr);
- const int phase = scsi_pointer->phase;
+ struct aha152x_cmd_priv *acp = aha152x_priv(ptr);
+ const int phase = acp->phase;
int i;
seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2549,8 +2540,8 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
seq_printf(m, "0x%02x ", ptr->cmnd[i]);
seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
- scsi_get_resid(ptr), scsi_pointer->this_residual,
- sg_nents(scsi_pointer->buffer) - 1);
+ scsi_get_resid(ptr), acp->this_residual,
+ sg_nents(acp->buffer) - 1);
if (phase & not_issued)
seq_puts(m, "not issued|");
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 679a4fd13874..793fe19993a9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
/****************************** PCI-X definitions *****************************/
#define PCIXR_COMMAND 0x96
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 2f0bdb9225a4..5fad41b1ab58 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahd_compose_id(device,
vendor,
subdevice,
@@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
* Record if this is an HP board.
*/
subvendor = ahd_pci_read_config(ahd->dev_softc,
- PCIR_SUBVEND_0, /*bytes*/2);
+ PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
if (subvendor == SUBID_HP)
ahd->flags |= AHD_HP_BOARD;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 4782a304e93c..51d9f4de0734 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
typedef enum
{
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index dab3a6d12c4d..2d4c85426dc3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
/*
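Editor's note: PCIR_SUBVEND_0 (0x2c) and PCIR_SUBDEV_0 (0x2e) were private spellings of standard type-0 config-space offsets that <uapi/linux/pci_regs.h> already provides as PCI_SUBSYSTEM_VENDOR_ID and PCI_SUBSYSTEM_ID. With the generic accessor the same reads look like:

#include <linux/pci.h>

/* Fragment: read a function's subsystem IDs with the standard symbols. */
static void log_subsystem_ids(struct pci_dev *pdev)
{
	u16 subvendor, subdevice;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subvendor);
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subdevice);
	dev_info(&pdev->dev, "subsystem %04x:%04x\n", subvendor, subdevice);
}

(The aic7xxx drivers go through their own ahd_/ahc_pci_read_config() wrappers rather than pci_read_config_word(), but the register symbols are the same.)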
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 0103f811cc25..776544385598 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1169,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
ofld_kcqe->fcoe_conn_context_id);
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
goto ofld_cmpl_err;
}
/*
@@ -1226,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ERR PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mismatch\n");
return;
}
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
goto enbl_cmpl_err;
}
if (!ofld_kcqe->completion_status)
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5521469ce678..6c864b093ac9 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
break;
- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
@@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+ printk(KERN_ALERT "conn destroy- error hba mismatch\n");
return;
}
@@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+ printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n");
return;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fe86fd61a995..15fbd09baa94 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
/* Must suspend all rx queue activity for this ep */
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
}
/* CONN_DISCONNECT timeout may or may not be an issue depending
* on what transcribed in TCP layer, different targets behave
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 8c7d4dda4cf2..4365d52c6430 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, conn 0x%p.\n", csk, conn);
- if (unlikely(!conn || conn->suspend_rx)) {
+ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
csk, conn, conn ? conn->id : 0xFF,
- conn ? conn->suspend_rx : 0xFF);
+ conn ? conn->flags : 0xFF);
return;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 461ef8a76c4c..4bda2f6cb352 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -442,7 +442,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_INTERNAL_ABORT:
hisi_sas_task_prep_abort(hisi_hba, slot);
break;
- fallthrough;
default:
return;
}
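Editor's note: a fallthrough; placed after break; is unreachable — compilers such as clang warn about it (e.g. -Wunreachable-code-fallthrough). The macro is only meaningful as the final statement of a case body that intentionally continues into the next label:

/* Fragment: where fallthrough does and does not belong. */
switch (prot) {
case PROT_A:
	prep_a();
	fallthrough;	/* deliberate: continue into PROT_B handling */
case PROT_B:
	prep_b();
	break;		/* anything placed after this break is unreachable */
default:
	return;
}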
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 80238e6a3c98..eee1a24f7e15 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -36,7 +36,7 @@
#define IBMVSCSIS_VERSION "v0.2"
-#define INITIAL_SRP_LIMIT 800
+#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d690d9cf7eb1..35589b6af90d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -413,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for io request object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -428,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for remote device object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -462,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
} else
dev_err(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received event 0x%x "
- "for remote device object 0x%0x that doesnt "
+ "for remote device object 0x%0x that doesn't "
"exist.\n",
__func__,
ihost,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d09926e6c8a8..797abf4f5399 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task;
itt_t itt;
- if (session->state == ISCSI_STATE_TERMINATE)
+ if (session->state == ISCSI_STATE_TERMINATE ||
+ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
return NULL;
if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
return true;
}
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* Do this after dropping the extra ref because if this was a requeue
* it's removed from that list and cleanup_queued_task would miss it.
*/
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
/*
* Save the task and ref in case we weren't cleaning up this
* task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->frwd_lock);
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto fault;
}
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE << 16;
goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->frwd_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
flush_workqueue(ihost->workq);
}
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
iscsi_conn_queue_work(conn);
}
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
+ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
if (!is_active) {
/*
* if logout timed out before userspace could even send a PDU
@@ -3045,7 +3048,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
- memset(conn, 0, sizeof(*conn) + dd_size);
conn->dd_data = cls_conn->dd_data + sizeof(*conn);
conn->session = session;
@@ -3318,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
+
+ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
spin_unlock_bh(&session->frwd_lock);
/*
@@ -3330,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
/*
* Unblock xmitworker(), Login Phase will pass through.
*/
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
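Editor's note: the per-purpose suspend_rx/suspend_tx words collapse into a single conn->flags bitmap driven by the atomic bitops, and the new ISCSI_CONN_FLAG_BOUND bit lets __iscsi_conn_send_pdu() refuse PDUs as soon as the connection is unbound instead of racing teardown. The bitmap idiom, reduced to essentials with sketch names:

/* Sketch: one flags word, enum bit numbers, atomic bitops. */
enum conn_flag_sketch {
	FLAG_SUSPEND_TX,
	FLAG_SUSPEND_RX,
	FLAG_BOUND,
};

struct conn_sketch {
	unsigned long flags;
};

static bool may_transmit(struct conn_sketch *c)
{
	return test_bit(FLAG_BOUND, &c->flags) &&
	       !test_bit(FLAG_SUSPEND_TX, &c->flags);
}

set_bit() and clear_bit() are atomic read-modify-write operations, so different bits of the one word can be updated concurrently without adding locking around several independent fields.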
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2e9ffe3d1a55..883005757ddb 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
*/
conn->last_recv = jiffies;
- if (unlikely(conn->suspend_rx)) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");
*status = ISCSI_TCP_SUSPENDED;
return 0;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f0cf8ffdc5f3..0025760230e5 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -897,6 +897,11 @@ enum lpfc_irq_chann_mode {
NHT_MODE,
};
+enum lpfc_hba_bit_flags {
+ FABRIC_COMANDS_BLOCKED,
+ HBA_PCI_ERR,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -1043,7 +1048,6 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
@@ -1350,7 +1354,6 @@ struct lpfc_hba {
atomic_t fabric_iocb_count;
struct timer_list fabric_block_timer;
unsigned long bit_flags;
-#define FABRIC_COMANDS_BLOCKED 0
atomic_t num_rsrc_err;
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
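Editor's note: HBA_PCI_ERR moves from a bitmask in hba_flag (tested with &) to a bit number in the bit_flags word (tested with test_bit()), joining FABRIC_COMANDS_BLOCKED in the new enum. The two conventions must never be mixed, since the bitops take bit indices while & takes masks:

/* Fragment: bit numbers for bitops, masks for '&' tests. Sketch names. */
enum bits_sketch { BLOCKED_BIT, PCI_ERR_BIT };	/* indices 0 and 1 */

unsigned long word;

set_bit(PCI_ERR_BIT, &word);		/* atomically sets the mask BIT(1) */
if (test_bit(PCI_ERR_BIT, &word))
	;				/* correct test for that bit */

/* Passing the old mask value 0x80000 to set_bit() would address bit
 * 524288, far outside the word — hence the move to enum bit numbers. */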
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 96408cd6c4c8..9897a1aa387b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -670,3 +670,6 @@ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
uint32_t hash, uint8_t *buf);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0144da30e3db..2b877dff5ed4 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
ndlp = rdata->pnode;
if (!rdata->pnode) {
- pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
- __func__, rport, rport->scsi_target_id);
+ pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
return -EINVAL;
}
@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d\n",
+ "load_flag x%x refcnt %d state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
- vport->load_flag, kref_read(&ndlp->kref));
+ vport->load_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down.
* The teardown process cleans up the node via lpfc_drop_node.
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ /* clear the NLP_XPT_REGD if the node is not registered
+ * with nvme-fc
+ */
+ if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
/* Remove the node reference from remote_port_add now.
* The driver will not call remote_port_delete.
@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- /* We need to hold the node by incrementing the reference
- * count until this queued work is done
- */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (phba->worker_thread) {
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3188 worker thread is stopped %s x%06x, "
+ " rport x%px flg x%x load_flag x%x refcnt "
+ "%d\n", __func__, ndlp->nlp_DID,
+ ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* Node is in dev loss. No further transaction. */
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ ndlp->nlp_state, ndlp->nlp_rpi,
+ kref_read(&ndlp->kref));
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
int free_evt;
int fcf_inuse;
uint32_t nlp_did;
+ bool hba_pci_err;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
spin_unlock_irq(&phba->hbalock);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- lpfc_els_retry_delay_handler(ndlp);
- free_evt = 0; /* evt is part of ndlp */
+ if (!hba_pci_err) {
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ }
/* decrement the node reference count held
* for this queued work
*/
@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_RECOVER_PORT:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
- free_evt = 0;
+ if (!hba_pci_err) {
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ }
/* decrement the node reference count held for
* this queued work
*/
@@ -859,14 +890,18 @@ lpfc_work_done(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
+ bool hba_pci_err;
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ if (hba_pci_err)
+ ha_copy = 0;
/* First, try to post the next mailbox command to SLI4 device */
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
if (ha_copy & HA_ERATT) {
@@ -886,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_latt(phba);
/* Handle VMID Events */
- if (lpfc_is_vmid_enabled(phba)) {
+ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
if (phba->pport->work_port_events &
WORKER_CHECK_VMID_ISSUE_QFPA) {
lpfc_check_vmid_qfpa_issue(phba);
@@ -936,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
+ if (hba_pci_err)
+ continue;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
@@ -1173,12 +1210,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
+ int offline;
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ offline = pci_channel_offline(phba->pcidev);
phba->defer_flogi_acc_flag = false;
@@ -1219,7 +1258,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any SLI3 firmware default rpi's */
- if (phba->sli_rev > LPFC_SLI_REV3)
+ if (phba->sli_rev > LPFC_SLI_REV3 || offline)
goto skip_unreg_did;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4712,6 +4751,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0999 %s Not regd: ndlp x%px rport x%px DID "
+ "x%x FLG x%x XPT x%x\n",
+ __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
return;
}
@@ -4722,6 +4766,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
+ } else if (!ndlp->rport) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+ " XPT x%x refcnt %d\n",
+ __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
}
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
@@ -5371,6 +5422,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
+ lpfc_nlp_put(ndlp);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
@@ -6097,12 +6149,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state. When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and transport notified that the node is gone.
+ * Return Code:
+ * none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
+}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
+ if (pci_channel_offline(vport->phba->pcidev))
+ lpfc_notify_xport_npr(vport);
}
/*****************************************************************************/
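Editor's note on the dev-loss rework above: with the queueing now conditional on phba->worker_thread, a dev-loss event that arrives after the worker has been stopped (driver unload, or a dead PCI slot) is resolved inline through the discovery state machine rather than parked on a work list nobody will drain. The skeleton of the choice, with hypothetical helper names:

/* Fragment: queue to a worker only while it is still alive. */
if (phba->worker_thread)
	queue_dev_loss_evt(phba, ndlp);		/* hypothetical: async path */
else
	handle_dev_loss_inline(phba, ndlp);	/* teardown: no worker left */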
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index eed6464bd880..461d333b1b3a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1642,7 +1643,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
if (phba->link_state == LPFC_HBA_ERROR &&
- phba->hba_flag & HBA_PCI_ERR) {
+ test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1985,6 +1986,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
+ lpfc_sli_flush_io_rings(phba);
return;
}
@@ -2973,6 +2975,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
NLP_EVT_DEVICE_RM);
}
+ /* This is a special case flush to return all
+ * IOs before entering this loop. There are
+ * two points in the code where a flush is
+ * avoided if the FC_UNLOADING flag is set:
+ * one is in the multipool destroy
+ * (this prevents a crash) and the other is
+ * in the nvme abort handler (also prevents
+ * a crash). Both of these exceptions are
+ * cases where the slot is still accessible.
+ * The flush here is done only when the PCI
+ * slot is offline.
+ */
+ if (vport->load_flag & FC_UNLOADING &&
+ pci_channel_offline(phba->pcidev))
+ lpfc_sli_flush_io_rings(vport->phba);
+
/* At this point, ALL ndlp's should be gone
* because of the previous NLP_EVT_DEVICE_RM.
* Lets wait for this to happen, if needed.
@@ -2985,7 +3003,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_TRACE_EVENT,
+ LOG_DISCOVERY,
"0282 did:x%x ndlp:x%px "
"refcnt:%d xflags x%x nflag x%x\n",
ndlp->nlp_DID, (void *)ndlp,
@@ -3682,7 +3700,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
- int offline = 0;
+ int offline;
+ bool hba_pci_err;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3692,6 +3711,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
offline = pci_channel_offline(phba->pcidev);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
@@ -3715,11 +3735,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
- if (offline) {
+ if (offline || hba_pci_err) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_UNREG_INP |
NLP_RPI_REGISTERED);
spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli_rpi_release(vports[i],
+ ndlp);
} else {
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -13354,8 +13377,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Abort all iocbs associated with the hba */
lpfc_sli_hba_iocb_abort(phba);
- /* Wait for completion of device XRI exchange busy */
- lpfc_sli4_xri_exchange_busy_wait(phba);
+ if (!pci_channel_offline(phba->pcidev))
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
if (phba->pport)
@@ -13374,15 +13398,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable FW logging to host memory */
lpfc_ras_stop_fwlog(phba);
- /* Unset the queues shared with the hardware then release all
- * allocated resources.
- */
- lpfc_sli4_queue_unset(phba);
- lpfc_sli4_queue_destroy(phba);
-
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ /* release all queue allocated resources. */
+ lpfc_sli4_queue_destroy(phba);
+
/* Free RAS DMA memory */
if (phba->ras_fwlog.ras_enabled)
lpfc_sli4_ras_dma_free(phba);
@@ -14262,6 +14283,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
"2711 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
+ lpfc_sli4_prep_dev_for_reset(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -15057,24 +15079,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2826 PCI channel disable preparing for reset\n");
+ int offline = pci_channel_offline(phba->pcidev);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2826 PCI channel disable preparing for reset offline"
+ " %d\n", offline);
/* Block any management I/Os to the device */
lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
+ /* HBA_PCI_ERR was set in io_error_detect */
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
/* Flush all driver's outstanding I/Os as we are to reset */
lpfc_sli_flush_io_rings(phba);
+ lpfc_offline(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Disable interrupt and pci device */
lpfc_sli4_disable_intr(phba);
- lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
}
@@ -15123,6 +15149,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ bool hba_pci_err;
switch (state) {
case pci_channel_io_normal:
@@ -15130,17 +15157,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Fatal error, prepare for slot reset */
- lpfc_sli4_prep_dev_for_reset(phba);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2832 Already handling PCI error "
+ "state: x%x\n", state);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- phba->hba_flag |= HBA_PCI_ERR;
+ set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
/* Unknown state, prepare and request slot reset */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
@@ -15174,17 +15208,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
uint32_t intr_mode;
+ bool hba_pci_err;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
printk(KERN_ERR "lpfc: Cannot re-enable "
- "PCI device after reset.\n");
+ "PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_restore_state(pdev);
- phba->hba_flag &= ~HBA_PCI_ERR;
+ hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ dev_info(&pdev->dev,
+ "hba_pci_err was not set, recovering slot reset.\n");
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -15198,6 +15236,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -15239,8 +15279,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
*/
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
/* Perform device reset */
- lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Bring the device back online */
lpfc_online(phba);
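The lpfc_init.c hunks above migrate HBA_PCI_ERR from the plain hba_flag word (updated with a non-atomic |=) to the bit_flags bitmap driven by test_and_set_bit()/test_and_clear_bit(), so that exactly one path wins the race to start PCI error handling. A minimal kernel-style sketch of that pattern, using a hypothetical struct demo_hba rather than the real lpfc structures:

#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DEMO_PCI_ERR	0	/* bit number, not a mask */

struct demo_hba {
	unsigned long bit_flags;	/* atomic bitmap */
};

/* Returns true only for the caller that wins the race. */
static bool demo_enter_pci_err(struct demo_hba *hba)
{
	/*
	 * test_and_set_bit() is atomic; a plain "flags |= BIT" is not,
	 * so two CPUs could both believe they set the flag first.
	 */
	return !test_and_set_bit(DEMO_PCI_ERR, &hba->bit_flags);
}

static void demo_leave_pci_err(struct demo_hba *hba)
{
	if (!test_and_clear_bit(DEMO_PCI_ERR, &hba->bit_flags))
		pr_info("demo: PCI error flag was not set\n");
}

The old bit value returned by test_and_set_bit() is what the frozen-channel case above relies on to log "2832 Already handling PCI error" instead of preparing the device for reset twice.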
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 1213a299f9aa..8d26f207ebd2 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+
+ if (!vport || vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
+
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
if (qhandle == NULL)
return -ENOMEM;
@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
return -EINVAL;
remoteport = lpfc_rport->remoteport;
- if (!vport->localport)
+ if (!vport->localport ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -EINVAL;
lport = vport->localport->private;
@@ -559,6 +565,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
+ if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
if (!vport->phba->sli4_hba.nvmels_wq)
return -ENOMEM;
@@ -662,7 +670,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
return -EINVAL;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
atomic_inc(&lport->fc4NvmeLsRequests);
@@ -1516,7 +1525,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (unlikely(vport->load_flag & FC_UNLOADING)) {
+ if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -2169,8 +2179,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
- if (!vport || !vport->localport ||
- !qp || !qp->io_wq)
+ if (!vport->localport || !qp || !qp->io_wq)
return;
pring = qp->io_wq->pring;
@@ -2180,8 +2189,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_scsi += qp->abts_scsi_io_bufs;
abts_nvme += qp->abts_nvme_io_bufs;
}
- if (!vport || !vport->localport ||
- vport->phba->hba_flag & HBA_PCI_ERR)
+ if (!vport->localport ||
+ test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+ vport->load_flag & FC_UNLOADING)
return;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2541,8 +2551,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
	 * return value is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING ||
- unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
+ if (vport->load_flag & FC_UNLOADING)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
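The lpfc_nvme.c changes apply one guard uniformly at the transport entry points: once FC_UNLOADING or HBA_IOQ_FLUSH is set, new queue creation, LS requests, and FCP I/O fail fast with -ENODEV rather than being posted and later hunted down by the flush path. A hedged sketch of that gate, with invented flag and struct names:

#include <linux/errno.h>

#define DEMO_UNLOADING	0x1
#define DEMO_IOQ_FLUSH	0x2

struct demo_port {
	unsigned int flags;
};

/* Entry-point guard: reject new I/O once teardown has begun. */
static int demo_submit(struct demo_port *port)
{
	if (port->flags & (DEMO_UNLOADING | DEMO_IOQ_FLUSH))
		return -ENODEV;
	/* ... build and post the command ... */
	return 0;
}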
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3c132604fd91..ba9dbb51b75f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5929,13 +5929,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
lpfc_cmd->waitq = &waitq;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
spin_unlock(&pring_s4->ring_lock);
- else
+ ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+ lpfc_sli_abort_fcp_cmpl);
+ } else {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
-
- ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
- lpfc_sli_abort_fcp_cmpl);
+ ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+ lpfc_sli_abort_fcp_cmpl);
+ }
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 20d40957a385..bda2a7ba4e77 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2828,6 +2828,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ __lpfc_sli_rpi_release(vport, ndlp);
+}
+
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
@@ -3715,7 +3721,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflag;
u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
ulp_command = get_job_cmnd(phba, saveq);
ulp_status = get_job_ulpstatus(phba, saveq);
@@ -4052,10 +4066,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
break;
}
- spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
@@ -4536,42 +4548,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- LIST_HEAD(completions);
+ LIST_HEAD(tx_completions);
+ LIST_HEAD(txcmplq_completions);
struct lpfc_iocbq *iocb, *next_iocb;
+ int offline;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
+ offline = pci_channel_offline(phba->pcidev);
/* Error everything on txq and txcmplq
* First do the txq.
*/
if (phba->sli_rev >= LPFC_SLI_REV4) {
spin_lock_irq(&pring->ring_lock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
- spin_lock_irq(&phba->hbalock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
- spin_unlock_irq(&phba->hbalock);
+ if (offline) {
+ list_splice_init(&pring->txcmplq,
+ &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
+ spin_unlock_irq(&pring->ring_lock);
} else {
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+ if (offline) {
+ list_splice_init(&pring->txcmplq, &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
spin_unlock_irq(&phba->hbalock);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (offline) {
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ } else {
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
@@ -4624,11 +4656,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_IOQ_FLUSH ||
- !phba->sli4_hba.hdwq) {
- spin_unlock_irq(&phba->hbalock);
- return;
- }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -10997,6 +11024,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
unsigned long iflags;
int rc;
+ /* If the PCI channel is in offline state, do not post iocbs. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IOCB_ERROR;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli_prep_wqe(phba, piocb);
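In lpfc_sli_abort_iocb_ring() above, the behavior now forks on pci_channel_offline(): with the channel down no ABTS can reach the hardware, so txcmplq entries are spliced onto a local list and completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED instead. A simplified sketch of the splice-then-cancel idiom, with invented list and cancel helpers standing in for the lpfc ones:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head list;
};

/*
 * Move every pending request off the ring under the lock, then
 * complete them outside the lock; no hardware access is required.
 */
static void demo_flush_ring(spinlock_t *lock, struct list_head *txcmplq,
			    void (*cancel)(struct demo_req *))
{
	LIST_HEAD(completions);
	struct demo_req *req, *next;

	spin_lock_irq(lock);
	list_splice_init(txcmplq, &completions);
	spin_unlock_irq(lock);

	list_for_each_entry_safe(req, next, &completions, list) {
		list_del(&req->list);
		cancel(req);	/* e.g. fail with a "local reject" status */
	}
}

Completing from a private list outside the lock is what lets the cancel callback take other locks or call back into the midlayer safely.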
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e52f37e5d896..a4d3259b8c52 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.0"
+#define LPFC_DRIVER_VERSION "14.2.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 611871ef15b5..4919ea54b827 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2560,6 +2560,9 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LUN_VALID(sdev) \
+ (((sdev)->lun == 0) ? 1 : 0)
+
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 8bf72dbc33b7..db6793608447 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
goto scan_target;
}
return -ENXIO;
+ } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return -ENXIO;
}
scan_target:
@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
instance = megasas_lookup_instance(sdev->host->host_no);
if (MEGASAS_IS_LOGICAL(sdev)) {
+ if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return;
+ }
ld_tgt_id = MEGASAS_TARGET_ID(sdev);
instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
if (megasas_dbg_lvl & LD_PD_DEBUG)
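MEGASAS_IS_LUN_VALID() encodes the rule that megaraid_sas exposes only LUN 0 per target, and slave_alloc()/slave_destroy() now reject anything else up front. A tiny sketch of the same guard with a hypothetical device struct:

#include <linux/errno.h>

struct demo_sdev {
	unsigned int lun;
};

/* megaraid_sas-style rule: only LUN 0 is a real device. */
static int demo_slave_alloc(struct demo_sdev *sdev)
{
	if (sdev->lun != 0)
		return -ENXIO;	/* invalid LUN, refuse to allocate */
	return 0;
}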
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b57f1803371e..538d2c0cd971 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5716,13 +5716,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/**
 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set
 * have the same upper 32 bits in their base memory address.
- * @reply_pool_start_address: Base address of a reply queue set
+ * @start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
*
 * Return: 1 if reply queues in a set have the same upper 32 bits in their
 * base memory address, else 0.
*/
-
static int
mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 0563078227de..a8dd14c91efd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
- if ((ioc->shost_recovery) || (ioc->config_cmds.status &
- MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ if (ioc->config_cmds.status & MPT3_CMD_RESET)
goto retry_config;
- issue_host_reset = 1;
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ issue_host_reset = 0;
+ r = -EFAULT;
+ } else
+ issue_host_reset = 1;
goto free_mem;
}
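The _config_request() change narrows when a timed-out config request escalates: retry only if the command was knocked down by a reset in progress (MPT3_CMD_RESET), and if host or PCI error recovery already owns the adapter, return -EFAULT instead of piling a second host reset on top. A sketch of that decision ladder with hypothetical state flags:

#include <linux/errno.h>
#include <linux/types.h>

struct demo_ioc {
	bool cmd_was_reset;	/* command aborted by a reset in progress */
	bool shost_recovery;
	bool pci_error_recovery;
};

/* Decide how to handle a timed-out request: retry, give up, or reset. */
static int demo_handle_timeout(struct demo_ioc *ioc, bool *issue_host_reset)
{
	*issue_host_reset = false;
	if (ioc->cmd_was_reset)
		return -EAGAIN;		/* safe to retry after the reset */
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		return -EFAULT;		/* recovery owns the device already */
	*issue_host_reset = true;	/* genuine firmware hang */
	return -ETIMEDOUT;
}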
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 00792767c620..7e476f50935b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
{
struct _sas_port *mpt3sas_port, *next;
unsigned long flags;
+ int port_id;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_port->hba_port);
}
+ port_id = sas_expander->port->port_id;
+
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
sas_expander->sas_address_parent, sas_expander->port);
@@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
"expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
sas_expander->handle, (unsigned long long)
sas_expander->sas_address,
- sas_expander->port->port_id);
+ port_id);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
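The _scsih_expander_node_remove() fix is a classic use-after-free repair: the log line read sas_expander->port->port_id after mpt3sas_transport_port_remove() had already torn the port down, so the value is now snapshotted first. The general idiom, sketched with invented types:

#include <linux/printk.h>
#include <linux/slab.h>

struct demo_port {
	int port_id;
};

static void demo_port_remove(struct demo_port *port)
{
	kfree(port);
}

static void demo_node_remove(struct demo_port *port)
{
	int port_id = port->port_id;	/* snapshot before teardown */

	demo_port_remove(port);		/* frees *port */
	pr_info("removed expander on port %d\n", port_id);	/* safe */
}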
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7ac63eb5ccd3..2fde496fff5f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -647,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index c4a838635893..5d7dfefd6f6c 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -192,10 +192,11 @@ struct sym53c500_data {
int fast_pio;
};
-static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd)
-{
- return scsi_cmd_priv(cmd);
-}
+struct sym53c500_cmd_priv {
+ int status;
+ int message;
+ int phase;
+};
enum Phase {
idle,
@@ -356,7 +357,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
struct scsi_cmnd *curSC = data->current_SC;
- struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC);
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC);
int fast_pio = data->fast_pio;
spin_lock_irqsave(dev->host_lock, flags);
@@ -403,12 +404,11 @@ SYM53C500_intr(int irq, void *dev_id)
if (int_reg & 0x20) { /* Disconnect */
DEB(printk("SYM53C500: disconnect intr received\n"));
- if (scsi_pointer->phase != message_in) { /* Unexpected disconnect */
+ if (scp->phase != message_in) { /* Unexpected disconnect */
curSC->result = DID_NO_CONNECT << 16;
} else { /* Command complete, return status and message */
- curSC->result = (scsi_pointer->Status & 0xff) |
- ((scsi_pointer->Message & 0xff) << 8) |
- (DID_OK << 16);
+ curSC->result = (scp->status & 0xff) |
+ ((scp->message & 0xff) << 8) | (DID_OK << 16);
}
goto idle_out;
}
@@ -419,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- scsi_pointer->phase = data_out;
+ scp->phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -438,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- scsi_pointer->phase = data_in;
+ scp->phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -453,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id)
break;
case 0x02: /* COMMAND */
- scsi_pointer->phase = command_ph;
+ scp->phase = command_ph;
printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
break;
case 0x03: /* STATUS */
- scsi_pointer->phase = status_ph;
+ scp->phase = status_ph;
VDEB(printk("SYM53C500: Status phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
@@ -471,24 +471,22 @@ SYM53C500_intr(int irq, void *dev_id)
case 0x06: /* MESSAGE-OUT */
DEB(printk("SYM53C500: Message-Out phase\n"));
- scsi_pointer->phase = message_out;
+ scp->phase = message_out;
outb(SET_ATN, port_base + CMD_REG); /* Reject the message */
outb(MSG_ACCEPT, port_base + CMD_REG);
break;
case 0x07: /* MESSAGE-IN */
VDEB(printk("SYM53C500: Message-In phase\n"));
- scsi_pointer->phase = message_in;
+ scp->phase = message_in;
- scsi_pointer->Status = inb(port_base + SCSI_FIFO);
- scsi_pointer->Message = inb(port_base + SCSI_FIFO);
+ scp->status = inb(port_base + SCSI_FIFO);
+ scp->message = inb(port_base + SCSI_FIFO);
VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
- DEB(printk("Status = %02x Message = %02x\n",
- scsi_pointer->Status, scsi_pointer->Message));
+ DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message));
- if (scsi_pointer->Message == SAVE_POINTERS ||
- scsi_pointer->Message == DISCONNECT) {
+ if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) {
outb(SET_ATN, port_base + CMD_REG); /* Reject message */
DEB(printk("Discarding SAVE_POINTERS message\n"));
}
@@ -500,7 +498,7 @@ out:
return IRQ_HANDLED;
idle_out:
- scsi_pointer->phase = idle;
+ scp->phase = idle;
scsi_done(curSC);
goto out;
}
@@ -548,7 +546,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
{
- struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(SCpnt);
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt);
int i;
int port_base = SCpnt->device->host->io_port;
struct sym53c500_data *data =
@@ -565,9 +563,9 @@ static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
VDEB(printk("\n"));
data->current_SC = SCpnt;
- scsi_pointer->phase = command_ph;
- scsi_pointer->Status = 0;
- scsi_pointer->Message = 0;
+ scp->phase = command_ph;
+ scp->status = 0;
+ scp->message = 0;
/* We are locked here already by the mid layer */
REG0(port_base);
@@ -682,7 +680,7 @@ static struct scsi_host_template sym53c500_driver_template = {
.this_id = 7,
.sg_tablesize = 32,
.shost_groups = SYM53C500_shost_groups,
- .cmd_size = sizeof(struct scsi_pointer),
+ .cmd_size = sizeof(struct sym53c500_cmd_priv),
};
static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
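The sym53c500 conversion drops the legacy struct scsi_pointer in favor of a driver-owned structure kept in the per-command private area: cmd_size in the host template reserves the space and scsi_cmd_priv() returns it. A hedged sketch of how an LLD typically wires this up (all demo_* names invented):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct demo_cmd_priv {
	int status;
	int message;
	int phase;
};

static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct demo_cmd_priv *priv = scsi_cmd_priv(cmd);

	/* Per-command state travels with the command itself. */
	priv->phase = 0;
	priv->status = 0;
	priv->message = 0;
	/* ... start the command ... */
	return 0;
}

static struct scsi_host_template demo_template = {
	.queuecommand	= demo_queuecommand,
	/* mid-layer allocates this much extra space per scsi_cmnd */
	.cmd_size	= sizeof(struct demo_cmd_priv),
};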
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index f90b707c190b..01c5e8ff4cc5 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+ /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+ if (pm8001_ha->max_q_num > 32)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ 1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (0x0000 != gst_len_mpistate)
return -EBUSY;
+ /*
+ * As per controller datasheet, after successful MPI
+	 * initialization a minimum 500ms delay is required before
+ * issuing commands.
+ */
+ msleep(500);
+
return 0;
}
@@ -1727,10 +1738,11 @@ static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- mask = (u32)(1 << vec);
-
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1746,12 +1758,15 @@ static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- if (vec == 0xFF)
- mask = 0xFFFFFFFF;
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
- mask = (u32)(1 << vec);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
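With up to 64 MSI-X vectors, the pm80xx ODMR interrupt mask now spans two 32-bit registers, and the shift for vectors 32-63 must be taken relative to the upper register: shifting a 32-bit operand by 32 or more is undefined behavior in C. A sketch with hypothetical register writers in place of pm8001_cw32():

#include <linux/types.h>

/* Hypothetical MMIO helpers standing in for pm8001_cw32(). */
static void demo_write_odmr(u32 mask)   { /* write low mask register */ }
static void demo_write_odmr_u(u32 mask) { /* write high mask register */ }

static void demo_mask_vec(u8 vec)
{
	/*
	 * 1U << vec is undefined for vec >= 32 on a 32-bit operand,
	 * so split the 64 vectors across the two 32-bit registers.
	 */
	if (vec < 32)
		demo_write_odmr(1U << vec);
	else
		demo_write_odmr_u(1U << (vec - 32));
}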
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 928532180d32..fd674ed1febe 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3182,124 +3182,6 @@ static int pmcraid_build_ioadl(
}
/**
- * pmcraid_free_sglist - Frees an allocated SG buffer list
- * @sglist: scatter/gather list pointer
- *
- * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
- *
- * Return value:
- * none
- */
-static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
-{
- sgl_free_order(sglist->scatterlist, sglist->order);
- kfree(sglist);
-}
-
-/**
- * pmcraid_alloc_sglist - Allocates memory for a SG list
- * @buflen: buffer length
- *
- * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
- * list.
- *
- * Return value
- * pointer to sglist / NULL on failure
- */
-static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
-{
- struct pmcraid_sglist *sglist;
- int sg_size;
- int order;
-
- sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
- order = (sg_size > 0) ? get_order(sg_size) : 0;
-
- /* Allocate a scatter/gather list for the DMA */
- sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
- if (sglist == NULL)
- return NULL;
-
- sglist->order = order;
- sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
- &sglist->num_sg);
-
- return sglist;
-}
-
-/**
- * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
- * @sglist: scatter/gather list pointer
- * @buffer: buffer pointer
- * @len: buffer length
- * @direction: data transfer direction
- *
- * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
- *
- * Return value:
- * 0 on success / other on failure
- */
-static int pmcraid_copy_sglist(
- struct pmcraid_sglist *sglist,
- void __user *buffer,
- u32 len,
- int direction
-)
-{
- struct scatterlist *sg;
- void *kaddr;
- int bsize_elem;
- int i;
- int rc = 0;
-
- /* Determine the actual number of bytes per element */
- bsize_elem = PAGE_SIZE * (1 << sglist->order);
-
- sg = sglist->scatterlist;
-
- for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, bsize_elem);
-
- kunmap(page);
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- return -EFAULT;
- }
-
- sg->length = bsize_elem;
- }
-
- if (len % bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
-
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, len % bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, len % bsize_elem);
-
- kunmap(page);
-
- sg->length = len % bsize_elem;
- }
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-/**
* pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
*
@@ -3454,365 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
return rc;
}
-
-/**
- * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
- * commands sent over IOCTL interface
- *
- * @cmd : pointer to struct pmcraid_cmd
- * @buflen : length of the request buffer
- * @direction : data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static int pmcraid_build_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = NULL;
- struct scatterlist *sg = NULL;
- struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
- struct pmcraid_ioadl_desc *ioadl;
- int i;
-
- sglist = pmcraid_alloc_sglist(buflen);
-
- if (!sglist) {
- pmcraid_err("can't allocate memory for passthrough SGls\n");
- return -ENOMEM;
- }
-
- sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg, direction);
-
- if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
- dev_err(&cmd->drv_inst->pdev->dev,
- "Failed to map passthrough buffer!\n");
- pmcraid_free_sglist(sglist);
- return -EIO;
- }
-
- cmd->sglist = sglist;
- ioarcb->request_flags0 |= NO_LINK_DESCS;
-
- ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
-
- /* Initialize IOADL descriptor addresses */
- for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
- ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
- ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
- ioadl[i].flags = 0;
- }
-
- /* setup the last descriptor */
- ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
-
- return 0;
-}
-
-
-/**
- * pmcraid_release_passthrough_ioadls - release passthrough ioadls
- *
- * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
- * @buflen: size of the request buffer
- * @direction: data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static void pmcraid_release_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = cmd->sglist;
-
- if (buflen > 0) {
- dma_unmap_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg,
- direction);
- pmcraid_free_sglist(sglist);
- cmd->sglist = NULL;
- }
-}
-
-/**
- * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
- *
- * @pinstance: pointer to adapter instance structure
- * @ioctl_cmd: ioctl code
- * @buflen: unused
- * @arg: pointer to pmcraid_passthrough_buffer user buffer
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static long pmcraid_ioctl_passthrough(
- struct pmcraid_instance *pinstance,
- unsigned int ioctl_cmd,
- unsigned int buflen,
- void __user *arg
-)
-{
- struct pmcraid_passthrough_ioctl_buffer *buffer;
- struct pmcraid_ioarcb *ioarcb;
- struct pmcraid_cmd *cmd;
- struct pmcraid_cmd *cancel_cmd;
- void __user *request_buffer;
- unsigned long request_offset;
- unsigned long lock_flags;
- void __user *ioasa;
- u32 ioasc;
- int request_size;
- int buffer_size;
- u8 direction;
- int rc = 0;
-
- /* If IOA reset is in progress, wait 10 secs for reset to complete */
- if (pinstance->ioa_reset_in_progress) {
- rc = wait_event_interruptible_timeout(
- pinstance->reset_wait_q,
- !pinstance->ioa_reset_in_progress,
- msecs_to_jiffies(10000));
-
- if (!rc)
- return -ETIMEDOUT;
- else if (rc < 0)
- return -ERESTARTSYS;
- }
-
- /* If adapter is not in operational state, return error */
- if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
- pmcraid_err("IOA is not operational\n");
- return -ENOTTY;
- }
-
- buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
-
- if (!buffer) {
- pmcraid_err("no memory for passthrough buffer\n");
- return -ENOMEM;
- }
-
- request_offset =
- offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
-
- request_buffer = arg + request_offset;
-
- rc = copy_from_user(buffer, arg,
- sizeof(struct pmcraid_passthrough_ioctl_buffer));
-
- ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
-
- if (rc) {
- pmcraid_err("ioctl: can't copy passthrough buffer\n");
- rc = -EFAULT;
- goto out_free_buffer;
- }
-
- request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
-
- if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
- direction = DMA_TO_DEVICE;
- } else {
- direction = DMA_FROM_DEVICE;
- }
-
- if (request_size < 0) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- /* check if we have any additional command parameters */
- if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
- > PMCRAID_ADD_CMD_PARAM_LEN) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- cmd = pmcraid_get_free_cmd(pinstance);
-
- if (!cmd) {
- pmcraid_err("free command block is not available\n");
- rc = -ENOMEM;
- goto out_free_buffer;
- }
-
- cmd->scsi_cmd = NULL;
- ioarcb = &(cmd->ioa_cb->ioarcb);
-
- /* Copy the user-provided IOARCB stuff field by field */
- ioarcb->resource_handle = buffer->ioarcb.resource_handle;
- ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
- ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
- ioarcb->request_type = buffer->ioarcb.request_type;
- ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
- ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
- memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
-
- if (buffer->ioarcb.add_cmd_param_length) {
- ioarcb->add_cmd_param_length =
- buffer->ioarcb.add_cmd_param_length;
- ioarcb->add_cmd_param_offset =
- buffer->ioarcb.add_cmd_param_offset;
- memcpy(ioarcb->add_data.u.add_cmd_params,
- buffer->ioarcb.add_data.u.add_cmd_params,
- le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
- }
-
- /* set hrrq number where the IOA should respond to. Note that all cmds
- * generated internally uses hrrq_id 0, exception to this is the cmd
- * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
- * hrrq_id assigned here in queuecommand
- */
- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
- pinstance->num_hrrq;
-
- if (request_size) {
- rc = pmcraid_build_passthrough_ioadls(cmd,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_cmd;
- }
- }
-
- /* If data is being written into the device, copy the data from user
- * buffers
- */
- if (direction == DMA_TO_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- goto out_free_sglist;
- }
- }
-
- /* passthrough ioctl is a blocking command so, put the user to sleep
- * until timeout. Note that a timeout value of 0 means, do timeout.
- */
- cmd->cmd_done = pmcraid_internal_done;
- init_completion(&cmd->wait_for_completion);
- cmd->completion_req = 1;
-
- pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0],
- le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- _pmcraid_fire_command(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- /* NOTE ! Remove the below line once abort_task is implemented
- * in firmware. This line disables ioctl command timeout handling logic
- * similar to IO command timeout handling, making ioctl commands to wait
- * until the command completion regardless of timeout value specified in
- * ioarcb
- */
- buffer->ioarcb.cmd_timeout = 0;
-
- /* If command timeout is specified put caller to wait till that time,
- * otherwise it would be blocking wait. If command gets timed out, it
- * will be aborted.
- */
- if (buffer->ioarcb.cmd_timeout == 0) {
- wait_for_completion(&cmd->wait_for_completion);
- } else if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
-
- pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0]);
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- cancel_cmd = pmcraid_abort_cmd(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- if (cancel_cmd) {
- wait_for_completion(&cancel_cmd->wait_for_completion);
- ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
- pmcraid_return_cmd(cancel_cmd);
-
- /* if abort task couldn't find the command i.e it got
- * completed prior to aborting, return good completion.
- * if command got aborted successfully or there was IOA
- * reset due to abort task itself getting timedout then
- * return -ETIMEDOUT
- */
- if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
- PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
- if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
- rc = -ETIMEDOUT;
- goto out_handle_response;
- }
- }
-
- /* no command block for abort task or abort task failed to abort
- * the IOARCB, then wait for 150 more seconds and initiate reset
- * sequence after timeout
- */
- if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(150 * 1000))) {
- pmcraid_reset_bringup(cmd->drv_inst);
- rc = -ETIMEDOUT;
- }
- }
-
-out_handle_response:
- /* copy entire IOASA buffer and return IOCTL success.
- * If copying IOASA to user-buffer fails, return
- * EFAULT
- */
- if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
- sizeof(struct pmcraid_ioasa))) {
- pmcraid_err("failed to copy ioasa buffer to user\n");
- rc = -EFAULT;
- }
-
- /* If the data transfer was from device, copy the data onto user
- * buffers
- */
- else if (direction == DMA_FROM_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- rc = -EFAULT;
- }
- }
-
-out_free_sglist:
- pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
-
-out_free_cmd:
- pmcraid_return_cmd(cmd);
-
-out_free_buffer:
- kfree(buffer);
-
- return rc;
-}
-
-
-
-
/**
* pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
*
@@ -3922,20 +3445,6 @@ static long pmcraid_chr_ioctl(
switch (_IOC_TYPE(cmd)) {
- case PMCRAID_PASSTHROUGH_IOCTL:
- /* If ioctl code is to download microcode, we need to block
- * mid-layer requests.
- */
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_block_requests(pinstance->host);
-
- retval = pmcraid_ioctl_passthrough(pinstance, cmd,
- hdr->buffer_length, argp);
-
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_unblock_requests(pinstance->host);
- break;
-
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
retval = pmcraid_ioctl_driver(pinstance, cmd,
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index bbb75318f1e7..9f59930e8b4f 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1023,40 +1023,15 @@ struct pmcraid_ioctl_header {
#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
/*
- * pmcraid_passthrough_ioctl_buffer - structure given as argument to
- * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
- * 32-byte alignment so, it is necessary to pack this structure to avoid any
- * holes between ioctl_header and passthrough buffer
- *
- * .ioactl_header : ioctl header
- * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
- * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
- * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
- * the transfer directions passed in ioarcb.flags0. Contents
- * of this buffer are valid only when ioarcb.data_transfer_len
- * is not zero.
- */
-struct pmcraid_passthrough_ioctl_buffer {
- struct pmcraid_ioctl_header ioctl_header;
- struct pmcraid_ioarcb ioarcb;
- struct pmcraid_ioasa ioasa;
- u8 request_buffer[];
-} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
-
-/*
* keys to differentiate between driver handled IOCTLs and passthrough
* IOCTLs passed to IOA. driver determines the ioctl type using macro
* _IOC_TYPE
*/
#define PMCRAID_DRIVER_IOCTL 'D'
-#define PMCRAID_PASSTHROUGH_IOCTL 'F'
#define DRV_IOCTL(n, size) \
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
-#define FMW_IOCTL(n, size) \
- _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
-
/*
* _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
* This is to facilitate applications avoiding un-necessary memory allocations.
@@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer {
#define PMCRAID_IOCTL_RESET_ADAPTER \
DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
-/* passthrough/firmware handled commands */
-#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
- FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
- FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-
#endif /* _PMCRAID_H */
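With the passthrough buffer, FMW_IOCTL() and the 'F' magic gone, pmcraid's character-device ioctl handler keys purely off _IOC_TYPE(cmd), and only the 'D' driver-handled type survives. A sketch of that magic-byte dispatch, reduced to the shape that remains:

#include <linux/errno.h>
#include <linux/ioctl.h>

#define DEMO_DRIVER_IOCTL 'D'

/* Route an ioctl by its magic byte; unknown types are rejected. */
static long demo_ioctl(unsigned int cmd)
{
	switch (_IOC_TYPE(cmd)) {
	case DEMO_DRIVER_IOCTL:
		return 0;	/* handled inside the driver */
	default:
		return -ENOTTY;	/* no passthrough magic anymore */
	}
}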
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8196f89f404e..31ec429104e2 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
return qedi_iscsi_send_ioreq(task);
}
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 5 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
@@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ flush_work(&qedi_ep->offload_work);
+
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
- flush_work(&qedi_ep->offload_work);
-
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
abrt_conn = qedi_conn->abrt_conn;
@@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
return rc;
}
-static void qedi_offload_work(struct work_struct *work)
-{
- struct qedi_endpoint *qedi_ep =
- container_of(work, struct qedi_endpoint, offload_work);
- struct qedi_ctx *qedi;
- int wait_delay = 5 * HZ;
- int ret;
-
- qedi = qedi_ep->qedi;
-
- ret = qedi_iscsi_offload_conn(qedi_ep);
- if (ret) {
- QEDI_ERR(&qedi->dbg_ctx,
- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
- qedi_ep->iscsi_cid, qedi_ep, ret);
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- return;
- }
-
- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
- (qedi_ep->state ==
- EP_STATE_OFLDCONN_COMPL),
- wait_delay);
- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- QEDI_ERR(&qedi->dbg_ctx,
- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
- qedi_ep->iscsi_cid, qedi_ep);
- }
-}
-
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
@@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
qedi_ep->dst_addr, qedi_ep->dst_port);
}
- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
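The qedi fix moves INIT_WORK() (and with it the handler definition) to endpoint creation so that qedi_ep_disconnect() can call flush_work() unconditionally: flushing a work item that was initialized but never queued is a harmless no-op, while flushing an uninitialized one is undefined. A sketch of the create-time initialization pattern with invented names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ep {
	struct work_struct offload_work;
};

static void demo_offload_work(struct work_struct *work)
{
	/* ... offload the connection ... */
}

static struct demo_ep *demo_ep_create(void)
{
	struct demo_ep *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return NULL;
	/*
	 * Initialize at create time: flush_work() is then always legal,
	 * even if the work item is never queued.
	 */
	INIT_WORK(&ep->offload_work, demo_offload_work);
	return ep;
}

static void demo_ep_disconnect(struct demo_ep *ep)
{
	flush_work(&ep->offload_work);	/* no state check needed */
	kfree(ep);
}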
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c607755cce00..592a290e6cfa 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -32,7 +32,6 @@
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
@@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static atomic_t sdebug_num_hosts;
-static DEFINE_MUTEX(add_host_mutex);
-
+static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
@@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
-static bool sdebug_deflect_incoming;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
@@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("Exit early due to deflect_incoming\n");
- return 1;
- }
if (devip == NULL) {
devip = find_build_dev_info(sdp);
if (devip == NULL)
@@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
}
/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(bool done_with_no_conn)
+static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
@@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn)
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
- struct scsi_cmnd *scp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
spin_lock_irqsave(&sqp->qc_lock, iflags);
for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
if (test_bit(k, sqp->in_use_bm)) {
sqcp = &sqp->qc_arr[k];
- scp = sqcp->a_cmnd;
- if (!scp)
+ if (sqcp->a_cmnd == NULL)
continue;
devip = (struct sdebug_dev_info *)
sqcp->a_cmnd->device->hostdata;
@@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn)
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
stop_qc_helper(sd_dp, l_defer_t);
- if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
- scp->result = DID_NO_CONNECT << 16;
- scsi_done(scp);
- }
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
}
}
spin_unlock(&sdebug_host_list_lock);
- stop_all_queued(false);
+ stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: %d device(s) found\n", __func__, k);
@@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
}
}
-static void sdeb_block_all_queues(void)
-{
- int j;
- struct sdebug_queue *sqp;
-
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)true);
-}
-
-static void sdeb_unblock_all_queues(void)
+static void block_unblock_all_queues(bool block)
{
int j;
struct sdebug_queue *sqp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)false);
-}
-
-static void
-sdeb_add_n_hosts(int num_hosts)
-{
- if (num_hosts < 1)
- return;
- do {
- bool found;
- unsigned long idx;
- struct sdeb_store_info *sip;
- bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
-
- found = false;
- if (want_phs) {
- xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
- sdeb_most_recent_idx = (int)idx;
- found = true;
- break;
- }
- if (found) /* re-use case */
- sdebug_add_host_helper((int)idx);
- else
- sdebug_do_add_host(true /* make new store */);
- } else {
- sdebug_do_add_host(false);
- }
- } while (--num_hosts);
+ atomic_set(&sqp->blocked, (int)block);
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void)
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
static void clear_queue_stats(void)
@@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void)
return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
-static int process_deflect_incoming(struct scsi_cmnd *scp)
-{
- u8 opcode = scp->cmnd[0];
-
- if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
- return 0;
- return DID_NO_CONNECT << 16;
-}
-
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
/* Complete the processing of the thread that queued a SCSI command to this
@@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp)
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int scsi_result,
- int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
+ int (*pfp)(struct scsi_cmnd *,
+ struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
bool new_sd_dp;
@@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
sdp = cmnd->device;
- if (delta_jiff == 0) {
- sqp = get_queue(cmnd);
- if (atomic_read(&sqp->blocked)) {
- if (smp_load_acquire(&sdebug_deflect_incoming))
- return process_deflect_incoming(cmnd);
- else
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ if (delta_jiff == 0)
goto respond_in_thread;
- }
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- scsi_result = process_deflect_incoming(cmnd);
- goto respond_in_thread;
- }
- if (sdebug_verbose)
- pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
respond_in_thread: /* call back to mid-layer using invocation thread */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
cmnd->result &= ~SDEG_RES_IMMED_MASK;
- if (cmnd->result == 0 && scsi_result != 0) {
+ if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result;
- if (sdebug_verbose)
- pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
- blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
- }
scsi_done(cmnd);
return 0;
}
@@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
k = 0;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
@@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
atomic_set(&retired_max_queue, k + 1);
else
atomic_set(&retired_max_queue, 0);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return count;
}
return -EINVAL;
@@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
/* absolute number of hosts currently active is what is shown */
- return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-/*
- * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
- * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
- * Returns -EBUSY if another add_host sysfs invocation is active.
- */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
- if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
+ if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
- if (sdebug_verbose)
- pr_info("prior num_hosts=%d, num_to_add=%d\n",
- atomic_read(&sdebug_num_hosts), delta_hosts);
- if (delta_hosts == 0)
- return count;
- if (mutex_trylock(&add_host_mutex) == 0)
- return -EBUSY;
if (delta_hosts > 0) {
- sdeb_add_n_hosts(delta_hosts);
- } else if (delta_hosts < 0) {
- smp_store_release(&sdebug_deflect_incoming, true);
- sdeb_block_all_queues();
- if (delta_hosts >= atomic_read(&sdebug_num_hosts))
- stop_all_queued(true);
do {
- if (atomic_read(&sdebug_num_hosts) < 1) {
- free_all_queued();
- break;
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
}
+ } while (--delta_hosts);
+ } else if (delta_hosts < 0) {
+ do {
sdebug_do_remove_host(false);
} while (++delta_hosts);
- sdeb_unblock_all_queues();
- smp_store_release(&sdebug_deflect_incoming, false);
}
- mutex_unlock(&add_host_mutex);
- if (sdebug_verbose)
- pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
return count;
}
static DRIVER_ATTR_RW(add_host);
@@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void)
sdebug_add_host = 0;
for (k = 0; k < hosts_to_add; k++) {
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("exit early as sdebug_deflect_incoming is set\n");
- return 0;
- }
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
if (ret < 0) {
@@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void)
}
}
if (sdebug_verbose)
- pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
- /*
- * Even though all the hosts have been established, due to async device (LU) scanning
- * by the scsi mid-level, there may still be devices (LUs) being set up.
- */
return 0;
bus_unreg:
@@ -7131,17 +7041,12 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k;
+ int k = sdebug_num_hosts;
- /* Possible race with LUs still being set up; stop them asap */
- sdeb_block_all_queues();
- smp_store_release(&sdebug_deflect_incoming, true);
- stop_all_queued(false);
- for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
+ stop_all_queued();
+ for (; k; k--)
sdebug_do_remove_host(true);
free_all_queued();
- if (sdebug_verbose)
- pr_info("removed %d hosts\n", k);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
if (error)
goto clean;
- atomic_inc(&sdebug_num_hosts);
+ ++sdebug_num_hosts;
return 0;
clean:
@@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end)
return;
device_unregister(&sdbg_host->dev);
- atomic_dec(&sdebug_num_hosts);
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
int num_in_q = 0;
struct sdebug_dev_info *devip;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
devip = (struct sdebug_dev_info *)sdev->hostdata;
if (NULL == devip) {
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return -ENODEV;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
__func__, qdepth, num_in_q);
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return sdev->queue_depth;
}
@@ -7519,12 +7424,13 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct sdebug_defer *sd_dp;
sqp = sdebug_q_arr + queue_num;
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (qc_idx >= sdebug_max_queue)
- return 0;
spin_lock_irqsave(&sqp->qc_lock, iflags);
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (qc_idx >= sdebug_max_queue)
+ goto unlock;
+
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
if (first) {
first = false;
@@ -7589,6 +7495,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
break;
}
+unlock:
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (num_entries > 0)
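Among the scsi_debug reversions, the sdebug_blk_mq_poll() hunk is the subtle one: find_first_bit() on in_use_bm moves inside qc_lock, because sampling the bitmap before taking the lock let a racing completion clear the bit between the check and its use. A generic sketch of the check-under-lock rule (lock assumed initialized elsewhere):

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define DEMO_MAX_QUEUE 64

struct demo_queue {
	spinlock_t qc_lock;
	unsigned long in_use_bm[BITS_TO_LONGS(DEMO_MAX_QUEUE)];
};

static int demo_poll(struct demo_queue *q)
{
	unsigned long flags;
	unsigned long idx;
	int found = 0;

	spin_lock_irqsave(&q->qc_lock, flags);
	/*
	 * The bitmap may only be read under qc_lock; checking it first
	 * and locking afterwards would race with command completion.
	 */
	idx = find_first_bit(q->in_use_bm, DEMO_MAX_QUEUE);
	if (idx < DEMO_MAX_QUEUE)
		found = 1;	/* ... process entry idx under the lock ... */
	spin_unlock_irqrestore(&q->qc_lock, flags);
	return found;
}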
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index ff89de86545d..b02af340c2d3 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
- if (!rq->q->disk)
+ if (!rq->q || !rq->q->disk)
return NULL;
return rq->q->disk->disk_name;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index f4e6c68ac99e..2ef78083f1ef 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
int ret;
struct sbitmap sb_backup;
+ depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
/*
* realloc if new shift is calculated, which is caused by setting
* up one new default queue depth after calling ->slave_configure
@@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
scsi_device_max_queue_depth(sdev),
new_shift, GFP_KERNEL,
sdev->request_queue->node, false, true);
+ if (!ret)
+ sbitmap_resize(&sdev->budget_map, depth);
+
if (need_free) {
if (ret)
sdev->budget_map = sb_backup;
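
The budget-map hunk clamps the requested depth to the device maximum up front, then resizes the newly allocated sbitmap so the visible depth matches the request even when the recalculated shift is unchanged. A hedged sketch of that clamp-then-resize flow, assuming budget_map was set up earlier with sbitmap_init_node() and with max_depth standing in for scsi_device_max_queue_depth():

#include <linux/kernel.h>
#include <linux/sbitmap.h>

static void demo_set_depth(struct sbitmap *budget_map, unsigned int depth,
			   unsigned int max_depth)
{
	depth = min_t(unsigned int, depth, max_depth);
	/* sbitmap_resize() changes only the usable depth; the shift
	 * picked at init time stays as it is. */
	sbitmap_resize(budget_map, depth);
}
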
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 226a50944c00..dc6872e352bd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1384,10 +1384,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
sdev->bsg_dev = scsi_bsg_register_queue(sdev);
if (IS_ERR(sdev->bsg_dev)) {
- /*
- * We're treating error on bsg register as non-fatal, so
- * pretend nothing went wrong.
- */
error = PTR_ERR(sdev->bsg_dev);
sdev_printk(KERN_INFO, sdev,
"Failed to register bsg queue, errno=%d\n",
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 27951ea05dd4..2c0dd64159b0 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,6 +86,9 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
static void iscsi_endpoint_release(struct device *dev)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, ep->id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
kfree(ep);
}
@@ -180,7 +188,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+ return sysfs_emit(buf, "%d\n", ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = {
.attrs = iscsi_endpoint_attrs,
};
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- const uint64_t *epid = data;
-
- return *epid == ep->id;
-}
-
struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
- struct device *dev;
struct iscsi_endpoint *ep;
- uint64_t id;
- int err;
-
- for (id = 1; id < ISCSI_MAX_EPID; id++) {
- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
- iscsi_match_epid);
- if (!dev)
- break;
- else
- put_device(dev);
- }
- if (id == ISCSI_MAX_EPID) {
- printk(KERN_ERR "Too many connections. Max supported %u\n",
- ISCSI_MAX_EPID - 1);
- return NULL;
- }
+ int err, id;
ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
if (!ep)
return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+ if (id < 0) {
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+ id);
+ goto free_ep;
+ }
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+ dev_set_name(&ep->dev, "ep-%d", id);
err = device_register(&ep->dev);
if (err)
- goto free_ep;
+ goto free_id;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
if (err)
@@ -248,6 +240,10 @@ unregister_dev:
device_unregister(&ep->dev);
return NULL;
+free_id:
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
free_ep:
kfree(ep);
return NULL;
@@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
*/
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct device *dev;
+ struct iscsi_endpoint *ep;
- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
- iscsi_match_epid);
- if (!dev)
- return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ ep = idr_find(&iscsi_ep_idr, handle);
+ if (!ep)
+ goto unlock;
- return iscsi_dev_to_endpoint(dev);
+ get_device(&ep->dev);
+unlock:
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
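
The two hunks above replace an O(n) class_find_device() walk with an IDR keyed by endpoint ID: the ID is allocated under a mutex at create time, lookups take their reference inside the same critical section, and the release callback returns the ID. A condensed sketch of that lifecycle, with hypothetical names (demo_ep, demo_idr):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_IDR(demo_idr);
static DEFINE_MUTEX(demo_idr_mutex);

struct demo_ep {
	int id;
};

static struct demo_ep *demo_create(void)
{
	struct demo_ep *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	int id;

	if (!ep)
		return NULL;
	mutex_lock(&demo_idr_mutex);
	id = idr_alloc(&demo_idr, ep, 0, -1, GFP_NOIO);	/* -1: no cap */
	mutex_unlock(&demo_idr_mutex);
	if (id < 0) {
		kfree(ep);
		return NULL;
	}
	ep->id = id;
	return ep;
}

static struct demo_ep *demo_lookup(int id)
{
	struct demo_ep *ep;

	mutex_lock(&demo_idr_mutex);
	ep = idr_find(&demo_idr, id);
	/* A real lookup must take a reference here, before the mutex is
	 * dropped, so teardown cannot free the object under the caller. */
	mutex_unlock(&demo_idr_mutex);
	return ep;
}

static void demo_release(struct demo_ep *ep)
{
	mutex_lock(&demo_idr_mutex);
	idr_remove(&demo_idr, ep->id);
	mutex_unlock(&demo_idr_mutex);
	kfree(ep);
}

Removing the ID only in the release callback is what lets iscsi_lookup_endpoint() hand out references safely: an ID can never be recycled while someone still holds the device.
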
@@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
break;
case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
break;
default:
iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+ struct iscsi_endpoint *ep,
+ bool is_active)
+{
+ /* Check if this was a conn error and the kernel took ownership */
+ spin_lock_irq(&conn->lock);
+ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_ep_disconnect(conn, is_active);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ mutex_unlock(&conn->ep_mutex);
+
+ flush_work(&conn->cleanup_work);
+ /*
+ * Userspace is now done with the EP so we can release the ref
+ * iscsi_cleanup_conn_work_fn took.
+ */
+ iscsi_put_endpoint(ep);
+ mutex_lock(&conn->ep_mutex);
+ }
+}
+
static int iscsi_if_stop_conn(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
@@ -2238,11 +2280,24 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
iscsi_stop_conn(conn, flag);
} else {
/*
+ * For offload, when iscsid is restarted it won't know about
+ * existing endpoints so it can't do an ep_disconnect. We clean
+ * it up here for userspace.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* Figure out if it was the kernel or userspace initiating this.
*/
+ spin_lock_irq(&conn->lock);
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
iscsi_stop_conn(conn, flag);
} else {
+ spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn,
"flush kernel conn cleanup.\n");
flush_work(&conn->cleanup_work);
@@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
* Only clear for recovery to avoid extra cleanup runs during
* termination.
*/
+ spin_lock_irq(&conn->lock);
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ spin_unlock_irq(&conn->lock);
}
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
return 0;
}
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
- struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
- struct iscsi_endpoint *ep;
-
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
- conn->state = ISCSI_CONN_FAILED;
-
- if (!conn->ep || !session->transport->ep_disconnect)
- return;
-
- ep = conn->ep;
- conn->ep = NULL;
-
- session->transport->unbind_conn(conn, is_active);
- session->transport->ep_disconnect(ep);
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
mutex_lock(&conn->ep_mutex);
/*
- * If we are not at least bound there is nothing for us to do. Userspace
- * will do a ep_disconnect call if offload is used, but will not be
- * doing a stop since there is nothing to clean up, so we have to clear
- * the cleanup bit here.
+ * Get a ref to the ep, so we don't release its ID until after
+ * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
*/
- if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
- ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
- clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
- mutex_unlock(&conn->ep_mutex);
- return;
- }
-
+ if (conn->ep)
+ get_device(&conn->ep->dev);
iscsi_ep_disconnect(conn, false);
if (system_state != SYSTEM_RUNNING) {
@@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
+ spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+ int state;
- if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
- queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+ spin_lock_irqsave(&conn->lock, flags);
+ /*
+ * Userspace will only do a stop call if we are at least bound. And, we
+ * only need to do the in kernel cleanup if in the UP state so cmds can
+ * be released to upper layers. If in other states just wait for
+ * userspace to avoid races that can leave the cleanup_work queued.
+ */
+ state = READ_ONCE(conn->state);
+ switch (state) {
+ case ISCSI_CONN_BOUND:
+ case ISCSI_CONN_UP:
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+ &conn->flags)) {
+ queue_work(iscsi_conn_cleanup_workq,
+ &conn->cleanup_work);
+ }
+ break;
+ default:
+ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+ state);
+ break;
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- int err = 0, value = 0;
+ int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- if ((conn->state == ISCSI_CONN_BOUND) ||
- (conn->state == ISCSI_CONN_UP)) {
+ state = READ_ONCE(conn->state);
+ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
}
mutex_lock(&conn->ep_mutex);
- /* Check if this was a conn error and the kernel took ownership */
- if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
- ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
- mutex_unlock(&conn->ep_mutex);
-
- flush_work(&conn->cleanup_work);
- goto put_ep;
- }
-
- iscsi_ep_disconnect(conn, false);
+ iscsi_if_disconnect_bound_ep(conn, ep, false);
mutex_unlock(&conn->ep_mutex);
put_ep:
iscsi_put_endpoint(ep);
@@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
return -EINVAL;
mutex_lock(&conn->ep_mutex);
+ spin_lock_irq(&conn->lock);
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
mutex_unlock(&conn->ep_mutex);
ev->r.retcode = -ENOTCONN;
return 0;
}
+ spin_unlock_irq(&conn->lock);
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_BIND_CONN:
- if (conn->ep) {
- /*
- * For offload boot support where iscsid is restarted
- * during the pivot root stage, the ep will be intact
- * here when the new iscsid instance starts up and
- * reconnects.
- */
- iscsi_ep_disconnect(conn, true);
- }
-
session = iscsi_session_lookup(ev->u.b_conn.sid);
if (!session) {
err = -EINVAL;
@@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
+ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_START_CONN:
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
+ WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
break;
case ISCSI_UEVENT_SEND_PDU:
pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev,
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
+ int conn_state = READ_ONCE(conn->state);
- if (conn->state >= 0 &&
- conn->state < ARRAY_SIZE(connection_state_names))
- state = connection_state_names[conn->state];
+ if (conn_state >= 0 &&
+ conn_state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
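
Throughout the file, conn->state is now written with WRITE_ONCE() and sampled with READ_ONCE() on paths that run without the ep_mutex, which stops the compiler from tearing, caching, or re-reading the field. The idiom, reduced to a hypothetical demo_conn:

#include <linux/compiler.h>
#include <linux/types.h>

enum demo_state { DEMO_DOWN, DEMO_BOUND, DEMO_UP, DEMO_FAILED };

struct demo_conn {
	int state;
};

static void demo_fail(struct demo_conn *c)
{
	WRITE_ONCE(c->state, DEMO_FAILED);	/* single aligned store */
}

static bool demo_is_usable(struct demo_conn *c)
{
	/* Snapshot once; two plain reads could each see a different
	 * value if a writer runs concurrently. */
	int state = READ_ONCE(c->state);

	return state == DEMO_BOUND || state == DEMO_UP;
}
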
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a390679cf458..dc6e55761fd1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3216,6 +3216,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
+ sd_read_cpr(sdkp);
}
sd_print_capacity(sdkp, old_capacity);
@@ -3225,7 +3226,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
- sd_read_cpr(sdkp);
}
/*
@@ -3475,6 +3475,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
put_device(&sdkp->disk_dev);
+ blk_cleanup_disk(gd);
goto out;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 5ba9df334968..cbd92891a762 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -535,7 +535,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
scsi_autopm_get_device(sdev);
- if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
+ if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
goto put;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 0d2e950d0865..586c0e567ff9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -957,18 +957,6 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
.deassert = ufs_qcom_reset_deassert,
};
-#define ANDROID_BOOT_DEV_MAX 30
-static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
-
-#ifndef MODULE
-static int __init get_android_boot_dev(char *str)
-{
- strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
- return 1;
-}
-__setup("androidboot.bootdevice=", get_android_boot_dev);
-#endif
-
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -988,9 +976,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
struct resource *res;
struct ufs_clk_info *clki;
- if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
- return -ENODEV;
-
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
err = -ENOMEM;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index f76692053ca1..e892b9feffb1 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
+static int ufs_intel_mtl_init(struct ufs_hba *hba)
+{
+ hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+ return ufs_intel_common_init(hba);
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
+static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_mtl_init,
+ .exit = ufs_intel_common_exit,
+ .hce_enable_notify = ufs_intel_hce_enable_notify,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
@@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};
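
The MTL addition follows the file's existing pattern: a per-platform init hook sets capability flags, a variant-ops structure points at the hook, and the PCI ID table entry carries the ops in its driver_data slot. A skeletal sketch of the pattern with hypothetical demo_* names (the 0x7E47 ID is the one added above):

#include <linux/module.h>
#include <linux/pci.h>

struct demo_vops {
	const char *name;
	int (*init)(void *hba);
};

static int demo_mtl_init(void *hba)
{
	/* a real hook would OR capability flags into the host here */
	return 0;
}

static const struct demo_vops demo_mtl_vops = {
	.name = "demo-pci",
	.init = demo_mtl_init,
};

static const struct pci_device_id demo_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&demo_mtl_vops },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);
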
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 88c20f3608c2..94f545be183a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -820,8 +820,6 @@ struct ufs_hba {
enum ufs_pm_level rpm_lvl;
/* Desired UFS power management level during system PM */
enum ufs_pm_level spm_lvl;
- struct device_attribute rpm_lvl_attr;
- struct device_attribute spm_lvl_attr;
int pm_op_in_progress;
/* Auto-Hibernate Idle Timer register value */
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index b2bec19022cd..81099b68bbfb 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -867,12 +867,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
struct ufshpb_region *rgn, *victim_rgn = NULL;
list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
- if (!rgn) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: no region allocated\n",
- __func__);
- return NULL;
- }
if (ufshpb_check_srgns_issue_state(hpb, rgn))
continue;
@@ -888,6 +882,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
break;
}
+ if (!victim_rgn)
+ dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+ "%s: no region allocated\n",
+ __func__);
+
return victim_rgn;
}
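
The ufshpb change works because list_for_each_entry() never hands the loop body a NULL iterator; the removed in-loop check was dead code, and the meaningful failure case is finding no eligible entry at all, which is now reported once after the scan. The shape of the fix, with hypothetical names:

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/types.h>

struct demo_rgn {
	struct list_head node;
	bool busy;
};

static struct demo_rgn *demo_pick_victim(struct list_head *lru)
{
	struct demo_rgn *rgn, *victim = NULL;

	list_for_each_entry(rgn, lru, node) {
		if (rgn->busy)		/* rgn itself is never NULL here */
			continue;
		victim = rgn;
		break;
	}
	if (!victim)
		pr_err("no eligible region\n");
	return victim;
}
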
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 0e6110da69e7..578c4b6d0f7d 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -988,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = {
.remove = virtscsi_remove,
};
-static int __init init(void)
+static int __init virtio_scsi_init(void)
{
int ret = -ENOMEM;
@@ -1020,14 +1020,14 @@ error:
return ret;
}
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 27b9e2baab1a..7acf9193a9e8 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
scsi_remove_host(host);
NCR_700_release(host);
+ if (host->base > 0x01000000)
+ iounmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
zorro_release_device(z);
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 92d9610df1fd..938017a60c8e 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
if (atmel_qspi_find_mode(op) < 0)
return false;
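
Running spi_mem_default_supports_op() before any driver-specific test lets the spi-mem core filter out buswidth and DTR combinations the controller never advertised, so the driver's own mode check only sees ops that are at least plausible. A hedged sketch of the ordering, where demo_find_mode() is a stand-in for the driver's mode lookup:

#include <linux/errno.h>
#include <linux/spi/spi-mem.h>

static int demo_find_mode(const struct spi_mem_op *op)
{
	/* hypothetical rule: accept up to quad-wide data phases */
	return op->data.buswidth <= 4 ? 0 : -EOPNOTSUPP;
}

static bool demo_supports_op(struct spi_mem *mem,
			     const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	return demo_find_mode(op) >= 0;
}
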
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 86c76211b3d3..cad2d55dcd3d 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1205,7 +1205,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
addr = op->addr.val;
len = op->data.nbytes;
- if (bcm_qspi_bspi_ver_three(qspi) == true) {
+ if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
/*
* The address coming into this function is a raw flash offset.
* But for BSPI <= V3, we need to convert it to a remapped BSPI
@@ -1224,7 +1224,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
len < 4)
mspi_read = true;
- if (mspi_read)
+ if (!has_bspi(qspi) || mspi_read)
return bcm_qspi_mspi_exec_mem_op(spi, op);
ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index b0c9f62ccefb..19686fb47bb3 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -19,6 +19,7 @@
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
@@ -102,12 +103,6 @@ struct cqspi_driver_platdata {
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
-/* Instruction type */
-#define CQSPI_INST_TYPE_SINGLE 0
-#define CQSPI_INST_TYPE_DUAL 1
-#define CQSPI_INST_TYPE_QUAD 2
-#define CQSPI_INST_TYPE_OCTAL 3
-
#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
#define CQSPI_DUMMY_CLKS_MAX 31
@@ -376,10 +371,6 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
- f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-
/*
* For an op to be DTR, cmd phase along with every other non-empty
* phase should have dtr field set to 1. If an op phase has zero
@@ -389,32 +380,23 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
(!op->addr.nbytes || op->addr.dtr) &&
(!op->data.nbytes || op->data.dtr);
- switch (op->data.buswidth) {
- case 0:
- break;
- case 1:
- f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
- break;
- case 2:
- f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
- break;
- case 4:
- f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
- break;
- case 8:
- f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
- break;
- default:
- return -EINVAL;
- }
+ f_pdata->inst_width = 0;
+ if (op->cmd.buswidth)
+ f_pdata->inst_width = ilog2(op->cmd.buswidth);
+
+ f_pdata->addr_width = 0;
+ if (op->addr.buswidth)
+ f_pdata->addr_width = ilog2(op->addr.buswidth);
+
+ f_pdata->data_width = 0;
+ if (op->data.buswidth)
+ f_pdata->data_width = ilog2(op->data.buswidth);
/* Right now we only support 8-8-8 DTR mode. */
if (f_pdata->dtr) {
switch (op->cmd.buswidth) {
case 0:
- break;
case 8:
- f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
break;
default:
return -EINVAL;
@@ -422,9 +404,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
switch (op->addr.buswidth) {
case 0:
- break;
case 8:
- f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
break;
default:
return -EINVAL;
@@ -432,9 +412,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
switch (op->data.buswidth) {
case 0:
- break;
case 8:
- f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
break;
default:
return -EINVAL;
@@ -1437,9 +1415,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
- /* Mixed DTR modes not supported. */
- if (!(all_true || all_false))
+ if (all_true) {
+ /* Right now we only support 8-8-8 DTR mode. */
+ if (op->cmd.nbytes && op->cmd.buswidth != 8)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth != 8)
+ return false;
+ if (op->data.nbytes && op->data.buswidth != 8)
+ return false;
+ } else if (all_false) {
+ /* Only 1-1-X ops are supported without DTR */
+ if (op->cmd.nbytes && op->cmd.buswidth > 1)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth > 1)
+ return false;
+ } else {
+ /* Mixed DTR modes are not supported. */
return false;
+ }
return spi_mem_default_supports_op(mem, op);
}
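
The cadence rework leans on the fact that the controller's width fields already encode bus width as a power-of-two exponent (1, 2, 4, 8 lanes map to 0, 1, 2, 3), so the per-width switch statements collapse into ilog2(). A one-function sketch, assuming the spi-mem core has already guaranteed the buswidth is a power of two:

#include <linux/log2.h>

/* 0 = single, 1 = dual, 2 = quad, 3 = octal; buswidth 0 means the
 * phase is absent, which also encodes as 0. */
static unsigned int demo_width_field(unsigned int buswidth)
{
	return buswidth ? ilog2(buswidth) : 0;
}
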
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index a5ef7a526a7f..f6eec7a869b6 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 94fb09696677..d167699a1a96 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -960,7 +960,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
static int __maybe_unused mtk_nor_resume(struct device *dev)
{
- return pm_runtime_force_resume(dev);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ mtk_nor_init(sp);
+
+ return 0;
}
static const struct dev_pm_ops mtk_nor_pm_ops = {
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 55c092069301..65be8e085ab8 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -813,6 +813,7 @@ static int mxic_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
pm_runtime_disable(&pdev->dev);
+ mxic_spi_mem_ecc_remove(mxic);
}
return ret;
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index fe82f3575df4..24ec1c83f379 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -158,14 +158,18 @@ static int rpcif_spi_probe(struct platform_device *pdev)
error = rpcif_hw_init(rpc, false);
if (error)
- return error;
+ goto out_disable_rpm;
error = spi_register_controller(ctlr);
if (error) {
dev_err(&pdev->dev, "spi_register_controller failed\n");
- rpcif_disable_rpm(rpc);
+ goto out_disable_rpm;
}
+ return 0;
+
+out_disable_rpm:
+ rpcif_disable_rpm(rpc);
return error;
}
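
The rpc-if probe now funnels both failure points through a single out_disable_rpm label, so runtime PM is disabled exactly once on any error and not at all on success. The classic goto-unwind shape, with hypothetical stub helpers:

static int demo_enable_rpm(void) { return 0; }
static void demo_disable_rpm(void) { }
static int demo_hw_init(void) { return 0; }
static int demo_register(void) { return 0; }

static int demo_probe(void)
{
	int error;

	error = demo_enable_rpm();
	if (error)
		return error;		/* nothing to unwind yet */

	error = demo_hw_init();
	if (error)
		goto out_disable_rpm;

	error = demo_register();
	if (error)
		goto out_disable_rpm;

	return 0;

out_disable_rpm:
	demo_disable_rpm();
	return error;
}
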
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c4dd1200fe99..2e6d6bbeb784 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1130,11 +1130,15 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (ctlr->dma_tx)
tx_dev = ctlr->dma_tx->device->dev;
+ else if (ctlr->dma_map_dev)
+ tx_dev = ctlr->dma_map_dev;
else
tx_dev = ctlr->dev.parent;
if (ctlr->dma_rx)
rx_dev = ctlr->dma_rx->device->dev;
+ else if (ctlr->dma_map_dev)
+ rx_dev = ctlr->dma_map_dev;
else
rx_dev = ctlr->dev.parent;
@@ -2406,7 +2410,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
} else {
struct acpi_device *adev;
- if (acpi_bus_get_device(parent_handle, &adev))
+ adev = acpi_fetch_acpi_dev(parent_handle);
+ if (!adev)
return -ENODEV;
ctlr = acpi_spi_find_controller_by_adev(adev);
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index d68611ef22f8..f056204c0fdb 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -70,7 +70,7 @@ static int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
int data_len;
- data_len = tag->tag_len + TAG_HDR_LEN;
+ data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;
if (skb_tailroom(skb) < data_len)
return -1;
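
tag_len in the PPPoE tag header is a wire-format big-endian field, so it has to go through be16_to_cpu() before any host arithmetic; on a little-endian CPU the raw value computes a badly wrong length. A tiny sketch of the conversion (demo_tag is hypothetical; sparse would flag the unconverted use of a __be16):

#include <asm/byteorder.h>
#include <linux/types.h>

struct demo_tag {
	__be16 tag_type;
	__be16 tag_len;		/* big-endian on the wire */
};

static int demo_payload_len(const struct demo_tag *tag, int hdr_len)
{
	/* convert first, then do host-order arithmetic */
	return be16_to_cpu(tag->tag_len) + hdr_len;
}
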
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 95d4ca50a605..fd7267baa707 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1821,6 +1821,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
mutex_lock(&udev->cmdr_lock);
page = xa_load(&udev->data_pages, dpi);
if (likely(page)) {
+ get_page(page);
mutex_unlock(&udev->cmdr_lock);
return page;
}
@@ -1877,6 +1878,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
/* For the vmalloc()ed cmd area pages */
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
+ get_page(page);
} else {
uint32_t dpi;
@@ -1887,7 +1889,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- get_page(page);
vmf->page = page;
return 0;
}
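
Both tcmu changes move get_page() to a point where the page is still protected: inside cmdr_lock for the xarray lookup, and immediately after vmalloc_to_page() for the command-area path, instead of once at the bottom of the fault handler. The lookup half of that pattern, sketched with hypothetical names:

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

/* Pin a page while the lock that serializes against teardown is held;
 * taking the reference after the unlock would leave a free window. */
static struct page *demo_get_data_page(struct xarray *pages,
				       struct mutex *lock,
				       unsigned long idx)
{
	struct page *page;

	mutex_lock(lock);
	page = xa_load(pages, idx);
	if (page)
		get_page(page);
	mutex_unlock(lock);
	return page;
}
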
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 8a6958377764..3acc0f185762 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -436,31 +436,31 @@ static void mpc512x_psc_fifo_init(struct uart_port *port)
out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
}
-static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
}
-static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
}
-static int mpc512x_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_rx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->rxsr)
& in_be32(&FIFO_512x(port)->rximr)
& MPC512x_PSC_FIFO_ALARM;
}
-static int mpc512x_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->txsr)
& in_be32(&FIFO_512x(port)->tximr)
& MPC512x_PSC_FIFO_ALARM;
}
-static int mpc512x_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_empty(struct uart_port *port)
{
return in_be32(&FIFO_512x(port)->txsr)
& MPC512x_PSC_FIFO_EMPTY;
@@ -780,29 +780,29 @@ static void mpc5125_psc_fifo_init(struct uart_port *port)
out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM);
}
-static int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
}
-static int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
{
return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL);
}
-static int mpc5125_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_rx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->rxsr) &
in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM;
}
-static int mpc5125_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_rdy(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->txsr) &
in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM;
}
-static int mpc5125_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_empty(struct uart_port *port)
{
return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY;
}
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index b7bb16f92ac6..06b6f3594a13 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -36,6 +36,10 @@ static bool nointxmask;
static bool disable_vga;
static bool disable_idle_d3;
+/* List of PF's that vfio_pci_core_sriov_configure() has been called on */
+static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
+static LIST_HEAD(vfio_pci_sriov_pfs);
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -434,47 +438,17 @@ out:
}
EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
-static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
-{
- struct pci_dev *physfn = pci_physfn(vdev->pdev);
- struct vfio_device *pf_dev;
-
- if (!vdev->pdev->is_virtfn)
- return NULL;
-
- pf_dev = vfio_device_get_from_dev(&physfn->dev);
- if (!pf_dev)
- return NULL;
-
- if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
- vfio_device_put(pf_dev);
- return NULL;
- }
-
- return container_of(pf_dev, struct vfio_pci_core_device, vdev);
-}
-
-static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
-{
- struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
-
- if (!pf_vdev)
- return;
-
- mutex_lock(&pf_vdev->vf_token->lock);
- pf_vdev->vf_token->users += val;
- WARN_ON(pf_vdev->vf_token->users < 0);
- mutex_unlock(&pf_vdev->vf_token->lock);
-
- vfio_device_put(&pf_vdev->vdev);
-}
-
void vfio_pci_core_close_device(struct vfio_device *core_vdev)
{
struct vfio_pci_core_device *vdev =
container_of(core_vdev, struct vfio_pci_core_device, vdev);
- vfio_pci_vf_token_user_add(vdev, -1);
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
+ vdev->sriov_pf_core_dev->vf_token->users--;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_core_disable(vdev);
@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
{
vfio_pci_probe_mmaps(vdev);
vfio_spapr_pci_eeh_open(vdev->pdev);
- vfio_pci_vf_token_user_add(vdev, 1);
+
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ vdev->sriov_pf_core_dev->vf_token->users++;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
}
EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
*
* If the VF token is provided but unused, an error is generated.
*/
- if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
- return 0; /* No VF token provided or required */
-
if (vdev->pdev->is_virtfn) {
- struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+ struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
bool match;
if (!pf_vdev) {
@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
}
if (!vf_token) {
- vfio_device_put(&pf_vdev->vdev);
pci_info_ratelimited(vdev->pdev,
"VF token required to access device\n");
return -EACCES;
@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
mutex_unlock(&pf_vdev->vf_token->lock);
- vfio_device_put(&pf_vdev->vdev);
-
if (!match) {
pci_info_ratelimited(vdev->pdev,
"Incorrect VF token provided for device\n");
@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *physfn;
int ret;
+ if (pdev->is_virtfn) {
+ /*
+ * If this VF was created by our vfio_pci_core_sriov_configure()
+ * then we can find the PF vfio_pci_core_device now, and due to
+ * the locking in pci_disable_sriov() it cannot change until
+ * this VF device driver is removed.
+ */
+ physfn = pci_physfn(vdev->pdev);
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
+ if (cur->pdev == physfn) {
+ vdev->sriov_pf_core_dev = cur;
+ break;
+ }
+ }
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+ return 0;
+ }
+
+ /* Not a SRIOV PF */
if (!pdev->is_physfn)
return 0;
@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
INIT_LIST_HEAD(&vdev->ioeventfds_list);
mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list);
+ INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
- pci_disable_sriov(pdev);
+ vfio_pci_core_sriov_configure(pdev, 0);
vfio_unregister_group_dev(&vdev->vdev);
@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
+ struct vfio_pci_core_device *vdev;
struct vfio_device *device;
int ret = 0;
+ device_lock_assert(&pdev->dev);
+
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -ENODEV;
- if (nr_virtfn == 0)
- pci_disable_sriov(pdev);
- else
+ vdev = container_of(device, struct vfio_pci_core_device, vdev);
+
+ if (nr_virtfn) {
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ /*
+ * The thread that adds the vdev to the list is the only thread
+ * that gets to call pci_enable_sriov() and we will only allow
+ * it to be called once without going through
+ * pci_disable_sriov()
+ */
+ if (!list_empty(&vdev->sriov_pfs_item)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
ret = pci_enable_sriov(pdev, nr_virtfn);
+ if (ret)
+ goto out_del;
+ ret = nr_virtfn;
+ goto out_put;
+ }
- vfio_device_put(device);
+ pci_disable_sriov(pdev);
- return ret < 0 ? ret : nr_virtfn;
+out_del:
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_del_init(&vdev->sriov_pfs_item);
+out_unlock:
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+out_put:
+ vfio_device_put(device);
+ return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
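
The sriov_configure rework registers the PF on a global list under a mutex before calling pci_enable_sriov(); a PF whose list node is already linked has SR-IOV enabled, so a second enable attempt fails cleanly, and disable unlinks the node with list_del_init() so it can be enabled again. The enable-once core of that, with hypothetical demo_* names (the node must be INIT_LIST_HEAD()-initialized at device-init time, as the vdev init hunk above does):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_pfs_mutex);
static LIST_HEAD(demo_pfs);

struct demo_pf {
	struct list_head item;	/* INIT_LIST_HEAD() at init time */
};

static int demo_enable_once(struct demo_pf *pf)
{
	mutex_lock(&demo_pfs_mutex);
	if (!list_empty(&pf->item)) {	/* already enabled */
		mutex_unlock(&demo_pfs_mutex);
		return -EINVAL;
	}
	list_add_tail(&pf->item, &demo_pfs);
	mutex_unlock(&demo_pfs_mutex);
	return 0;
}

static void demo_disable(struct demo_pf *pf)
{
	mutex_lock(&demo_pfs_mutex);
	list_del_init(&pf->item);	/* list_empty() is true again */
	mutex_unlock(&demo_pfs_mutex);
}
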
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 34d6bb1bf82e..a6bb0e438216 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1579,7 +1579,14 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
* If it's not a platform device, at least print a warning. A
* fix would add code to remove the device from the system.
*/
- if (dev_is_platform(device)) {
+ if (!device) {
+ /* TODO: Represent each OF framebuffer as its own
+ * device in the device hierarchy. For now, offb
+ * doesn't have such a device, so unregister the
+ * framebuffer as before without warning.
+ */
+ do_unregister_framebuffer(registered_fb[i]);
+ } else if (dev_is_platform(device)) {
registered_fb[i]->forced_out = true;
platform_device_unregister(to_platform_device(device));
} else {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index dfe26fa17e95..617a7f4f07a8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -689,29 +689,34 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_ballooned_pages);
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-static void __init balloon_add_region(unsigned long start_pfn,
- unsigned long pages)
+static void __init balloon_add_regions(void)
{
+#if defined(CONFIG_XEN_PV)
+ unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
+ unsigned int i;
- /*
- * If the amount of usable memory has been limited (e.g., with
- * the 'mem' command line parameter), don't add pages beyond
- * this limit.
- */
- extra_pfn_end = min(max_pfn, start_pfn + pages);
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ pages = xen_extra_mem[i].n_pfns;
+ if (!pages)
+ continue;
- for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
- /* totalram_pages and totalhigh_pages do not
- include the boot-time balloon extension, so
- don't subtract from it. */
- balloon_append(pfn_to_page(pfn));
- }
+ start_pfn = xen_extra_mem[i].start_pfn;
- balloon_stats.total_pages += extra_pfn_end - start_pfn;
-}
+ /*
+ * If the amount of usable memory has been limited (e.g., with
+ * the 'mem' command line parameter), don't add pages beyond
+ * this limit.
+ */
+ extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+ for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
+ balloon_append(pfn_to_page(pfn));
+
+ balloon_stats.total_pages += extra_pfn_end - start_pfn;
+ }
#endif
+}
static int __init balloon_init(void)
{
@@ -745,20 +750,7 @@ static int __init balloon_init(void)
register_sysctl_table(xen_root);
#endif
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
- {
- int i;
-
- /*
- * Initialize the balloon with pages from the extra memory
- * regions (see arch/x86/xen/setup.c).
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
- if (xen_extra_mem[i].n_pfns)
- balloon_add_region(xen_extra_mem[i].start_pfn,
- xen_extra_mem[i].n_pfns);
- }
-#endif
+ balloon_add_regions();
task = kthread_run(balloon_thread, NULL, "xen-balloon");
if (IS_ERR(task)) {
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index a8b41057c382..a39f2d36dd9c 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -230,39 +230,6 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
-#ifdef CONFIG_XEN_PV
-static int __init init(void)
-{
- unsigned int i;
-
- if (!xen_domain())
- return -ENODEV;
-
- if (!xen_pv_domain())
- return 0;
-
- /*
- * Initialize with pages from the extra memory regions (see
- * arch/x86/xen/setup.c).
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
- unsigned int j;
-
- for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
- struct page *pg =
- pfn_to_page(xen_extra_mem[i].start_pfn + j);
-
- pg->zone_device_data = page_list;
- page_list = pg;
- list_count++;
- }
- }
-
- return 0;
-}
-subsys_initcall(init);
-#endif
-
static int __init unpopulated_init(void)
{
int ret;