Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/Kconfig | 79
-rw-r--r--  drivers/atm/Makefile | 4
-rw-r--r--  drivers/atm/ambassador.c | 2400
-rw-r--r--  drivers/atm/ambassador.h | 648
-rw-r--r--  drivers/atm/firestream.c | 2057
-rw-r--r--  drivers/atm/firestream.h | 502
-rw-r--r--  drivers/atm/horizon.c | 2853
-rw-r--r--  drivers/atm/horizon.h | 492
-rw-r--r--  drivers/atm/nicstarmac.c | 5
-rw-r--r--  drivers/atm/uPD98401.h | 293
-rw-r--r--  drivers/atm/uPD98402.c | 266
-rw-r--r--  drivers/atm/uPD98402.h | 107
-rw-r--r--  drivers/atm/zatm.c | 1652
-rw-r--r--  drivers/atm/zatm.h | 104
-rw-r--r--  drivers/infiniband/core/device.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_flow_action.c | 383
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 223
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 31
-rw-r--r--  drivers/isdn/mISDN/socket.c | 2
-rw-r--r--  drivers/media/rc/bpf-lirc.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/can/Kconfig | 1
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/ctucanfd/Kconfig | 34
-rw-r--r--  drivers/net/can/ctucanfd/Makefile | 10
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd.h | 82
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_base.c | 1490
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_kframe.h | 77
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_kregs.h | 325
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_pci.c | 304
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_platform.c | 132
-rw-r--r--  drivers/net/can/dev/bittiming.c | 2
-rw-r--r--  drivers/net/can/dev/rx-offload.c | 6
-rw-r--r--  drivers/net/can/flexcan/flexcan-core.c | 16
-rw-r--r--  drivers/net/can/m_can/m_can.c | 2
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 2
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 2
-rw-r--r--  drivers/net/can/sja1000/tscan1.c | 7
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 25
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 12
-rw-r--r--  drivers/net/can/ti_hecc.c | 4
-rw-r--r--  drivers/net/can/xilinx_can.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 35
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 131
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 136
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 10
-rw-r--r--  drivers/net/dsa/mt7530.c | 332
-rw-r--r--  drivers/net/dsa/mt7530.h | 26
-rw-r--r--  drivers/net/dsa/qca8k.c | 145
-rw-r--r--  drivers/net/dsa/qca8k.h | 12
-rw-r--r--  drivers/net/eql.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 87
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 136
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 409
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 23
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 304
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 191
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 24
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 22
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 141
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 8
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 310
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 54
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 489
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 53
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 53
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/Makefile | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 737
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_config.h | 204
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 245
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 170
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 194
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 299
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c | 463
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 1176
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 357
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 367
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 508
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 199
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 335
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 284
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flower.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router.c | 11
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/mediatek/Makefile | 5
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 131
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 369
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h | 89
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 189
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 880
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.h | 135
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_debugfs.c | 175
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 179
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c | 125
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 156
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c) | 95
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 245
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c) | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 247
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 132
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 390
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 1582
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 622
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 681
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.h | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 311
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_linecards.c | 1373
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 250
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 544
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 282
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 6
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/Makefile | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 842
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 76
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 121
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_port.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c | 276
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_regs.h | 146
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.c | 4
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 43
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c | 238
-rw-r--r--  drivers/net/ethernet/realtek/atp.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 52
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/falcon/rx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 24
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 33
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c | 180
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.h | 8
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 38
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 66
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 39
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 223
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 55
-rw-r--r--  drivers/net/ethernet/xscale/ptp_ixp46x.c | 2
-rw-r--r--  drivers/net/geneve.c | 10
-rw-r--r--  drivers/net/hamradio/Kconfig | 34
-rw-r--r--  drivers/net/hamradio/Makefile | 1
-rw-r--r--  drivers/net/hamradio/dmascc.c | 1450
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 69
-rw-r--r--  drivers/net/hyperv/netvsc.c | 16
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c | 101
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 150
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 9
-rw-r--r--  drivers/net/mdio/mdio-aspeed.c | 138
-rw-r--r--  drivers/net/mdio/mdio-mscc-miim.c | 81
-rw-r--r--  drivers/net/netdevsim/fib.c | 9
-rw-r--r--  drivers/net/phy/micrel.c | 221
-rw-r--r--  drivers/net/phy/microchip_t1.c | 50
-rw-r--r--  drivers/net/phy/phylink.c | 28
-rw-r--r--  drivers/net/ppp/pppoe.c | 3
-rw-r--r--  drivers/net/usb/cdc_ether.c | 3
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 8
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 3
-rw-r--r--  drivers/net/usb/rndis_host.c | 47
-rw-r--r--  drivers/net/usb/sr9800.h | 2
-rw-r--r--  drivers/net/wan/Kconfig | 72
-rw-r--r--  drivers/net/wan/Makefile | 5
-rw-r--r--  drivers/net/wan/cosa.c | 2052
-rw-r--r--  drivers/net/wan/cosa.h | 104
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 336
-rw-r--r--  drivers/net/wan/lmc/Makefile | 18
-rw-r--r--  drivers/net/wan/lmc/lmc.h | 33
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.c | 65
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.h | 52
-rw-r--r--  drivers/net/wan/lmc/lmc_ioctl.h | 255
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2009
-rw-r--r--  drivers/net/wan/lmc/lmc_media.c | 1206
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 106
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.h | 18
-rw-r--r--  drivers/net/wan/lmc/lmc_var.h | 468
-rw-r--r--  drivers/net/wan/sealevel.c | 352
-rw-r--r--  drivers/net/wan/z85230.c | 1641
-rw-r--r--  drivers/net/wan/z85230.h | 407
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 3
-rw-r--r--  drivers/net/wwan/wwan_hwsim.c | 22
296 files changed, 19100 insertions, 29760 deletions
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index b9370bbca828..63cdb46a3439 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -146,36 +146,6 @@ config ATM_ENI_BURST_RX_2W
try this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or
8W are also set may or may not improve throughput.
-config ATM_FIRESTREAM
- tristate "Fujitsu FireStream (FS50/FS155) "
- depends on PCI && VIRT_TO_BUS
- help
- Driver for the Fujitsu FireStream 155 (MB86697) and
- FireStream 50 (MB86695) ATM PCI chips.
-
- To compile this driver as a module, choose M here: the module will
- be called firestream.
-
-config ATM_ZATM
- tristate "ZeitNet ZN1221/ZN1225"
- depends on PCI && VIRT_TO_BUS
- help
- Driver for the ZeitNet ZN1221 (MMF) and ZN1225 (UTP-5) 155 Mbps ATM
- adapters.
-
- To compile this driver as a module, choose M here: the module will
- be called zatm.
-
-config ATM_ZATM_DEBUG
- bool "Enable extended debugging"
- depends on ATM_ZATM
- help
- Extended debugging records various events and displays that list
- when an inconsistency is detected. This mechanism is faster than
- generally using printks, but still has some impact on performance.
- Note that extended debugging may create certain race conditions
- itself. Enable this ONLY if you suspect problems with the driver.
-
config ATM_NICSTAR
tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
depends on PCI
@@ -244,55 +214,6 @@ config ATM_IDT77252_USE_SUNI
depends on ATM_IDT77252
default y
-config ATM_AMBASSADOR
- tristate "Madge Ambassador (Collage PCI 155 Server)"
- depends on PCI && VIRT_TO_BUS
- select BITREVERSE
- help
- This is a driver for ATMizer-based ATM cards produced by Madge
- Networks Ltd. Say Y (or M to compile as a module named ambassador)
- here if you have one of these cards.
-
-config ATM_AMBASSADOR_DEBUG
- bool "Enable debugging messages"
- depends on ATM_AMBASSADOR
- help
- Somewhat useful debugging messages are available. The choice of
- messages is controlled by a bitmap. This may be specified as a
- module argument (kernel command line argument as well?), changed
- dynamically using an ioctl (not yet) or changed by sending the
- string "Dxxxx" to VCI 1023 (where x is a hex digit). See the file
- <file:drivers/atm/ambassador.h> for the meanings of the bits in the
- mask.
-
- When active, these messages can have a significant impact on the
- speed of the driver, and the size of your syslog files! When
- inactive, they will have only a modest impact on performance.
-
-config ATM_HORIZON
- tristate "Madge Horizon [Ultra] (Collage PCI 25 and Collage PCI 155 Client)"
- depends on PCI && VIRT_TO_BUS
- help
- This is a driver for the Horizon chipset ATM adapter cards once
- produced by Madge Networks Ltd. Say Y (or M to compile as a module
- named horizon) here if you have one of these cards.
-
-config ATM_HORIZON_DEBUG
- bool "Enable debugging messages"
- depends on ATM_HORIZON
- help
- Somewhat useful debugging messages are available. The choice of
- messages is controlled by a bitmap. This may be specified as a
- module argument (kernel command line argument as well?), changed
- dynamically using an ioctl (not yet) or changed by sending the
- string "Dxxxx" to VCI 1023 (where x is a hex digit). See the file
- <file:drivers/atm/horizon.h> for the meanings of the bits in the
- mask.
-
- When active, these messages can have a significant impact on the
- speed of the driver, and the size of your syslog files! When
- inactive, they will have only a modest impact on performance.
-
config ATM_IA
tristate "Interphase ATM PCI x575/x525/x531"
depends on PCI
diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile
index aa191616a72e..c9eade92019b 100644
--- a/drivers/atm/Makefile
+++ b/drivers/atm/Makefile
@@ -5,10 +5,7 @@
fore_200e-y := fore200e.o
-obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o
obj-$(CONFIG_ATM_NICSTAR) += nicstar.o
-obj-$(CONFIG_ATM_AMBASSADOR) += ambassador.o
-obj-$(CONFIG_ATM_HORIZON) += horizon.o
obj-$(CONFIG_ATM_IA) += iphase.o suni.o
obj-$(CONFIG_ATM_FORE200E) += fore_200e.o
obj-$(CONFIG_ATM_ENI) += eni.o suni.o
@@ -27,7 +24,6 @@ endif
obj-$(CONFIG_ATM_DUMMY) += adummy.o
obj-$(CONFIG_ATM_TCP) += atmtcp.o
-obj-$(CONFIG_ATM_FIRESTREAM) += firestream.o
obj-$(CONFIG_ATM_LANAI) += lanai.o
obj-$(CONFIG_ATM_HE) += he.o
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
deleted file mode 100644
index c039b8a4fefe..000000000000
--- a/drivers/atm/ambassador.c
+++ /dev/null
@@ -1,2400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Madge Ambassador ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/* * dedicated to the memory of Graham Gordon 1971-1998 * */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/atmdev.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/poison.h>
-#include <linux/bitrev.h>
-#include <linux/mutex.h>
-#include <linux/firmware.h>
-#include <linux/ihex.h>
-#include <linux/slab.h>
-
-#include <linux/atomic.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
-#include "ambassador.h"
-
-#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
-#define description_string "Madge ATM Ambassador driver"
-#define version_string "1.2.4"
-
-static inline void __init show_version (void) {
- printk ("%s version %s\n", description_string, version_string);
-}
-
-/*
-
- Theory of Operation
-
- I Hardware, detection, initialisation and shutdown.
-
- 1. Supported Hardware
-
- This driver is for the PCI ATMizer-based Ambassador card (except
- very early versions). It is not suitable for the similar EISA "TR7"
- card. Commercially, both cards are known as Collage Server ATM
- adapters.
-
- The loader supports image transfer to the card, image start, and a
- few other miscellaneous commands.
-
- Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023.
-
- The cards are big-endian.
-
- 2. Detection
-
- Standard PCI stuff, the early cards are detected and rejected.
-
- 3. Initialisation
-
- The cards are reset and the self-test results are checked. The
- microcode image is then transferred and started. This waits for a
- pointer to a descriptor containing details of the host-based queues
- and buffers and various parameters etc. Once they are processed,
- normal operations may begin. The BIA is read using a microcode
- command.
-
- 4. Shutdown
-
- This may be accomplished either by a card reset or via the microcode
- shutdown command. Further investigation required.
-
- 5. Persistent state
-
- The card reset does not affect PCI configuration (good) or the
- contents of several other "shared run-time registers" (bad) which
- include doorbell and interrupt control as well as EEPROM and PCI
- control. The driver must be careful when modifying these registers
- not to touch bits it does not use and to undo any changes at exit.
-
- II Driver software
-
- 0. Generalities
-
- The adapter is quite intelligent (fast) and has a simple interface
- (few features). VPI is always zero, 1024 VCIs are supported. There
- is limited cell rate support. UBR channels can be capped and ABR
- (explicit rate, but not EFCI) is supported. There is no CBR or VBR
- support.
-
- 1. Driver <-> Adapter Communication
-
- Apart from the basic loader commands, the driver communicates
- through three entities: the command queue (CQ), the transmit queue
- pair (TXQ) and the receive queue pairs (RXQ). These three entities
- are set up by the host and passed to the microcode just after it has
- been started.
-
- All queues are host-based circular queues. They are contiguous and
- (due to hardware limitations) have some restrictions as to their
- locations in (bus) memory. They are of the "full means the same as
- empty so don't do that" variety since the adapter uses pointers
- internally.
-
- The queue pairs work as follows: one queue is for supply to the
- adapter, items in it are pending and are owned by the adapter; the
- other is the queue for return from the adapter, items in it have
- been dealt with by the adapter. The host adds items to the supply
- (TX descriptors and free RX buffer descriptors) and removes items
- from the return (TX and RX completions). The adapter deals with out
- of order completions.
-
- Interrupts (card to host) and the doorbell (host to card) are used
- for signalling.
-
- 1. CQ
-
- This is to communicate "open VC", "close VC", "get stats" etc. to
- the adapter. At most one command is retired every millisecond by the
- card. There is no out of order completion or notification. The
- driver needs to check the return code of the command, waiting as
- appropriate.
-
- 2. TXQ
-
- TX supply items are of variable length (scatter gather support) and
- so the queue items are (more or less) pointers to the real thing.
- Each TX supply item contains a unique, host-supplied handle (the skb
- bus address seems most sensible as this works for Alphas as well,
- there is no need to do any endian conversions on the handles).
-
- TX return items consist of just the handles above.
-
- 3. RXQ (up to 4 of these with different lengths and buffer sizes)
-
- RX supply items consist of a unique, host-supplied handle (the skb
- bus address again) and a pointer to the buffer data area.
-
- RX return items consist of the handle above, the VC, length and a
- status word. This just screams "oh so easy" doesn't it?
-
- Note on RX pool sizes:
-
- Each pool should have enough buffers to handle a back-to-back stream
- of minimum sized frames on a single VC. For example:
-
- frame spacing = 3us (about right)
-
- delay = IRQ lat + RX handling + RX buffer replenish = 20 (us) (a guess)
-
- min number of buffers for one VC = 1 + delay/spacing (buffers)
-
- delay/spacing = latency = (20+2)/3 = 7 (buffers) (rounding up)
-
- The 20us delay assumes that there is no need to sleep; if we need to
- sleep to get buffers we are going to drop frames anyway.
-
- In fact, each pool should have enough buffers to support the
- simultaneous reassembly of a separate frame on each VC and cope with
- the case in which frames complete in round robin cell fashion on
- each VC.
-
- Only one frame can complete at each cell arrival, so if "n" VCs are
- open, the worst case is to have them all complete frames together
- followed by all starting new frames together.
-
- desired number of buffers = n + delay/spacing
-
- These are the extreme requirements; however, they are "n+k" for some
- "k", so we have only the constant to choose. This is the argument
- rx_lats, which currently defaults to 7.
-
- Actually, "n ? n+k : 0" is better and this is what is implemented,
- subject to the limit given by the pool size.
-
- 4. Driver locking
-
- Simple spinlocks are used around the TX and RX queue mechanisms.
- Anyone with a faster, working method is welcome to implement it.
-
- The adapter command queue is protected with a spinlock. We always
- wait for commands to complete.
-
- A more complex form of locking is used around parts of the VC open
- and close functions. There are three reasons for a lock: 1. we need
- to do atomic rate reservation and release (not used yet), 2. Opening
- sometimes involves two adapter commands which must not be separated
- by another command on the same VC, 3. the changes to RX pool size
- must be atomic. The lock needs to work over context switches, so we
- use a semaphore.
-
- III Hardware Features and Microcode Bugs
-
- 1. Byte Ordering
-
- *%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*!
-
- 2. Memory access
-
- All structures that are not accessed using DMA must be 4-byte
- aligned (not a problem) and must not cross 4MB boundaries.
-
- There is a DMA memory hole at E0000000-E00000FF (groan).
-
- TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB
- but for a hardware bug).
-
- RX buffers (DMA write) must not cross 16MB boundaries and must
- include spare trailing bytes up to the next 4-byte boundary; they
- will be written with rubbish.
-
- The PLX likes to prefetch; if reading up to 4 u32 past the end of
- each TX fragment is not a problem, then TX can be made to go a
- little faster by passing a flag at init that disables a prefetch
- workaround. We do not pass this flag. (new microcode only)
-
- Now we:
- . Note that alloc_skb rounds up size to a 16-byte boundary.
- . Ensure all areas do not traverse 4MB boundaries.
- . Ensure all areas do not start at a E00000xx bus address.
- (I cannot be certain, but this may always hold with Linux)
- . Make all failures cause a loud message.
- . Discard non-conforming SKBs (causes TX failure or RX fill delay).
- . Discard non-conforming TX fragment descriptors (the TX fails).
- In the future we could:
- . Allow RX areas that traverse 4MB (but not 16MB) boundaries.
- . Segment TX areas into some/more fragments, when necessary.
- . Relax checks for non-DMA items (ignore hole).
- . Give scatter-gather (iovec) requirements using ???. (?)
-
- 3. VC close is broken (only for new microcode)
-
- The VC close adapter microcode command fails to do anything if any
- frames have been received on the VC but none have been transmitted.
- Frames continue to be reassembled and passed (with IRQ) to the
- driver.
-
- IV To Do List
-
- . Fix bugs!
-
- . Timer code may be broken.
-
- . Deal with buggy VC close (somehow) in microcode 12.
-
- . Handle interrupted and/or non-blocking writes - is this a job for
- the protocol layer?
-
- . Add code to break up TX fragments when they span 4MB boundaries.
-
- . Add SUNI phy layer (need to know where SUNI lives on card).
-
- . Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b)
- leave extra headroom space for Ambassador TX descriptors.
-
- . Understand these elements of struct atm_vcc: recvq (proto?),
- sleep, callback, listenq, backlog_quota, reply and user_back.
-
- . Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable).
-
- . Impose a TX-pending limit (2?) on each VC, help avoid TX q overflow.
-
- . Decide whether RX buffer recycling is or can be made completely safe;
- turn it back on. It looks like Werner is going to axe this.
-
- . Implement QoS changes on open VCs (involves extracting parts of VC open
- and close into separate functions and using them to make changes).
-
- . Hack on command queue so that someone can issue multiple commands and wait
- on the last one (OR only "no-op" or "wait" commands are waited for).
-
- . Eliminate need for while-schedule around do_command.
-
-*/
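
[Editor's note: the queue-pair discipline described in section II.1 above is easiest to see in miniature. The sketch below is illustrative only; the struct and function names are invented, not the driver's. The real implementations are the tx_give/tx_take and rx_give/rx_take functions further down, which keep a separate pending count because the in/out pointers of a "full means the same as empty" circular queue cannot by themselves distinguish full from empty.]

/* Illustrative sketch (not driver code) of the supply/return queue-pair
 * handoff: the host gives work on the supply queue and later takes
 * completions from the return queue. */
struct toy_queue {
	unsigned int pending;	/* entries currently owned by the adapter */
	unsigned int maximum;	/* capacity; pending must never reach it */
	int *in, *out, *start, *limit;
};

/* host -> adapter: enqueue one item on the supply queue */
static int toy_give(struct toy_queue *q, int item)
{
	if (q->pending == q->maximum)
		return -1;	/* "full means the same as empty so don't do that" */
	*q->in = item;
	q->in = (q->in + 1 == q->limit) ? q->start : q->in + 1;
	q->pending++;
	return 0;	/* the real driver also rings the doorbell here */
}

/* adapter -> host: dequeue one completion from the return queue */
static int toy_take(struct toy_queue *q, int *item)
{
	if (!q->pending)
		return -1;
	*item = *q->out;
	q->out = (q->out + 1 == q->limit) ? q->start : q->out + 1;
	q->pending--;
	return 0;
}
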
-
-static void do_housekeeping (struct timer_list *t);
-/********** globals **********/
-
-static unsigned short debug = 0;
-static unsigned int cmds = 8;
-static unsigned int txs = 32;
-static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 };
-static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 };
-static unsigned int rx_lats = 7;
-static unsigned char pci_lat = 0;
-
-static const unsigned long onegigmask = -1 << 30;
-
-/********** access to adapter **********/
-
-static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) {
- PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x", addr, data);
-#ifdef AMB_MMIO
- dev->membase[addr / sizeof(u32)] = data;
-#else
- outl (data, dev->iobase + addr);
-#endif
-}
-
-static inline u32 rd_plain (const amb_dev * dev, size_t addr) {
-#ifdef AMB_MMIO
- u32 data = dev->membase[addr / sizeof(u32)];
-#else
- u32 data = inl (dev->iobase + addr);
-#endif
- PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x", addr, data);
- return data;
-}
-
-static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) {
- __be32 be = cpu_to_be32 (data);
- PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x b[%08x]", addr, data, be);
-#ifdef AMB_MMIO
- dev->membase[addr / sizeof(u32)] = be;
-#else
- outl (be, dev->iobase + addr);
-#endif
-}
-
-static inline u32 rd_mem (const amb_dev * dev, size_t addr) {
-#ifdef AMB_MMIO
- __be32 be = dev->membase[addr / sizeof(u32)];
-#else
- __be32 be = inl (dev->iobase + addr);
-#endif
- u32 data = be32_to_cpu (be);
- PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x b[%08x]", addr, data, be);
- return data;
-}
-
-/********** dump routines **********/
-
-static inline void dump_registers (const amb_dev * dev) {
-#ifdef DEBUG_AMBASSADOR
- if (debug & DBG_REGS) {
- size_t i;
- PRINTD (DBG_REGS, "reading PLX control: ");
- for (i = 0x00; i < 0x30; i += sizeof(u32))
- rd_mem (dev, i);
- PRINTD (DBG_REGS, "reading mailboxes: ");
- for (i = 0x40; i < 0x60; i += sizeof(u32))
- rd_mem (dev, i);
- PRINTD (DBG_REGS, "reading doorb irqev irqen reset:");
- for (i = 0x60; i < 0x70; i += sizeof(u32))
- rd_mem (dev, i);
- }
-#else
- (void) dev;
-#endif
- return;
-}
-
-static inline void dump_loader_block (volatile loader_block * lb) {
-#ifdef DEBUG_AMBASSADOR
- unsigned int i;
- PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:",
- lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command));
- for (i = 0; i < MAX_COMMAND_DATA; ++i)
- PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i]));
- PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid));
-#else
- (void) lb;
-#endif
- return;
-}
-
-static inline void dump_command (command * cmd) {
-#ifdef DEBUG_AMBASSADOR
- unsigned int i;
- PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:",
- cmd, /*be32_to_cpu*/ (cmd->request));
- for (i = 0; i < 3; ++i)
- PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i]));
- PRINTDE (DBG_CMD, "");
-#else
- (void) cmd;
-#endif
- return;
-}
-
-static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
-#ifdef DEBUG_AMBASSADOR
- unsigned int i;
- unsigned char * data = skb->data;
- PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
- for (i=0; i<skb->len && i < 256;i++)
- PRINTDM (DBG_DATA, "%02x ", data[i]);
- PRINTDE (DBG_DATA,"");
-#else
- (void) prefix;
- (void) vc;
- (void) skb;
-#endif
- return;
-}
-
-/********** check memory areas for use by Ambassador **********/
-
-/* see limitations under Hardware Features */
-
-static int check_area (void * start, size_t length) {
- // assumes length > 0
- const u32 fourmegmask = -1 << 22;
- const u32 twofivesixmask = -1 << 8;
- const u32 starthole = 0xE0000000;
- u32 startaddress = virt_to_bus (start);
- u32 lastaddress = startaddress+length-1;
- if ((startaddress ^ lastaddress) & fourmegmask ||
- (startaddress & twofivesixmask) == starthole) {
- PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!",
- startaddress, lastaddress);
- return -1;
- } else {
- return 0;
- }
-}
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-static void amb_kfree_skb (struct sk_buff * skb) {
- if (ATM_SKB(skb)->vcc->pop) {
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- } else {
- dev_kfree_skb_any (skb);
- }
-}
-
-/********** TX completion **********/
-
-static void tx_complete (amb_dev * dev, tx_out * tx) {
- tx_simple * tx_descr = bus_to_virt (tx->handle);
- struct sk_buff * skb = tx_descr->skb;
-
- PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
-
- // VC layer stats
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- // free the descriptor
- kfree (tx_descr);
-
- // free the skb
- amb_kfree_skb (skb);
-
- dev->stats.tx_ok++;
- return;
-}
-
-/********** RX completion **********/
-
-static void rx_complete (amb_dev * dev, rx_out * rx) {
- struct sk_buff * skb = bus_to_virt (rx->handle);
- u16 vc = be16_to_cpu (rx->vc);
- // unused: u16 lec_id = be16_to_cpu (rx->lec_id);
- u16 status = be16_to_cpu (rx->status);
- u16 rx_len = be16_to_cpu (rx->length);
-
- PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len);
-
- // XXX move this in and add to VC stats ???
- if (!status) {
- struct atm_vcc * atm_vcc = dev->rxer[vc];
- dev->stats.rx.ok++;
-
- if (atm_vcc) {
-
- if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
-
- if (atm_charge (atm_vcc, skb->truesize)) {
-
- // prepare socket buffer
- ATM_SKB(skb)->vcc = atm_vcc;
- skb_put (skb, rx_len);
-
- dump_skb ("<<<", vc, skb);
-
- // VC layer stats
- atomic_inc(&atm_vcc->stats->rx);
- __net_timestamp(skb);
- // end of our responsibility
- atm_vcc->push (atm_vcc, skb);
- return;
-
- } else {
- // someone fix this (message), please!
- PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize);
- // drop stats incremented in atm_charge
- }
-
- } else {
- PRINTK (KERN_INFO, "dropped over-size frame");
- // should we count this?
- atomic_inc(&atm_vcc->stats->rx_drop);
- }
-
- } else {
- PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc);
- // this is an adapter bug, only in new version of microcode
- }
-
- } else {
- dev->stats.rx.error++;
- if (status & CRC_ERR)
- dev->stats.rx.badcrc++;
- if (status & LEN_ERR)
- dev->stats.rx.toolong++;
- if (status & ABORT_ERR)
- dev->stats.rx.aborted++;
- if (status & UNUSED_ERR)
- dev->stats.rx.unused++;
- }
-
- dev_kfree_skb_any (skb);
- return;
-}
-
-/*
-
- Note on queue handling.
-
- Here "give" and "take" refer to queue entries and a queue (pair)
- rather than frames to or from the host or adapter. Empty frame
- buffers are given to the RX queue pair and returned unused or
- containing RX frames. TX frames (well, pointers to TX fragment
- lists) are given to the TX queue pair, completions are returned.
-
-*/
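
[Editor's note: the give/take helpers below all advance their queue pointers with a NEXTQ macro that is defined in ambassador.h and therefore does not appear in this diff. Presumably it is a plain circular advance along the following lines; this is an assumption, not the header's actual text.]

/* Presumed shape of NEXTQ (defined in ambassador.h, outside this diff):
 * step a queue pointer forward, wrapping from limit back to start. */
#define NEXTQ(ptr, start, limit) \
	( (ptr)+1 < (limit) ? (ptr)+1 : (start) )
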
-
-/********** command queue **********/
-
-// I really don't like this, but it's the best I can do at the moment
-
-// also, the callers are responsible for byte order as the microcode
-// sometimes does 16-bit accesses (yuk yuk yuk)
-
-static int command_do (amb_dev * dev, command * cmd) {
- amb_cq * cq = &dev->cq;
- volatile amb_cq_ptrs * ptrs = &cq->ptrs;
- command * my_slot;
-
- PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev);
-
- if (test_bit (dead, &dev->flags))
- return 0;
-
- spin_lock (&cq->lock);
-
- // if not full...
- if (cq->pending < cq->maximum) {
- // remember my slot for later
- my_slot = ptrs->in;
- PRINTD (DBG_CMD, "command in slot %p", my_slot);
-
- dump_command (cmd);
-
- // copy command in
- *ptrs->in = *cmd;
- cq->pending++;
- ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit);
-
- // mail the command
- wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in));
-
- if (cq->pending > cq->high)
- cq->high = cq->pending;
- spin_unlock (&cq->lock);
-
- // these comments were in a while-loop before, msleep removes the loop
- // go to sleep
- // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout);
- msleep(cq->pending);
-
- // wait for my slot to be reached (all waiters are here or above, until...)
- while (ptrs->out != my_slot) {
- PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
-
- // wait on my slot (... one gets to its slot, and... )
- while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) {
- PRINTD (DBG_CMD, "wait: command slot completion");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
-
- PRINTD (DBG_CMD, "command complete");
- // update queue (... moves the queue along to the next slot)
- spin_lock (&cq->lock);
- cq->pending--;
- // copy command out
- *cmd = *ptrs->out;
- ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit);
- spin_unlock (&cq->lock);
-
- return 0;
- } else {
- cq->filled++;
- spin_unlock (&cq->lock);
- return -EAGAIN;
- }
-
-}
-
-/********** TX queue pair **********/
-
-static int tx_give (amb_dev * dev, tx_in * tx) {
- amb_txq * txq = &dev->txq;
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev);
-
- if (test_bit (dead, &dev->flags))
- return 0;
-
- spin_lock_irqsave (&txq->lock, flags);
-
- if (txq->pending < txq->maximum) {
- PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);
-
- *txq->in.ptr = *tx;
- txq->pending++;
- txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);
- // hand over the TX and ring the bell
- wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));
- wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME);
-
- if (txq->pending > txq->high)
- txq->high = txq->pending;
- spin_unlock_irqrestore (&txq->lock, flags);
- return 0;
- } else {
- txq->filled++;
- spin_unlock_irqrestore (&txq->lock, flags);
- return -EAGAIN;
- }
-}
-
-static int tx_take (amb_dev * dev) {
- amb_txq * txq = &dev->txq;
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev);
-
- spin_lock_irqsave (&txq->lock, flags);
-
- if (txq->pending && txq->out.ptr->handle) {
- // deal with TX completion
- tx_complete (dev, txq->out.ptr);
- // mark unused again
- txq->out.ptr->handle = 0;
- // remove item
- txq->pending--;
- txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit);
-
- spin_unlock_irqrestore (&txq->lock, flags);
- return 0;
- } else {
-
- spin_unlock_irqrestore (&txq->lock, flags);
- return -1;
- }
-}
-
-/********** RX queue pairs **********/
-
-static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
- amb_rxq * rxq = &dev->rxq[pool];
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);
-
- spin_lock_irqsave (&rxq->lock, flags);
-
- if (rxq->pending < rxq->maximum) {
- PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);
-
- *rxq->in.ptr = *rx;
- rxq->pending++;
- rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
- // hand over the RX buffer
- wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));
-
- spin_unlock_irqrestore (&rxq->lock, flags);
- return 0;
- } else {
- spin_unlock_irqrestore (&rxq->lock, flags);
- return -1;
- }
-}
-
-static int rx_take (amb_dev * dev, unsigned char pool) {
- amb_rxq * rxq = &dev->rxq[pool];
- unsigned long flags;
-
- PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);
-
- spin_lock_irqsave (&rxq->lock, flags);
-
- if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
- // deal with RX completion
- rx_complete (dev, rxq->out.ptr);
- // mark unused again
- rxq->out.ptr->status = 0;
- rxq->out.ptr->length = 0;
- // remove item
- rxq->pending--;
- rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);
-
- if (rxq->pending < rxq->low)
- rxq->low = rxq->pending;
- spin_unlock_irqrestore (&rxq->lock, flags);
- return 0;
- } else {
- if (!rxq->pending && rxq->buffers_wanted)
- rxq->emptied++;
- spin_unlock_irqrestore (&rxq->lock, flags);
- return -1;
- }
-}
-
-/********** RX Pool handling **********/
-
-/* pre: buffers_wanted = 0, post: pending = 0 */
-static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
- amb_rxq * rxq = &dev->rxq[pool];
-
- PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
-
- if (test_bit (dead, &dev->flags))
- return;
-
- /* we are not quite like the fill pool routines as we cannot just
- remove one buffer, we have to remove all of them, but we might as
- well pretend... */
- if (rxq->pending > rxq->buffers_wanted) {
- command cmd;
- cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q);
- cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
- while (command_do (dev, &cmd))
- schedule();
- /* the pool may also be emptied via the interrupt handler */
- while (rxq->pending > rxq->buffers_wanted)
- if (rx_take (dev, pool))
- schedule();
- }
-
- return;
-}
-
-static void drain_rx_pools (amb_dev * dev) {
- unsigned char pool;
-
- PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- drain_rx_pool (dev, pool);
-}
-
-static void fill_rx_pool (amb_dev * dev, unsigned char pool,
- gfp_t priority)
-{
- rx_in rx;
- amb_rxq * rxq;
-
- PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);
-
- if (test_bit (dead, &dev->flags))
- return;
-
- rxq = &dev->rxq[pool];
- while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {
-
- struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
- if (!skb) {
- PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
- return;
- }
- if (check_area (skb->data, skb->truesize)) {
- dev_kfree_skb_any (skb);
- return;
- }
- // cast needed as there is no %? for pointer differences
- PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
- skb, skb->head, (long) skb_end_offset(skb));
- rx.handle = virt_to_bus (skb);
- rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
- if (rx_give (dev, &rx, pool))
- dev_kfree_skb_any (skb);
-
- }
-
- return;
-}
-
-// top up all RX pools
-static void fill_rx_pools (amb_dev * dev) {
- unsigned char pool;
-
- PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- fill_rx_pool (dev, pool, GFP_ATOMIC);
-
- return;
-}
-
-/********** enable host interrupts **********/
-
-static void interrupts_on (amb_dev * dev) {
- wr_plain (dev, offsetof(amb_mem, interrupt_control),
- rd_plain (dev, offsetof(amb_mem, interrupt_control))
- | AMB_INTERRUPT_BITS);
-}
-
-/********** disable host interrupts **********/
-
-static void interrupts_off (amb_dev * dev) {
- wr_plain (dev, offsetof(amb_mem, interrupt_control),
- rd_plain (dev, offsetof(amb_mem, interrupt_control))
- &~ AMB_INTERRUPT_BITS);
-}
-
-/********** interrupt handling **********/
-
-static irqreturn_t interrupt_handler(int irq, void *dev_id) {
- amb_dev * dev = dev_id;
-
- PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id);
-
- {
- u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt));
-
- // for us or someone else sharing the same interrupt
- if (!interrupt) {
- PRINTD (DBG_IRQ, "irq not for me: %d", irq);
- return IRQ_NONE;
- }
-
- // definitely for us
- PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt);
- wr_plain (dev, offsetof(amb_mem, interrupt), -1);
- }
-
- {
- unsigned int irq_work = 0;
- unsigned char pool;
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- while (!rx_take (dev, pool))
- ++irq_work;
- while (!tx_take (dev))
- ++irq_work;
-
- if (irq_work) {
- fill_rx_pools (dev);
-
- PRINTD (DBG_IRQ, "work done: %u", irq_work);
- } else {
- PRINTD (DBG_IRQ|DBG_WARN, "no work done");
- }
- }
-
- PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
- return IRQ_HANDLED;
-}
-
-/********** make rate (not quite as much fun as Horizon) **********/
-
-static int make_rate (unsigned int rate, rounding r,
- u16 * bits, unsigned int * actual) {
- unsigned char exp = -1; // hush gcc
- unsigned int man = -1; // hush gcc
-
- PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate);
-
- // rates in cells per second, ITU format (nasty 16-bit floating-point)
- // given 5-bit e and 9-bit m:
- // rate = EITHER (1+m/2^9)*2^e OR 0
- // bits = EITHER 1<<14 | e<<9 | m OR 0
- // (bit 15 is "reserved", bit 14 "non-zero")
- // smallest rate is 0 (special representation)
- // largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
- // smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
- // simple algorithm:
- // find position of top bit, this gives e
- // remove top bit and shift (rounding if feeling clever) by 9-e
-
- // ucode bug: please don't set bit 14! so 0 rate not representable
-
- if (rate > 0xffc00000U) {
- // larger than largest representable rate
-
- if (r == round_up) {
- return -EINVAL;
- } else {
- exp = 31;
- man = 511;
- }
-
- } else if (rate) {
- // representable rate
-
- exp = 31;
- man = rate;
-
- // invariant: rate = man*2^(exp-31)
- while (!(man & (1<<31))) {
- exp = exp - 1;
- man = man<<1;
- }
-
- // man has top bit set
- // rate = (2^31+(man-2^31))*2^(exp-31)
- // rate = (1+(man-2^31)/2^31)*2^exp
- man = man<<1;
- man &= 0xffffffffU; // a nop on 32-bit systems
- // rate = (1+man/2^32)*2^exp
-
- // exp is in the range 0 to 31, man is in the range 0 to 2^32-1
- // time to lose significance... we want m in the range 0 to 2^9-1
- // rounding presents a minor problem... we first decide which way
- // we are rounding (based on given rounding direction and possibly
- // the bits of the mantissa that are to be discarded).
-
- switch (r) {
- case round_down: {
- // just truncate
- man = man>>(32-9);
- break;
- }
- case round_up: {
- // check all bits that we are discarding
- if (man & (~0U>>9)) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- // no need to check for round up outside of range
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- case round_nearest: {
- // check msb that we are discarding
- if (man & (1<<(32-9-1))) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- // no need to check for round up outside of range
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- }
-
- } else {
- // zero rate - not representable
-
- if (r == round_down) {
- return -EINVAL;
- } else {
- exp = 0;
- man = 0;
- }
-
- }
-
- PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp);
-
- if (bits)
- *bits = /* (1<<14) | */ (exp<<9) | man;
-
- if (actual)
- *actual = (exp >= 9)
- ? (1 << exp) + (man << (exp-9))
- : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
-
- return 0;
-}
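
[Editor's note: for readers checking the 16-bit rate format by hand, here is a companion sketch; decode_rate is an invented helper, not part of the driver. It inverts the encoding make_rate produces, and a quick example confirms the round trip: 1920 cells/s encodes as exp=10, man=448, and (1 + 448/512) * 2^10 = 1920.]

/* Invented helper, for illustration only: decode the ITU-style 16-bit
 * rate described above, rate = (1 + m/2^9) * 2^e, with the 5-bit
 * exponent in bits 13..9 and the 9-bit mantissa in bits 8..0.
 * For exp < 9 any fractional cells are simply truncated. */
static unsigned int decode_rate(u16 bits)
{
	unsigned int exp = (bits >> 9) & 0x1f;	/* 5-bit exponent */
	unsigned int man = bits & 0x1ff;	/* 9-bit mantissa */

	return exp >= 9 ? (1u << exp) + (man << (exp - 9))
			: (1u << exp) + (man >> (9 - exp));
}
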
-
-/********** Linux ATM Operations **********/
-
-// some are not yet implemented while others do not make sense for
-// this device
-
-/********** Open a VC **********/
-
-static int amb_open (struct atm_vcc * atm_vcc)
-{
- int error;
-
- struct atm_qos * qos;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
- u16 tx_rate_bits = -1; // hush gcc
- u16 tx_vc_bits = -1; // hush gcc
- u16 tx_frame_bits = -1; // hush gcc
-
- amb_dev * dev = AMB_DEV(atm_vcc->dev);
- amb_vcc * vcc;
- unsigned char pool = -1; // hush gcc
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
-
- PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci);
-
-#ifdef ATM_VPI_UNSPEC
- // UNSPEC is deprecated, remove this code eventually
- if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
- PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
- return -EINVAL;
- }
-#endif
-
- if (!(0 <= vpi && vpi < (1<<NUM_VPI_BITS) &&
- 0 <= vci && vci < (1<<NUM_VCI_BITS))) {
- PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
- return -EINVAL;
- }
-
- qos = &atm_vcc->qos;
-
- if (qos->aal != ATM_AAL5) {
- PRINTD (DBG_QOS, "AAL not supported");
- return -EINVAL;
- }
-
- // traffic parameters
-
- PRINTD (DBG_QOS, "TX:");
- txtp = &qos->txtp;
- if (txtp->traffic_class != ATM_NONE) {
- switch (txtp->traffic_class) {
- case ATM_UBR: {
- // we take "the PCR" as a rate-cap
- int pcr = atm_pcr_goal (txtp);
- if (!pcr) {
- // no rate cap
- tx_rate_bits = 0;
- tx_vc_bits = TX_UBR;
- tx_frame_bits = TX_FRAME_NOTCAP;
- } else {
- rounding r;
- if (pcr < 0) {
- r = round_down;
- pcr = -pcr;
- } else {
- r = round_up;
- }
- error = make_rate (pcr, r, &tx_rate_bits, NULL);
- if (error)
- return error;
- tx_vc_bits = TX_UBR_CAPPED;
- tx_frame_bits = TX_FRAME_CAPPED;
- }
- break;
- }
-#if 0
- case ATM_ABR: {
- pcr = atm_pcr_goal (txtp);
- PRINTD (DBG_QOS, "pcr goal = %d", pcr);
- break;
- }
-#endif
- default: {
- // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
- PRINTD (DBG_QOS, "request for non-UBR denied");
- return -EINVAL;
- }
- }
- PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx",
- tx_rate_bits, tx_vc_bits);
- }
-
- PRINTD (DBG_QOS, "RX:");
- rxtp = &qos->rxtp;
- if (rxtp->traffic_class == ATM_NONE) {
- // do nothing
- } else {
- // choose an RX pool (arranged in increasing size)
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
- PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)",
- pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
- break;
- }
- if (pool == NUM_RX_POOLS) {
- PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL,
- "no pool suitable for VC (RX max_sdu %d is too large)",
- rxtp->max_sdu);
- return -EINVAL;
- }
-
- switch (rxtp->traffic_class) {
- case ATM_UBR: {
- break;
- }
-#if 0
- case ATM_ABR: {
- pcr = atm_pcr_goal (rxtp);
- PRINTD (DBG_QOS, "pcr goal = %d", pcr);
- break;
- }
-#endif
- default: {
- // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
- PRINTD (DBG_QOS, "request for non-UBR denied");
- return -EINVAL;
- }
- }
- }
-
- // get space for our vcc stuff
- vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL);
- if (!vcc) {
- PRINTK (KERN_ERR, "out of memory!");
- return -ENOMEM;
- }
- atm_vcc->dev_data = (void *) vcc;
-
- // no failures beyond this point
-
- // we are not really "immediately before allocating the connection
- // identifier in hardware", but it will just have to do!
- set_bit(ATM_VF_ADDR,&atm_vcc->flags);
-
- if (txtp->traffic_class != ATM_NONE) {
- command cmd;
-
- vcc->tx_frame_bits = tx_frame_bits;
-
- mutex_lock(&dev->vcc_sf);
- if (dev->rxer[vci]) {
- // RXer on the channel already, just modify rate...
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
- cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
- while (command_do (dev, &cmd))
- schedule();
- // ... and TX flags, preserving the RX pool
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
- cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_flags.flags = cpu_to_be32
- ( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
- | (tx_vc_bits << SRB_FLAGS_SHIFT) );
- while (command_do (dev, &cmd))
- schedule();
- } else {
- // no RXer on the channel, just open (with pool zero)
- cmd.request = cpu_to_be32 (SRB_OPEN_VC);
- cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT);
- cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
- while (command_do (dev, &cmd))
- schedule();
- }
- dev->txer[vci].tx_present = 1;
- mutex_unlock(&dev->vcc_sf);
- }
-
- if (rxtp->traffic_class != ATM_NONE) {
- command cmd;
-
- vcc->rx_info.pool = pool;
-
- mutex_lock(&dev->vcc_sf);
- /* grow RX buffer pool */
- if (!dev->rxq[pool].buffers_wanted)
- dev->rxq[pool].buffers_wanted = rx_lats;
- dev->rxq[pool].buffers_wanted += 1;
- fill_rx_pool (dev, pool, GFP_KERNEL);
-
- if (dev->txer[vci].tx_present) {
- // TXer on the channel already
- // switch (from pool zero) to this pool, preserving the TX bits
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
- cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_flags.flags = cpu_to_be32
- ( (pool << SRB_POOL_SHIFT)
- | (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) );
- } else {
- // no TXer on the channel, open the VC (with no rate info)
- cmd.request = cpu_to_be32 (SRB_OPEN_VC);
- cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
- cmd.args.open.rate = cpu_to_be32 (0);
- }
- while (command_do (dev, &cmd))
- schedule();
- // this link allows RX frames through
- dev->rxer[vci] = atm_vcc;
- mutex_unlock(&dev->vcc_sf);
- }
-
- // indicate readiness
- set_bit(ATM_VF_READY,&atm_vcc->flags);
-
- return 0;
-}
-
-/********** Close a VC **********/
-
-static void amb_close (struct atm_vcc * atm_vcc) {
- amb_dev * dev = AMB_DEV (atm_vcc->dev);
- amb_vcc * vcc = AMB_VCC (atm_vcc);
- u16 vci = atm_vcc->vci;
-
- PRINTD (DBG_VCC|DBG_FLOW, "amb_close");
-
- // indicate unreadiness
- clear_bit(ATM_VF_READY,&atm_vcc->flags);
-
- // disable TXing
- if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
- command cmd;
-
- mutex_lock(&dev->vcc_sf);
- if (dev->rxer[vci]) {
- // RXer still on the channel, just modify rate... XXX not really needed
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
- cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_rate.rate = cpu_to_be32 (0);
- // ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool
- } else {
- // no RXer on the channel, close channel
- cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
- cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
- }
- dev->txer[vci].tx_present = 0;
- while (command_do (dev, &cmd))
- schedule();
- mutex_unlock(&dev->vcc_sf);
- }
-
- // disable RXing
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
- command cmd;
-
- // this is (the?) one reason why we need the amb_vcc struct
- unsigned char pool = vcc->rx_info.pool;
-
- mutex_lock(&dev->vcc_sf);
- if (dev->txer[vci].tx_present) {
- // TXer still on the channel, just go to pool zero XXX not really needed
- cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
- cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
- cmd.args.modify_flags.flags = cpu_to_be32
- (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT);
- } else {
- // no TXer on the channel, close the VC
- cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
- cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
- }
- // forget the rxer - no more skbs will be pushed
-    if (atm_vcc != dev->rxer[vci])
-      PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[vci]=%p",
-	      "arghhh! we're going to die!",
-	      atm_vcc, dev->rxer[vci]);
- dev->rxer[vci] = NULL;
- while (command_do (dev, &cmd))
- schedule();
-
- /* shrink RX buffer pool */
- dev->rxq[pool].buffers_wanted -= 1;
- if (dev->rxq[pool].buffers_wanted == rx_lats) {
- dev->rxq[pool].buffers_wanted = 0;
- drain_rx_pool (dev, pool);
- }
- mutex_unlock(&dev->vcc_sf);
- }
-
- // free our structure
- kfree (vcc);
-
- // say the VPI/VCI is free again
- clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
-
- return;
-}
-
-/********** Send **********/
-
-static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- amb_dev * dev = AMB_DEV(atm_vcc->dev);
- amb_vcc * vcc = AMB_VCC(atm_vcc);
- u16 vc = atm_vcc->vci;
- unsigned int tx_len = skb->len;
- unsigned char * tx_data = skb->data;
- tx_simple * tx_descr;
- tx_in tx;
-
- if (test_bit (dead, &dev->flags))
- return -EIO;
-
- PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u",
- vc, tx_data, tx_len);
-
- dump_skb (">>>", vc, skb);
-
- if (!dev->txer[vc].tx_present) {
- PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc);
- return -EBADFD;
- }
-
- // this is a driver private field so we have to set it ourselves,
- // despite the fact that we are _required_ to use it to check for a
- // pop function
- ATM_SKB(skb)->vcc = atm_vcc;
-
- if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) {
- PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
- return -EIO;
- }
-
- if (check_area (skb->data, skb->len)) {
- atomic_inc(&atm_vcc->stats->tx_err);
- return -ENOMEM; // ?
- }
-
- // allocate memory for fragments
- tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL);
- if (!tx_descr) {
- PRINTK (KERN_ERR, "could not allocate TX descriptor");
- return -ENOMEM;
- }
- if (check_area (tx_descr, sizeof(tx_simple))) {
- kfree (tx_descr);
- return -ENOMEM;
- }
- PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr);
-
- tx_descr->skb = skb;
-
- tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len);
- tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data));
-
- tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr);
- tx_descr->tx_frag_end.vc = 0;
- tx_descr->tx_frag_end.next_descriptor_length = 0;
- tx_descr->tx_frag_end.next_descriptor = 0;
-#ifdef AMB_NEW_MICROCODE
- tx_descr->tx_frag_end.cpcs_uu = 0;
- tx_descr->tx_frag_end.cpi = 0;
- tx_descr->tx_frag_end.pad = 0;
-#endif
-
- tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc);
- tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end));
- tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag));
-
- while (tx_give (dev, &tx))
- schedule();
- return 0;
-}
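-
-/* A note on the descriptor just built (a sketch of the layout, per the
-   tx_simple definition in ambassador.h): the card is handed a tx_in
-   whose tx_descr_addr points at one tx_frag immediately followed by a
-   tx_frag_end:
-
-     tx_in -> [ tx_frag: bytes, address ][ tx_frag_end: handle, ... ]
-
-   tx_frag_end.handle carries the bus address of the tx_simple itself,
-   which is presumably how the TX-completion path recovers the
-   descriptor and the skb to free. */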
-
-/********** Change QoS on a VC **********/
-
-// int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags);
-
-/********** Free RX Socket Buffer **********/
-
-#if 0
-static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- amb_dev * dev = AMB_DEV (atm_vcc->dev);
- amb_vcc * vcc = AMB_VCC (atm_vcc);
- unsigned char pool = vcc->rx_info.pool;
- rx_in rx;
-
- // This may be unsafe for various reasons that I cannot really guess
- // at. However, I note that the ATM layer calls kfree_skb rather
-  // than dev_kfree_skb at this point so we are at least covered as far
- // as buffer locking goes. There may be bugs if pcap clones RX skbs.
-
- PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)",
- skb, atm_vcc, vcc);
-
- rx.handle = virt_to_bus (skb);
- rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
-
- skb->data = skb->head;
- skb_reset_tail_pointer(skb);
- skb->len = 0;
-
- if (!rx_give (dev, &rx, pool)) {
- // success
- PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
- return;
- }
-
- // just do what the ATM layer would have done
- dev_kfree_skb_any (skb);
-
- return;
-}
-#endif
-
-/********** Proc File Output **********/
-
-static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
- amb_dev * dev = AMB_DEV (atm_dev);
- int left = *pos;
- unsigned char pool;
-
- PRINTD (DBG_FLOW, "amb_proc_read");
-
- /* more diagnostics here? */
-
- if (!left--) {
- amb_stats * s = &dev->stats;
- return sprintf (page,
- "frames: TX OK %lu, RX OK %lu, RX bad %lu "
- "(CRC %lu, long %lu, aborted %lu, unused %lu).\n",
- s->tx_ok, s->rx.ok, s->rx.error,
- s->rx.badcrc, s->rx.toolong,
- s->rx.aborted, s->rx.unused);
- }
-
- if (!left--) {
- amb_cq * c = &dev->cq;
- return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ",
- c->pending, c->high, c->maximum);
- }
-
- if (!left--) {
- amb_txq * t = &dev->txq;
- return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n",
- t->pending, t->maximum, t->high, t->filled);
- }
-
- if (!left--) {
- unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:");
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- amb_rxq * r = &dev->rxq[pool];
- count += sprintf (page+count, " %u/%u/%u %u %u",
- r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied);
- }
- count += sprintf (page+count, ".\n");
- return count;
- }
-
- if (!left--) {
- unsigned int count = sprintf (page, "RX buffer sizes:");
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- amb_rxq * r = &dev->rxq[pool];
- count += sprintf (page+count, " %u", r->buffer_size);
- }
- count += sprintf (page+count, ".\n");
- return count;
- }
-
-#if 0
- if (!left--) {
- // suni block etc?
- }
-#endif
-
- return 0;
-}
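-
-/* A hedged note on the idiom above: the proc layer calls proc_read
-   repeatedly with *pos = 0, 1, 2, ..., so each "if (!left--)" block
-   emits exactly one line and falling off the end returns 0 for EOF.
-   A minimal reader following the same contract (illustrative only,
-   not part of the driver):
-
-   static int example_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
-     int left = *pos;
-     if (!left--)
-       return sprintf (page, "line one\n");
-     if (!left--)
-       return sprintf (page, "line two\n");
-     return 0; // no more lines
-   }
-*/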
-
-/********** Operation Structure **********/
-
-static const struct atmdev_ops amb_ops = {
- .open = amb_open,
- .close = amb_close,
- .send = amb_send,
- .proc_read = amb_proc_read,
- .owner = THIS_MODULE,
-};
-
-/********** housekeeping **********/
-static void do_housekeeping (struct timer_list *t) {
- amb_dev * dev = from_timer(dev, t, housekeeping);
-
- // could collect device-specific (not driver/atm-linux) stats here
-
- // last resort refill once every ten seconds
- fill_rx_pools (dev);
- mod_timer(&dev->housekeeping, jiffies + 10*HZ);
-
- return;
-}
-
-/********** creation of communication queues **********/
-
-static int create_queues(amb_dev *dev, unsigned int cmds, unsigned int txs,
- unsigned int *rxs, unsigned int *rx_buffer_sizes)
-{
- unsigned char pool;
- size_t total = 0;
- void * memory;
- void * limit;
-
- PRINTD (DBG_FLOW, "create_queues %p", dev);
-
- total += cmds * sizeof(command);
-
- total += txs * (sizeof(tx_in) + sizeof(tx_out));
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out));
-
- memory = kmalloc (total, GFP_KERNEL);
- if (!memory) {
- PRINTK (KERN_ERR, "could not allocate queues");
- return -ENOMEM;
- }
- if (check_area (memory, total)) {
- PRINTK (KERN_ERR, "queues allocated in nasty area");
- kfree (memory);
- return -ENOMEM;
- }
-
- limit = memory + total;
- PRINTD (DBG_INIT, "queues from %p to %p", memory, limit);
-
- PRINTD (DBG_CMD, "command queue at %p", memory);
-
- {
- command * cmd = memory;
- amb_cq * cq = &dev->cq;
-
- cq->pending = 0;
- cq->high = 0;
- cq->maximum = cmds - 1;
-
- cq->ptrs.start = cmd;
- cq->ptrs.in = cmd;
- cq->ptrs.out = cmd;
- cq->ptrs.limit = cmd + cmds;
-
- memory = cq->ptrs.limit;
- }
-
- PRINTD (DBG_TX, "TX queue pair at %p", memory);
-
- {
- tx_in * in = memory;
- tx_out * out;
- amb_txq * txq = &dev->txq;
-
- txq->pending = 0;
- txq->high = 0;
- txq->filled = 0;
- txq->maximum = txs - 1;
-
- txq->in.start = in;
- txq->in.ptr = in;
- txq->in.limit = in + txs;
-
- memory = txq->in.limit;
- out = memory;
-
- txq->out.start = out;
- txq->out.ptr = out;
- txq->out.limit = out + txs;
-
- memory = txq->out.limit;
- }
-
- PRINTD (DBG_RX, "RX queue pairs at %p", memory);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- rx_in * in = memory;
- rx_out * out;
- amb_rxq * rxq = &dev->rxq[pool];
-
- rxq->buffer_size = rx_buffer_sizes[pool];
- rxq->buffers_wanted = 0;
-
- rxq->pending = 0;
- rxq->low = rxs[pool] - 1;
- rxq->emptied = 0;
- rxq->maximum = rxs[pool] - 1;
-
- rxq->in.start = in;
- rxq->in.ptr = in;
- rxq->in.limit = in + rxs[pool];
-
- memory = rxq->in.limit;
- out = memory;
-
- rxq->out.start = out;
- rxq->out.ptr = out;
- rxq->out.limit = out + rxs[pool];
-
- memory = rxq->out.limit;
- }
-
- if (memory == limit) {
- return 0;
- } else {
- PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit);
-    kfree (limit - total); /* limit - total == start of the original allocation */
- return -ENOMEM;
- }
-
-}
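-
-/* A sketch of how the single allocation above is carved up (sizes are
-   the module parameters; byte counts depend on the structure sizes):
-
-   memory -> [ cmds x command ]                      <- dev->cq
-             [ txs x tx_in ][ txs x tx_out ]         <- dev->txq
-             [ rxs[0] x rx_in ][ rxs[0] x rx_out ]   <- dev->rxq[0]
-             ...                                        (one pair per pool)
-   limit  -> end; must equal the final value of "memory", as checked. */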
-
-/********** destruction of communication queues **********/
-
-static void destroy_queues (amb_dev * dev) {
- // all queues assumed empty
- void * memory = dev->cq.ptrs.start;
- // includes txq.in, txq.out, rxq[].in and rxq[].out
-
- PRINTD (DBG_FLOW, "destroy_queues %p", dev);
-
- PRINTD (DBG_INIT, "freeing queues at %p", memory);
- kfree (memory);
-
- return;
-}
-
-/********** basic loader commands and error handling **********/
-// centisecond timeouts (do_loader_command multiplies by 10 for ms) - guessing away here
-static unsigned int command_timeouts [] = {
- [host_memory_test] = 15,
- [read_adapter_memory] = 2,
- [write_adapter_memory] = 2,
- [adapter_start] = 50,
- [get_version_number] = 10,
- [interrupt_host] = 1,
- [flash_erase_sector] = 1,
- [adap_download_block] = 1,
- [adap_erase_flash] = 1,
- [adap_run_in_iram] = 1,
- [adap_end_download] = 1
-};
-
-
-static unsigned int command_successes [] = {
- [host_memory_test] = COMMAND_PASSED_TEST,
- [read_adapter_memory] = COMMAND_READ_DATA_OK,
- [write_adapter_memory] = COMMAND_WRITE_DATA_OK,
- [adapter_start] = COMMAND_COMPLETE,
- [get_version_number] = COMMAND_COMPLETE,
- [interrupt_host] = COMMAND_COMPLETE,
- [flash_erase_sector] = COMMAND_COMPLETE,
- [adap_download_block] = COMMAND_COMPLETE,
- [adap_erase_flash] = COMMAND_COMPLETE,
- [adap_run_in_iram] = COMMAND_COMPLETE,
- [adap_end_download] = COMMAND_COMPLETE
-};
-
-static int decode_loader_result (loader_command cmd, u32 result)
-{
- int res;
- const char *msg;
-
- if (result == command_successes[cmd])
- return 0;
-
- switch (result) {
- case BAD_COMMAND:
- res = -EINVAL;
- msg = "bad command";
- break;
- case COMMAND_IN_PROGRESS:
- res = -ETIMEDOUT;
- msg = "command in progress";
- break;
- case COMMAND_PASSED_TEST:
- res = 0;
- msg = "command passed test";
- break;
- case COMMAND_FAILED_TEST:
- res = -EIO;
- msg = "command failed test";
- break;
- case COMMAND_READ_DATA_OK:
- res = 0;
- msg = "command read data ok";
- break;
- case COMMAND_READ_BAD_ADDRESS:
- res = -EINVAL;
- msg = "command read bad address";
- break;
- case COMMAND_WRITE_DATA_OK:
- res = 0;
- msg = "command write data ok";
- break;
- case COMMAND_WRITE_BAD_ADDRESS:
- res = -EINVAL;
- msg = "command write bad address";
- break;
- case COMMAND_WRITE_FLASH_FAILURE:
- res = -EIO;
- msg = "command write flash failure";
- break;
- case COMMAND_COMPLETE:
- res = 0;
- msg = "command complete";
- break;
- case COMMAND_FLASH_ERASE_FAILURE:
- res = -EIO;
- msg = "command flash erase failure";
- break;
- case COMMAND_WRITE_BAD_DATA:
- res = -EINVAL;
- msg = "command write bad data";
- break;
- default:
- res = -EINVAL;
- msg = "unknown error";
- PRINTD (DBG_LOAD|DBG_ERR,
- "decode_loader_result got %d=%x !",
- result, result);
- break;
- }
-
- PRINTK (KERN_ERR, "%s", msg);
- return res;
-}
-
-static int do_loader_command(volatile loader_block *lb, const amb_dev *dev,
- loader_command cmd)
-{
-
- unsigned long timeout;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command");
-
- /* do a command
-
- Set the return value to zero, set the command type and set the
- valid entry to the right magic value. The payload is already
- correctly byte-ordered so we leave it alone. Hit the doorbell
- with the bus address of this structure.
-
- */
-
- lb->result = 0;
- lb->command = cpu_to_be32 (cmd);
- lb->valid = cpu_to_be32 (DMA_VALID);
- // dump_registers (dev);
- // dump_loader_block (lb);
- wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask);
-
- timeout = command_timeouts[cmd] * 10;
-
- while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd);
- dump_registers (dev);
- dump_loader_block (lb);
- return -ETIMEDOUT;
- }
-
- if (cmd == adapter_start) {
- // wait for start command to acknowledge...
- timeout = 100;
- while (rd_plain (dev, offsetof(amb_mem, doorbell)))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x",
- be32_to_cpu (lb->result));
- dump_registers (dev);
- return -ETIMEDOUT;
- }
- return 0;
- } else {
- return decode_loader_result (cmd, be32_to_cpu (lb->result));
- }
-
-}
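-
-/* For illustration, the calling convention the helpers below follow (a
-   minimal sketch, not extra driver code; the byte-ordering mirrors
-   loader_write): fill in the payload, then let do_loader_command set
-   result/command/valid and ring the doorbell.
-
-   static int example_read_block (loader_block * lb, const amb_dev * dev, u32 addr) {
-     transfer_block * tb = &lb->payload.transfer;
-     tb->address = cpu_to_be32 (addr);
-     tb->count = cpu_to_be32 (1);  // one 32-bit word
-     return do_loader_command (lb, dev, read_adapter_memory);
-   }
-*/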
-
-/* loader: determine loader version */
-
-static int get_loader_version(loader_block *lb, const amb_dev *dev,
- u32 *version)
-{
- int res;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");
-
- res = do_loader_command (lb, dev, get_version_number);
- if (res)
- return res;
- if (version)
- *version = be32_to_cpu (lb->payload.version);
- return 0;
-}
-
-/* loader: write memory data blocks */
-
-static int loader_write(loader_block *lb, const amb_dev *dev,
- const struct ihex_binrec *rec)
-{
- transfer_block * tb = &lb->payload.transfer;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");
-
- tb->address = rec->addr;
- tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
- memcpy(tb->data, rec->data, be16_to_cpu(rec->len));
- return do_loader_command (lb, dev, write_adapter_memory);
-}
-
-/* loader: verify memory data blocks */
-
-static int loader_verify(loader_block *lb, const amb_dev *dev,
- const struct ihex_binrec *rec)
-{
- transfer_block * tb = &lb->payload.transfer;
- int res;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify");
-
- tb->address = rec->addr;
- tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
- res = do_loader_command (lb, dev, read_adapter_memory);
- if (!res && memcmp(tb->data, rec->data, be16_to_cpu(rec->len)))
- res = -EINVAL;
- return res;
-}
-
-/* loader: start microcode */
-
-static int loader_start(loader_block *lb, const amb_dev *dev, u32 address)
-{
- PRINTD (DBG_FLOW|DBG_LOAD, "loader_start");
-
- lb->payload.start = cpu_to_be32 (address);
- return do_loader_command (lb, dev, adapter_start);
-}
-
-/********** reset card **********/
-
-static inline void sf (const char * msg)
-{
- PRINTK (KERN_ERR, "self-test failed: %s", msg);
-}
-
-static int amb_reset (amb_dev * dev, int diags) {
- u32 word;
-
- PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset");
-
- word = rd_plain (dev, offsetof(amb_mem, reset_control));
- // put card into reset state
- wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS);
- // wait a short while
- udelay (10);
-#if 1
- // put card into known good state
- wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS);
- // clear all interrupts just in case
- wr_plain (dev, offsetof(amb_mem, interrupt), -1);
-#endif
- // clear self-test done flag
- wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0);
- // take card out of reset state
- wr_plain (dev, offsetof(amb_mem, reset_control), word &~ AMB_RESET_BITS);
-
- if (diags) {
- unsigned long timeout;
- // 4.2 second wait
- msleep(4200);
- // half second time-out
- timeout = 500;
- while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready)))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_LOAD|DBG_ERR, "reset timed out");
- return -ETIMEDOUT;
- }
-
- // get results of self-test
- // XXX double check byte-order
- word = rd_mem (dev, offsetof(amb_mem, mb.loader.result));
- if (word & SELF_TEST_FAILURE) {
- if (word & GPINT_TST_FAILURE)
- sf ("interrupt");
- if (word & SUNI_DATA_PATTERN_FAILURE)
- sf ("SUNI data pattern");
- if (word & SUNI_DATA_BITS_FAILURE)
- sf ("SUNI data bits");
- if (word & SUNI_UTOPIA_FAILURE)
- sf ("SUNI UTOPIA interface");
- if (word & SUNI_FIFO_FAILURE)
- sf ("SUNI cell buffer FIFO");
- if (word & SRAM_FAILURE)
- sf ("bad SRAM");
- // better return value?
- return -EIO;
- }
-
- }
- return 0;
-}
-
-/********** transfer and start the microcode **********/
-
-static int ucode_init(loader_block *lb, amb_dev *dev)
-{
- const struct firmware *fw;
- unsigned long start_address;
- const struct ihex_binrec *rec;
- const char *errmsg = NULL;
- int res;
-
- res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
- if (res) {
- PRINTK (KERN_ERR, "Cannot load microcode data");
- return res;
- }
-
- /* First record contains just the start address */
- rec = (const struct ihex_binrec *)fw->data;
- if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
- errmsg = "no start record";
- goto fail;
- }
- start_address = be32_to_cpup((__be32 *)rec->data);
-
- rec = ihex_next_binrec(rec);
-
- PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init");
-
- while (rec) {
- PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
- be16_to_cpu(rec->len));
- if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
- errmsg = "record too long";
- goto fail;
- }
- if (be16_to_cpu(rec->len) & 3) {
- errmsg = "odd number of bytes";
- goto fail;
- }
- res = loader_write(lb, dev, rec);
- if (res)
- break;
-
- res = loader_verify(lb, dev, rec);
- if (res)
- break;
- rec = ihex_next_binrec(rec);
- }
- release_firmware(fw);
- if (!res)
- res = loader_start(lb, dev, start_address);
-
- return res;
-fail:
- release_firmware(fw);
- PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
- return -EINVAL;
-}
-
-/********** give adapter parameters **********/
-
-static inline __be32 bus_addr(void * addr) {
- return cpu_to_be32 (virt_to_bus (addr));
-}
-
-static int amb_talk(amb_dev *dev)
-{
- adap_talk_block a;
- unsigned char pool;
- unsigned long timeout;
-
- PRINTD (DBG_FLOW, "amb_talk %p", dev);
-
- a.command_start = bus_addr (dev->cq.ptrs.start);
- a.command_end = bus_addr (dev->cq.ptrs.limit);
- a.tx_start = bus_addr (dev->txq.in.start);
- a.tx_end = bus_addr (dev->txq.in.limit);
- a.txcom_start = bus_addr (dev->txq.out.start);
- a.txcom_end = bus_addr (dev->txq.out.limit);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
- // the other "a" items are set up by the adapter
- a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start);
- a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit);
- a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start);
- a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit);
- a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size);
- }
-
-#ifdef AMB_NEW_MICROCODE
- // disable fast PLX prefetching
- a.init_flags = 0;
-#endif
-
- // pass the structure
- wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a));
-
- // 2.2 second wait (must not touch doorbell during 2 second DMA test)
- msleep(2200);
- // give the adapter another half second?
- timeout = 500;
- while (rd_plain (dev, offsetof(amb_mem, doorbell)))
- if (timeout) {
- timeout = msleep_interruptible(timeout);
- } else {
- PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-// get microcode version
-static void amb_ucode_version(amb_dev *dev)
-{
- u32 major;
- u32 minor;
- command cmd;
- cmd.request = cpu_to_be32 (SRB_GET_VERSION);
- while (command_do (dev, &cmd)) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
- major = be32_to_cpu (cmd.args.version.major);
- minor = be32_to_cpu (cmd.args.version.minor);
- PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor);
-}
-
-// get end station address
-static void amb_esi(amb_dev *dev, u8 *esi)
-{
- u32 lower4;
- u16 upper2;
- command cmd;
-
- cmd.request = cpu_to_be32 (SRB_GET_BIA);
- while (command_do (dev, &cmd)) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
- lower4 = be32_to_cpu (cmd.args.bia.lower4);
- upper2 = be32_to_cpu (cmd.args.bia.upper2);
- PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2 %04x", lower4, upper2);
-
- if (esi) {
- unsigned int i;
-
- PRINTDB (DBG_INIT, "ESI:");
- for (i = 0; i < ESI_LEN; ++i) {
- if (i < 4)
- esi[i] = bitrev8(lower4>>(8*i));
- else
- esi[i] = bitrev8(upper2>>(8*(i-4)));
- PRINTDM (DBG_INIT, " %02x", esi[i]);
- }
-
- PRINTDE (DBG_INIT, "");
- }
-
- return;
-}
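-
-/* Worked example of the bit-reversal above (invented values): if
-   lower4 == 0x12345678 then esi[0] = bitrev8(0x78) = 0x1e and
-   esi[1] = bitrev8(0x56) = 0x6a; evidently the BIA stores each ESI
-   octet bit-reversed, and bitrev8 restores the usual order. */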
-
-static void fixup_plx_window (amb_dev *dev, loader_block *lb)
-{
- // fix up the PLX-mapped window base address to match the block
- unsigned long blb;
- u32 mapreg;
- blb = virt_to_bus(lb);
- // the kernel stack had better not ever cross a 1Gb boundary!
- mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10]));
- mapreg &= ~onegigmask;
- mapreg |= blb & onegigmask;
- wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg);
- return;
-}
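-
-/* A worked sketch of the masking above, assuming onegigmask is the
-   top-bits mask ~(2^30 - 1) == 0xC0000000 (defined earlier in this
-   file): if virt_to_bus(lb) == 0x4a001234, then blb & onegigmask ==
-   0x40000000 selects which 1GB segment the PLX window maps, and
-   do_loader_command later passes the in-segment offset,
-   virt_to_bus(lb) & ~onegigmask == 0x0a001234, via the doorbell.
-   Hence the warning about not crossing a 1GB boundary. */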
-
-static int amb_init(amb_dev *dev)
-{
- loader_block lb;
-
- u32 version;
-
- if (amb_reset (dev, 1)) {
- PRINTK (KERN_ERR, "card reset failed!");
- } else {
- fixup_plx_window (dev, &lb);
-
- if (get_loader_version (&lb, dev, &version)) {
- PRINTK (KERN_INFO, "failed to get loader version");
- } else {
- PRINTK (KERN_INFO, "loader version is %08x", version);
-
- if (ucode_init (&lb, dev)) {
- PRINTK (KERN_ERR, "microcode failure");
- } else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) {
- PRINTK (KERN_ERR, "failed to get memory for queues");
- } else {
-
- if (amb_talk (dev)) {
- PRINTK (KERN_ERR, "adapter did not accept queues");
- } else {
-
- amb_ucode_version (dev);
- return 0;
-
- } /* amb_talk */
-
- destroy_queues (dev);
- } /* create_queues, ucode_init */
-
- amb_reset (dev, 0);
- } /* get_loader_version */
-
- } /* amb_reset */
-
- return -EINVAL;
-}
-
-static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
-{
- unsigned char pool;
-
- // set up known dev items straight away
- dev->pci_dev = pci_dev;
- pci_set_drvdata(pci_dev, dev);
-
- dev->iobase = pci_resource_start (pci_dev, 1);
- dev->irq = pci_dev->irq;
- dev->membase = bus_to_virt(pci_resource_start(pci_dev, 0));
-
- // flags (currently only dead)
- dev->flags = 0;
-
- // Allocate cell rates (fibre)
-  // ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
- // to be really pedantic, this should be ATM_OC3c_PCR
- dev->tx_avail = ATM_OC3_PCR;
- dev->rx_avail = ATM_OC3_PCR;
-
- // semaphore for txer/rxer modifications - we cannot use a
- // spinlock as the critical region needs to switch processes
- mutex_init(&dev->vcc_sf);
- // queue manipulation spinlocks; we want atomic reads and
- // writes to the queue descriptors (handles IRQ and SMP)
- // consider replacing "int pending" -> "atomic_t available"
- // => problem related to who gets to move queue pointers
- spin_lock_init (&dev->cq.lock);
- spin_lock_init (&dev->txq.lock);
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- spin_lock_init (&dev->rxq[pool].lock);
-}
-
-static void setup_pci_dev(struct pci_dev *pci_dev)
-{
- unsigned char lat;
-
- // enable bus master accesses
- pci_set_master(pci_dev);
-
- // frobnicate latency (upwards, usually)
- pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat);
-
- if (!pci_lat)
- pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat;
-
- if (lat != pci_lat) {
- PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu",
- lat, pci_lat);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
- }
-}
-
-static int amb_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_ent)
-{
- amb_dev * dev;
- int err;
- unsigned int irq;
-
- err = pci_enable_device(pci_dev);
- if (err < 0) {
-		PRINTK (KERN_ERR, "failed to enable PCI device");
- goto out;
- }
-
- // read resources from PCI configuration space
- irq = pci_dev->irq;
-
- if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) {
- PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card");
- err = -EINVAL;
- goto out_disable;
- }
-
- PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at"
- " IO %llx, IRQ %u, MEM %p",
- (unsigned long long)pci_resource_start(pci_dev, 1),
- irq, bus_to_virt(pci_resource_start(pci_dev, 0)));
-
- // check IO region
- err = pci_request_region(pci_dev, 1, DEV_LABEL);
- if (err < 0) {
- PRINTK (KERN_ERR, "IO range already in use!");
- goto out_disable;
- }
-
- dev = kzalloc(sizeof(amb_dev), GFP_KERNEL);
- if (!dev) {
- PRINTK (KERN_ERR, "out of memory!");
- err = -ENOMEM;
- goto out_release;
- }
-
- setup_dev(dev, pci_dev);
-
- err = amb_init(dev);
- if (err < 0) {
- PRINTK (KERN_ERR, "adapter initialisation failure");
- goto out_free;
- }
-
- setup_pci_dev(pci_dev);
-
- // grab (but share) IRQ and install handler
- err = request_irq(irq, interrupt_handler, IRQF_SHARED, DEV_LABEL, dev);
- if (err < 0) {
- PRINTK (KERN_ERR, "request IRQ failed!");
- goto out_reset;
- }
-
- dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1,
- NULL);
- if (!dev->atm_dev) {
- PRINTD (DBG_ERR, "failed to register Madge ATM adapter");
- err = -EINVAL;
- goto out_free_irq;
- }
-
- PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
- dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
-
- // register our address
- amb_esi (dev, dev->atm_dev->esi);
-
- // 0 bits for vpi, 10 bits for vci
- dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
- dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
-
- timer_setup(&dev->housekeeping, do_housekeeping, 0);
- mod_timer(&dev->housekeeping, jiffies);
-
- // enable host interrupts
- interrupts_on (dev);
-
-out:
- return err;
-
-out_free_irq:
- free_irq(irq, dev);
-out_reset:
- amb_reset(dev, 0);
-out_free:
- kfree(dev);
-out_release:
- pci_release_region(pci_dev, 1);
-out_disable:
- pci_disable_device(pci_dev);
- goto out;
-}
-
-
-static void amb_remove_one(struct pci_dev *pci_dev)
-{
- struct amb_dev *dev;
-
- dev = pci_get_drvdata(pci_dev);
-
- PRINTD(DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
- del_timer_sync(&dev->housekeeping);
- // the drain should not be necessary
- drain_rx_pools(dev);
- interrupts_off(dev);
- amb_reset(dev, 0);
- free_irq(dev->irq, dev);
- pci_disable_device(pci_dev);
- destroy_queues(dev);
- atm_dev_deregister(dev->atm_dev);
- kfree(dev);
- pci_release_region(pci_dev, 1);
-}
-
-static void __init amb_check_args (void) {
- unsigned char pool;
- unsigned int max_rx_size;
-
-#ifdef DEBUG_AMBASSADOR
- PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
-#else
- if (debug)
- PRINTK (KERN_NOTICE, "no debugging support");
-#endif
-
- if (cmds < MIN_QUEUE_SIZE)
- PRINTK (KERN_NOTICE, "cmds has been raised to %u",
- cmds = MIN_QUEUE_SIZE);
-
- if (txs < MIN_QUEUE_SIZE)
- PRINTK (KERN_NOTICE, "txs has been raised to %u",
- txs = MIN_QUEUE_SIZE);
-
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- if (rxs[pool] < MIN_QUEUE_SIZE)
- PRINTK (KERN_NOTICE, "rxs[%hu] has been raised to %u",
- pool, rxs[pool] = MIN_QUEUE_SIZE);
-
- // buffers sizes should be greater than zero and strictly increasing
- max_rx_size = 0;
- for (pool = 0; pool < NUM_RX_POOLS; ++pool)
- if (rxs_bs[pool] <= max_rx_size)
- PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)",
- pool, rxs_bs[pool]);
- else
- max_rx_size = rxs_bs[pool];
-
- if (rx_lats < MIN_RX_BUFFERS)
- PRINTK (KERN_NOTICE, "rx_lats has been raised to %u",
- rx_lats = MIN_RX_BUFFERS);
-
- return;
-}
-
-/********** module stuff **********/
-
-MODULE_AUTHOR(maintainer_string);
-MODULE_DESCRIPTION(description_string);
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("atmsar11.fw");
-module_param(debug, ushort, 0644);
-module_param(cmds, uint, 0);
-module_param(txs, uint, 0);
-module_param_array(rxs, uint, NULL, 0);
-module_param_array(rxs_bs, uint, NULL, 0);
-module_param(rx_lats, uint, 0);
-module_param(pci_lat, byte, 0);
-MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
-MODULE_PARM_DESC(cmds, "number of command queue entries");
-MODULE_PARM_DESC(txs, "number of TX queue entries");
-MODULE_PARM_DESC(rxs, "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]");
-MODULE_PARM_DESC(rxs_bs, "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]");
-MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies");
-MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-
-/********** module entry **********/
-
-static const struct pci_device_id amb_pci_tbl[] = {
- { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 },
- { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, amb_pci_tbl);
-
-static struct pci_driver amb_driver = {
- .name = "amb",
- .probe = amb_probe,
- .remove = amb_remove_one,
- .id_table = amb_pci_tbl,
-};
-
-static int __init amb_module_init (void)
-{
- PRINTD (DBG_FLOW|DBG_INIT, "init_module");
-
- BUILD_BUG_ON(sizeof(amb_mem) != 4*16 + 4*12);
-
- show_version();
-
- amb_check_args();
-
- // get the juice
- return pci_register_driver(&amb_driver);
-}
-
-/********** module exit **********/
-
-static void __exit amb_module_exit (void)
-{
- PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module");
-
- pci_unregister_driver(&amb_driver);
-}
-
-module_init(amb_module_init);
-module_exit(amb_module_exit);
diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h
deleted file mode 100644
index 086ceb8568dc..000000000000
--- a/drivers/atm/ambassador.h
+++ /dev/null
@@ -1,648 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- Madge Ambassador ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-#ifndef AMBASSADOR_H
-#define AMBASSADOR_H
-
-
-#ifdef CONFIG_ATM_AMBASSADOR_DEBUG
-#define DEBUG_AMBASSADOR
-#endif
-
-#define DEV_LABEL "amb"
-
-#ifndef PCI_VENDOR_ID_MADGE
-#define PCI_VENDOR_ID_MADGE 0x10B6
-#endif
-#ifndef PCI_DEVICE_ID_MADGE_AMBASSADOR
-#define PCI_DEVICE_ID_MADGE_AMBASSADOR 0x1001
-#endif
-#ifndef PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD
-#define PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD 0x1002
-#endif
-
-// diagnostic output
-
-#define PRINTK(severity,format,args...) \
- printk(severity DEV_LABEL ": " format "\n" , ## args)
-
-#ifdef DEBUG_AMBASSADOR
-
-#define DBG_ERR 0x0001
-#define DBG_WARN 0x0002
-#define DBG_INFO 0x0004
-#define DBG_INIT 0x0008
-#define DBG_LOAD 0x0010
-#define DBG_VCC 0x0020
-#define DBG_QOS 0x0040
-#define DBG_CMD 0x0080
-#define DBG_TX 0x0100
-#define DBG_RX 0x0200
-#define DBG_SKB 0x0400
-#define DBG_POOL 0x0800
-#define DBG_IRQ 0x1000
-#define DBG_FLOW 0x2000
-#define DBG_REGS 0x4000
-#define DBG_DATA 0x8000
-#define DBG_MASK 0xffff
-
-/* the ## prevents the annoying double expansion of the macro arguments */
-/* KERN_INFO is used since KERN_DEBUG often does not make it to the console */
-#define PRINTDB(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format , ## args) : 1 )
-#define PRINTDM(bits,format,args...) \
- ( (debug & (bits)) ? printk (format , ## args) : 1 )
-#define PRINTDE(bits,format,args...) \
- ( (debug & (bits)) ? printk (format "\n" , ## args) : 1 )
-#define PRINTD(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format "\n" , ## args) : 1 )
-
-#else
-
-#define PRINTD(bits,format,args...)
-#define PRINTDB(bits,format,args...)
-#define PRINTDM(bits,format,args...)
-#define PRINTDE(bits,format,args...)
-
-#endif
-
-#define PRINTDD(bits,format,args...)
-#define PRINTDDB(sec,fmt,args...)
-#define PRINTDDM(sec,fmt,args...)
-#define PRINTDDE(sec,fmt,args...)
-
-// tunable values (?)
-
-/* MUST be powers of two -- why ? */
-#define COM_Q_ENTRIES 8
-#define TX_Q_ENTRIES 32
-#define RX_Q_ENTRIES 64
-
-// fixed values
-
-// guessing
-#define AMB_EXTENT 0x80
-
-// Minimum allowed size for an Ambassador queue
-#define MIN_QUEUE_SIZE 2
-
-// Ambassador microcode allows 1 to 4 pools, we use 4 (simpler)
-#define NUM_RX_POOLS 4
-
-// minimum RX buffers required to cope with replenishing delay
-#define MIN_RX_BUFFERS 1
-
-// minimum PCI latency we will tolerate (32 IS TOO SMALL)
-#define MIN_PCI_LATENCY 64 // 255
-
-// VCs supported by card (VPI always 0)
-#define NUM_VPI_BITS 0
-#define NUM_VCI_BITS 10
-#define NUM_VCS 1024
-
-/* The status field bits defined so far. */
-#define RX_ERR 0x8000 // always present if there is an error (hmm)
-#define CRC_ERR 0x4000 // AAL5 CRC error
-#define LEN_ERR 0x2000 // overlength frame
-#define ABORT_ERR 0x1000 // zero length field in received frame
-#define UNUSED_ERR 0x0800 // buffer returned unused
-
-// Adaptor commands
-
-#define SRB_OPEN_VC 0
-/* par_0: dwordswap(VC_number) */
-/* par_1: dwordswap(flags<<16) or wordswap(flags)*/
-/* flags: */
-
-/* LANE: 0x0004 */
-/* NOT_UBR: 0x0008 */
-/* ABR: 0x0010 */
-
-/* RxPool0: 0x0000 */
-/* RxPool1: 0x0020 */
-/* RxPool2: 0x0040 */
-/* RxPool3: 0x0060 */
-
-/* par_2: dwordswap(fp_rate<<16) or wordswap(fp_rate) */
-
-#define SRB_CLOSE_VC 1
-/* par_0: dwordswap(VC_number) */
-
-#define SRB_GET_BIA 2
-/* returns */
-/* par_0: dwordswap(half BIA) */
-/* par_1: dwordswap(half BIA) */
-
-#define SRB_GET_SUNI_STATS 3
-/* par_0: dwordswap(physical_host_address) */
-
-#define SRB_SET_BITS_8 4
-#define SRB_SET_BITS_16 5
-#define SRB_SET_BITS_32 6
-#define SRB_CLEAR_BITS_8 7
-#define SRB_CLEAR_BITS_16 8
-#define SRB_CLEAR_BITS_32 9
-/* par_0: dwordswap(ATMizer address) */
-/* par_1: dwordswap(mask) */
-
-#define SRB_SET_8 10
-#define SRB_SET_16 11
-#define SRB_SET_32 12
-/* par_0: dwordswap(ATMizer address) */
-/* par_1: dwordswap(data) */
-
-#define SRB_GET_32 13
-/* par_0: dwordswap(ATMizer address) */
-/* returns */
-/* par_1: dwordswap(ATMizer data) */
-
-#define SRB_GET_VERSION 14
-/* returns */
-/* par_0: dwordswap(Major Version) */
-/* par_1: dwordswap(Minor Version) */
-
-#define SRB_FLUSH_BUFFER_Q 15
-/* Only flags to define which buffer pool; all others must be zero */
-/* par_0: dwordswap(flags<<16) or wordswap(flags)*/
-
-#define SRB_GET_DMA_SPEEDS 16
-/* returns */
-/* par_0: dwordswap(Read speed (bytes/sec)) */
-/* par_1: dwordswap(Write speed (bytes/sec)) */
-
-#define SRB_MODIFY_VC_RATE 17
-/* par_0: dwordswap(VC_number) */
-/* par_1: dwordswap(fp_rate<<16) or wordswap(fp_rate) */
-
-#define SRB_MODIFY_VC_FLAGS 18
-/* par_0: dwordswap(VC_number) */
-/* par_1: dwordswap(flags<<16) or wordswap(flags)*/
-
-/* flags: */
-
-/* LANE: 0x0004 */
-/* NOT_UBR: 0x0008 */
-/* ABR: 0x0010 */
-
-/* RxPool0: 0x0000 */
-/* RxPool1: 0x0020 */
-/* RxPool2: 0x0040 */
-/* RxPool3: 0x0060 */
-
-#define SRB_RATE_SHIFT 16
-#define SRB_POOL_SHIFT (SRB_FLAGS_SHIFT+5)
-#define SRB_FLAGS_SHIFT 16
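-
-/* Example encoding (a sketch): opening a VC on RX pool 2 with flag
-   bits f gives
-
-     flags = (2 << SRB_POOL_SHIFT) | (f << SRB_FLAGS_SHIFT)
-
-   so the pool lands at bits 21-22 and the flag bits at bits 16+,
-   matching the RxPool0..RxPool3 values above (0x0000..0x0060,
-   i.e. pool << 5) once shifted left by 16. */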
-
-#define SRB_STOP_TASKING 19
-#define SRB_START_TASKING 20
-#define SRB_SHUT_DOWN 21
-#define MAX_SRB 21
-
-#define SRB_COMPLETE 0xffffffff
-
-#define TX_FRAME 0x80000000
-
-// number of types of SRB MUST be a power of two -- why?
-#define NUM_OF_SRB 32
-
-// number of bits of period info for rate
-#define MAX_RATE_BITS 6
-
-#define TX_UBR 0x0000
-#define TX_UBR_CAPPED 0x0008
-#define TX_ABR 0x0018
-#define TX_FRAME_NOTCAP 0x0000
-#define TX_FRAME_CAPPED 0x8000
-
-#define FP_155_RATE 0x24b1
-#define FP_25_RATE 0x1f9d
-
-/* #define VERSION_NUMBER 0x01000000 // initial release */
-/* #define VERSION_NUMBER 0x01010000 // fixed startup probs PLX MB0 not cleared */
-/* #define VERSION_NUMBER 0x01020000 // changed SUNI reset timings; allowed r/w onchip */
-
-/* #define VERSION_NUMBER 0x01030000 // clear local doorbell int reg on reset */
-/* #define VERSION_NUMBER 0x01040000 // PLX bug work around version PLUS */
-/* remove race conditions on basic interface */
-/* indicate to the host that diagnostics */
-/* have finished; if failed, how and what */
-/* failed */
-/* fix host memory test to fix PLX bug */
-/* allow flash upgrade and BIA upgrade directly */
-/* */
-#define VERSION_NUMBER 0x01050025 /* Jason's first hacked version. */
-/* Change in download algorithm */
-
-#define DMA_VALID 0xb728e149 /* completely random */
-
-#define FLASH_BASE 0xa0c00000
-#define FLASH_SIZE 0x00020000 /* 128K */
-#define BIA_BASE (FLASH_BASE+0x0001c000) /* Flash Sector 7 */
-#define BIA_ADDRESS ((void *)0xa0c1c000)
-#define PLX_BASE 0xe0000000
-
-typedef enum {
- host_memory_test = 1,
- read_adapter_memory,
- write_adapter_memory,
- adapter_start,
- get_version_number,
- interrupt_host,
- flash_erase_sector,
- adap_download_block = 0x20,
- adap_erase_flash,
- adap_run_in_iram,
- adap_end_download
-} loader_command;
-
-#define BAD_COMMAND (-1)
-#define COMMAND_IN_PROGRESS 1
-#define COMMAND_PASSED_TEST 2
-#define COMMAND_FAILED_TEST 3
-#define COMMAND_READ_DATA_OK 4
-#define COMMAND_READ_BAD_ADDRESS 5
-#define COMMAND_WRITE_DATA_OK 6
-#define COMMAND_WRITE_BAD_ADDRESS 7
-#define COMMAND_WRITE_FLASH_FAILURE 8
-#define COMMAND_COMPLETE 9
-#define COMMAND_FLASH_ERASE_FAILURE 10
-#define COMMAND_WRITE_BAD_DATA 11
-
-/* bit fields for mailbox[0] return values */
-
-#define GPINT_TST_FAILURE 0x00000001
-#define SUNI_DATA_PATTERN_FAILURE 0x00000002
-#define SUNI_DATA_BITS_FAILURE 0x00000004
-#define SUNI_UTOPIA_FAILURE 0x00000008
-#define SUNI_FIFO_FAILURE 0x00000010
-#define SRAM_FAILURE 0x00000020
-#define SELF_TEST_FAILURE 0x0000003f
-
-/* mailbox[1] = 0 in progress, -1 on completion */
-/* mailbox[2] = current test 00 00 test(8 bit) phase(8 bit) */
-/* mailbox[3] = last failure, 00 00 test(8 bit) phase(8 bit) */
-/* mailbox[4],mailbox[5],mailbox[6] random failure values */
-
-/* PLX/etc. memory map including command structure */
-
-/* These registers may also be memory mapped in PCI memory */
-
-#define UNUSED_LOADER_MAILBOXES 6
-
-typedef struct {
- u32 stuff[16];
- union {
- struct {
- u32 result;
- u32 ready;
- u32 stuff[UNUSED_LOADER_MAILBOXES];
- } loader;
- struct {
- u32 cmd_address;
- u32 tx_address;
- u32 rx_address[NUM_RX_POOLS];
- u32 gen_counter;
- u32 spare;
- } adapter;
- } mb;
- u32 doorbell;
- u32 interrupt;
- u32 interrupt_control;
- u32 reset_control;
-} amb_mem;
-
-/* RESET bit, IRQ (card to host) and doorbell (host to card) enable bits */
-#define AMB_RESET_BITS 0x40000000
-#define AMB_INTERRUPT_BITS 0x00000300
-#define AMB_DOORBELL_BITS 0x00030000
-
-/* loader commands */
-
-#define MAX_COMMAND_DATA 13
-#define MAX_TRANSFER_DATA 11
-
-typedef struct {
- __be32 address;
- __be32 count;
- __be32 data[MAX_TRANSFER_DATA];
-} transfer_block;
-
-typedef struct {
- __be32 result;
- __be32 command;
- union {
- transfer_block transfer;
- __be32 version;
- __be32 start;
- __be32 data[MAX_COMMAND_DATA];
- } payload;
- __be32 valid;
-} loader_block;
-
-/* command queue */
-
-/* Again all data are BIG ENDIAN */
-
-typedef struct {
- union {
- struct {
- __be32 vc;
- __be32 flags;
- __be32 rate;
- } open;
- struct {
- __be32 vc;
- __be32 rate;
- } modify_rate;
- struct {
- __be32 vc;
- __be32 flags;
- } modify_flags;
- struct {
- __be32 vc;
- } close;
- struct {
- __be32 lower4;
- __be32 upper2;
- } bia;
- struct {
- __be32 address;
- } suni;
- struct {
- __be32 major;
- __be32 minor;
- } version;
- struct {
- __be32 read;
- __be32 write;
- } speed;
- struct {
- __be32 flags;
- } flush;
- struct {
- __be32 address;
- __be32 data;
- } memory;
- __be32 par[3];
- } args;
- __be32 request;
-} command;
-
-/* transmit queues and associated structures */
-
-/* The host's transmit structure. All BIG ENDIAN; host address
- restricted to first 1GByte, but address passed to the card must
- have the top MS bit or'ed in. -- check this */
-
-/* TX is described by 1+ tx_frags followed by a tx_frag_end */
-
-typedef struct {
- __be32 bytes;
- __be32 address;
-} tx_frag;
-
-/* apart from handle the fields here are for the adapter to play with
- and should be set to zero */
-
-typedef struct {
- u32 handle;
- u16 vc;
- u16 next_descriptor_length;
- u32 next_descriptor;
-#ifdef AMB_NEW_MICROCODE
- u8 cpcs_uu;
- u8 cpi;
- u16 pad;
-#endif
-} tx_frag_end;
-
-typedef struct {
- tx_frag tx_frag;
- tx_frag_end tx_frag_end;
- struct sk_buff * skb;
-} tx_simple;
-
-#if 0
-typedef union {
- tx_frag fragment;
- tx_frag_end end_of_list;
-} tx_descr;
-#endif
-
-/* this "points" to the sequence of fragments and trailer */
-
-typedef struct {
- __be16 vc;
- __be16 tx_descr_length;
- __be32 tx_descr_addr;
-} tx_in;
-
-/* handle is the handle from tx_in */
-
-typedef struct {
- u32 handle;
-} tx_out;
-
-/* receive frame structure */
-
-/* All BIG ENDIAN; handle is as passed from host; length is zero for
-   aborted frames and frames with errors. Header is actually VC
- number, lec-id is NOT yet supported. */
-
-typedef struct {
- u32 handle;
- __be16 vc;
- __be16 lec_id; // unused
- __be16 status;
- __be16 length;
-} rx_out;
-
-/* buffer supply structure */
-
-typedef struct {
- u32 handle;
- __be32 host_address;
-} rx_in;
-
-/* This first structure is the area in host memory where the adapter
- writes its pointer values. These pointer values are BIG ENDIAN and
- reside in the same 4MB 'page' as this structure. The host gives the
- adapter the address of this block by sending a doorbell interrupt
- to the adapter after downloading the code and setting it going. The
- addresses have the top 10 bits set to 1010000010b -- really?
-
- The host must initialise these before handing the block to the
- adapter. */
-
-typedef struct {
- __be32 command_start; /* SRB commands completions */
- __be32 command_end; /* SRB commands completions */
- __be32 tx_start;
- __be32 tx_end;
- __be32 txcom_start; /* tx completions */
- __be32 txcom_end; /* tx completions */
- struct {
- __be32 buffer_start;
- __be32 buffer_end;
- u32 buffer_q_get;
- u32 buffer_q_end;
- u32 buffer_aptr;
- __be32 rx_start; /* rx completions */
- __be32 rx_end;
- u32 rx_ptr;
- __be32 buffer_size; /* size of host buffer */
- } rec_struct[NUM_RX_POOLS];
-#ifdef AMB_NEW_MICROCODE
- u16 init_flags;
- u16 talk_block_spare;
-#endif
-} adap_talk_block;
-
-/* This structure must be kept in line with the vcr image in sarmain.h
-
- This is the structure in the host filled in by the adapter by
- GET_SUNI_STATS */
-
-typedef struct {
- u8 racp_chcs;
- u8 racp_uhcs;
- u16 spare;
- u32 racp_rcell;
- u32 tacp_tcell;
- u32 flags;
- u32 dropped_cells;
- u32 dropped_frames;
-} suni_stats;
-
-typedef enum {
- dead
-} amb_flags;
-
-#define NEXTQ(current,start,limit) \
- ( (current)+1 < (limit) ? (current)+1 : (start) )
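-
-/* NEXTQ usage sketch: advance a queue pointer with wraparound, e.g.
-
-     cq->ptrs.in = NEXTQ (cq->ptrs.in, cq->ptrs.start, cq->ptrs.limit);
-
-   (illustrative; this is how the driver steps its in/out pointers). */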
-
-typedef struct {
- command * start;
- command * in;
- command * out;
- command * limit;
-} amb_cq_ptrs;
-
-typedef struct {
- spinlock_t lock;
- unsigned int pending;
- unsigned int high;
- unsigned int filled;
- unsigned int maximum; // size - 1 (q implementation)
- amb_cq_ptrs ptrs;
-} amb_cq;
-
-typedef struct {
- spinlock_t lock;
- unsigned int pending;
- unsigned int high;
- unsigned int filled;
- unsigned int maximum; // size - 1 (q implementation)
- struct {
- tx_in * start;
- tx_in * ptr;
- tx_in * limit;
- } in;
- struct {
- tx_out * start;
- tx_out * ptr;
- tx_out * limit;
- } out;
-} amb_txq;
-
-typedef struct {
- spinlock_t lock;
- unsigned int pending;
- unsigned int low;
- unsigned int emptied;
- unsigned int maximum; // size - 1 (q implementation)
- struct {
- rx_in * start;
- rx_in * ptr;
- rx_in * limit;
- } in;
- struct {
- rx_out * start;
- rx_out * ptr;
- rx_out * limit;
- } out;
- unsigned int buffers_wanted;
- unsigned int buffer_size;
-} amb_rxq;
-
-typedef struct {
- unsigned long tx_ok;
- struct {
- unsigned long ok;
- unsigned long error;
- unsigned long badcrc;
- unsigned long toolong;
- unsigned long aborted;
- unsigned long unused;
- } rx;
-} amb_stats;
-
-// a single struct pointed to by atm_vcc->dev_data
-
-typedef struct {
- u8 tx_vc_bits:7;
- u8 tx_present:1;
-} amb_tx_info;
-
-typedef struct {
- unsigned char pool;
-} amb_rx_info;
-
-typedef struct {
- amb_rx_info rx_info;
- u16 tx_frame_bits;
- unsigned int tx_rate;
- unsigned int rx_rate;
-} amb_vcc;
-
-struct amb_dev {
- u8 irq;
- unsigned long flags;
- u32 iobase;
- u32 * membase;
-
- amb_cq cq;
- amb_txq txq;
- amb_rxq rxq[NUM_RX_POOLS];
-
- struct mutex vcc_sf;
- amb_tx_info txer[NUM_VCS];
- struct atm_vcc * rxer[NUM_VCS];
- unsigned int tx_avail;
- unsigned int rx_avail;
-
- amb_stats stats;
-
- struct atm_dev * atm_dev;
- struct pci_dev * pci_dev;
- struct timer_list housekeeping;
-};
-
-typedef struct amb_dev amb_dev;
-
-#define AMB_DEV(atm_dev) ((amb_dev *) (atm_dev)->dev_data)
-#define AMB_VCC(atm_vcc) ((amb_vcc *) (atm_vcc)->dev_data)
-
-/* rate rounding */
-
-typedef enum {
- round_up,
- round_down,
- round_nearest
-} rounding;
-
-#endif
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
deleted file mode 100644
index 4f67404fe64c..000000000000
--- a/drivers/atm/firestream.c
+++ /dev/null
@@ -1,2057 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-/* drivers/atm/firestream.c - FireStream 155 (MB86697) and
- * FireStream 50 (MB86695) device driver
- */
-
-/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl
- * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA
- * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd
- */
-
-/*
-*/
-
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/poison.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/ioport.h> /* for request_region */
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/wait.h>
-
-#include "firestream.h"
-
-static int loopback = 0;
-static int num=0x5a;
-
-/* According to measurements (but they look suspicious to me!) done in
- * '97, 37% of the packets are one cell in size. So it pays to have
- * buffers allocated at that size. A large jump in percentage of
- * packets occurs at packets around 536 bytes in length. So it also
- * pays to have those pre-allocated. Unfortunately, we can't fully
- * take advantage of this as the majority of the packets is likely to
- * be TCP/IP (As where obviously the measurement comes from) There the
- * link would be opened with say a 1500 byte MTU, and we can't handle
- * smaller buffers more efficiently than the larger ones. -- REW
- */
-
-/* Due to the way Linux memory management works, specifying "576" as
- * an allocation size here isn't going to help. They are allocated
- * from 1024-byte regions anyway. With the size of the sk_buffs (quite
- * large), it doesn't pay to allocate the smallest size (64) -- REW */
-
-/* This is all guesswork. Hard numbers to back this up or disprove it
- * are appreciated. -- REW */
-
-/* The last entry should be about 64k. However, the "buffer size" is
- * passed to the chip in a 16 bit field. I don't know how "65536"
- * would be interpreted. -- REW */
-
-#define NP FS_NR_FREE_POOLS
-static int rx_buf_sizes[NP] = {128, 256, 512, 1024, 2048, 4096, 16384, 65520};
-/* log2: 7 8 9 10 11 12 14 16 */
-
-#if 0
-static int rx_pool_sizes[NP] = {1024, 1024, 512, 256, 128, 64, 32, 32};
-#else
-/* debug */
-static int rx_pool_sizes[NP] = {128, 128, 128, 64, 64, 64, 32, 32};
-#endif
-/* log2: 10 10 9 8 7 6 5 5 */
-/* sumlog2: 17 18 18 18 18 18 19 21 */
-/* mem allocated: 128k 256k 256k 256k 256k 256k 512k 2M */
-/* tot mem: almost 4M */
-
-/* NP is shorter, so that it fits on a single line. */
-#undef NP
-
-
-/* Small hardware gotcha:
-
- The FS50 CAM (VP/VC match registers) always take the lowest channel
- number that matches. This is not a problem.
-
- However, they also ignore whether the channel is enabled or
- not. This means that if you allocate channel 0 to 1.2 and then
- channel 1 to 0.0, then disabeling channel 0 and writing 0 to the
- match channel for channel 0 will "steal" the traffic from channel
- 1, even if you correctly disable channel 0.
-
- Workaround:
-
- - When disabling channels, write an invalid VP/VC value to the
- match register. (We use 0xffffffff, which in the worst case
- matches VP/VC = <maxVP>/<maxVC>, but I expect it not to match
- anything as some "when not in use, program to 0" bits are now
- programmed to 1...)
-
- - Don't initialize the match registers to 0, as 0.0 is a valid
- channel.
-*/
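-
-/* A sketch of the workaround (illustrative only: FS_CAM_MATCH is a
-   hypothetical register-offset macro, not a name from this driver):
-
-   static void example_disable_channel (struct fs_dev *dev, int channel)
-   {
-     // park the CAM on a VP/VC that should never match
-     write_fs (dev, FS_CAM_MATCH (channel), 0xffffffff);
-   }
-*/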
-
-
-/* Optimization hints and tips.
-
- The FireStream chips are very capable of reducing the amount of
- "interrupt-traffic" for the CPU. This driver requests an interrupt on EVERY
- action. You could try to minimize this a bit.
-
- Besides that, the userspace->kernel copy and the PCI bus are the
- performance limiting issues for this driver.
-
- You could queue up a bunch of outgoing packets without telling the
- FireStream. I'm not sure that's going to win you much though. The
- Linux layer won't tell us in advance when it's not going to give us
- any more packets in a while. So this is tricky to implement right without
- introducing extra delays.
-
- -- REW
- */
-
-
-
-
-/* The strings that define what the RX queue entry is all about. */
-/* Fujitsu: Please tell me which ones can have a pointer to a
- freepool descriptor! */
-static char *res_strings[] = {
- "RX OK: streaming not EOP",
- "RX OK: streaming EOP",
- "RX OK: Single buffer packet",
- "RX OK: packet mode",
- "RX OK: F4 OAM (end to end)",
- "RX OK: F4 OAM (Segment)",
- "RX OK: F5 OAM (end to end)",
- "RX OK: F5 OAM (Segment)",
- "RX OK: RM cell",
- "RX OK: TRANSP cell",
- "RX OK: TRANSPC cell",
- "Unmatched cell",
- "reserved 12",
- "reserved 13",
- "reserved 14",
- "Unrecognized cell",
- "reserved 16",
- "reassembly abort: AAL5 abort",
- "packet purged",
- "packet ageing timeout",
- "channel ageing timeout",
- "calculated length error",
- "programmed length limit error",
- "aal5 crc32 error",
- "oam transp or transpc crc10 error",
- "reserved 25",
- "reserved 26",
- "reserved 27",
- "reserved 28",
- "reserved 29",
- "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
- "reassembly abort: no buffers",
- "receive buffer overflow",
- "change in GFC",
- "receive buffer full",
- "low priority discard - no receive descriptor",
- "low priority discard - missing end of packet",
- "reserved 37",
- "reserved 38",
- "reserved 39",
- "reserved 40",
- "reserved 41",
- "reserved 42",
- "reserved 43",
- "reserved 44",
- "reserved 45",
- "reserved 46",
- "reserved 47",
- "reserved 48",
- "reserved 49",
- "reserved 50",
- "reserved 51",
- "reserved 52",
- "reserved 53",
- "reserved 54",
- "reserved 55",
- "reserved 56",
- "reserved 57",
- "reserved 58",
- "reserved 59",
- "reserved 60",
- "reserved 61",
- "reserved 62",
- "reserved 63",
-};
-
-static char *irq_bitname[] = {
- "LPCO",
- "DPCO",
- "RBRQ0_W",
- "RBRQ1_W",
- "RBRQ2_W",
- "RBRQ3_W",
- "RBRQ0_NF",
- "RBRQ1_NF",
- "RBRQ2_NF",
- "RBRQ3_NF",
- "BFP_SC",
- "INIT",
- "INIT_ERR",
- "USCEO",
- "UPEC0",
- "VPFCO",
- "CRCCO",
- "HECO",
- "TBRQ_W",
- "TBRQ_NF",
- "CTPQ_E",
- "GFC_C0",
- "PCI_FTL",
- "CSQ_W",
- "CSQ_NF",
- "EXT_INT",
- "RXDMA_S"
-};
-
-
-#define PHY_EOF -1
-#define PHY_CLEARALL -2
-
-struct reginit_item {
- int reg, val;
-};
-
-
-static struct reginit_item PHY_NTC_INIT[] = {
- { PHY_CLEARALL, 0x40 },
- { 0x12, 0x0001 },
- { 0x13, 0x7605 },
- { 0x1A, 0x0001 },
- { 0x1B, 0x0005 },
- { 0x38, 0x0003 },
- { 0x39, 0x0006 }, /* changed here to make loopback */
- { 0x01, 0x5262 },
- { 0x15, 0x0213 },
- { 0x00, 0x0003 },
- { PHY_EOF, 0}, /* -1 signals end of list */
-};
-
-
-/* Safety feature: If the card interrupts more than this number of times
- in a jiffy (1/100th of a second) then we just disable the interrupt and
- print a message. This prevents the system from hanging.
-
- 150000 packets per second is close to the limit a PC is going to have
- anyway. We therefore have to disable this for production. -- REW */
-#undef IRQ_RATE_LIMIT // 100
-
-/* Interrupts work now. Unlike serial cards, ATM cards don't work all
- that great without interrupts. -- REW */
-#undef FS_POLL_FREQ // 100
-
-/*
- This driver can spew a whole lot of debugging output at you. If you
- need maximum performance, you should disable the DEBUG define. To
- aid in debugging in the field, I'm leaving the compile-time debug
- features enabled, and disable them "runtime". That allows me to
- instruct people with problems to enable debugging without requiring
- them to recompile... -- REW
-*/
-#define DEBUG
-
-#ifdef DEBUG
-#define fs_dprintk(f, str...) if (fs_debug & f) printk (str)
-#else
-#define fs_dprintk(f, str...) /* nothing */
-#endif
-
-
-static int fs_keystream = 0;
-
-#ifdef DEBUG
-/* I didn't forget to set this to zero before shipping. Hit me with a stick
- if you get this with the debug default not set to zero again. -- REW */
-static int fs_debug = 0;
-#else
-#define fs_debug 0
-#endif
-
-#ifdef MODULE
-#ifdef DEBUG
-module_param(fs_debug, int, 0644);
-#endif
-module_param(loopback, int, 0);
-module_param(num, int, 0);
-module_param(fs_keystream, int, 0);
-/* XXX Add rx_buf_sizes and rx_pool_sizes, as per Amar's request. -- REW */
-#endif
-
-
-#define FS_DEBUG_FLOW 0x00000001
-#define FS_DEBUG_OPEN 0x00000002
-#define FS_DEBUG_QUEUE 0x00000004
-#define FS_DEBUG_IRQ 0x00000008
-#define FS_DEBUG_INIT 0x00000010
-#define FS_DEBUG_SEND 0x00000020
-#define FS_DEBUG_PHY 0x00000040
-#define FS_DEBUG_CLEANUP 0x00000080
-#define FS_DEBUG_QOS 0x00000100
-#define FS_DEBUG_TXQ 0x00000200
-#define FS_DEBUG_ALLOC 0x00000400
-#define FS_DEBUG_TXMEM 0x00000800
-#define FS_DEBUG_QSIZE 0x00001000
-
-
-#define func_enter() fs_dprintk(FS_DEBUG_FLOW, "fs: enter %s\n", __func__)
-#define func_exit() fs_dprintk(FS_DEBUG_FLOW, "fs: exit %s\n", __func__)
-
-
-static struct fs_dev *fs_boards = NULL;
-
-#ifdef DEBUG
-
-static void my_hd (void *addr, int len)
-{
- int j, ch;
- unsigned char *ptr = addr;
-
- while (len > 0) {
- printk ("%p ", ptr);
- for (j=0;j < ((len < 16)?len:16);j++) {
- printk ("%02x %s", ptr[j], (j==7)?" ":"");
- }
- for ( ;j < 16;j++) {
- printk (" %s", (j==7)?" ":"");
- }
- for (j=0;j < ((len < 16)?len:16);j++) {
- ch = ptr[j];
- printk ("%c", (ch < 0x20)?'.':((ch > 0x7f)?'.':ch));
- }
- printk ("\n");
- ptr += 16;
- len -= 16;
- }
-}
-#else /* DEBUG */
-static void my_hd (void *addr, int len){}
-#endif /* DEBUG */
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-/* Hmm. If this is ATM specific, why isn't there an ATM routine for this?
- * I copied it over from the ambassador driver. -- REW */
-
-static inline void fs_kfree_skb (struct sk_buff * skb)
-{
- if (ATM_SKB(skb)->vcc->pop)
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- else
- dev_kfree_skb_any (skb);
-}
-
-
-
-
-/* It seems the ATM forum recommends this horribly complicated 16bit
- * floating point format. Turns out the Ambassador uses the exact same
- * encoding. I just copied it over. If Mitch agrees, I'll move it over
- * to the atm_misc file or something like that. (and remove it from
- * here and the ambassador driver) -- REW
- */
-
-/* The good thing about this format is that it is monotonic. So,
- a conversion routine need not be very complicated. To be able to
-   round "nearest" we need to take along a few extra bits. Let's
- put these after 16 bits, so that we can just return the top 16
- bits of the 32bit number as the result:
-
- int mr (unsigned int rate, int r)
- {
- int e = 16+9;
- static int round[4]={0, 0, 0xffff, 0x8000};
- if (!rate) return 0;
- while (rate & 0xfc000000) {
- rate >>= 1;
- e++;
- }
- while (! (rate & 0xfe000000)) {
- rate <<= 1;
- e--;
- }
-
-// Now the mantissa is in bit positions 16-25, except for the "hidden 1" that's in bit 26.
- rate &= ~0x02000000;
-// Next add in the exponent
- rate |= e << (16+9);
-// And perform the rounding:
- return (rate + round[r]) >> 16;
- }
-
- 14 lines-of-code. Compare that with the 120 that the Ambassador
- guys needed. (would be 8 lines shorter if I'd try to really reduce
- the number of lines:
-
- int mr (unsigned int rate, int r)
- {
- int e = 16+9;
- static int round[4]={0, 0, 0xffff, 0x8000};
- if (!rate) return 0;
- for (; rate & 0xfc000000 ;rate >>= 1, e++);
- for (;!(rate & 0xfe000000);rate <<= 1, e--);
-    return (((rate & ~0x02000000) | (e << (16+9))) + round[r]) >> 16;
- }
-
- Exercise for the reader: Remove one more line-of-code, without
- cheating. (Just joining two lines is cheating). (I know it's
- possible, don't think you've beat me if you found it... If you
- manage to lose two lines or more, keep me updated! ;-)
-
- -- REW */
-
-
-#define ROUND_UP 1
-#define ROUND_DOWN 2
-#define ROUND_NEAREST 3
-/********** make rate (not quite as much fun as Horizon) **********/
-
-static int make_rate(unsigned int rate, int r,
- u16 *bits, unsigned int *actual)
-{
- unsigned char exp = -1; /* hush gcc */
- unsigned int man = -1; /* hush gcc */
-
-	fs_dprintk (FS_DEBUG_QOS, "make_rate %u\n", rate);
-
- /* rates in cells per second, ITU format (nasty 16-bit floating-point)
- given 5-bit e and 9-bit m:
- rate = EITHER (1+m/2^9)*2^e OR 0
- bits = EITHER 1<<14 | e<<9 | m OR 0
- (bit 15 is "reserved", bit 14 "non-zero")
- smallest rate is 0 (special representation)
- largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
- smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
- simple algorithm:
- find position of top bit, this gives e
- remove top bit and shift (rounding if feeling clever) by 9-e
- */
- /* Ambassador ucode bug: don't set bit 14! So the 0 rate is not
- representable. This should move into the ambassador driver
- when properly merged. -- REW */
-
- if (rate > 0xffc00000U) {
- /* larger than largest representable rate */
-
- if (r == ROUND_UP) {
- return -EINVAL;
- } else {
- exp = 31;
- man = 511;
- }
-
- } else if (rate) {
- /* representable rate */
-
- exp = 31;
- man = rate;
-
- /* invariant: rate = man*2^(exp-31) */
- while (!(man & (1<<31))) {
- exp = exp - 1;
- man = man<<1;
- }
-
- /* man has top bit set
- rate = (2^31+(man-2^31))*2^(exp-31)
- rate = (1+(man-2^31)/2^31)*2^exp
- */
- man = man<<1;
- man &= 0xffffffffU; /* a nop on 32-bit systems */
- /* rate = (1+man/2^32)*2^exp
-
- exp is in the range 0 to 31, man is in the range 0 to 2^32-1
- time to lose significance... we want m in the range 0 to 2^9-1
- rounding presents a minor problem... we first decide which way
- we are rounding (based on given rounding direction and possibly
- the bits of the mantissa that are to be discarded).
- */
-
- switch (r) {
- case ROUND_DOWN: {
- /* just truncate */
- man = man>>(32-9);
- break;
- }
- case ROUND_UP: {
- /* check all bits that we are discarding */
- if (man & (~0U>>9)) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- /* no need to check for round up outside of range */
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- case ROUND_NEAREST: {
- /* check msb that we are discarding */
- if (man & (1<<(32-9-1))) {
- man = (man>>(32-9)) + 1;
- if (man == (1<<9)) {
- /* no need to check for round up outside of range */
- man = 0;
- exp += 1;
- }
- } else {
- man = (man>>(32-9));
- }
- break;
- }
- }
-
- } else {
- /* zero rate - not representable */
-
- if (r == ROUND_DOWN) {
- return -EINVAL;
- } else {
- exp = 0;
- man = 0;
- }
- }
-
- fs_dprintk (FS_DEBUG_QOS, "rate: man=%u, exp=%hu", man, exp);
-
- if (bits)
- *bits = /* (1<<14) | */ (exp<<9) | man;
-
- if (actual)
- *actual = (exp >= 9)
- ? (1 << exp) + (man << (exp-9))
- : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
-
- return 0;
-}
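-
-/* For debugging it helps to be able to invert the encoding again. This
- is only a sketch of mine (hence the #if 0), assuming the exp >= 9 case
- of the "actual" computation above: */
-#if 0
-static unsigned int unmake_rate (u16 bits)
-{
- unsigned int exp = (bits >> 9) & 0x1f; /* 5-bit exponent */
- unsigned int man = bits & 0x1ff; /* 9-bit mantissa */
-
- /* rate = (1 + man/2^9) * 2^exp, e.g. 0x13e8 -> 1000 cells/sec. */
- return (1 << exp) + (man << (exp - 9));
-}
-#endif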
-
-
-
-
-/* FireStream access routines */
-/* For DEEP-DOWN debugging these can be rigged to intercept accesses to
- certain registers or to just log all accesses. */
-
-static inline void write_fs (struct fs_dev *dev, int offset, u32 val)
-{
- writel (val, dev->base + offset);
-}
-
-
-static inline u32 read_fs (struct fs_dev *dev, int offset)
-{
- return readl (dev->base + offset);
-}
-
-
-
-static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q)
-{
- return bus_to_virt (read_fs (dev, Q_WP(q->offset)) & Q_ADDR_MASK);
-}
-
-
-static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe)
-{
- u32 wp;
- struct FS_QENTRY *cqe;
-
- /* XXX Sanity check: the write pointer could be checked to still be
- the same as the value passed in as qe... -- REW */
- /* udelay (5); */
- while ((wp = read_fs (dev, Q_WP (q->offset))) & Q_FULL) {
- fs_dprintk (FS_DEBUG_TXQ, "Found queue at %x full. Waiting.\n",
- q->offset);
- schedule ();
- }
-
- wp &= ~0xf;
- cqe = bus_to_virt (wp);
- if (qe != cqe) {
- fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe);
- }
-
- write_fs (dev, Q_WP(q->offset), Q_INCWRAP);
-
- {
- static int c;
- if (!(c++ % 100))
- {
- int rp, wp;
- rp = read_fs (dev, Q_RP(q->offset));
- wp = read_fs (dev, Q_WP(q->offset));
- fs_dprintk (FS_DEBUG_TXQ, "q at %d: %x-%x: %x entries.\n",
- q->offset, rp, wp, wp-rp);
- }
- }
-}
-
-#ifdef DEBUG_EXTRA
-static struct FS_QENTRY pq[60];
-static int qp;
-
-static struct FS_BPENTRY dq[60];
-static int qd;
-static void *da[60];
-#endif
-
-static void submit_queue (struct fs_dev *dev, struct queue *q,
- u32 cmd, u32 p1, u32 p2, u32 p3)
-{
- struct FS_QENTRY *qe;
-
- qe = get_qentry (dev, q);
- qe->cmd = cmd;
- qe->p0 = p1;
- qe->p1 = p2;
- qe->p2 = p3;
- submit_qentry (dev, q, qe);
-
-#ifdef DEBUG_EXTRA
- pq[qp].cmd = cmd;
- pq[qp].p0 = p1;
- pq[qp].p1 = p2;
- pq[qp].p2 = p3;
- qp++;
- if (qp >= 60) qp = 0;
-#endif
-}
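-
-/* Usage sketch (mine, not from the original driver): a no-op command
- could be queued as
-
- submit_queue (dev, &dev->hp_txq,
- QE_CMD_NULL | QE_CMD_IMM_INQ, 0, 0, 0);
-
- i.e. opcode, submission mode and (where applicable) channel number are
- all OR-ed into the first word; the remaining three words carry the
- parameters. */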
-
-/* Test the "other" way one day... -- REW */
-#if 1
-#define submit_command submit_queue
-#else
-
-static void submit_command (struct fs_dev *dev, struct queue *q,
- u32 cmd, u32 p1, u32 p2, u32 p3)
-{
- write_fs (dev, CMDR0, cmd);
- write_fs (dev, CMDR1, p1);
- write_fs (dev, CMDR2, p2);
- write_fs (dev, CMDR3, p3);
-}
-#endif
-
-
-
-static void process_return_queue (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- struct FS_QENTRY *qe;
- void *tc;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping return queue entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. (%d)\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
- switch (STATUS_CODE (qe)) {
- case 5:
- tc = bus_to_virt (qe->p0);
- fs_dprintk (FS_DEBUG_ALLOC, "Free tc: %p\n", tc);
- kfree (tc);
- break;
- }
-
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
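-
-/* Status code 5, as far as I can tell, is the completion of the
- QE_CMD_CONFIG_TX submitted in fs_open() below: qe->p0 then holds the
- bus address of the fs_transmit_config handed to the chip, which is
- why it can be kfree()d here. My reading of the code, not the docs. */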
-
-
-static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- long tmp;
- struct FS_QENTRY *qe;
- struct sk_buff *skb;
- struct FS_BPENTRY *td;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping txdone entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x: %d\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
- if (STATUS_CODE (qe) != 2)
- fs_dprintk (FS_DEBUG_TXMEM, "queue entry: %08x %08x %08x %08x: %d\n",
- qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe));
-
-
- switch (STATUS_CODE (qe)) {
- case 0x01: /* This is for AAL0 where we put the chip in streaming mode */
- fallthrough;
- case 0x02:
- /* Process a real txdone entry. */
- tmp = qe->p0;
- if (tmp & 0x0f)
- printk (KERN_WARNING "td not aligned: %ld\n", tmp);
- tmp &= ~0x0f;
- td = bus_to_virt (tmp);
-
- fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p.\n",
- td->flags, td->next, td->bsa, td->aal_bufsize, td->skb );
-
- skb = td->skb;
- if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
- FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
- wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
- }
- td->dev->ntxpckts--;
-
- {
- static int c=0;
-
- if (!(c++ % 100)) {
- fs_dprintk (FS_DEBUG_QSIZE, "[%d]", td->dev->ntxpckts);
- }
- }
-
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- fs_dprintk (FS_DEBUG_TXMEM, "i");
- fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
- fs_kfree_skb (skb);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td);
- memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY));
- kfree (td);
- break;
- default:
- /* Here we get the tx purge inhibit command ... */
- /* Action, I believe, is "don't do anything". -- REW */
- ;
- }
-
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-static void process_incoming (struct fs_dev *dev, struct queue *q)
-{
- long rq;
- struct FS_QENTRY *qe;
- struct FS_BPENTRY *pe;
- struct sk_buff *skb;
- unsigned int channo;
- struct atm_vcc *atm_vcc;
-
- while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
- fs_dprintk (FS_DEBUG_QUEUE, "reaping incoming queue entry at %lx\n", rq);
- qe = bus_to_virt (rq);
-
- fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. ",
- qe->cmd, qe->p0, qe->p1, qe->p2);
-
- fs_dprintk (FS_DEBUG_QUEUE, "-> %x: %s\n",
- STATUS_CODE (qe),
- res_strings[STATUS_CODE(qe)]);
-
- pe = bus_to_virt (qe->p0);
- fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p %p.\n",
- pe->flags, pe->next, pe->bsa, pe->aal_bufsize,
- pe->skb, pe->fp);
-
- channo = qe->cmd & 0xffff;
-
- if (channo < dev->nchannels)
- atm_vcc = dev->atm_vccs[channo];
- else
- atm_vcc = NULL;
-
- /* Single buffer packet */
- switch (STATUS_CODE (qe)) {
- case 0x1:
- /* Fall through for streaming mode */
- fallthrough;
- case 0x2:/* Packet received OK.... */
- if (atm_vcc) {
- skb = pe->skb;
- pe->fp->n--;
-#if 0
- fs_dprintk (FS_DEBUG_QUEUE, "Got skb: %p\n", skb);
- if (FS_DEBUG_QUEUE & fs_debug) my_hd (bus_to_virt (pe->bsa), 0x20);
-#endif
- skb_put (skb, qe->p1 & 0xffff);
- ATM_SKB(skb)->vcc = atm_vcc;
- atomic_inc(&atm_vcc->stats->rx);
- __net_timestamp(skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
- atm_vcc->push (atm_vcc, skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
- kfree (pe);
- } else {
- printk (KERN_ERR "Got a receive on a non-open channel %d.\n", channo);
- }
- break;
- case 0x17:/* AAL 5 CRC32 error. IFF the length field is nonzero, a buffer
- has been consumed and needs to be processed. -- REW */
- if (qe->p1 & 0xffff) {
- pe = bus_to_virt (qe->p0);
- pe->fp->n--;
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", pe->skb);
- dev_kfree_skb_any (pe->skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe);
- kfree (pe);
- }
- if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
- break;
- case 0x1f: /* Reassembly abort: no buffers. */
- /* Silently increment error counter. */
- if (atm_vcc)
- atomic_inc(&atm_vcc->stats->rx_drop);
- break;
- default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
- printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
- STATUS_CODE(qe), res_strings[STATUS_CODE (qe)]);
- }
- write_fs (dev, Q_RP(q->offset), Q_INCWRAP);
- }
-}
-
-
-
-#define DO_DIRECTION(tp) ((tp)->traffic_class != ATM_NONE)
-
-static int fs_open(struct atm_vcc *atm_vcc)
-{
- struct fs_dev *dev;
- struct fs_vcc *vcc;
- struct fs_transmit_config *tc;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
- /* struct fs_receive_config *rc;*/
- /* struct FS_QENTRY *qe; */
- int error;
- int bfp;
- int to;
- unsigned short tmc0;
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
-
- func_enter ();
-
- dev = FS_DEV(atm_vcc->dev);
- fs_dprintk (FS_DEBUG_OPEN, "fs: open on dev: %p, vcc at %p\n",
- dev, atm_vcc);
-
- if (vci != ATM_VCI_UNSPEC && vpi != ATM_VPI_UNSPEC)
- set_bit(ATM_VF_ADDR, &atm_vcc->flags);
-
- if ((atm_vcc->qos.aal != ATM_AAL5) &&
- (atm_vcc->qos.aal != ATM_AAL2))
- return -EINVAL; /* XXX AAL0 */
-
- fs_dprintk (FS_DEBUG_OPEN, "fs: (itf %d): open %d.%d\n",
- atm_vcc->dev->number, atm_vcc->vpi, atm_vcc->vci);
-
- /* XXX handle qos parameters (rate limiting) ? */
-
- vcc = kmalloc(sizeof(struct fs_vcc), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc VCC: %p(%zd)\n", vcc, sizeof(struct fs_vcc));
- if (!vcc) {
- clear_bit(ATM_VF_ADDR, &atm_vcc->flags);
- return -ENOMEM;
- }
-
- atm_vcc->dev_data = vcc;
- vcc->last_skb = NULL;
-
- init_waitqueue_head (&vcc->close_wait);
-
- txtp = &atm_vcc->qos.txtp;
- rxtp = &atm_vcc->qos.rxtp;
-
- if (!test_bit(ATM_VF_PARTIAL, &atm_vcc->flags)) {
- if (IS_FS50(dev)) {
- /* Increment the channel number: take a free one next time. */
- for (to=33;to;to--, dev->channo++) {
- /* We only have 32 channels */
- if (dev->channo >= 32)
- dev->channo = 0;
- /* If we need to do RX, AND the RX is inuse, try the next */
- if (DO_DIRECTION(rxtp) && dev->atm_vccs[dev->channo])
- continue;
- /* If we need to do TX, AND the TX is inuse, try the next */
- if (DO_DIRECTION(txtp) && test_bit (dev->channo, dev->tx_inuse))
- continue;
- /* Ok, both are free! (or not needed) */
- break;
- }
- if (!to) {
- printk ("No more free channels for FS50..\n");
- kfree(vcc);
- return -EBUSY;
- }
- vcc->channo = dev->channo;
- dev->channo &= dev->channel_mask;
-
- } else {
- vcc->channo = (vpi << FS155_VCI_BITS) | (vci);
- if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
- ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
- printk ("Channel is in use for FS155.\n");
- kfree(vcc);
- return -EBUSY;
- }
- }
- fs_dprintk (FS_DEBUG_OPEN, "OK. Allocated channel %x(%d).\n",
- vcc->channo, vcc->channo);
- }
-
- if (DO_DIRECTION (txtp)) {
- tc = kmalloc (sizeof (struct fs_transmit_config), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc tc: %p(%zd)\n",
- tc, sizeof (struct fs_transmit_config));
- if (!tc) {
- fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
- kfree(vcc);
- return -ENOMEM;
- }
-
- /* Allocate the "open" entry from the high priority txq. This makes
- it most likely that the chip will notice it. It also prevents us
- from having to wait for completion. On the other hand, we may
- need to wait for completion anyway, to see if it completed
- successfully. */
-
- switch (atm_vcc->qos.aal) {
- case ATM_AAL2:
- case ATM_AAL0:
- tc->flags = 0
- | TC_FLAGS_TRANSPARENT_PAYLOAD
- | TC_FLAGS_PACKET
- | (1 << 28)
- | TC_FLAGS_TYPE_UBR /* XXX Change to VBR -- PVDL */
- | TC_FLAGS_CAL0;
- break;
- case ATM_AAL5:
- tc->flags = 0
- | TC_FLAGS_AAL5
- | TC_FLAGS_PACKET /* ??? */
- | TC_FLAGS_TYPE_CBR
- | TC_FLAGS_CAL0;
- break;
- default:
- printk ("Unknown aal: %d\n", atm_vcc->qos.aal);
- tc->flags = 0;
- }
- /* Docs are vague about this atm_hdr field. By the way, the FS
- * chip makes odd errors if the lower bits are set... -- REW */
- tc->atm_hdr = (vpi << 20) | (vci << 4);
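- /* For reference: as far as I can tell this matches the standard
- UNI cell header layout, GFC in bits 31-28, VPI in 27-20, VCI in
- 19-4 and PTI/CLP in the low four bits, which we keep zero. (My
- reading, not from the docs.) */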
- tmc0 = 0;
- {
- int pcr = atm_pcr_goal (txtp);
-
- fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr);
-
- /* XXX Hmm. Officially we're only allowed to do this if rounding
- is round_down -- REW */
- if (IS_FS50(dev)) {
- if (pcr > 51840000/53/8) pcr = 51840000/53/8;
- } else {
- if (pcr > 155520000/53/8) pcr = 155520000/53/8;
- }
- if (!pcr) {
- /* no rate cap */
- tmc0 = IS_FS50(dev)?0x61BE:0x64c9; /* Just copied over the bits from Fujitsu -- REW */
- } else {
- int r;
- if (pcr < 0) {
- r = ROUND_DOWN;
- pcr = -pcr;
- } else {
- r = ROUND_UP;
- }
- error = make_rate (pcr, r, &tmc0, NULL);
- if (error) {
- kfree(tc);
- kfree(vcc);
- return error;
- }
- }
- fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr);
- }
-
- tc->TMC[0] = tmc0 | 0x4000;
- tc->TMC[1] = 0; /* Unused */
- tc->TMC[2] = 0; /* Unused */
- tc->TMC[3] = 0; /* Unused */
-
- tc->spec = 0; /* UTOPIA address, UDF, HEC: Unused -> 0 */
- tc->rtag[0] = 0; /* What should I do with routing tags???
- -- Not used -- AS -- Thanks -- REW*/
- tc->rtag[1] = 0;
- tc->rtag[2] = 0;
-
- if (fs_debug & FS_DEBUG_OPEN) {
- fs_dprintk (FS_DEBUG_OPEN, "TX config record:\n");
- my_hd (tc, sizeof (*tc));
- }
-
- /* We now use the "submit_command" function to submit commands to
- the firestream. There is a define up near the definition of
- that routine that switches it between writing directly to the
- immediate command registers and queuing the commands in the
- HPTXQ for execution. The latter technique might be more
- efficient if we knew we were going to submit a whole lot of
- commands in one go, but this driver is not set up to be able to
- use such a construct. So it probably doesn't matter much right
- now. -- REW */
-
- /* The command is IMMediate and INQueue. The parameters are out-of-line.. */
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_TX | QE_CMD_IMM_INQ | vcc->channo,
- virt_to_bus (tc), 0, 0);
-
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_EN | QE_CMD_IMM_INQ | vcc->channo,
- 0, 0, 0);
- set_bit (vcc->channo, dev->tx_inuse);
- }
-
- if (DO_DIRECTION (rxtp)) {
- dev->atm_vccs[vcc->channo] = atm_vcc;
-
- for (bfp = 0;bfp < FS_NR_FREE_POOLS; bfp++)
- if (atm_vcc->qos.rxtp.max_sdu <= dev->rx_fp[bfp].bufsize) break;
- if (bfp >= FS_NR_FREE_POOLS) {
- fs_dprintk (FS_DEBUG_OPEN, "No free pool fits sdu: %d.\n",
- atm_vcc->qos.rxtp.max_sdu);
- /* XXX Cleanup? -- Would just calling fs_close work??? -- REW */
-
- /* XXX clear tx inuse. Close TX part? */
- dev->atm_vccs[vcc->channo] = NULL;
- kfree (vcc);
- return -EINVAL;
- }
-
- switch (atm_vcc->qos.aal) {
- case ATM_AAL0:
- case ATM_AAL2:
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo,
- RC_FLAGS_TRANSP |
- RC_FLAGS_BFPS_BFP * bfp |
- RC_FLAGS_RXBM_PSB, 0, 0);
- break;
- case ATM_AAL5:
- submit_command (dev, &dev->hp_txq,
- QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo,
- RC_FLAGS_AAL5 |
- RC_FLAGS_BFPS_BFP * bfp |
- RC_FLAGS_RXBM_PSB, 0, 0);
- break;
- }
- if (IS_FS50 (dev)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_REG_WR | QE_CMD_IMM_INQ,
- 0x80 + vcc->channo,
- (vpi << 16) | vci, 0 ); /* XXX -- Use defines. */
- }
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_EN | QE_CMD_IMM_INQ | vcc->channo,
- 0, 0, 0);
- }
-
- /* Indicate we're done! */
- set_bit(ATM_VF_READY, &atm_vcc->flags);
-
- func_exit ();
- return 0;
-}
-
-
-static void fs_close(struct atm_vcc *atm_vcc)
-{
- struct fs_dev *dev = FS_DEV (atm_vcc->dev);
- struct fs_vcc *vcc = FS_VCC (atm_vcc);
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
-
- func_enter ();
-
- clear_bit(ATM_VF_READY, &atm_vcc->flags);
-
- fs_dprintk (FS_DEBUG_QSIZE, "--==**[%d]**==--", dev->ntxpckts);
- if (vcc->last_skb) {
- fs_dprintk (FS_DEBUG_QUEUE, "Waiting for skb %p to be sent.\n",
- vcc->last_skb);
- /* We're going to wait for the last packet to get sent on this VC. It would
- be impolite not to send them, don't you think?
- XXX
- We don't know which packets didn't get sent. So if we get interrupted in
- this sleep_on, we'll lose any reference to these packets. Memory leak!
- On the other hand, it's awfully convenient that we can abort a "close" that
- is taking too long. Maybe just use a non-interruptible sleep_on? -- REW */
- wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
- }
-
- txtp = &atm_vcc->qos.txtp;
- rxtp = &atm_vcc->qos.rxtp;
-
-
- /* See App note XXX (Unpublished as of now) for the reason for the
- removal of the "CMD_IMM_INQ" part of the TX_PURGE_INH... -- REW */
-
- if (DO_DIRECTION (txtp)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_PURGE_INH | /*QE_CMD_IMM_INQ|*/ vcc->channo, 0,0,0);
- clear_bit (vcc->channo, dev->tx_inuse);
- }
-
- if (DO_DIRECTION (rxtp)) {
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
- dev->atm_vccs [vcc->channo] = NULL;
-
- /* This means that this is configured as a receive channel */
- if (IS_FS50 (dev)) {
- /* Disable the receive filter. Is 0/0 indeed an invalid receive
- channel? -- REW. Yes it is. -- Hang. Ok. I'll use -1
- (0xfff...) -- REW */
- submit_command (dev, &dev->hp_txq,
- QE_CMD_REG_WR | QE_CMD_IMM_INQ,
- 0x80 + vcc->channo, -1, 0 );
- }
- }
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free vcc: %p\n", vcc);
- kfree (vcc);
-
- func_exit ();
-}
-
-
-static int fs_send (struct atm_vcc *atm_vcc, struct sk_buff *skb)
-{
- struct fs_dev *dev = FS_DEV (atm_vcc->dev);
- struct fs_vcc *vcc = FS_VCC (atm_vcc);
- struct FS_BPENTRY *td;
-
- func_enter ();
-
- fs_dprintk (FS_DEBUG_TXMEM, "I");
- fs_dprintk (FS_DEBUG_SEND, "Send: atm_vcc %p skb %p vcc %p dev %p\n",
- atm_vcc, skb, vcc, dev);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc t-skb: %p (atm_send)\n", skb);
-
- ATM_SKB(skb)->vcc = atm_vcc;
-
- vcc->last_skb = skb;
-
- td = kmalloc (sizeof (struct FS_BPENTRY), GFP_ATOMIC);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc transd: %p(%zd)\n", td, sizeof (struct FS_BPENTRY));
- if (!td) {
- /* Oops out of mem */
- return -ENOMEM;
- }
-
- fs_dprintk (FS_DEBUG_SEND, "first word in buffer: %x\n",
- *(int *) skb->data);
-
- td->flags = TD_EPI | TD_DATA | skb->len;
- td->next = 0;
- td->bsa = virt_to_bus (skb->data);
- td->skb = skb;
- td->dev = dev;
- dev->ntxpckts++;
-
-#ifdef DEBUG_EXTRA
- da[qd] = td;
- dq[qd].flags = td->flags;
- dq[qd].next = td->next;
- dq[qd].bsa = td->bsa;
- dq[qd].skb = td->skb;
- dq[qd].dev = td->dev;
- qd++;
- if (qd >= 60) qd = 0;
-#endif
-
- submit_queue (dev, &dev->hp_txq,
- QE_TRANSMIT_DE | vcc->channo,
- virt_to_bus (td), 0,
- virt_to_bus (td));
-
- fs_dprintk (FS_DEBUG_QUEUE, "in send: txq %d txrq %d\n",
- read_fs (dev, Q_EA (dev->hp_txq.offset)) -
- read_fs (dev, Q_SA (dev->hp_txq.offset)),
- read_fs (dev, Q_EA (dev->tx_relq.offset)) -
- read_fs (dev, Q_SA (dev->tx_relq.offset)));
-
- func_exit ();
- return 0;
-}
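-
-/* The transmit descriptor flags, as I read the header file: bits 29-28
- select the type (TD_DATA here), bit 27 (TD_EPI) marks the end of the
- packet, and the low bits carry the length. So for a 512-byte AAL5
- packet (illustrative value only):
-
- td->flags == TD_EPI | TD_DATA | 512 == 0x08000200
- */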
-
-
-/* Some function placeholders for functions we don't yet support. */
-
-#if 0
-static int fs_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- func_enter ();
- func_exit ();
- return -ENOIOCTLCMD;
-}
-
-
-static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,int optlen)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,unsigned int optlen)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static void fs_phy_put(struct atm_dev *dev,unsigned char value,
- unsigned long addr)
-{
- func_enter ();
- func_exit ();
-}
-
-
-static unsigned char fs_phy_get(struct atm_dev *dev,unsigned long addr)
-{
- func_enter ();
- func_exit ();
- return 0;
-}
-
-
-static int fs_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
-{
- func_enter ();
- func_exit ();
- return 0;
-};
-
-#endif
-
-
-static const struct atmdev_ops ops = {
- .open = fs_open,
- .close = fs_close,
- .send = fs_send,
- .owner = THIS_MODULE,
- /* ioctl: fs_ioctl, */
- /* change_qos: fs_change_qos, */
-
- /* For now implement these internally here... */
- /* phy_put: fs_phy_put, */
- /* phy_get: fs_phy_get, */
-};
-
-
-static void undocumented_pci_fix(struct pci_dev *pdev)
-{
- u32 tint;
-
- /* The Windows driver says: */
- /* Switch off FireStream Retry Limit Threshold
- */
-
- /* The register at 0x28 is documented as "reserved", no further
- comments. */
-
- pci_read_config_dword (pdev, 0x28, &tint);
- if (tint != 0x80) {
- tint = 0x80;
- pci_write_config_dword (pdev, 0x28, tint);
- }
-}
-
-
-
-/**************************************************************************
- * PHY routines *
- **************************************************************************/
-
-static void write_phy(struct fs_dev *dev, int regnum, int val)
-{
- submit_command (dev, &dev->hp_txq, QE_CMD_PRP_WR | QE_CMD_IMM_INQ,
- regnum, val, 0);
-}
-
-static int init_phy(struct fs_dev *dev, struct reginit_item *reginit)
-{
- int i;
-
- func_enter ();
- while (reginit->reg != PHY_EOF) {
- if (reginit->reg == PHY_CLEARALL) {
- /* "PHY_CLEARALL means clear all registers. Numregisters is in "val". */
- for (i=0;i<reginit->val;i++) {
- write_phy (dev, i, 0);
- }
- } else {
- write_phy (dev, reginit->reg, reginit->val);
- }
- reginit++;
- }
- func_exit ();
- return 0;
-}
-
-static void reset_chip (struct fs_dev *dev)
-{
- int i;
-
- write_fs (dev, SARMODE0, SARMODE0_SRTS0);
-
- /* Undocumented delay */
- udelay (128);
-
- /* The "internal registers are documented to all reset to zero, but
- comments & code in the Windows driver indicates that the pools are
- NOT reset. */
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- write_fs (dev, FP_CNF (RXB_FP(i)), 0);
- write_fs (dev, FP_SA (RXB_FP(i)), 0);
- write_fs (dev, FP_EA (RXB_FP(i)), 0);
- write_fs (dev, FP_CNT (RXB_FP(i)), 0);
- write_fs (dev, FP_CTU (RXB_FP(i)), 0);
- }
-
- /* The same goes for the match channel registers, although those are
- NOT documented that way in the Windows driver. -- REW */
- /* The Windows driver DOES write 0 to these registers somewhere in
- the init sequence. However, a small hardware feature will
- prevent reception of data on VPI/VCI = 0/0 (unless the allocated
- channel happens to have no disabled channels with a lower
- number). -- REW */
-
- /* Clear the match channel registers. */
- if (IS_FS50 (dev)) {
- for (i=0;i<FS50_NR_CHANNELS;i++) {
- write_fs (dev, 0x200 + i * 4, -1);
- }
- }
-}
-
-static void *aligned_kmalloc(int size, gfp_t flags, int alignment)
-{
- void *t;
-
- if (alignment <= 0x10) {
- t = kmalloc (size, flags);
- if ((unsigned long)t & (alignment-1)) {
- printk ("Kmalloc doesn't align things correctly! %p\n", t);
- kfree (t);
- return aligned_kmalloc (size, flags, alignment * 4);
- }
- return t;
- }
- printk (KERN_ERR "Request for > 0x10 alignment not yet implemented (hard!)\n");
- return NULL;
-}
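-
-/* Why 0x10: the queue registers keep only bits 31-4 of an entry's bus
- address (Q_ADDR_MASK is 0xfffffff0) and reuse the low four bits for
- flags like Q_FULL and Q_EMPTY, so queue memory must be 16-byte
- aligned. My understanding from the code, not from the docs. */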
-
-static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
- int nentries, int is_rq)
-{
- int sz = nentries * sizeof (struct FS_QENTRY);
- struct FS_QENTRY *p;
-
- func_enter ();
-
- fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n",
- queue, nentries);
-
- p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc queue: %p(%d)\n", p, sz);
-
- if (!p) return 0;
-
- write_fs (dev, Q_SA(queue), virt_to_bus(p));
- write_fs (dev, Q_EA(queue), virt_to_bus(p+nentries-1));
- write_fs (dev, Q_WP(queue), virt_to_bus(p));
- write_fs (dev, Q_RP(queue), virt_to_bus(p));
- if (is_rq) {
- /* Configuration for the receive queue: 0 means interrupt
- immediately, with no pre-warning for nearly empty queues; we do
- our best to keep the queue filled anyway. */
- write_fs (dev, Q_CNF(queue), 0 );
- }
-
- txq->sa = p;
- txq->ea = p;
- txq->offset = queue;
-
- func_exit ();
- return 1;
-}
-
-
-static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
- int bufsize, int nr_buffers)
-{
- func_enter ();
-
- fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
-
- write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
- write_fs (dev, FP_SA(queue), 0);
- write_fs (dev, FP_EA(queue), 0);
- write_fs (dev, FP_CTU(queue), 0);
- write_fs (dev, FP_CNT(queue), 0);
-
- fp->offset = queue;
- fp->bufsize = bufsize;
- fp->nr_buffers = nr_buffers;
-
- func_exit ();
- return 1;
-}
-
-
-static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *fp)
-{
-#if 0
- /* This seems to be unreliable.... */
- return read_fs (dev, FP_CNT (fp->offset));
-#else
- return fp->n;
-#endif
-}
-
-
-/* Check if this gets going again if a pool ever runs out. -- Yes, it
- does. I've seen "receive abort: no buffers" and things started
- working again after that... -- REW */
-
-static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
- gfp_t gfp_flags)
-{
- struct FS_BPENTRY *qe, *ne;
- struct sk_buff *skb;
- int n = 0;
- u32 qe_tmp;
-
- fs_dprintk (FS_DEBUG_QUEUE, "Topping off queue at %x (%d-%d/%d)\n",
- fp->offset, read_fs (dev, FP_CNT (fp->offset)), fp->n,
- fp->nr_buffers);
- while (nr_buffers_in_freepool(dev, fp) < fp->nr_buffers) {
-
- skb = alloc_skb (fp->bufsize, gfp_flags);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-skb: %p(%d)\n", skb, fp->bufsize);
- if (!skb) break;
- ne = kmalloc (sizeof (struct FS_BPENTRY), gfp_flags);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-d: %p(%zd)\n", ne, sizeof (struct FS_BPENTRY));
- if (!ne) {
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", skb);
- dev_kfree_skb_any (skb);
- break;
- }
-
- fs_dprintk (FS_DEBUG_QUEUE, "Adding skb %p desc %p -> %p(%p) ",
- skb, ne, skb->data, skb->head);
- n++;
- ne->flags = FP_FLAGS_EPI | fp->bufsize;
- ne->next = virt_to_bus (NULL);
- ne->bsa = virt_to_bus (skb->data);
- ne->aal_bufsize = fp->bufsize;
- ne->skb = skb;
- ne->fp = fp;
-
- /*
- * FIXME: following code encodes and decodes
- * machine pointers (could be 64-bit) into a
- * 32-bit register.
- */
-
- qe_tmp = read_fs (dev, FP_EA(fp->offset));
- fs_dprintk (FS_DEBUG_QUEUE, "link at %x\n", qe_tmp);
- if (qe_tmp) {
- qe = bus_to_virt ((long) qe_tmp);
- qe->next = virt_to_bus(ne);
- qe->flags &= ~FP_FLAGS_EPI;
- } else
- write_fs (dev, FP_SA(fp->offset), virt_to_bus(ne));
-
- write_fs (dev, FP_EA(fp->offset), virt_to_bus (ne));
- fp->n++; /* XXX Atomic_inc? */
- write_fs (dev, FP_CTU(fp->offset), 1);
- }
-
- fs_dprintk (FS_DEBUG_QUEUE, "Added %d entries. \n", n);
-}
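-
-/* The free-pool layout that the loop above builds, as I read it
- (sketch):
-
- FP_SA -> [entry] -> [entry] -> ... -> [entry, FP_FLAGS_EPI] <- FP_EA
-
- Each new entry is linked in at the tail, the old tail loses its EPI
- (end-of-pool) flag, FP_EA is advanced, and FP_CTU tells the chip one
- more buffer is available. */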
-
-static void free_queue(struct fs_dev *dev, struct queue *txq)
-{
- func_enter ();
-
- write_fs (dev, Q_SA(txq->offset), 0);
- write_fs (dev, Q_EA(txq->offset), 0);
- write_fs (dev, Q_RP(txq->offset), 0);
- write_fs (dev, Q_WP(txq->offset), 0);
- /* Configuration ? */
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free queue: %p\n", txq->sa);
- kfree (txq->sa);
-
- func_exit ();
-}
-
-static void free_freepool(struct fs_dev *dev, struct freepool *fp)
-{
- func_enter ();
-
- write_fs (dev, FP_CNF(fp->offset), 0);
- write_fs (dev, FP_SA (fp->offset), 0);
- write_fs (dev, FP_EA (fp->offset), 0);
- write_fs (dev, FP_CNT(fp->offset), 0);
- write_fs (dev, FP_CTU(fp->offset), 0);
-
- func_exit ();
-}
-
-
-
-static irqreturn_t fs_irq (int irq, void *dev_id)
-{
- int i;
- u32 status;
- struct fs_dev *dev = dev_id;
-
- status = read_fs (dev, ISR);
- if (!status)
- return IRQ_NONE;
-
- func_enter ();
-
-#ifdef IRQ_RATE_LIMIT
- /* Aaargh! I'm ashamed. This costs more lines of code than the actual
- interrupt routine! (Well, it used to when I wrote that comment.) -- REW */
- {
- static int lastjif;
- static int nintr=0;
-
- if (lastjif == jiffies) {
- if (++nintr > IRQ_RATE_LIMIT) {
- free_irq (dev->irq, dev_id);
- printk (KERN_ERR "fs: Too many interrupts. Turning off interrupt %d.\n",
- dev->irq);
- }
- } else {
- lastjif = jiffies;
- nintr = 0;
- }
- }
-#endif
- fs_dprintk (FS_DEBUG_QUEUE, "in intr: txq %d txrq %d\n",
- read_fs (dev, Q_EA (dev->hp_txq.offset)) -
- read_fs (dev, Q_SA (dev->hp_txq.offset)),
- read_fs (dev, Q_EA (dev->tx_relq.offset)) -
- read_fs (dev, Q_SA (dev->tx_relq.offset)));
-
- /* print the bits in the ISR register. */
- if (fs_debug & FS_DEBUG_IRQ) {
- /* The FS_DEBUG things are unnecessary here. But this way it is
- clear for grep that these are debug prints. */
- fs_dprintk (FS_DEBUG_IRQ, "IRQ status:");
- for (i=0;i<27;i++)
- if (status & (1 << i))
- fs_dprintk (FS_DEBUG_IRQ, " %s", irq_bitname[i]);
- fs_dprintk (FS_DEBUG_IRQ, "\n");
- }
-
- if (status & ISR_RBRQ0_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (0)!!!!\n");
- process_incoming (dev, &dev->rx_rq[0]);
- /* items mentioned on RBRQ0 are from FP 0 or 1. */
- top_off_fp (dev, &dev->rx_fp[0], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[1], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ1_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (1)!!!!\n");
- process_incoming (dev, &dev->rx_rq[1]);
- top_off_fp (dev, &dev->rx_fp[2], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[3], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ2_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (2)!!!!\n");
- process_incoming (dev, &dev->rx_rq[2]);
- top_off_fp (dev, &dev->rx_fp[4], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[5], GFP_ATOMIC);
- }
-
- if (status & ISR_RBRQ3_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (3)!!!!\n");
- process_incoming (dev, &dev->rx_rq[3]);
- top_off_fp (dev, &dev->rx_fp[6], GFP_ATOMIC);
- top_off_fp (dev, &dev->rx_fp[7], GFP_ATOMIC);
- }
-
- if (status & ISR_CSQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Command executed ok!\n");
- process_return_queue (dev, &dev->st_q);
- }
-
- if (status & ISR_TBRQ_W) {
- fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
- process_txdone_queue (dev, &dev->tx_relq);
- }
-
- func_exit ();
- return IRQ_HANDLED;
-}
-
-
-#ifdef FS_POLL_FREQ
-static void fs_poll (struct timer_list *t)
-{
- struct fs_dev *dev = from_timer(dev, t, timer);
-
- fs_irq (0, dev);
- dev->timer.expires = jiffies + FS_POLL_FREQ;
- add_timer (&dev->timer);
-}
-#endif
-
-static int fs_init(struct fs_dev *dev)
-{
- struct pci_dev *pci_dev;
- int isr, to;
- int i;
-
- func_enter ();
- pci_dev = dev->pci_dev;
-
- printk (KERN_INFO "found a FireStream %d card, base %16llx, irq%d.\n",
- IS_FS50(dev)?50:155,
- (unsigned long long)pci_resource_start(pci_dev, 0),
- dev->pci_dev->irq);
-
- if (fs_debug & FS_DEBUG_INIT)
- my_hd ((unsigned char *) dev, sizeof (*dev));
-
- undocumented_pci_fix (pci_dev);
-
- dev->hw_base = pci_resource_start(pci_dev, 0);
-
- dev->base = ioremap(dev->hw_base, 0x1000);
- if (!dev->base)
- return 1;
-
- reset_chip (dev);
-
- write_fs (dev, SARMODE0, 0
- | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. */
- | (1 * SARMODE0_INTMODE_READCLEAR)
- | (1 * SARMODE0_CWRE)
- | (IS_FS50(dev) ? SARMODE0_PRPWT_FS50_5:
- SARMODE0_PRPWT_FS155_3)
- | (1 * SARMODE0_CALSUP_1)
- | (IS_FS50(dev) ? (0
- | SARMODE0_RXVCS_32
- | SARMODE0_ABRVCS_32
- | SARMODE0_TXVCS_32):
- (0
- | SARMODE0_RXVCS_1k
- | SARMODE0_ABRVCS_1k
- | SARMODE0_TXVCS_1k)));
-
- /* 10ms * 100 is 1 second. That should be enough, as AN3:9 says it takes
- 1ms. */
- to = 100;
- while (--to) {
- isr = read_fs (dev, ISR);
-
- /* This bit is documented as "RESERVED" */
- if (isr & ISR_INIT_ERR) {
- printk (KERN_ERR "Error initializing the FS... \n");
- goto unmap;
- }
- if (isr & ISR_INIT) {
- fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n");
- break;
- }
-
- /* Try again after 10ms. */
- msleep(10);
- }
-
- if (!to) {
- printk (KERN_ERR "timeout initializing the FS... \n");
- goto unmap;
- }
-
- /* XXX fix for fs155 */
- dev->channel_mask = 0x1f;
- dev->channo = 0;
-
- /* AN3: 10 */
- write_fs (dev, SARMODE1, 0
- | (fs_keystream * SARMODE1_DEFHEC) /* XXX PHY */
- | ((loopback == 1) * SARMODE1_TSTLP) /* XXX Loopback mode enable... */
- | (1 * SARMODE1_DCRM)
- | (1 * SARMODE1_DCOAM)
- | (0 * SARMODE1_OAMCRC)
- | (0 * SARMODE1_DUMPE)
- | (0 * SARMODE1_GPLEN)
- | (0 * SARMODE1_GNAM)
- | (0 * SARMODE1_GVAS)
- | (0 * SARMODE1_GPAS)
- | (1 * SARMODE1_GPRI)
- | (0 * SARMODE1_PMS)
- | (0 * SARMODE1_GFCR)
- | (1 * SARMODE1_HECM2)
- | (1 * SARMODE1_HECM1)
- | (1 * SARMODE1_HECM0)
- | (1 << 12) /* That's what Hang's driver does. Program to 0 */
- | (0 * 0xff) /* XXX FS155 */);
-
-
- /* Cal prescale etc */
-
- /* AN3: 11 */
- write_fs (dev, TMCONF, 0x0000000f);
- write_fs (dev, CALPRESCALE, 0x01010101 * num);
- write_fs (dev, 0x80, 0x000F00E4);
-
- /* AN3: 12 */
- write_fs (dev, CELLOSCONF, 0
- | ( 0 * CELLOSCONF_CEN)
- | ( CELLOSCONF_SC1)
- | (0x80 * CELLOSCONF_COBS)
- | (num * CELLOSCONF_COPK) /* Changed from 0xff to 0x5a */
- | (num * CELLOSCONF_COST));/* after a hint from Hang.
- * performance jumped 50->70... */
-
- /* Magic value by Hang */
- write_fs (dev, CELLOSCONF_COST, 0x0B809191);
-
- if (IS_FS50 (dev)) {
- write_fs (dev, RAS0, RAS0_DCD_XHLT);
- dev->atm_dev->ci_range.vpi_bits = 12;
- dev->atm_dev->ci_range.vci_bits = 16;
- dev->nchannels = FS50_NR_CHANNELS;
- } else {
- write_fs (dev, RAS0, RAS0_DCD_XHLT
- | (((1 << FS155_VPI_BITS) - 1) * RAS0_VPSEL)
- | (((1 << FS155_VCI_BITS) - 1) * RAS0_VCSEL));
- /* We can choose the split arbitrarily. We might be able to
- support more. Whatever. This should do for now. */
- dev->atm_dev->ci_range.vpi_bits = FS155_VPI_BITS;
- dev->atm_dev->ci_range.vci_bits = FS155_VCI_BITS;
-
- /* Address bits we can't use should be compared to 0. */
- write_fs (dev, RAC, 0);
-
- /* Manual (AN9, page 6) says ASF1=0 means compare Utopia address
- * too. I can't find ASF1 anywhere. Anyway, we AND with just the
- * other bits, then compare with 0, which is exactly what we
- * want. */
- write_fs (dev, RAM, (1 << (28 - FS155_VPI_BITS - FS155_VCI_BITS)) - 1);
- dev->nchannels = FS155_NR_CHANNELS;
- }
- dev->atm_vccs = kcalloc (dev->nchannels, sizeof (struct atm_vcc *),
- GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%zd)\n",
- dev->atm_vccs, dev->nchannels * sizeof (struct atm_vcc *));
-
- if (!dev->atm_vccs) {
- printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n");
- /* XXX Clean up..... */
- goto unmap;
- }
-
- dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc tx_inuse: %p(%d)\n",
- dev->atm_vccs, dev->nchannels / 8);
-
- if (!dev->tx_inuse) {
- printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n");
- /* XXX Clean up..... */
- goto unmap;
- }
- /* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
- /* -- RAS2 : FS50 only: Default is OK. */
-
- /* DMAMODE, default should be OK. -- REW */
- write_fs (dev, DMAMR, DMAMR_TX_MODE_FULL);
-
- init_q (dev, &dev->hp_txq, TX_PQ(TXQ_HP), TXQ_NENTRIES, 0);
- init_q (dev, &dev->lp_txq, TX_PQ(TXQ_LP), TXQ_NENTRIES, 0);
- init_q (dev, &dev->tx_relq, TXB_RQ, TXQ_NENTRIES, 1);
- init_q (dev, &dev->st_q, ST_Q, TXQ_NENTRIES, 1);
-
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- init_fp (dev, &dev->rx_fp[i], RXB_FP(i),
- rx_buf_sizes[i], rx_pool_sizes[i]);
- top_off_fp (dev, &dev->rx_fp[i], GFP_KERNEL);
- }
-
-
- for (i=0;i < FS_NR_RX_QUEUES;i++)
- init_q (dev, &dev->rx_rq[i], RXB_RQ(i), RXRQ_NENTRIES, 1);
-
- dev->irq = pci_dev->irq;
- if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) {
- printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq);
- /* XXX undo all previous stuff... */
- goto unmap;
- }
- fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev);
-
- /* We want to be notified of most things. Only the statistics counter
- overflows are not interesting. */
- write_fs (dev, IMR, 0
- | ISR_RBRQ0_W
- | ISR_RBRQ1_W
- | ISR_RBRQ2_W
- | ISR_RBRQ3_W
- | ISR_TBRQ_W
- | ISR_CSQ_W);
-
- write_fs (dev, SARMODE0, 0
- | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. */
- | (1 * SARMODE0_GINT)
- | (1 * SARMODE0_INTMODE_READCLEAR)
- | (0 * SARMODE0_CWRE)
- | (IS_FS50(dev)?SARMODE0_PRPWT_FS50_5:
- SARMODE0_PRPWT_FS155_3)
- | (1 * SARMODE0_CALSUP_1)
- | (IS_FS50 (dev)?(0
- | SARMODE0_RXVCS_32
- | SARMODE0_ABRVCS_32
- | SARMODE0_TXVCS_32):
- (0
- | SARMODE0_RXVCS_1k
- | SARMODE0_ABRVCS_1k
- | SARMODE0_TXVCS_1k))
- | (1 * SARMODE0_RUN));
-
- init_phy (dev, PHY_NTC_INIT);
-
- if (loopback == 2) {
- write_phy (dev, 0x39, 0x000e);
- }
-
-#ifdef FS_POLL_FREQ
- timer_setup(&dev->timer, fs_poll, 0);
- dev->timer.expires = jiffies + FS_POLL_FREQ;
- add_timer (&dev->timer);
-#endif
-
- dev->atm_dev->dev_data = dev;
-
- func_exit ();
- return 0;
-unmap:
- iounmap(dev->base);
- return 1;
-}
-
-static int firestream_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
-{
- struct atm_dev *atm_dev;
- struct fs_dev *fs_dev;
-
- if (pci_enable_device(pci_dev))
- goto err_out;
-
- fs_dev = kzalloc (sizeof (struct fs_dev), GFP_KERNEL);
- fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%zd)\n",
- fs_dev, sizeof (struct fs_dev));
- if (!fs_dev)
- goto err_out;
- atm_dev = atm_dev_register("fs", &pci_dev->dev, &ops, -1, NULL);
- if (!atm_dev)
- goto err_out_free_fs_dev;
-
- fs_dev->pci_dev = pci_dev;
- fs_dev->atm_dev = atm_dev;
- fs_dev->flags = ent->driver_data;
-
- if (fs_init(fs_dev))
- goto err_out_free_atm_dev;
-
- fs_dev->next = fs_boards;
- fs_boards = fs_dev;
- return 0;
-
- err_out_free_atm_dev:
- atm_dev_deregister(atm_dev);
- err_out_free_fs_dev:
- kfree(fs_dev);
- err_out:
- return -ENODEV;
-}
-
-static void firestream_remove_one(struct pci_dev *pdev)
-{
- int i;
- struct fs_dev *dev, *nxtdev;
- struct fs_vcc *vcc;
- struct FS_BPENTRY *fp, *nxt;
-
- func_enter ();
-
-#if 0
- printk ("hptxq:\n");
- for (i=0;i<60;i++) {
- printk ("%d: %08x %08x %08x %08x \n",
- i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2);
- qp++;
- if (qp >= 60) qp = 0;
- }
-
- printk ("descriptors:\n");
- for (i=0;i<60;i++) {
- printk ("%d: %p: %08x %08x %p %p\n",
- i, da[qd], dq[qd].flags, dq[qd].bsa, dq[qd].skb, dq[qd].dev);
- qd++;
- if (qd >= 60) qd = 0;
- }
-#endif
-
- for (dev = fs_boards;dev != NULL;dev=nxtdev) {
- fs_dprintk (FS_DEBUG_CLEANUP, "Releasing resources for dev at %p.\n", dev);
-
- /* XXX Hit all the tx channels too! */
-
- for (i=0;i < dev->nchannels;i++) {
- if (dev->atm_vccs[i]) {
- vcc = FS_VCC (dev->atm_vccs[i]);
- submit_command (dev, &dev->hp_txq,
- QE_CMD_TX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
- submit_command (dev, &dev->hp_txq,
- QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0);
-
- }
- }
-
- /* XXX Wait a while for the chip to release all buffers. */
-
- for (i=0;i < FS_NR_FREE_POOLS;i++) {
- for (fp=bus_to_virt (read_fs (dev, FP_SA(dev->rx_fp[i].offset)));
- !(fp->flags & FP_FLAGS_EPI);fp = nxt) {
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
- dev_kfree_skb_any (fp->skb);
- nxt = bus_to_virt (fp->next);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp);
- kfree (fp);
- }
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb);
- dev_kfree_skb_any (fp->skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp);
- kfree (fp);
- }
-
- /* Hang the chip in "reset", prevent it clobbering memory that is
- no longer ours. */
- reset_chip (dev);
-
- fs_dprintk (FS_DEBUG_CLEANUP, "Freeing irq%d.\n", dev->irq);
- free_irq (dev->irq, dev);
- del_timer_sync (&dev->timer);
-
- atm_dev_deregister(dev->atm_dev);
- free_queue (dev, &dev->hp_txq);
- free_queue (dev, &dev->lp_txq);
- free_queue (dev, &dev->tx_relq);
- free_queue (dev, &dev->st_q);
-
- fs_dprintk (FS_DEBUG_ALLOC, "Free atmvccs: %p\n", dev->atm_vccs);
- kfree (dev->atm_vccs);
-
- for (i=0;i< FS_NR_FREE_POOLS;i++)
- free_freepool (dev, &dev->rx_fp[i]);
-
- for (i=0;i < FS_NR_RX_QUEUES;i++)
- free_queue (dev, &dev->rx_rq[i]);
-
- iounmap(dev->base);
- fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev);
- nxtdev = dev->next;
- kfree (dev);
- }
-
- func_exit ();
-}
-
-static const struct pci_device_id firestream_pci_tbl[] = {
- { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50},
- { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155},
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, firestream_pci_tbl);
-
-static struct pci_driver firestream_driver = {
- .name = "firestream",
- .id_table = firestream_pci_tbl,
- .probe = firestream_init_one,
- .remove = firestream_remove_one,
-};
-
-static int __init firestream_init_module (void)
-{
- int error;
-
- func_enter ();
- error = pci_register_driver(&firestream_driver);
- func_exit ();
- return error;
-}
-
-static void __exit firestream_cleanup_module(void)
-{
- pci_unregister_driver(&firestream_driver);
-}
-
-module_init(firestream_init_module);
-module_exit(firestream_cleanup_module);
-
-MODULE_LICENSE("GPL");
-
-
-
diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h
deleted file mode 100644
index 6d684160808d..000000000000
--- a/drivers/atm/firestream.h
+++ /dev/null
@@ -1,502 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* drivers/atm/firestream.h - FireStream 155 (MB86697) and
- * FireStream 50 (MB86695) device driver
- */
-
-/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl
- * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA
- * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd
- */
-
-/*
-*/
-
-
-/***********************************************************************
- * first the defines for the chip. *
- ***********************************************************************/
-
-
-/********************* General chip parameters. ************************/
-
-#define FS_NR_FREE_POOLS 8
-#define FS_NR_RX_QUEUES 4
-
-
-/********************* queues and queue access macros ******************/
-
-
-/* A queue entry. */
-struct FS_QENTRY {
- u32 cmd;
- u32 p0, p1, p2;
-};
-
-
-/* A freepool entry. */
-struct FS_BPENTRY {
- u32 flags;
- u32 next;
- u32 bsa;
- u32 aal_bufsize;
-
- /* The hardware doesn't look at this, but we need the SKB somewhere... */
- struct sk_buff *skb;
- struct freepool *fp;
- struct fs_dev *dev;
-};
-
-
-#define STATUS_CODE(qe) ((qe->cmd >> 22) & 0x3f)
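-
-/* On completion the chip reuses bits 27-22 of the command word as a
- status code, which STATUS_CODE extracts. E.g. a txdone entry with
- status 2 ("transmitted OK", see process_txdone_queue in firestream.c)
- has (qe->cmd >> 22) & 0x3f == 0x02. My reading of the driver, not of
- the chip documentation. */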
-
-
-/* OFFSETS against the base of a QUEUE... */
-#define QSA 0x00
-#define QEA 0x04
-#define QRP 0x08
-#define QWP 0x0c
-#define QCNF 0x10 /* Only for Release queues! */
-/* Not for the transmit pending queue. */
-
-
-/* OFFSETS against the base of a FREE POOL... */
-#define FPCNF 0x00
-#define FPSA 0x04
-#define FPEA 0x08
-#define FPCNT 0x0c
-#define FPCTU 0x10
-
-#define Q_SA(b) (b + QSA )
-#define Q_EA(b) (b + QEA )
-#define Q_RP(b) (b + QRP )
-#define Q_WP(b) (b + QWP )
-#define Q_CNF(b) (b + QCNF)
-
-#define FP_CNF(b) (b + FPCNF)
-#define FP_SA(b) (b + FPSA)
-#define FP_EA(b) (b + FPEA)
-#define FP_CNT(b) (b + FPCNT)
-#define FP_CTU(b) (b + FPCTU)
-
-/* bits in a queue register. */
-#define Q_FULL 0x1
-#define Q_EMPTY 0x2
-#define Q_INCWRAP 0x4
-#define Q_ADDR_MASK 0xfffffff0
-
-/* bits in a FreePool config register */
-#define RBFP_RBS (0x1 << 16)
-#define RBFP_RBSVAL (0x1 << 15)
-#define RBFP_CME (0x1 << 12)
-#define RBFP_DLP (0x1 << 11)
-#define RBFP_BFPWT (0x1 << 0)
-
-
-
-
-/* FireStream commands. */
-#define QE_CMD_NULL (0x00 << 22)
-#define QE_CMD_REG_RD (0x01 << 22)
-#define QE_CMD_REG_RDM (0x02 << 22)
-#define QE_CMD_REG_WR (0x03 << 22)
-#define QE_CMD_REG_WRM (0x04 << 22)
-#define QE_CMD_CONFIG_TX (0x05 << 22)
-#define QE_CMD_CONFIG_RX (0x06 << 22)
-#define QE_CMD_PRP_RD (0x07 << 22)
-#define QE_CMD_PRP_RDM (0x2a << 22)
-#define QE_CMD_PRP_WR (0x09 << 22)
-#define QE_CMD_PRP_WRM (0x2b << 22)
-#define QE_CMD_RX_EN (0x0a << 22)
-#define QE_CMD_RX_PURGE (0x0b << 22)
-#define QE_CMD_RX_PURGE_INH (0x0c << 22)
-#define QE_CMD_TX_EN (0x0d << 22)
-#define QE_CMD_TX_PURGE (0x0e << 22)
-#define QE_CMD_TX_PURGE_INH (0x0f << 22)
-#define QE_CMD_RST_CG (0x10 << 22)
-#define QE_CMD_SET_CG (0x11 << 22)
-#define QE_CMD_RST_CLP (0x12 << 22)
-#define QE_CMD_SET_CLP (0x13 << 22)
-#define QE_CMD_OVERRIDE (0x14 << 22)
-#define QE_CMD_ADD_BFP (0x15 << 22)
-#define QE_CMD_DUMP_TX (0x16 << 22)
-#define QE_CMD_DUMP_RX (0x17 << 22)
-#define QE_CMD_LRAM_RD (0x18 << 22)
-#define QE_CMD_LRAM_RDM (0x28 << 22)
-#define QE_CMD_LRAM_WR (0x19 << 22)
-#define QE_CMD_LRAM_WRM (0x29 << 22)
-#define QE_CMD_LRAM_BSET (0x1a << 22)
-#define QE_CMD_LRAM_BCLR (0x1b << 22)
-#define QE_CMD_CONFIG_SEGM (0x1c << 22)
-#define QE_CMD_READ_SEGM (0x1d << 22)
-#define QE_CMD_CONFIG_ROUT (0x1e << 22)
-#define QE_CMD_READ_ROUT (0x1f << 22)
-#define QE_CMD_CONFIG_TM (0x20 << 22)
-#define QE_CMD_READ_TM (0x21 << 22)
-#define QE_CMD_CONFIG_TXBM (0x22 << 22)
-#define QE_CMD_READ_TXBM (0x23 << 22)
-#define QE_CMD_CONFIG_RXBM (0x24 << 22)
-#define QE_CMD_READ_RXBM (0x25 << 22)
-#define QE_CMD_CONFIG_REAS (0x26 << 22)
-#define QE_CMD_READ_REAS (0x27 << 22)
-
-#define QE_TRANSMIT_DE (0x0 << 30)
-#define QE_CMD_LINKED (0x1 << 30)
-#define QE_CMD_IMM (0x2 << 30)
-#define QE_CMD_IMM_INQ (0x3 << 30)
-
-#define TD_EPI (0x1 << 27)
-#define TD_COMMAND (0x1 << 28)
-
-#define TD_DATA (0x0 << 29)
-#define TD_RM_CELL (0x1 << 29)
-#define TD_OAM_CELL (0x2 << 29)
-#define TD_OAM_CELL_SEGMENT (0x3 << 29)
-
-#define TD_BPI (0x1 << 20)
-
-#define FP_FLAGS_EPI (0x1 << 27)
-
-
-#define TX_PQ(i) (0x00 + (i) * 0x10)
-#define TXB_RQ (0x20)
-#define ST_Q (0x48)
-#define RXB_FP(i) (0x90 + (i) * 0x14)
-#define RXB_RQ(i) (0x134 + (i) * 0x14)
-
-
-#define TXQ_HP 0
-#define TXQ_LP 1
-
-/* Phew. You don't want to know how many revisions these simple queue
- * address macros went through before I got them nice and compact as
- * they are now. -- REW
- */
-
-
-/* And now for something completely different:
- * The rest of the registers... */
-
-
-#define CMDR0 0x34
-#define CMDR1 0x38
-#define CMDR2 0x3c
-#define CMDR3 0x40
-
-
-#define SARMODE0 0x5c
-
-#define SARMODE0_TXVCS_0 (0x0 << 0)
-#define SARMODE0_TXVCS_1k (0x1 << 0)
-#define SARMODE0_TXVCS_2k (0x2 << 0)
-#define SARMODE0_TXVCS_4k (0x3 << 0)
-#define SARMODE0_TXVCS_8k (0x4 << 0)
-#define SARMODE0_TXVCS_16k (0x5 << 0)
-#define SARMODE0_TXVCS_32k (0x6 << 0)
-#define SARMODE0_TXVCS_64k (0x7 << 0)
-#define SARMODE0_TXVCS_32 (0x8 << 0)
-
-#define SARMODE0_ABRVCS_0 (0x0 << 4)
-#define SARMODE0_ABRVCS_512 (0x1 << 4)
-#define SARMODE0_ABRVCS_1k (0x2 << 4)
-#define SARMODE0_ABRVCS_2k (0x3 << 4)
-#define SARMODE0_ABRVCS_4k (0x4 << 4)
-#define SARMODE0_ABRVCS_8k (0x5 << 4)
-#define SARMODE0_ABRVCS_16k (0x6 << 4)
-#define SARMODE0_ABRVCS_32k (0x7 << 4)
-#define SARMODE0_ABRVCS_32 (0x9 << 4) /* The others are "8", this one really has to
- be 9. Tell me you don't believe me. -- REW */
-
-#define SARMODE0_RXVCS_0 (0x0 << 8)
-#define SARMODE0_RXVCS_1k (0x1 << 8)
-#define SARMODE0_RXVCS_2k (0x2 << 8)
-#define SARMODE0_RXVCS_4k (0x3 << 8)
-#define SARMODE0_RXVCS_8k (0x4 << 8)
-#define SARMODE0_RXVCS_16k (0x5 << 8)
-#define SARMODE0_RXVCS_32k (0x6 << 8)
-#define SARMODE0_RXVCS_64k (0x7 << 8)
-#define SARMODE0_RXVCS_32 (0x8 << 8)
-
-#define SARMODE0_CALSUP_1 (0x0 << 12)
-#define SARMODE0_CALSUP_2 (0x1 << 12)
-#define SARMODE0_CALSUP_3 (0x2 << 12)
-#define SARMODE0_CALSUP_4 (0x3 << 12)
-
-#define SARMODE0_PRPWT_FS50_0 (0x0 << 14)
-#define SARMODE0_PRPWT_FS50_2 (0x1 << 14)
-#define SARMODE0_PRPWT_FS50_5 (0x2 << 14)
-#define SARMODE0_PRPWT_FS50_11 (0x3 << 14)
-
-#define SARMODE0_PRPWT_FS155_0 (0x0 << 14)
-#define SARMODE0_PRPWT_FS155_1 (0x1 << 14)
-#define SARMODE0_PRPWT_FS155_2 (0x2 << 14)
-#define SARMODE0_PRPWT_FS155_3 (0x3 << 14)
-
-#define SARMODE0_SRTS0 (0x1 << 23)
-#define SARMODE0_SRTS1 (0x1 << 24)
-
-#define SARMODE0_RUN (0x1 << 25)
-
-#define SARMODE0_UNLOCK (0x1 << 26)
-#define SARMODE0_CWRE (0x1 << 27)
-
-
-#define SARMODE0_INTMODE_READCLEAR (0x0 << 28)
-#define SARMODE0_INTMODE_READNOCLEAR (0x1 << 28)
-#define SARMODE0_INTMODE_READNOCLEARINHIBIT (0x2 << 28)
-#define SARMODE0_INTMODE_READCLEARINHIBIT (0x3 << 28) /* Tell me you don't believe me. */
-
-#define SARMODE0_GINT (0x1 << 30)
-#define SARMODE0_SHADEN (0x1 << 31)
-
-
-#define SARMODE1 0x60
-
-
-#define SARMODE1_TRTL_SHIFT 0 /* Program to 0 */
-#define SARMODE1_RRTL_SHIFT 4 /* Program to 0 */
-
-#define SARMODE1_TAGM (0x1 << 8) /* Program to 0 */
-
-#define SARMODE1_HECM0 (0x1 << 9)
-#define SARMODE1_HECM1 (0x1 << 10)
-#define SARMODE1_HECM2 (0x1 << 11)
-
-#define SARMODE1_GFCE (0x1 << 14)
-#define SARMODE1_GFCR (0x1 << 15)
-#define SARMODE1_PMS (0x1 << 18)
-#define SARMODE1_GPRI (0x1 << 19)
-#define SARMODE1_GPAS (0x1 << 20)
-#define SARMODE1_GVAS (0x1 << 21)
-#define SARMODE1_GNAM (0x1 << 22)
-#define SARMODE1_GPLEN (0x1 << 23)
-#define SARMODE1_DUMPE (0x1 << 24)
-#define SARMODE1_OAMCRC (0x1 << 25)
-#define SARMODE1_DCOAM (0x1 << 26)
-#define SARMODE1_DCRM (0x1 << 27)
-#define SARMODE1_TSTLP (0x1 << 28)
-#define SARMODE1_DEFHEC (0x1 << 29)
-
-
-#define ISR 0x64
-#define IUSR 0x68
-#define IMR 0x6c
-
-#define ISR_LPCO (0x1 << 0)
-#define ISR_DPCO (0x1 << 1)
-#define ISR_RBRQ0_W (0x1 << 2)
-#define ISR_RBRQ1_W (0x1 << 3)
-#define ISR_RBRQ2_W (0x1 << 4)
-#define ISR_RBRQ3_W (0x1 << 5)
-#define ISR_RBRQ0_NF (0x1 << 6)
-#define ISR_RBRQ1_NF (0x1 << 7)
-#define ISR_RBRQ2_NF (0x1 << 8)
-#define ISR_RBRQ3_NF (0x1 << 9)
-#define ISR_BFP_SC (0x1 << 10)
-#define ISR_INIT (0x1 << 11)
-#define ISR_INIT_ERR (0x1 << 12) /* Documented as "reserved" */
-#define ISR_USCEO (0x1 << 13)
-#define ISR_UPEC0 (0x1 << 14)
-#define ISR_VPFCO (0x1 << 15)
-#define ISR_CRCCO (0x1 << 16)
-#define ISR_HECO (0x1 << 17)
-#define ISR_TBRQ_W (0x1 << 18)
-#define ISR_TBRQ_NF (0x1 << 19)
-#define ISR_CTPQ_E (0x1 << 20)
-#define ISR_GFC_C0 (0x1 << 21)
-#define ISR_PCI_FTL (0x1 << 22)
-#define ISR_CSQ_W (0x1 << 23)
-#define ISR_CSQ_NF (0x1 << 24)
-#define ISR_EXT_INT (0x1 << 25)
-#define ISR_RXDMA_S (0x1 << 26)
-
-
-#define TMCONF 0x78
-/* Bits? */
-
-
-#define CALPRESCALE 0x7c
-/* Bits? */
-
-#define CELLOSCONF 0x84
-#define CELLOSCONF_COTS (0x1 << 28)
-#define CELLOSCONF_CEN (0x1 << 27)
-#define CELLOSCONF_SC8 (0x3 << 24)
-#define CELLOSCONF_SC4 (0x2 << 24)
-#define CELLOSCONF_SC2 (0x1 << 24)
-#define CELLOSCONF_SC1 (0x0 << 24)
-
-#define CELLOSCONF_COBS (0x1 << 16)
-#define CELLOSCONF_COPK (0x1 << 8)
-#define CELLOSCONF_COST (0x1 << 0)
-/* Bits? */
-
-#define RAS0 0x1bc
-#define RAS0_DCD_XHLT (0x1 << 31)
-
-#define RAS0_VPSEL (0x1 << 16)
-#define RAS0_VCSEL (0x1 << 0)
-
-#define RAS1 0x1c0
-#define RAS1_UTREG (0x1 << 5)
-
-
-#define DMAMR 0x1cc
-#define DMAMR_TX_MODE_FULL (0x0 << 0)
-#define DMAMR_TX_MODE_PART (0x1 << 0)
-#define DMAMR_TX_MODE_NONE (0x2 << 0) /* And 3 */
-
-
-
-#define RAS2 0x280
-
-#define RAS2_NNI (0x1 << 0)
-#define RAS2_USEL (0x1 << 1)
-#define RAS2_UBS (0x1 << 2)
-
-
-
-struct fs_transmit_config {
- u32 flags;
- u32 atm_hdr;
- u32 TMC[4];
- u32 spec;
- u32 rtag[3];
-};
-
-#define TC_FLAGS_AAL5 (0x0 << 29)
-#define TC_FLAGS_TRANSPARENT_PAYLOAD (0x1 << 29)
-#define TC_FLAGS_TRANSPARENT_CELL (0x2 << 29)
-#define TC_FLAGS_STREAMING (0x1 << 28)
-#define TC_FLAGS_PACKET (0x0)
-#define TC_FLAGS_TYPE_ABR (0x0 << 22)
-#define TC_FLAGS_TYPE_CBR (0x1 << 22)
-#define TC_FLAGS_TYPE_VBR (0x2 << 22)
-#define TC_FLAGS_TYPE_UBR (0x3 << 22)
-#define TC_FLAGS_CAL0 (0x0 << 20)
-#define TC_FLAGS_CAL1 (0x1 << 20)
-#define TC_FLAGS_CAL2 (0x2 << 20)
-#define TC_FLAGS_CAL3 (0x3 << 20)
-
-
-#define RC_FLAGS_NAM (0x1 << 13)
-#define RC_FLAGS_RXBM_PSB (0x0 << 14)
-#define RC_FLAGS_RXBM_CIF (0x1 << 14)
-#define RC_FLAGS_RXBM_PMB (0x2 << 14)
-#define RC_FLAGS_RXBM_STR (0x4 << 14)
-#define RC_FLAGS_RXBM_SAF (0x6 << 14)
-#define RC_FLAGS_RXBM_POS (0x6 << 14)
-#define RC_FLAGS_BFPS (0x1 << 17)
-
-#define RC_FLAGS_BFPS_BFP (0x1 << 17)
-
-#define RC_FLAGS_BFPS_BFP0 (0x0 << 17)
-#define RC_FLAGS_BFPS_BFP1 (0x1 << 17)
-#define RC_FLAGS_BFPS_BFP2 (0x2 << 17)
-#define RC_FLAGS_BFPS_BFP3 (0x3 << 17)
-#define RC_FLAGS_BFPS_BFP4 (0x4 << 17)
-#define RC_FLAGS_BFPS_BFP5 (0x5 << 17)
-#define RC_FLAGS_BFPS_BFP6 (0x6 << 17)
-#define RC_FLAGS_BFPS_BFP7 (0x7 << 17)
-#define RC_FLAGS_BFPS_BFP01 (0x8 << 17)
-#define RC_FLAGS_BFPS_BFP23 (0x9 << 17)
-#define RC_FLAGS_BFPS_BFP45 (0xa << 17)
-#define RC_FLAGS_BFPS_BFP67 (0xb << 17)
-#define RC_FLAGS_BFPS_BFP07 (0xc << 17)
-#define RC_FLAGS_BFPS_BFP27 (0xd << 17)
-#define RC_FLAGS_BFPS_BFP47 (0xe << 17)
-
-#define RC_FLAGS_BFPP (0x1 << 21)
-#define RC_FLAGS_TEVC (0x1 << 22)
-#define RC_FLAGS_TEP (0x1 << 23)
-#define RC_FLAGS_AAL5 (0x0 << 24)
-#define RC_FLAGS_TRANSP (0x1 << 24)
-#define RC_FLAGS_TRANSC (0x2 << 24)
-#define RC_FLAGS_ML (0x1 << 27)
-#define RC_FLAGS_TRBRM (0x1 << 28)
-#define RC_FLAGS_PRI (0x1 << 29)
-#define RC_FLAGS_HOAM (0x1 << 30)
-#define RC_FLAGS_CRC10 (0x1 << 31)
-
-
-#define RAC 0x1c8
-#define RAM 0x1c4
-
-
-
-/************************************************************************
- * Then the datastructures that the DRIVER uses. *
- ************************************************************************/
-
-#define TXQ_NENTRIES 32
-#define RXRQ_NENTRIES 1024
-
-
-struct fs_vcc {
- int channo;
- wait_queue_head_t close_wait;
- struct sk_buff *last_skb;
-};
-
-
-struct queue {
- struct FS_QENTRY *sa, *ea;
- int offset;
-};
-
-struct freepool {
- int offset;
- int bufsize;
- int nr_buffers;
- int n;
-};
-
-
-struct fs_dev {
- struct fs_dev *next; /* other FS devices */
- int flags;
-
- unsigned char irq; /* IRQ */
- struct pci_dev *pci_dev; /* PCI stuff */
- struct atm_dev *atm_dev;
- struct timer_list timer;
-
- unsigned long hw_base; /* mem base address */
- void __iomem *base; /* Mapping of base address */
- int channo;
- unsigned long channel_mask;
-
- struct queue hp_txq, lp_txq, tx_relq, st_q;
- struct freepool rx_fp[FS_NR_FREE_POOLS];
- struct queue rx_rq[FS_NR_RX_QUEUES];
-
- int nchannels;
- struct atm_vcc **atm_vccs;
- void *tx_inuse;
- int ntxpckts;
-};
-
-
-
-
-/* Number of channels that the FS50 supports. */
-#define FS50_CHANNEL_BITS 5
-#define FS50_NR_CHANNELS (1 << FS50_CHANNEL_BITS)
-
-
-#define FS_DEV(atm_dev) ((struct fs_dev *) (atm_dev)->dev_data)
-#define FS_VCC(atm_vcc) ((struct fs_vcc *) (atm_vcc)->dev_data)
-
-
-#define FS_IS50 0x1
-#define FS_IS155 0x2
-
-#define IS_FS50(dev) (dev->flags & FS_IS50)
-#define IS_FS155(dev) (dev->flags & FS_IS155)
-
-/* Within limits this is user-configurable. */
-/* Note: Currently the sum (10 -> 1k channels) is hardcoded in the driver. */
-#define FS155_VPI_BITS 4
-#define FS155_VCI_BITS 6
-
-#define FS155_CHANNEL_BITS (FS155_VPI_BITS + FS155_VCI_BITS)
-#define FS155_NR_CHANNELS (1 << FS155_CHANNEL_BITS)
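-
-/* Worked example (mine, for clarity): with the default 4 VPI and 6 VCI
- bits, fs_open() computes channo = (vpi << 6) | vci, so VPI/VCI = 1/5
- lands on channel 0x45, and FS155_NR_CHANNELS = 1 << 10 = 1024. */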
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
deleted file mode 100644
index d0e67ec46216..000000000000
--- a/drivers/atm/horizon.c
+++ /dev/null
@@ -1,2853 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Madge Horizon ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/*
- IMPORTANT NOTE: Madge Networks no longer makes the adapters
- supported by this driver and makes no commitment to maintain it.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <asm/string.h>
-#include <asm/byteorder.h>
-
-#include "horizon.h"
-
-#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
-#define description_string "Madge ATM Horizon [Ultra] driver"
-#define version_string "1.2.1"
-
-static inline void __init show_version (void) {
- printk ("%s version %s\n", description_string, version_string);
-}
-
-/*
-
- CREDITS
-
- Driver and documentation by:
-
- Chris Aston Madge Networks
- Giuliano Procida Madge Networks
- Simon Benham Madge Networks
- Simon Johnson Madge Networks
- Various Others Madge Networks
-
- Some inspiration taken from other drivers by:
-
- Alexandru Cucos UTBv
- Kari Mettinen University of Helsinki
- Werner Almesberger EPFL LRC
-
- Theory of Operation
-
- I Hardware, detection, initialisation and shutdown.
-
- 1. Supported Hardware
-
- This driver should handle all variants of the PCI Madge ATM adapters
- with the Horizon chipset. These are all PCI cards supporting PIO, BM
- DMA and a form of MMIO (registers only, not internal RAM).
-
- The driver is only known to work with SONET and UTP Horizon Ultra
- cards at 155Mb/s. However, code is in place to deal with both the
- original Horizon and 25Mb/s operation.
-
- There are two revisions of the Horizon ASIC: the original and the
- Ultra. Details of hardware bugs are in section III.
-
- The ASIC version can be distinguished by chip markings but is NOT
- indicated by the PCI revision (all adapters seem to have PCI rev 1).
-
- I believe that:
-
- Horizon => Collage 25 PCI Adapter (UTP and STP)
- Horizon Ultra => Collage 155 PCI Client (UTP or SONET)
- Ambassador x => Collage 155 PCI Server (completely different)
-
- Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to
- have a Madge B154 plus glue logic serializer. I have also found a
- really ancient version of this with slightly different glue. It
- comes with the revision 0 (140-025-01) ASIC.
-
- Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink
- output (UTP) or an HP HFBR 5205 output (SONET). It has either
- Madge's SAMBA framer or a SUNI-lite device (early versions). It
- comes with the revision 1 (140-027-01) ASIC.
-
- 2. Detection
-
- All Horizon-based cards present with the same PCI Vendor and Device
- IDs. The standard Linux 2.2 PCI API is used to locate any cards and
- to enable bus-mastering (with appropriate latency).
-
- ATM_LAYER_STATUS in the control register distinguishes between the
- two possible physical layers (25 and 155). It is not clear whether
- the 155 cards can also operate at 25Mbps. We rely on the fact that a
- card operates at 155 if and only if it has the newer Horizon Ultra
- ASIC.
-
- For 155 cards the two possible framers are probed for and then set
- up for loop-timing.
-
- 3. Initialisation
-
- The card is reset and then put into a known state. The physical
- layer is configured for normal operation at the appropriate speed;
- in the case of the 155 cards, the framer is initialised with
- line-based timing; the internal RAM is zeroed and the allocation of
- buffers for RX and TX is made; the Burnt In Address is read and
- copied to the ATM ESI; various policy settings for RX (VPI bits,
- unknown VCs, OAM cells) are made. Ideally all policy items should be
- configurable at module load (if not actually on-demand), however,
- only the vpi vs vci bit allocation can be specified at insmod.
-
- 4. Shutdown
-
- This is in response to module_cleanup. No VCs are in use and the card
- should be idle; it is reset.
-
- II Driver software (as it should be)
-
- 0. Traffic Parameters
-
- The traffic classes (not an enumeration) are currently: ATM_NONE (no
- traffic), ATM_UBR, ATM_CBR, ATM_VBR, ATM_ABR and ATM_ANYCLASS
- (compatible with everything). Together with (perhaps only some of)
- the following items they make up the traffic specification.
-
- struct atm_trafprm {
- unsigned char traffic_class; traffic class (ATM_UBR, ...)
- int max_pcr; maximum PCR in cells per second
- int pcr; desired PCR in cells per second
- int min_pcr; minimum PCR in cells per second
- int max_cdv; maximum CDV in microseconds
- int max_sdu; maximum SDU in bytes
- };
-
- Note that these denote bandwidth available not bandwidth used; the
- possibilities according to ATMF are:
-
- Real Time (cdv and max CDT given)
-
- CBR(pcr) pcr bandwidth always available
- rtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
-
- Non Real Time
-
- nrtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
- UBR()
- ABR(mcr,pcr) mcr bandwidth always available, up to pcr (depending) too
-
- mbs is max burst size (bucket)
- pcr and scr have associated cdvt values
- mcr is like scr but has no cdvt
- cdvt may differ at each hop
-
- Some of the above items are qos items (as opposed to traffic
- parameters). We have nothing to do with qos. All except ABR can have
- their traffic parameters converted to GCRA parameters. The GCRA may
- be implemented as a (real-number) leaky bucket. The GCRA can be used
- in complicated ways by switches and in simpler ways by end-stations.
- It can be used both to filter incoming cells and shape out-going
- cells.
-
- ATM Linux actually supports:
-
- ATM_NONE() (no traffic in this direction)
- ATM_UBR(max_frame_size)
- ATM_CBR(max/min_pcr, max_cdv, max_frame_size)
-
- 0 or ATM_MAX_PCR are used to indicate maximum available PCR
-
- A traffic specification consists of the AAL type and separate
- traffic specifications for either direction. In ATM Linux it is:
-
- struct atm_qos {
- struct atm_trafprm txtp;
- struct atm_trafprm rxtp;
- unsigned char aal;
- };
-
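- For illustration only (this is not driver code), a CBR VC carrying
- AAL5 frames of up to 1500 bytes at 10000 cells per second might be
- requested as:
-
-   struct atm_qos qos;
-   memset (&qos, 0, sizeof (qos));
-   qos.aal = ATM_AAL5;
-   qos.txtp.traffic_class = ATM_CBR;
-   qos.txtp.max_pcr = 10000;
-   qos.txtp.max_sdu = 1500;
-   qos.rxtp = qos.txtp;
-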
- AAL types are:
-
- ATM_NO_AAL AAL not specified
- ATM_AAL0 "raw" ATM cells
- ATM_AAL1 AAL1 (CBR)
- ATM_AAL2 AAL2 (VBR)
- ATM_AAL34 AAL3/4 (data)
- ATM_AAL5 AAL5 (data)
- ATM_SAAL signaling AAL
-
- The Horizon has support for AAL frame types: 0, 3/4 and 5. However,
- it does not implement AAL 3/4 SAR and it has a different notion of
- "raw cell" to ATM Linux's (48 bytes vs. 52 bytes) so neither are
- supported by this driver.
-
- The Horizon has limited support for ABR (including UBR), VBR and
- CBR. Each TX channel has a bucket (containing up to 31 cell units)
- and two timers (PCR and SCR) associated with it that can be used to
- govern cell emissions and host notification (in the case of ABR this
- is presumably so that RM cells may be emitted at appropriate times).
- The timers may either be disabled or may be set to any of 240 values
- (determined by the clock crystal, a fixed (?) per-device divider, a
- configurable divider and a configurable timer preload value).
-
- At the moment only UBR and CBR are supported by the driver. VBR will
- be supported as soon as ATM for Linux supports it. ABR support is
- very unlikely as RM cell handling is completely up to the driver.
-
- 1. TX (TX channel setup and TX transfer)
-
- The TX half of the driver owns the TX Horizon registers. The TX
- component in the IRQ handler is the BM completion handler. This can
- only be entered when tx_busy is true (enforced by hardware). The
- other TX component can only be entered when tx_busy is false
- (enforced by driver). So TX is single-threaded.
-
- Apart from a minor optimisation to not re-select the last channel,
- the TX send component works as follows:
-
- Atomic test and set tx_busy until we succeed; we should implement
- some sort of timeout so that tx_busy will never be stuck at true.
-
- If no TX channel is set up for this VC we wait for an idle one (if
- necessary) and set it up.
-
- At this point we have a TX channel ready for use. We wait for enough
- buffers to become available then start a TX transmit (set the TX
- descriptor, schedule transfer, exit).
-
- The IRQ component handles TX completion (stats, free buffer, tx_busy
- unset, exit). We also re-schedule further transfers for the same
- frame if needed.
-
- TX setup in more detail:
-
- TX open is a nop, the relevant information is held in the hrz_vcc
- (vcc->dev_data) structure and is "cached" on the card.
-
- TX close gets the TX lock and clears the channel from the "cache".
-
- 2. RX (Data Available and RX transfer)
-
- The RX half of the driver owns the RX registers. There are two RX
- components in the IRQ handler: the data available handler deals with
- fresh data that has arrived on the card, the BM completion handler
- is very similar to the TX completion handler. The data available
- handler grabs the rx_lock and it is only released once the data has
- been discarded or completely transferred to the host. The BM
- completion handler only runs when the lock is held; the data
- available handler is locked out over the same period.
-
- Data available on the card triggers an interrupt. If the data is not
- suitable for our existing RX channels or we cannot allocate a buffer
- it is flushed. Otherwise an RX receive is scheduled. Multiple RX
- transfers may be scheduled for the same frame.
-
- RX setup in more detail:
-
- RX open...
- RX close...
-
- III Hardware Bugs
-
- 0. Byte vs Word addressing of adapter RAM.
-
- A design feature; see the .h file (especially the memory map).
-
- 1. Bus Master Data Transfers (original Horizon only, fixed in Ultra)
-
- The host must not start a transmit direction transfer at a
- non-four-byte boundary in host memory. Instead the host should
- perform a one-byte, a two-byte, or a one-byte followed by a two-byte
- transfer so that the rest of the transfer starts on a four-byte
- boundary. RX is OK.
-
- Simultaneous transmit and receive direction bus master transfers are
- not allowed.
-
- The simplest solution to these two is to always do PIO (never DMA)
- in the TX direction on the original Horizon. More complicated
- solutions are likely to hurt my brain.
-
- 2. Loss of buffer on close VC
-
- When a VC is being closed, the buffer associated with it is not
- returned to the pool. The host must store a reference to this
- buffer and, when opening a new VC, give it to that new VC.
-
- The host intervention currently consists of stacking such a buffer
- pointer at VC close and checking the stack at VC open.
-
- 3. Failure to close a VC
-
- If a VC is currently receiving a frame then closing the VC may fail
- and the frame continues to be received.
-
- The solution is to make sure any received frames are flushed when
- ready. This is currently done just before the solution to 2.
-
- 4. PCI bus (original Horizon only, fixed in Ultra)
-
- Reading from the data port prior to initialisation will hang the PCI
- bus. Just don't do that then! We don't.
-
- IV To Do List
-
- . Timer code may be broken.
-
- . Allow users to specify buffer allocation split for TX and RX.
-
- . Deal once and for all with buggy VC close.
-
- . Handle interrupted and/or non-blocking operations.
-
- . Change some macros to functions and move from .h to .c.
-
- . Try to limit the number of TX frames each VC may have queued, in
- order to reduce the chances of TX buffer exhaustion.
-
- . Implement VBR (bucket and timers not understood) and ABR (need to
- do RM cells manually); also no Linux support for either.
-
- . Implement QoS changes on open VCs (involves extracting parts of VC open
- and close into separate functions and using them to make changes).
-
-*/
-
-/********** globals **********/
-
-static void do_housekeeping (struct timer_list *t);
-
-static unsigned short debug = 0;
-static unsigned short vpi_bits = 0;
-static int max_tx_size = 9000;
-static int max_rx_size = 9000;
-static unsigned char pci_lat = 0;
-
-/********** access functions **********/
-
-/* Read / Write Horizon registers */
-static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) {
- outl (cpu_to_le32 (data), dev->iobase + reg);
-}
-
-static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) {
- return le32_to_cpu (inl (dev->iobase + reg));
-}
-
-static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) {
- outw (cpu_to_le16 (data), dev->iobase + reg);
-}
-
-static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) {
- return le16_to_cpu (inw (dev->iobase + reg));
-}
-
-static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
- outsb (dev->iobase + reg, addr, len);
-}
-
-static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
- insb (dev->iobase + reg, addr, len);
-}
-
-/* Read / Write to a given address in Horizon buffer memory.
- Interrupts must be disabled between the address register and data
- port accesses as these must form an atomic operation. */
-static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) {
- // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr);
- wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
- wr_regl (dev, MEMORY_PORT_OFF, data);
-}
-
-static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) {
- // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr);
- wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
- return rd_regl (dev, MEMORY_PORT_OFF);
-}
-
-static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) {
- wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000);
- wr_regl (dev, MEMORY_PORT_OFF, data);
-}
-
-static inline u32 rd_framer (const hrz_dev * dev, u32 addr) {
- wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000);
- return rd_regl (dev, MEMORY_PORT_OFF);
-}
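-
-/* The paired address-register and data-port accesses above are not
-   atomic by themselves; a minimal sketch of the caller pattern the
-   driver itself uses (see e.g. hrz_open_rx):
-
-     unsigned long flags;
-     u32 value;
-     spin_lock_irqsave (&dev->mem_lock, flags);
-     value = rd_mem (dev, addr);
-     spin_unlock_irqrestore (&dev->mem_lock, flags);
-*/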
-
-/********** specialised access functions **********/
-
-/* RX */
-
-static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
- wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel);
- return;
-}
-
-static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
- while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
- ;
- return;
-}
-
-static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
- wr_regw (dev, RX_CHANNEL_PORT_OFF, channel);
- return;
-}
-
-static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
- while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
- ;
- return;
-}
-
-/* TX */
-
-static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) {
- wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel);
- return;
-}
-
-/* Update or query one configuration parameter of a particular channel. */
-
-static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) {
- wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
- chan * TX_CHANNEL_CONFIG_MULT | mode);
- wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value);
- return;
-}
-
-/********** dump functions **********/
-
-static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
-#ifdef DEBUG_HORIZON
- unsigned int i;
- unsigned char * data = skb->data;
- PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
- for (i=0; i<skb->len && i < 256;i++)
- PRINTDM (DBG_DATA, "%02x ", data[i]);
- PRINTDE (DBG_DATA,"");
-#else
- (void) prefix;
- (void) vc;
- (void) skb;
-#endif
- return;
-}
-
-static inline void dump_regs (hrz_dev * dev) {
-#ifdef DEBUG_HORIZON
- PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG));
- PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF));
- PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF));
- PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF));
- PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF));
- PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF));
-#else
- (void) dev;
-#endif
- return;
-}
-
-static inline void dump_framer (hrz_dev * dev) {
-#ifdef DEBUG_HORIZON
- unsigned int i;
- PRINTDB (DBG_REGS, "framer registers:");
- for (i = 0; i < 0x10; ++i)
- PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i));
- PRINTDE (DBG_REGS,"");
-#else
- (void) dev;
-#endif
- return;
-}
-
-/********** VPI/VCI <-> (RX) channel conversions **********/
-
-/* RX channels are 10 bit integers, these fns are quite paranoid */
-
-static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
- unsigned short vci_bits = 10 - vpi_bits;
- if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
- *channel = vpi<<vci_bits | vci;
- return *channel ? 0 : -EINVAL;
- }
- return -EINVAL;
-}
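-
-/* For example, with the default vpi_bits = 0 the channel is just the
-   VCI (1 to 1023; channel 0 is rejected); with vpi_bits = 2 it is
-   vpi<<8 | vci for vpi 0 to 3 and vci 0 to 255. */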
-
-/********** decode RX queue entries **********/
-
-static inline u16 rx_q_entry_to_length (u32 x) {
- return x & RX_Q_ENTRY_LENGTH_MASK;
-}
-
-static inline u16 rx_q_entry_to_rx_channel (u32 x) {
- return (x>>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK;
-}
-
-/* Cell Transmit Rate Values
- *
- * the cell transmit rate (cells per sec) can be set to a variety of
- * different values by specifying two parameters: a timer preload from
- * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of
- * an exponent from 0 to 14; the special value 15 disables the timer).
- *
- * cellrate = baserate / (preload * 2^divider)
- *
- * The maximum cell rate that can be specified is therefore just the
- * base rate. Halving the preload is equivalent to adding 1 to the
- * divider and so values 1 to 8 of the preload are redundant except
- * in the case of a maximal divider (14).
- *
- * Given a desired cell rate, an algorithm to determine the preload
- * and divider is:
- *
- * a) x = baserate / cellrate, want p * 2^d = x (as far as possible)
- * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done
- * if x <= 16 then set p = x, d = 0 (high rates), done
- * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to
- * know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until
- * we find the range (n will be between 1 and 14), set d = n
- * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n
- *
- * The algorithm used below is a minor variant of the above.
- *
- * The base rate is derived from the oscillator frequency (Hz) using a
- * fixed divider:
- *
- * baserate = freq / 32 in the case of some Unknown Card
- * baserate = freq / 8 in the case of the Horizon 25
- * baserate = freq / 8 in the case of the Horizon Ultra 155
- *
- * The Horizon cards have oscillators and base rates as follows:
- *
- * Card Oscillator Base Rate
- * Unknown Card 33 MHz 1.03125 MHz (33 MHz = PCI freq)
- * Horizon 25 32 MHz 4 MHz
- * Horizon Ultra 155 40 MHz 5 MHz
- *
- * The following defines give the base rates in Hz. These were
- * previously a factor of 100 larger, no doubt someone was using
- * cps*100.
- */
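-
-/* Worked example (illustrative): the Horizon Ultra has a base rate of
- * 5000000 cps. For a requested 353208 cps (roughly the OC-3 cell
- * rate), x = 5000000/353208 ~= 14.2 <= 16, so step (b) applies with
- * p = 14 (x rounded) and d = 0, giving an actual rate of 5000000/14
- * ~= 357143 cps, about 1% above the request.
- */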
-
-#define BR_UKN 1031250l
-#define BR_HRZ 4000000l
-#define BR_ULT 5000000l
-
-// d is an exponent
-#define CR_MIND 0
-#define CR_MAXD 14
-
-// p ranges from 1 to a power of 2
-#define CR_MAXPEXP 4
-
-static int make_rate (const hrz_dev * dev, u32 c, rounding r,
- u16 * bits, unsigned int * actual)
-{
- // note: rounding the rate down means rounding 'p' up
- const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ;
-
- u32 div = CR_MIND;
- u32 pre;
-
- // br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in
- // the tests below. We could think harder about exact possibilities
- // of failure...
-
- unsigned long br_man = br;
- unsigned int br_exp = 0;
-
- PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c,
- r == round_up ? "up" : r == round_down ? "down" : "nearest");
-
- // avoid div by zero
- if (!c) {
- PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!");
- return -EINVAL;
- }
-
- while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) {
- br_man = br_man >> 1;
- ++br_exp;
- }
- // (br >>br_exp) <<br_exp == br and
- // br_exp <= CR_MAXPEXP+CR_MIND
-
- if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) {
- // Equivalent to: B <= (c << (MAXPEXP+MIND))
- // take care of rounding
- switch (r) {
- case round_down:
- pre = DIV_ROUND_UP(br, c<<div);
- // but p must be non-zero
- if (!pre)
- pre = 1;
- break;
- case round_nearest:
- pre = DIV_ROUND_CLOSEST(br, c<<div);
- // but p must be non-zero
- if (!pre)
- pre = 1;
- break;
- default: /* round_up */
- pre = br/(c<<div);
- // but p must be non-zero
- if (!pre)
- return -EINVAL;
- }
- PRINTD (DBG_QOS, "A: p=%u, d=%u", pre, div);
- goto got_it;
- }
-
- // at this point we have
- // d == MIND and (c << (MAXPEXP+MIND)) < B
- while (div < CR_MAXD) {
- div++;
- if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) {
- // Equivalent to: B <= (c << (MAXPEXP+d))
- // c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d)
- // 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP
- // MAXP/2 < B/c2^d <= MAXP
- // take care of rounding
- switch (r) {
- case round_down:
- pre = DIV_ROUND_UP(br, c<<div);
- break;
- case round_nearest:
- pre = DIV_ROUND_CLOSEST(br, c<<div);
- break;
- default: /* round_up */
- pre = br/(c<<div);
- }
- PRINTD (DBG_QOS, "B: p=%u, d=%u", pre, div);
- goto got_it;
- }
- }
- // at this point we have
- // d == MAXD and (c << (MAXPEXP+MAXD)) < B
- // but we cannot go any higher
- // take care of rounding
- if (r == round_down)
- return -EINVAL;
- pre = 1 << CR_MAXPEXP;
- PRINTD (DBG_QOS, "C: p=%u, d=%u", pre, div);
-got_it:
- // paranoia
- if (div > CR_MAXD || (!pre) || pre > 1<<CR_MAXPEXP) {
- PRINTD (DBG_QOS, "set_cr internal failure: d=%u p=%u",
- div, pre);
- return -EINVAL;
- } else {
- if (bits)
- *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
- if (actual) {
- *actual = DIV_ROUND_UP(br, pre<<div);
- PRINTD (DBG_QOS, "actual rate: %u", *actual);
- }
- return 0;
- }
-}
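-
-// A sketch of typical use (values illustrative, not from the driver):
-// encoding 100000 cps with round_down on an Ultra yields p = 13 and
-// d = 2, an actual rate of 5000000/(13<<2) ~= 96154 cps, never above
-// the request:
-//
-//   u16 bits;
-//   unsigned int actual;
-//   if (make_rate (dev, 100000, round_down, &bits, &actual))
-//     return -EINVAL;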
-
-static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding r, unsigned int tol,
- u16 * bit_pattern, unsigned int * actual) {
- unsigned int my_actual;
-
- PRINTD (DBG_QOS|DBG_FLOW, "make_rate_with_tolerance c=%u, %s, tol=%u",
- c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol);
-
- if (!actual)
- // actual rate is not returned
- actual = &my_actual;
-
- if (make_rate (dev, c, round_nearest, bit_pattern, actual))
- // should never happen as round_nearest always succeeds
- return -1;
-
- if (c - tol <= *actual && *actual <= c + tol)
- // within tolerance
- return 0;
- else
- // intolerant, try rounding instead
- return make_rate (dev, c, r, bit_pattern, actual);
-}
-
-/********** Listen on a VC **********/
-
-static int hrz_open_rx (hrz_dev * dev, u16 channel) {
- // is there any guarantee that we don't get two simultaneous
- // identical calls of this function from different processes? yes:
- // rate_lock
- unsigned long flags;
- u32 channel_type; // u16?
-
- u16 buf_ptr = RX_CHANNEL_IDLE;
-
- rx_ch_desc * rx_desc = &memmap->rx_descs[channel];
-
- PRINTD (DBG_FLOW, "hrz_open_rx %x", channel);
-
- spin_lock_irqsave (&dev->mem_lock, flags);
- channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- // very serious error, should never occur
- if (channel_type != RX_CHANNEL_DISABLED) {
- PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open");
- return -EBUSY; // clean up?
- }
-
- // Give back spare buffer
- if (dev->noof_spare_buffers) {
- buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers];
- PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr);
- // should never occur
- if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) {
- // but easy to recover from
- PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE");
- buf_ptr = RX_CHANNEL_IDLE;
- }
- } else {
- PRINTD (DBG_VCC, "using IDLE buffer pointer");
- }
-
- // Channel is currently disabled so change its status to idle
-
- // do we really need to save the flags again?
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- wr_mem (dev, &rx_desc->wr_buf_type,
- buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME);
- if (buf_ptr != RX_CHANNEL_IDLE)
- wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr);
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- // rxer->rate = make_rate (qos->peak_cells);
-
- PRINTD (DBG_FLOW, "hrz_open_rx ok");
-
- return 0;
-}
-
-#if 0
-/********** change vc rate for a given vc **********/
-
-static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
- rxer->rate = make_rate (qos->peak_cells);
-}
-#endif
-
-/********** free an skb (as per ATM device driver documentation) **********/
-
-static void hrz_kfree_skb (struct sk_buff * skb) {
- if (ATM_SKB(skb)->vcc->pop) {
- ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
- } else {
- dev_kfree_skb_any (skb);
- }
-}
-
-/********** cancel listen on a VC **********/
-
-static void hrz_close_rx (hrz_dev * dev, u16 vc) {
- unsigned long flags;
-
- u32 value;
-
- u32 r1, r2;
-
- rx_ch_desc * rx_desc = &memmap->rx_descs[vc];
-
- int was_idle = 0;
-
- spin_lock_irqsave (&dev->mem_lock, flags);
- value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- if (value == RX_CHANNEL_DISABLED) {
- // I suppose this could happen once we deal with _NONE traffic properly
- PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc);
- return;
- }
- if (value == RX_CHANNEL_IDLE)
- was_idle = 1;
-
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- for (;;) {
- wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED);
-
- if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED)
- break;
-
- was_idle = 0;
- }
-
- if (was_idle) {
- spin_unlock_irqrestore (&dev->mem_lock, flags);
- return;
- }
-
- WAIT_FLUSH_RX_COMPLETE(dev);
-
- // XXX Is this all really necessary? We can rely on the rx_data_av
- // handler to discard frames that remain queued for delivery. If the
- // worry is that immediately reopening the channel (perhaps by a
- // different process) may cause some data to be mis-delivered then
- // there may still be a simpler solution (such as busy-waiting on
- // rx_busy once the channel is disabled or before a new one is
- // opened - does this leave any holes?). Arguably setting up and
- // tearing down the TX and RX halves of each virtual circuit could
- // most safely be done within ?x_busy protected regions.
-
- // OK, current changes are that Simon's marker is disabled and we DO
- // look for NULL rxer elsewhere. The code here seems to flush frames
- // and then remember the last dead cell belonging to the channel
- // just disabled - the cell gets relinked at the next vc_open.
- // However, when all VCs are closed or only a few opened there are a
- // handful of buffers that are unusable.
-
- // Does anyone feel like documenting spare_buffers properly?
- // Does anyone feel like fixing this in a nicer way?
-
- // Flush any data which is left in the channel
- for (;;) {
- // Change the rx channel port to something different to the RX
- // channel we are trying to close to force Horizon to flush the rx
- // channel read and write pointers.
-
- u16 other = vc^(RX_CHANS/2);
-
- SELECT_RX_CHANNEL (dev, other);
- WAIT_UPDATE_COMPLETE (dev);
-
- r1 = rd_mem (dev, &rx_desc->rd_buf_type);
-
- // Select this RX channel. Flush doesn't seem to work unless we
- // select an RX channel beforehand
-
- SELECT_RX_CHANNEL (dev, vc);
- WAIT_UPDATE_COMPLETE (dev);
-
- // Attempt to flush a frame on this RX channel
-
- FLUSH_RX_CHANNEL (dev, vc);
- WAIT_FLUSH_RX_COMPLETE (dev);
-
- // Force Horizon to flush rx channel read and write pointers as before
-
- SELECT_RX_CHANNEL (dev, other);
- WAIT_UPDATE_COMPLETE (dev);
-
- r2 = rd_mem (dev, &rx_desc->rd_buf_type);
-
- PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);
-
- if (r1 == r2) {
- dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
- break;
- }
- }
-
-#if 0
- {
- rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)];
- rx_q_entry * rd_ptr = dev->rx_q_entry;
-
- PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %u, wr_ptr = %u", rd_ptr, wr_ptr);
-
- while (rd_ptr != wr_ptr) {
- u32 x = rd_mem (dev, (HDW *) rd_ptr);
-
- if (vc == rx_q_entry_to_rx_channel (x)) {
- x |= SIMONS_DODGEY_MARKER;
-
- PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey");
-
- wr_mem (dev, (HDW *) rd_ptr, x);
- }
-
- if (rd_ptr == dev->rx_q_wrap)
- rd_ptr = dev->rx_q_reset;
- else
- rd_ptr++;
- }
- }
-#endif
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
-
- return;
-}
-
-/********** schedule RX transfers **********/
-
-// Note on tail recursion: a GCC developer said that it is not likely
-// to be fixed soon, so do not define TAILRECURSIONWORKS unless you
-// are sure it works, as you may otherwise overflow the kernel stack.
-
-// giving this fn a return value would help GCC, allegedly
-
-static void rx_schedule (hrz_dev * dev, int irq) {
- unsigned int rx_bytes;
-
- int pio_instead = 0;
-#ifndef TAILRECURSIONWORKS
- pio_instead = 1;
- while (pio_instead) {
-#endif
- // bytes waiting for RX transfer
- rx_bytes = dev->rx_bytes;
-
-#if 0
- spin_count = 0;
- while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) {
- PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!");
- if (++spin_count > 10) {
- PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion");
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- clear_bit (rx_busy, &dev->flags);
- hrz_kfree_skb (dev->rx_skb);
- return;
- }
- }
-#endif
-
- // this code follows the TX code but (at the moment) there is only
- // one region - the skb itself. I don't know if this will change,
- // but it doesn't hurt to have the code here, disabled.
-
- if (rx_bytes) {
- // start next transfer within same region
- if (rx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (rx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
- dev->rx_bytes = 0;
- } else {
- PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
- dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
- rx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // rx_bytes == 0 -- we're between regions
- // regions remaining to transfer
-#if 0
- unsigned int rx_regions = dev->rx_regions;
-#else
- unsigned int rx_regions = 0;
-#endif
-
- if (rx_regions) {
-#if 0
- // start a new region
- dev->rx_addr = dev->rx_iovec->iov_base;
- rx_bytes = dev->rx_iovec->iov_len;
- ++dev->rx_iovec;
- dev->rx_regions = rx_regions - 1;
-
- if (rx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (rx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_RX|DBG_BUS, "(full region)");
- dev->rx_bytes = 0;
- } else {
- PRINTD (DBG_RX|DBG_BUS, "(start multi region)");
- dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
- rx_bytes = MAX_TRANSFER_COUNT;
- }
-#endif
- } else {
- // rx_regions == 0
- // that's all folks - end of frame
- struct sk_buff * skb = dev->rx_skb;
- // dev->rx_iovec = 0;
-
- FLUSH_RX_CHANNEL (dev, dev->rx_channel);
-
- dump_skb ("<<<", dev->rx_channel, skb);
-
- PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);
-
- {
- struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
- // VC layer stats
- atomic_inc(&vcc->stats->rx);
- __net_timestamp(skb);
- // end of our responsibility
- vcc->push (vcc, skb);
- }
- }
- }
-
- // note: writing RX_COUNT clears any interrupt condition
- if (rx_bytes) {
- if (pio_instead) {
- if (irq)
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
- } else {
- wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
- }
- dev->rx_addr += rx_bytes;
- } else {
- if (irq)
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- // allow another RX thread to start
- YELLOW_LED_ON(dev);
- clear_bit (rx_busy, &dev->flags);
- PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
- }
-
-#ifdef TAILRECURSIONWORKS
- // and we all bless optimised tail calls
- if (pio_instead)
- return rx_schedule (dev, 0);
- return;
-#else
- // grrrrrrr!
- irq = 0;
- }
- return;
-#endif
-}
-
-/********** handle RX bus master complete events **********/
-
-static void rx_bus_master_complete_handler (hrz_dev * dev) {
- if (test_bit (rx_busy, &dev->flags)) {
- rx_schedule (dev, 1);
- } else {
- PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
- // clear interrupt condition on adapter
- wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
- }
- return;
-}
-
-/********** (queue to) become the next TX thread **********/
-
-static int tx_hold (hrz_dev * dev) {
- PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
- wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
- PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
- if (signal_pending (current))
- return -1;
- PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
- return 0;
-}
-
-/********** allow another TX thread to start **********/
-
-static inline void tx_release (hrz_dev * dev) {
- clear_bit (tx_busy, &dev->flags);
- PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
- wake_up_interruptible (&dev->tx_queue);
-}
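-
-// Typical use, sketched from hrz_send below: grab the TX lock, set up
-// the transfer, then let tx_schedule release the lock on completion.
-//
-//   if (tx_hold (dev))
-//     return -ERESTARTSYS; // interrupted by a signal
-//   /* ... set up dev->tx_addr, dev->tx_bytes etc. ... */
-//   tx_schedule (dev, 0);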
-
-/********** schedule TX transfers **********/
-
-static void tx_schedule (hrz_dev * const dev, int irq) {
- unsigned int tx_bytes;
-
- int append_desc = 0;
-
- int pio_instead = 0;
-#ifndef TAILRECURSIONWORKS
- pio_instead = 1;
- while (pio_instead) {
-#endif
- // bytes in current region waiting for TX transfer
- tx_bytes = dev->tx_bytes;
-
-#if 0
- spin_count = 0;
- while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) {
- PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!");
- if (++spin_count > 10) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion");
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- tx_release (dev);
- hrz_kfree_skb (dev->tx_skb);
- return;
- }
- }
-#endif
-
- if (tx_bytes) {
- // start next transfer within same region
- if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (tx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
- if (!dev->tx_iovec) {
- // end of last region
- append_desc = 1;
- }
- dev->tx_bytes = 0;
- } else {
- PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
- dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
- tx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // tx_bytes == 0 -- we're between regions
- // regions remaining to transfer
- unsigned int tx_regions = dev->tx_regions;
-
- if (tx_regions) {
- // start a new region
- dev->tx_addr = dev->tx_iovec->iov_base;
- tx_bytes = dev->tx_iovec->iov_len;
- ++dev->tx_iovec;
- dev->tx_regions = tx_regions - 1;
-
- if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(pio)");
- pio_instead = 1;
- }
- if (tx_bytes <= MAX_TRANSFER_COUNT) {
- PRINTD (DBG_TX|DBG_BUS, "(full region)");
- dev->tx_bytes = 0;
- } else {
- PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
- dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
- tx_bytes = MAX_TRANSFER_COUNT;
- }
- } else {
- // tx_regions == 0
- // that's all folks - end of frame
- struct sk_buff * skb = dev->tx_skb;
- dev->tx_iovec = NULL;
-
- // VC layer stats
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-
- // free the skb
- hrz_kfree_skb (skb);
- }
- }
-
- // note: writing TX_COUNT clears any interrupt condition
- if (tx_bytes) {
- if (pio_instead) {
- if (irq)
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
- if (append_desc)
- wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
- } else {
- wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
- if (append_desc)
- wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
- append_desc
- ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
- : tx_bytes);
- }
- dev->tx_addr += tx_bytes;
- } else {
- if (irq)
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- YELLOW_LED_ON(dev);
- tx_release (dev);
- }
-
-#ifdef TAILRECURSIONWORKS
- // and we all bless optimised tail calls
- if (pio_instead)
- return tx_schedule (dev, 0);
- return;
-#else
- // grrrrrrr!
- irq = 0;
- }
- return;
-#endif
-}
-
-/********** handle TX bus master complete events **********/
-
-static void tx_bus_master_complete_handler (hrz_dev * dev) {
- if (test_bit (tx_busy, &dev->flags)) {
- tx_schedule (dev, 1);
- } else {
- PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion");
- // clear interrupt condition on adapter
- wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
- }
- return;
-}
-
-/********** move RX Q pointer to next item in circular buffer **********/
-
-// called only from IRQ sub-handler
-static u32 rx_queue_entry_next (hrz_dev * dev) {
- u32 rx_queue_entry;
- spin_lock (&dev->mem_lock);
- rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
- if (dev->rx_q_entry == dev->rx_q_wrap)
- dev->rx_q_entry = dev->rx_q_reset;
- else
- dev->rx_q_entry++;
- wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset);
- spin_unlock (&dev->mem_lock);
- return rx_queue_entry;
-}
-
-/********** handle RX data received by device **********/
-
-// called from IRQ handler
-static void rx_data_av_handler (hrz_dev * dev) {
- u32 rx_queue_entry;
- u32 rx_queue_entry_flags;
- u16 rx_len;
- u16 rx_channel;
-
- PRINTD (DBG_FLOW, "hrz_data_av_handler");
-
- // try to grab rx lock (not possible during RX bus mastering)
- if (test_and_set_bit (rx_busy, &dev->flags)) {
- PRINTD (DBG_RX, "locked out of rx lock");
- return;
- }
- PRINTD (DBG_RX, "set rx_busy for dev %p", dev);
- // lock is cleared if we fail now, o/w after bus master completion
-
- YELLOW_LED_OFF(dev);
-
- rx_queue_entry = rx_queue_entry_next (dev);
-
- rx_len = rx_q_entry_to_length (rx_queue_entry);
- rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry);
-
- WAIT_FLUSH_RX_COMPLETE (dev);
-
- SELECT_RX_CHANNEL (dev, rx_channel);
-
- PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry);
- rx_queue_entry_flags = rx_queue_entry & (RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER);
-
- if (!rx_len) {
- // (at least) bus-mastering breaks if we try to handle a
- // zero-length frame, besides AAL5 does not support them
- PRINTK (KERN_ERR, "zero-length frame!");
- rx_queue_entry_flags &= ~RX_COMPLETE_FRAME;
- }
-
- if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) {
- PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!");
- }
- if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) {
- struct atm_vcc * atm_vcc;
-
- PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len);
-
- atm_vcc = dev->rxer[rx_channel];
- // if no vcc is assigned to this channel, we should drop the frame
- // (is this what SIMONS etc. was trying to achieve?)
-
- if (atm_vcc) {
-
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
-
- if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
-
- struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
- if (skb) {
- // remember this so we can push it later
- dev->rx_skb = skb;
- // remember this so we can flush it later
- dev->rx_channel = rx_channel;
-
- // prepare socket buffer
- skb_put (skb, rx_len);
- ATM_SKB(skb)->vcc = atm_vcc;
-
- // simple transfer
- // dev->rx_regions = 0;
- // dev->rx_iovec = 0;
- dev->rx_bytes = rx_len;
- dev->rx_addr = skb->data;
- PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)",
- skb->data, rx_len);
-
- // do the business
- rx_schedule (dev, 0);
- return;
-
- } else {
- PRINTD (DBG_SKB|DBG_WARN, "failed to get skb");
- }
-
- } else {
- PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel);
- // do we count this?
- }
-
- } else {
- PRINTK (KERN_WARNING, "dropped over-size frame");
- // do we count this?
- }
-
- } else {
- PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)");
- // do we count this?
- }
-
- } else {
- // Wait update complete ? SPONG
- }
-
- // RX was aborted
- YELLOW_LED_ON(dev);
-
- FLUSH_RX_CHANNEL (dev,rx_channel);
- clear_bit (rx_busy, &dev->flags);
-
- return;
-}
-
-/********** interrupt handler **********/
-
-static irqreturn_t interrupt_handler(int irq, void *dev_id)
-{
- hrz_dev *dev = dev_id;
- u32 int_source;
- unsigned int irq_ok;
-
- PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id);
-
- // definitely for us
- irq_ok = 0;
- while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
- & INTERESTING_INTERRUPTS)) {
- // In the interests of fairness, the handlers below are
- // called in sequence and without immediate return to the head of
- // the while loop. This is only an issue for slow hosts (or when
- // debugging messages are on). Really slow hosts may find a fast
- // sender keeps them permanently in the IRQ handler. :(
-
- // (only an issue for slow hosts) RX completion goes before
- // rx_data_av as the former implies rx_busy and so the latter
- // would just abort. If it reschedules another transfer
- // (continuing the same frame) then it will not clear rx_busy.
-
- // (only an issue for slow hosts) TX completion goes before RX
- // data available as it is a much shorter routine - there is the
- // chance that any further transfers it schedules will be complete
- // by the time of the return to the head of the while loop
-
- if (int_source & RX_BUS_MASTER_COMPLETE) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted");
- rx_bus_master_complete_handler (dev);
- }
- if (int_source & TX_BUS_MASTER_COMPLETE) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted");
- tx_bus_master_complete_handler (dev);
- }
- if (int_source & RX_DATA_AV) {
- ++irq_ok;
- PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted");
- rx_data_av_handler (dev);
- }
- }
- if (irq_ok) {
- PRINTD (DBG_IRQ, "work done: %u", irq_ok);
- } else {
- PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source);
- }
-
- PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
- if (irq_ok)
- return IRQ_HANDLED;
- return IRQ_NONE;
-}
-
-/********** housekeeping **********/
-
-static void do_housekeeping (struct timer_list *t) {
- // just stats at the moment
- hrz_dev * dev = from_timer(dev, t, housekeeping);
-
- // collect device-specific (not driver/atm-linux) stats here
- dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
- dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF);
- dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF);
- dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF);
-
- mod_timer (&dev->housekeeping, jiffies + HZ/10);
-
- return;
-}
-
-/********** find an idle channel for TX and set it up **********/
-
-// called with tx_busy set
-static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
- unsigned short idle_channels;
- short tx_channel = -1;
- unsigned int spin_count;
- PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev);
-
- // better would be to fail immediately, the caller can then decide whether
- // to wait or drop (depending on whether this is UBR etc.)
- spin_count = 0;
- while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) {
- PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel");
- // delay a bit here
- if (++spin_count > 100) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel");
- return -EBUSY;
- }
- }
-
- // got an idle channel
- {
- // tx_idle ensures we look for idle channels in RR order
- int chan = dev->tx_idle;
-
- int keep_going = 1;
- while (keep_going) {
- if (idle_channels & (1<<chan)) {
- tx_channel = chan;
- keep_going = 0;
- }
- ++chan;
- if (chan == TX_CHANS)
- chan = 0;
- }
-
- dev->tx_idle = chan;
- }
-
- // set up the channel we found
- {
- // Initialise the cell header in the transmit channel descriptor
- // a.k.a. prepare the channel and remember that we have done so.
-
- tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
- u32 rd_ptr;
- u32 wr_ptr;
- u16 channel = vcc->channel;
-
- unsigned long flags;
- spin_lock_irqsave (&dev->mem_lock, flags);
-
- // Update the transmit channel record.
- dev->tx_channel_record[tx_channel] = channel;
-
- // xBR channel
- update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS,
- vcc->tx_xbr_bits);
-
- // Update the PCR counter preload value etc.
- update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS,
- vcc->tx_pcr_bits);
-
-#if 0
- if (vcc->tx_xbr_bits == VBR_RATE_TYPE) {
- // SCR timer
- update_tx_channel_config (dev, tx_channel, SCR_TIMER_ACCESS,
- vcc->tx_scr_bits);
-
- // Bucket size...
- update_tx_channel_config (dev, tx_channel, BUCKET_CAPACITY_ACCESS,
- vcc->tx_bucket_bits);
-
- // ... and fullness
- update_tx_channel_config (dev, tx_channel, BUCKET_FULLNESS_ACCESS,
- vcc->tx_bucket_bits);
- }
-#endif
-
- // Initialise the read and write buffer pointers
- rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK;
- wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK;
-
- // idle TX channels should have identical pointers
- if (rd_ptr != wr_ptr) {
- PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!");
- // spin_unlock... return -E...
- // I wonder if gcc would get rid of one of the pointer aliases
- }
- PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.",
- rd_ptr, wr_ptr);
-
- switch (vcc->aal) {
- case aal0:
- PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0");
- rd_ptr |= CHANNEL_TYPE_RAW_CELLS;
- wr_ptr |= CHANNEL_TYPE_RAW_CELLS;
- break;
- case aal34:
- PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34");
- rd_ptr |= CHANNEL_TYPE_AAL3_4;
- wr_ptr |= CHANNEL_TYPE_AAL3_4;
- break;
- case aal5:
- rd_ptr |= CHANNEL_TYPE_AAL5;
- wr_ptr |= CHANNEL_TYPE_AAL5;
- // Initialise the CRC
- wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC);
- break;
- }
-
- wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr);
- wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr);
-
- // Write the Cell Header
- // Payload Type, CLP and GFC would go here if non-zero
- wr_mem (dev, &tx_desc->cell_header, channel);
-
- spin_unlock_irqrestore (&dev->mem_lock, flags);
- }
-
- return tx_channel;
-}
-
-/********** send a frame **********/
-
-static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- unsigned int spin_count;
- int free_buffers;
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc * vcc = HRZ_VCC(atm_vcc);
- u16 channel = vcc->channel;
-
- u32 buffers_required;
-
- /* signed for error return */
- short tx_channel;
-
- PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u",
- channel, skb->data, skb->len);
-
- dump_skb (">>>", channel, skb);
-
- if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) {
- PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel);
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
- // don't understand this
- ATM_SKB(skb)->vcc = atm_vcc;
-
- if (skb->len > atm_vcc->qos.txtp.max_sdu) {
- PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
- if (!channel) {
- PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel");
- hrz_kfree_skb (skb);
- return -EIO;
- }
-
-#if 0
- {
- // where would be a better place for this? housekeeping?
- u16 status;
- pci_read_config_word (dev->pci_dev, PCI_STATUS, &status);
- if (status & PCI_STATUS_REC_MASTER_ABORT) {
- PRINTD (DBG_BUS|DBG_ERR, "Clearing PCI Master Abort (and cleaning up)");
- status &= ~PCI_STATUS_REC_MASTER_ABORT;
- pci_write_config_word (dev->pci_dev, PCI_STATUS, status);
- if (test_bit (tx_busy, &dev->flags)) {
- hrz_kfree_skb (dev->tx_skb);
- tx_release (dev);
- }
- }
- }
-#endif
-
-#ifdef DEBUG_HORIZON
- /* wey-hey! */
- if (channel == 1023) {
- unsigned int i;
- unsigned short d = 0;
- char * s = skb->data;
- if (*s++ == 'D') {
- for (i = 0; i < 4; ++i)
- d = (d << 4) | hex_to_bin(*s++);
- PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
- }
- }
-#endif
-
- // wait until TX is free and grab lock
- if (tx_hold (dev)) {
- hrz_kfree_skb (skb);
- return -ERESTARTSYS;
- }
-
- // Wait for enough space to be available in transmit buffer memory.
-
- // should be number of cells needed + 2 (according to hardware docs)
- // = ((framelen+8)+47) / 48 + 2
- // = (framelen+7) / 48 + 3, hmm... faster to put addition inside XXX
- buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3;
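- // e.g. a 1500 byte frame with the 8 byte AAL5 trailer and 48 byte
- // cell payloads needs (1500+7)/48 + 3 = 34 buffers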
-
- // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry)
- spin_count = 0;
- while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) {
- PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d",
- free_buffers, buffers_required);
- // what is the appropriate delay? implement a timeout? (depending on line speed?)
- // mdelay (1);
- // what happens if we kill (current_pid, SIGKILL) ?
- schedule();
- if (++spin_count > 1000) {
- PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d",
- free_buffers, buffers_required);
- tx_release (dev);
- hrz_kfree_skb (skb);
- return -ERESTARTSYS;
- }
- }
-
- // Select a channel to transmit the frame on.
- if (channel == dev->last_vc) {
- PRINTD (DBG_TX, "last vc hack: hit");
- tx_channel = dev->tx_last;
- } else {
- PRINTD (DBG_TX, "last vc hack: miss");
- // Are we currently transmitting this VC on one of the channels?
- for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel)
- if (dev->tx_channel_record[tx_channel] == channel) {
- PRINTD (DBG_TX, "vc already on channel: hit");
- break;
- }
- if (tx_channel == TX_CHANS) {
- PRINTD (DBG_TX, "vc already on channel: miss");
- // Find and set up an idle channel.
- tx_channel = setup_idle_tx_channel (dev, vcc);
- if (tx_channel < 0) {
- PRINTD (DBG_TX|DBG_ERR, "failed to get channel");
- tx_release (dev);
- return tx_channel;
- }
- }
-
- PRINTD (DBG_TX, "got channel");
- SELECT_TX_CHANNEL(dev, tx_channel);
-
- dev->last_vc = channel;
- dev->tx_last = tx_channel;
- }
-
- PRINTD (DBG_TX, "using channel %u", tx_channel);
-
- YELLOW_LED_OFF(dev);
-
- // TX start transfer
-
- {
- unsigned int tx_len = skb->len;
- unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags;
- // remember this so we can free it later
- dev->tx_skb = skb;
-
- if (tx_iovcnt) {
- // scatter gather transfer
- dev->tx_regions = tx_iovcnt;
- dev->tx_iovec = NULL; /* @@@ needs rewriting */
- dev->tx_bytes = 0;
- PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)",
- skb->data, tx_len);
- tx_release (dev);
- hrz_kfree_skb (skb);
- return -EIO;
- } else {
- // simple transfer
- dev->tx_regions = 0;
- dev->tx_iovec = NULL;
- dev->tx_bytes = tx_len;
- dev->tx_addr = skb->data;
- PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)",
- skb->data, tx_len);
- }
-
- // and do the business
- tx_schedule (dev, 0);
-
- }
-
- return 0;
-}
-
-/********** reset a card **********/
-
-static void hrz_reset (const hrz_dev * dev) {
- u32 control_0_reg = rd_regl (dev, CONTROL_0_REG);
-
- // why not set RESET_HORIZON to one and wait for the card to
- // reassert that bit as zero? Like so:
- control_0_reg = control_0_reg & RESET_HORIZON;
- wr_regl (dev, CONTROL_0_REG, control_0_reg);
- while (control_0_reg & RESET_HORIZON)
- control_0_reg = rd_regl (dev, CONTROL_0_REG);
-
- // old reset code retained:
- wr_regl (dev, CONTROL_0_REG, control_0_reg |
- RESET_ATM | RESET_RX | RESET_TX | RESET_HOST);
- // just guessing here
- udelay (1000);
-
- wr_regl (dev, CONTROL_0_REG, control_0_reg);
-}
-
-/********** read the burnt in address **********/
-
-static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
-{
- wr_regl (dev, CONTROL_0_REG, ctrl);
- udelay (5);
-}
-
-static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
-{
- // DI must be valid around rising SK edge
- WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);
- WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
-}
-
-static u16 read_bia(const hrz_dev *dev, u16 addr)
-{
- u32 ctrl = rd_regl (dev, CONTROL_0_REG);
-
- const unsigned int addr_bits = 6;
- const unsigned int data_bits = 16;
-
- unsigned int i;
-
- u16 res;
-
- ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI);
- WRITE_IT_WAIT(dev, ctrl);
-
- // wake Serial EEPROM and send 110 (READ) command
- ctrl |= (SEEPROM_CS | SEEPROM_DI);
- CLOCK_IT(dev, ctrl);
-
- ctrl |= SEEPROM_DI;
- CLOCK_IT(dev, ctrl);
-
- ctrl &= ~SEEPROM_DI;
- CLOCK_IT(dev, ctrl);
-
- for (i=0; i<addr_bits; i++) {
- if (addr & (1 << (addr_bits-1)))
- ctrl |= SEEPROM_DI;
- else
- ctrl &= ~SEEPROM_DI;
-
- CLOCK_IT(dev, ctrl);
-
- addr = addr << 1;
- }
-
- // we could check that we have DO = 0 here
- ctrl &= ~SEEPROM_DI;
-
- res = 0;
- for (i=0;i<data_bits;i++) {
- res = res >> 1;
-
- CLOCK_IT(dev, ctrl);
-
- if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO)
- res |= (1 << (data_bits-1));
- }
-
- ctrl &= ~(SEEPROM_SK | SEEPROM_CS);
- WRITE_IT_WAIT(dev, ctrl);
-
- return res;
-}
-
-/********** initialise a card **********/
-
-static int hrz_init(hrz_dev *dev)
-{
- int onefivefive;
-
- u16 chan;
-
- int buff_count;
-
- HDW * mem;
-
- cell_buf * tx_desc;
- cell_buf * rx_desc;
-
- u32 ctrl;
-
- ctrl = rd_regl (dev, CONTROL_0_REG);
- PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl);
- onefivefive = ctrl & ATM_LAYER_STATUS;
-
- if (onefivefive)
- printk (DEV_LABEL ": Horizon Ultra (at 155.52 MBps)");
- else
- printk (DEV_LABEL ": Horizon (at 25 MBps)");
-
- printk (":");
- // Reset the card to get everything in a known state
-
- printk (" reset");
- hrz_reset (dev);
-
- // Clear all the buffer memory
-
- printk (" clearing memory");
-
- for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem)
- wr_mem (dev, mem, 0);
-
- printk (" tx channels");
-
- // All eight transmit channels are set up as AAL5 ABR channels with
- // a 16us cell spacing. Why?
-
- // Channel 0 gets the free buffer at 100h, channel 1 gets the free
- // buffer at 110h etc.
-
- for (chan = 0; chan < TX_CHANS; ++chan) {
- tx_ch_desc * tx_desc = &memmap->tx_descs[chan];
- cell_buf * buf = &memmap->inittxbufs[chan];
-
- // initialise the read and write buffer pointers
- wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf));
- wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf));
-
- // set the status of the initial buffers to empty
- wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY);
- }
-
- // Use space bufn3 at the moment for tx buffers
-
- printk (" tx buffers");
-
- tx_desc = memmap->bufn3;
-
- wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY);
-
- for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) {
- wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY);
- tx_desc++;
- }
-
- wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY);
-
- // Initialise the transmit free buffer count
- wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE);
-
- printk (" rx channels");
-
- // Initialise all of the receive channels to be AAL5 disabled with
- // an interrupt threshold of 0
-
- for (chan = 0; chan < RX_CHANS; ++chan) {
- rx_ch_desc * rx_desc = &memmap->rx_descs[chan];
-
- wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED);
- }
-
- printk (" rx buffers");
-
- // Use space bufn4 at the moment for rx buffers
-
- rx_desc = memmap->bufn4;
-
- wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY);
-
- for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) {
- wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY);
-
- rx_desc++;
- }
-
- wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY);
-
- // Initialise the receive free buffer count
- wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE);
-
- // Initialise the Horizon's registers
-
- // TX config
- wr_regw (dev, TX_CONFIG_OFF,
- ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE);
-
- // RX config. Use 10-x VC bits, x VP bits, non-user cells in channel 0.
- wr_regw (dev, RX_CONFIG_OFF,
- DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits);
-
- // RX line config
- wr_regw (dev, RX_LINE_CONFIG_OFF,
- LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4);
-
- // Set the max AAL5 cell count to be just enough to contain the
- // largest AAL5 frame that the user wants to receive
- wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
- DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD));
-
- // Enable receive
- wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
-
- printk (" control");
-
- // Drive the OE of the LEDs then turn the green LED on
- ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED;
- wr_regl (dev, CONTROL_0_REG, ctrl);
-
- // Test for a 155-capable card
-
- if (onefivefive) {
- // Select 155 mode... make this a choice (or: how do we detect
- // external line speed and switch?)
- ctrl |= ATM_LAYER_SELECT;
- wr_regl (dev, CONTROL_0_REG, ctrl);
-
- // test SUNI-lite vs SAMBA
-
- // Register 0x00 in the SUNI will have some of bits 3-7 set, and
- // they will always be zero for the SAMBA. Ha! Bloody hardware
- // engineers. It'll never work.
-
- if (rd_framer (dev, 0) & 0x00f0) {
- // SUNI
- printk (" SUNI");
-
- // Reset, just in case
- wr_framer (dev, 0x00, 0x0080);
- wr_framer (dev, 0x00, 0x0000);
-
- // Configure transmit FIFO
- wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002);
-
- // Set line timed mode
- wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001);
- } else {
- // SAMBA
- printk (" SAMBA");
-
- // Reset, just in case
- wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001);
- wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001);
-
- // Turn off diagnostic loopback and enable line-timed mode
- wr_framer (dev, 0, 0x0002);
-
- // Turn on transmit outputs
- wr_framer (dev, 2, 0x0B80);
- }
- } else {
- // Select 25 mode
- ctrl &= ~ATM_LAYER_SELECT;
-
- // Madge B154 setup
- // none required?
- }
-
- printk (" LEDs");
-
- GREEN_LED_ON(dev);
- YELLOW_LED_ON(dev);
-
- printk (" ESI=");
-
- {
- u16 b = 0;
- int i;
- u8 * esi = dev->atm_dev->esi;
-
- // in the card I have, EEPROM
- // addresses 0, 1, 2 contain 0
- // addresses 5, 6 etc. contain ffff
- // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order)
- // the read_bia routine gets the BIA in Ethernet bit order
-
- for (i=0; i < ESI_LEN; ++i) {
- if (i % 2 == 0)
- b = read_bia (dev, i/2 + 2);
- else
- b = b >> 8;
- esi[i] = b & 0xFF;
- printk ("%02x", esi[i]);
- }
- }
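-
- // i.e. esi[0] is the low byte of EEPROM word 2, esi[1] the high byte
- // of word 2, esi[2] the low byte of word 3, and so on up to word 4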
-
- // Enable RX_Q and ?X_COMPLETE interrupts only
- wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS);
- printk (" IRQ on");
-
- printk (".\n");
-
- return onefivefive;
-}
-
-/********** check max_sdu **********/
-
-static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) {
- PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu");
-
- switch (aal) {
- case aal0:
- if (!(tp->max_sdu)) {
- PRINTD (DBG_QOS, "defaulting max_sdu");
- tp->max_sdu = ATM_AAL0_SDU;
- } else if (tp->max_sdu != ATM_AAL0_SDU) {
- PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu");
- return -EINVAL;
- }
- break;
- case aal34:
- if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) {
- PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
- tp->max_sdu = ATM_MAX_AAL34_PDU;
- }
- break;
- case aal5:
- if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) {
- PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
- tp->max_sdu = max_frame_size;
- }
- break;
- }
- return 0;
-}
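-
-// Usage sketch (illustrative, not part of the original driver): an
-// AAL5 caller asking for an oversized SDU is quietly capped rather
-// than rejected, whereas AAL0 accepts only the exact cell size.
-#if 0
-static void check_max_sdu_example (void) {
- struct atm_trafprm tp = { .max_sdu = 100000 };
- int error = check_max_sdu (aal5, &tp, max_rx_size);
- // here error == 0 and tp.max_sdu == max_rx_size, since max_rx_size
- // is clamped to RX_AAL5_LIMIT (65535) and so is below 100000
- (void) error;
-}
-#endif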
-
-/********** check pcr **********/
-
-// something like this should be part of ATM Linux
-static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) {
- // we are assuming non-UBR, and non-special values of pcr
- if (tp->min_pcr == ATM_MAX_PCR)
- PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR");
- else if (tp->min_pcr < 0)
- PRINTD (DBG_QOS, "luser gave negative min_pcr");
- else if (tp->min_pcr && tp->min_pcr > pcr)
- PRINTD (DBG_QOS, "pcr less than min_pcr");
- else
- // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1)
- // easier to #define ATM_MAX_PCR 0 and have all rates unsigned?
- // [this would get rid of next two conditionals]
- if ((0) && tp->max_pcr == ATM_MAX_PCR)
- PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR");
- else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0)
- PRINTD (DBG_QOS, "luser gave negative max_pcr");
- else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr)
- PRINTD (DBG_QOS, "pcr greater than max_pcr");
- else {
- // each limit unspecified or not violated
- PRINTD (DBG_QOS, "xBR(pcr) OK");
- return 0;
- }
- PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d",
- pcr, tp->min_pcr, tp->pcr, tp->max_pcr);
- return -EINVAL;
-}
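-
-// Illustrative outcomes (not in the original source), for pcr = 1000:
-//   min_pcr = 0,    max_pcr = 0    -> 0       (both unspecified)
-//   min_pcr = 500,  max_pcr = 2000 -> 0       (within bounds)
-//   min_pcr = 1500, max_pcr = 0    -> -EINVAL (pcr below min_pcr)
-//   min_pcr = 0,    max_pcr = 500  -> -EINVAL (pcr above max_pcr)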
-
-/********** open VC **********/
-
-static int hrz_open (struct atm_vcc *atm_vcc)
-{
- int error;
- u16 channel;
-
- struct atm_qos * qos;
- struct atm_trafprm * txtp;
- struct atm_trafprm * rxtp;
-
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc vcc;
- hrz_vcc * vccp; // allocated late
- short vpi = atm_vcc->vpi;
- int vci = atm_vcc->vci;
- PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci);
-
-#ifdef ATM_VPI_UNSPEC
- // UNSPEC is deprecated, remove this code eventually
- if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
- PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
- return -EINVAL;
- }
-#endif
-
- error = vpivci_to_channel (&channel, vpi, vci);
- if (error) {
- PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
- return error;
- }
-
- vcc.channel = channel;
- // max speed for the moment
- vcc.tx_rate = 0x0;
-
- qos = &atm_vcc->qos;
-
- // check AAL and remember it
- switch (qos->aal) {
- case ATM_AAL0:
- // we would if it were 48 bytes and not 52!
- PRINTD (DBG_QOS|DBG_VCC, "AAL0");
- vcc.aal = aal0;
- break;
- case ATM_AAL34:
- // we would if I knew how to do the SAR!
- PRINTD (DBG_QOS|DBG_VCC, "AAL3/4");
- vcc.aal = aal34;
- break;
- case ATM_AAL5:
- PRINTD (DBG_QOS|DBG_VCC, "AAL5");
- vcc.aal = aal5;
- break;
- default:
- PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
- return -EINVAL;
- }
-
- // TX traffic parameters
-
- // there are two interrelated problems here: 1. the reservation of
- // PCR is not a binary choice, we are given bounds and/or a
- // desirable value; 2. the device is only capable of certain values,
- // most of which are not integers. It is almost certainly acceptable
- // to be off by a maximum of 1 to 10 cps.
-
- // Pragmatic choice: always store an integral PCR as that which has
- // been allocated, even if we allocate a little (or a lot) less,
- // after rounding. The actual allocation depends on what we can
- // manage with our rate selection algorithm. The rate selection
- // algorithm is given an integral PCR and a tolerance and told
- // whether it should round the value up or down if the tolerance is
- // exceeded; it returns: a) the actual rate selected (rounded up to
- // the nearest integer), b) a bit pattern to feed to the timer
- // register, and c) a failure value if no applicable rate exists.
-
- // Part of the job is done by atm_pcr_goal which gives us a PCR
- // specification which says: EITHER grab the maximum available PCR
- // (and perhaps a lower bound which we must not pass), OR grab this
- // amount, rounding down if you have to (and perhaps a lower bound
- // which we must not pass) OR grab this amount, rounding up if you
- // have to (and perhaps an upper bound which we must not pass). If any
- // bounds ARE passed we fail. Note that rounding is only rounding to
- // match device limitations, we do not round down to satisfy
- // bandwidth availability even if this would not violate any given
- // lower bound.
-
- // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s
- // (say) so this is not even a binary fixpoint cell rate (but this
- // device can do it). To avoid this sort of hassle we use a
- // tolerance parameter (currently fixed at 10 cps).
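-
- // Worked example (illustrative): 64 kb/s telephony is 8000 bytes/s
- // over 48-byte payloads, i.e. 500/3 = 166.67 cells/s; we would ask
- // the selector for 167 cps and accept any expressible rate within
- // the 10 cps tolerance before a rounding direction is forced.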
-
- PRINTD (DBG_QOS, "TX:");
-
- txtp = &qos->txtp;
-
- // set up defaults for no traffic
- vcc.tx_rate = 0;
- // who knows what would actually happen if you try and send on this?
- vcc.tx_xbr_bits = IDLE_RATE_TYPE;
- vcc.tx_pcr_bits = CLOCK_DISABLE;
-#if 0
- vcc.tx_scr_bits = CLOCK_DISABLE;
- vcc.tx_bucket_bits = 0;
-#endif
-
- if (txtp->traffic_class != ATM_NONE) {
- error = check_max_sdu (vcc.aal, txtp, max_tx_size);
- if (error) {
- PRINTD (DBG_QOS, "TX max_sdu check failed");
- return error;
- }
-
- switch (txtp->traffic_class) {
- case ATM_UBR: {
- // we take "the PCR" as a rate-cap
- // not reserved
- vcc.tx_rate = 0;
- make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL);
- vcc.tx_xbr_bits = ABR_RATE_TYPE;
- break;
- }
-#if 0
- case ATM_ABR: {
- // reserve min, allow up to max
- vcc.tx_rate = 0; // ?
- make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, 0);
- vcc.tx_xbr_bits = ABR_RATE_TYPE;
- break;
- }
-#endif
- case ATM_CBR: {
- int pcr = atm_pcr_goal (txtp);
- rounding r;
- if (!pcr) {
- // down vs. up, remaining bandwidth vs. unlimited bandwidth!!
- // should we really arrange that once someone gets unlimited
- // bandwidth, no more non-UBR channels can be opened until the
- // unlimited one closes? For the moment, round_down means
- // greedy people actually get something and not nothing
- r = round_down;
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
- pcr = dev->tx_avail;
- } else if (pcr < 0) {
- r = round_down;
- pcr = -pcr;
- } else {
- r = round_up;
- }
- error = make_rate_with_tolerance (dev, pcr, r, 10,
- &vcc.tx_pcr_bits, &vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "could not make rate from TX PCR");
- return error;
- }
- // not really clear what further checking is needed
- error = atm_pcr_check (txtp, vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "TX PCR failed consistency check");
- return error;
- }
- vcc.tx_xbr_bits = CBR_RATE_TYPE;
- break;
- }
-#if 0
- case ATM_VBR: {
- int pcr = atm_pcr_goal (txtp);
- // int scr = atm_scr_goal (txtp);
- int scr = pcr/2; // just for fun
- unsigned int mbs = 60; // just for fun
- rounding pr;
- rounding sr;
- unsigned int bucket;
- if (!pcr) {
- pr = round_nearest;
- pcr = 1<<30;
- } else if (pcr < 0) {
- pr = round_down;
- pcr = -pcr;
- } else {
- pr = round_up;
- }
- error = make_rate_with_tolerance (dev, pcr, pr, 10,
- &vcc.tx_pcr_bits, 0);
- if (!scr) {
- // see comments for PCR with CBR above
- sr = round_down;
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
- scr = dev->tx_avail;
- } else if (scr < 0) {
- sr = round_down;
- scr = -scr;
- } else {
- sr = round_up;
- }
- error = make_rate_with_tolerance (dev, scr, sr, 10,
- &vcc.tx_scr_bits, &vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "could not make rate from TX SCR");
- return error;
- }
- // not really clear what further checking is needed
- // error = atm_scr_check (txtp, vcc.tx_rate);
- if (error) {
- PRINTD (DBG_QOS, "TX SCR failed consistency check");
- return error;
- }
- // bucket calculations (from a piece of paper...): the cell bucket
- // capacity must be the largest integer smaller than m(p-s)/p + 1,
- // where m = max burst size, p = pcr, s = scr
- bucket = mbs*(pcr-scr)/pcr;
- if (bucket*pcr != mbs*(pcr-scr))
- bucket += 1;
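- // e.g. (illustrative) pcr = 1000, scr = 333, mbs = 60:
- // 60*667/1000 = 40 with remainder 20, so bucket becomes 41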
- if (bucket > BUCKET_MAX_SIZE) {
- PRINTD (DBG_QOS, "shrinking bucket from %u to %u",
- bucket, BUCKET_MAX_SIZE);
- bucket = BUCKET_MAX_SIZE;
- }
- vcc.tx_xbr_bits = VBR_RATE_TYPE;
- vcc.tx_bucket_bits = bucket;
- break;
- }
-#endif
- default: {
- PRINTD (DBG_QOS, "unsupported TX traffic class");
- return -EINVAL;
- }
- }
- }
-
- // RX traffic parameters
-
- PRINTD (DBG_QOS, "RX:");
-
- rxtp = &qos->rxtp;
-
- // set up defaults for no traffic
- vcc.rx_rate = 0;
-
- if (rxtp->traffic_class != ATM_NONE) {
- error = check_max_sdu (vcc.aal, rxtp, max_rx_size);
- if (error) {
- PRINTD (DBG_QOS, "RX max_sdu check failed");
- return error;
- }
- switch (rxtp->traffic_class) {
- case ATM_UBR: {
- // not reserved
- break;
- }
-#if 0
- case ATM_ABR: {
- // reserve min
- vcc.rx_rate = 0; // ?
- break;
- }
-#endif
- case ATM_CBR: {
- int pcr = atm_pcr_goal (rxtp);
- if (!pcr) {
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
- pcr = dev->rx_avail;
- } else if (pcr < 0) {
- pcr = -pcr;
- }
- vcc.rx_rate = pcr;
- // not really clear what further checking is needed
- error = atm_pcr_check (rxtp, vcc.rx_rate);
- if (error) {
- PRINTD (DBG_QOS, "RX PCR failed consistency check");
- return error;
- }
- break;
- }
-#if 0
- case ATM_VBR: {
- // int scr = atm_scr_goal (rxtp);
- int scr = 1<<16; // just for fun
- if (!scr) {
- // slight race (no locking) here so we may get -EAGAIN
- // later; the greedy bastards would deserve it :)
- PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
- scr = dev->rx_avail;
- } else if (scr < 0) {
- scr = -scr;
- }
- vcc.rx_rate = scr;
- // not really clear what further checking is needed
- // error = atm_scr_check (rxtp, vcc.rx_rate);
- if (error) {
- PRINTD (DBG_QOS, "RX SCR failed consistency check");
- return error;
- }
- break;
- }
-#endif
- default: {
- PRINTD (DBG_QOS, "unsupported RX traffic class");
- return -EINVAL;
- }
- }
- }
-
- // late abort useful for diagnostics
- if (vcc.aal != aal5) {
- PRINTD (DBG_QOS, "AAL not supported");
- return -EINVAL;
- }
-
- // get space for our vcc stuff and copy parameters into it
- vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL);
- if (!vccp) {
- PRINTK (KERN_ERR, "out of memory!");
- return -ENOMEM;
- }
- *vccp = vcc;
-
- // clear error and grab cell rate resource lock
- error = 0;
- spin_lock (&dev->rate_lock);
-
- if (vcc.tx_rate > dev->tx_avail) {
- PRINTD (DBG_QOS, "not enough TX PCR left");
- error = -EAGAIN;
- }
-
- if (vcc.rx_rate > dev->rx_avail) {
- PRINTD (DBG_QOS, "not enough RX PCR left");
- error = -EAGAIN;
- }
-
- if (!error) {
- // really consume cell rates
- dev->tx_avail -= vcc.tx_rate;
- dev->rx_avail -= vcc.rx_rate;
- PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR",
- vcc.tx_rate, vcc.rx_rate);
- }
-
- // release lock and exit on error
- spin_unlock (&dev->rate_lock);
- if (error) {
- PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources");
- kfree (vccp);
- return error;
- }
-
- // this is "immediately before allocating the connection identifier
- // in hardware" - so long as the next call does not fail :)
- set_bit(ATM_VF_ADDR,&atm_vcc->flags);
-
- // any errors here are very serious and should never occur
-
- if (rxtp->traffic_class != ATM_NONE) {
- if (dev->rxer[channel]) {
- PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX");
- error = -EBUSY;
- }
- if (!error)
- error = hrz_open_rx (dev, channel);
- if (error) {
- kfree (vccp);
- return error;
- }
- // this link allows RX frames through
- dev->rxer[channel] = atm_vcc;
- }
-
- // success, set elements of atm_vcc
- atm_vcc->dev_data = (void *) vccp;
-
- // indicate readiness
- set_bit(ATM_VF_READY,&atm_vcc->flags);
-
- return 0;
-}
-
-/********** close VC **********/
-
-static void hrz_close (struct atm_vcc * atm_vcc) {
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- hrz_vcc * vcc = HRZ_VCC(atm_vcc);
- u16 channel = vcc->channel;
- PRINTD (DBG_VCC|DBG_FLOW, "hrz_close");
-
- // indicate unreadiness
- clear_bit(ATM_VF_READY,&atm_vcc->flags);
-
- if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
- unsigned int i;
-
- // let any TX on this channel that has started complete
- // no restart, just keep trying
- while (tx_hold (dev))
- ;
- // remove record of any tx_channel having been setup for this channel
- for (i = 0; i < TX_CHANS; ++i)
- if (dev->tx_channel_record[i] == channel) {
- dev->tx_channel_record[i] = -1;
- break;
- }
- if (dev->last_vc == channel)
- dev->tx_last = -1;
- tx_release (dev);
- }
-
- if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
- // disable RXing - it tries quite hard
- hrz_close_rx (dev, channel);
- // forget the vcc - no more skbs will be pushed
- if (atm_vcc != dev->rxer[channel])
- PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p",
- "arghhh! we're going to die!",
- atm_vcc, dev->rxer[channel]);
- dev->rxer[channel] = NULL;
- }
-
- // atomically release our rate reservation
- spin_lock (&dev->rate_lock);
- PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR",
- vcc->tx_rate, vcc->rx_rate);
- dev->tx_avail += vcc->tx_rate;
- dev->rx_avail += vcc->rx_rate;
- spin_unlock (&dev->rate_lock);
-
- // free our structure
- kfree (vcc);
- // say the VPI/VCI is free again
- clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
-}
-
-#if 0
-static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_ioctl");
- return -1;
-}
-
-unsigned char hrz_phy_get (struct atm_dev * atm_dev, unsigned long addr) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_phy_get");
- return 0;
-}
-
-static void hrz_phy_put (struct atm_dev * atm_dev, unsigned char value,
- unsigned long addr) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- PRINTD (DBG_FLOW, "hrz_phy_put");
-}
-
-static int hrz_change_qos (struct atm_vcc * atm_vcc, struct atm_qos *qos, int flgs) {
- hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
- PRINTD (DBG_FLOW, "hrz_change_qos");
- return -1;
-}
-#endif
-
-/********** proc file contents **********/
-
-static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
- hrz_dev * dev = HRZ_DEV(atm_dev);
- int left = *pos;
- PRINTD (DBG_FLOW, "hrz_proc_read");
-
- /* more diagnostics here? */
-
-#if 0
- if (!left--) {
- unsigned int count = sprintf (page, "vbr buckets:");
- unsigned int i;
- for (i = 0; i < TX_CHANS; ++i)
- count += sprintf (page+count, " %u/%u",
- query_tx_channel_config (dev, i, BUCKET_FULLNESS_ACCESS),
- query_tx_channel_config (dev, i, BUCKET_CAPACITY_ACCESS));
- count += sprintf (page+count, ".\n");
- return count;
- }
-#endif
-
- if (!left--)
- return sprintf (page,
- "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n",
- dev->tx_cell_count, dev->rx_cell_count,
- dev->hec_error_count, dev->unassigned_cell_count);
-
- if (!left--)
- return sprintf (page,
- "free cell buffers: TX %hu, RX %hu+%hu.\n",
- rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF),
- rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF),
- dev->noof_spare_buffers);
-
- if (!left--)
- return sprintf (page,
- "cps remaining: TX %u, RX %u\n",
- dev->tx_avail, dev->rx_avail);
-
- return 0;
-}
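-
-// Note (an assumption about the ATM proc convention): this is called
-// once per output line with an increasing *pos; each "if (!left--)"
-// claims one line and the final return of 0 marks end of file.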
-
-static const struct atmdev_ops hrz_ops = {
- .open = hrz_open,
- .close = hrz_close,
- .send = hrz_send,
- .proc_read = hrz_proc_read,
- .owner = THIS_MODULE,
-};
-
-static int hrz_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_ent)
-{
- hrz_dev * dev;
- int err = 0;
-
- // adapter slot free, read resources from PCI configuration space
- u32 iobase = pci_resource_start (pci_dev, 0);
- u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
- unsigned int irq;
- unsigned char lat;
-
- PRINTD (DBG_FLOW, "hrz_probe");
-
- if (pci_enable_device(pci_dev))
- return -EINVAL;
-
- /* XXX DEV_LABEL is a guess */
- if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
- err = -EINVAL;
- goto out_disable;
- }
-
- dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
- if (!dev) {
- // perhaps we should be nice: deregister all adapters and abort?
- PRINTD(DBG_ERR, "out of memory");
- err = -ENOMEM;
- goto out_release;
- }
-
- pci_set_drvdata(pci_dev, dev);
-
- // grab IRQ and install handler - move this someplace more sensible
- irq = pci_dev->irq;
- if (request_irq(irq,
- interrupt_handler,
- IRQF_SHARED, /* irqflags guess */
- DEV_LABEL, /* name guess */
- dev)) {
- PRINTD(DBG_WARN, "request IRQ failed!");
- err = -EINVAL;
- goto out_free;
- }
-
- PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
- iobase, irq, membase);
-
- dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1,
- NULL);
- if (!(dev->atm_dev)) {
- PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
- err = -EINVAL;
- goto out_free_irq;
- }
-
- PRINTD(DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
- dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
- dev->pci_dev = pci_dev;
-
- // enable bus master accesses
- pci_set_master(pci_dev);
-
- // frobnicate latency (upwards, usually)
- pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat);
- if (pci_lat) {
- PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu",
- "changing", lat, pci_lat);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
- } else if (lat < MIN_PCI_LATENCY) {
- PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu",
- "increasing", lat, MIN_PCI_LATENCY);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
- }
-
- dev->iobase = iobase;
- dev->irq = irq;
- dev->membase = membase;
-
- dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0];
- dev->rx_q_wrap = &memmap->rx_q_entries[RX_CHANS-1];
-
- // these next three are performance hacks
- dev->last_vc = -1;
- dev->tx_last = -1;
- dev->tx_idle = 0;
-
- dev->tx_regions = 0;
- dev->tx_bytes = 0;
- dev->tx_skb = NULL;
- dev->tx_iovec = NULL;
-
- dev->tx_cell_count = 0;
- dev->rx_cell_count = 0;
- dev->hec_error_count = 0;
- dev->unassigned_cell_count = 0;
-
- dev->noof_spare_buffers = 0;
-
- {
- unsigned int i;
- for (i = 0; i < TX_CHANS; ++i)
- dev->tx_channel_record[i] = -1;
- }
-
- dev->flags = 0;
-
- // Allocate cell rates and remember ASIC version
- // Fibre: ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
- // Copper: (WRONG) we want 6 into the above, close to 25Mb/s
- // Copper: (plagiarise!) 25600000/8/270*260/53 - n/53
-
- if (hrz_init(dev)) {
- // to be really pedantic, this should be ATM_OC3c_PCR
- dev->tx_avail = ATM_OC3_PCR;
- dev->rx_avail = ATM_OC3_PCR;
- set_bit(ultra, &dev->flags); // NOT "|= ultra" !
- } else {
- dev->tx_avail = ((25600000/8)*26)/(27*53);
- dev->rx_avail = ((25600000/8)*26)/(27*53);
- PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering.");
- }
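-
- // (illustrative check: 25600000/8 = 3200000 bytes/s, scaled by what
- // appears to be a 26/27 framing factor and divided by 53 bytes per
- // cell, gives 58141 cps, against ATM_OC3_PCR = 353207 cps for fibre)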
-
- // rate changes spinlock
- spin_lock_init(&dev->rate_lock);
-
- // on-board memory access spinlock; we want atomic reads and
- // writes to adapter memory (handles IRQ and SMP)
- spin_lock_init(&dev->mem_lock);
-
- init_waitqueue_head(&dev->tx_queue);
-
- // vpi_bits in 0..4 leaves vci_bits in 6..10 (they must total 10)
- dev->atm_dev->ci_range.vpi_bits = vpi_bits;
- dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
-
- timer_setup(&dev->housekeeping, do_housekeeping, 0);
- mod_timer(&dev->housekeeping, jiffies);
-
-out:
- return err;
-
-out_free_irq:
- free_irq(irq, dev);
-out_free:
- kfree(dev);
-out_release:
- release_region(iobase, HRZ_IO_EXTENT);
-out_disable:
- pci_disable_device(pci_dev);
- goto out;
-}
-
-static void hrz_remove_one(struct pci_dev *pci_dev)
-{
- hrz_dev *dev;
-
- dev = pci_get_drvdata(pci_dev);
-
- PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
- del_timer_sync(&dev->housekeeping);
- hrz_reset(dev);
- atm_dev_deregister(dev->atm_dev);
- free_irq(dev->irq, dev);
- release_region(dev->iobase, HRZ_IO_EXTENT);
- kfree(dev);
-
- pci_disable_device(pci_dev);
-}
-
-static void __init hrz_check_args (void) {
-#ifdef DEBUG_HORIZON
- PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
-#else
- if (debug)
- PRINTK (KERN_NOTICE, "no debug support in this image");
-#endif
-
- if (vpi_bits > HRZ_MAX_VPI)
- PRINTK (KERN_ERR, "vpi_bits has been limited to %hu",
- vpi_bits = HRZ_MAX_VPI);
-
- if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT)
- PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu",
- max_tx_size = TX_AAL5_LIMIT);
-
- if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT)
- PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu",
- max_rx_size = RX_AAL5_LIMIT);
-
- return;
-}
-
-MODULE_AUTHOR(maintainer_string);
-MODULE_DESCRIPTION(description_string);
-MODULE_LICENSE("GPL");
-module_param(debug, ushort, 0644);
-module_param(vpi_bits, ushort, 0);
-module_param(max_tx_size, int, 0);
-module_param(max_rx_size, int, 0);
-module_param(pci_lat, byte, 0);
-MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
-MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
-MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
-MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
-MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-
-static const struct pci_device_id hrz_pci_tbl[] = {
- { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
-
-static struct pci_driver hrz_driver = {
- .name = "horizon",
- .probe = hrz_probe,
- .remove = hrz_remove_one,
- .id_table = hrz_pci_tbl,
-};
-
-/********** module entry **********/
-
-static int __init hrz_module_init (void) {
- BUILD_BUG_ON(sizeof(struct MEMMAP) != 128*1024/4);
-
- show_version();
-
- // check arguments
- hrz_check_args();
-
- // get the juice
- return pci_register_driver(&hrz_driver);
-}
-
-/********** module exit **********/
-
-static void __exit hrz_module_exit (void) {
- PRINTD (DBG_FLOW, "cleanup_module");
-
- pci_unregister_driver(&hrz_driver);
-}
-
-module_init(hrz_module_init);
-module_exit(hrz_module_exit);
diff --git a/drivers/atm/horizon.h b/drivers/atm/horizon.h
deleted file mode 100644
index 7523eba19bad..000000000000
--- a/drivers/atm/horizon.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- Madge Horizon ATM Adapter driver.
- Copyright (C) 1995-1999 Madge Networks Ltd.
-
-*/
-
-/*
- IMPORTANT NOTE: Madge Networks no longer makes the adapters
- supported by this driver and makes no commitment to maintain it.
-*/
-
-/* too many macros - change to inline functions */
-
-#ifndef DRIVER_ATM_HORIZON_H
-#define DRIVER_ATM_HORIZON_H
-
-
-#ifdef CONFIG_ATM_HORIZON_DEBUG
-#define DEBUG_HORIZON
-#endif
-
-#define DEV_LABEL "hrz"
-
-#ifndef PCI_VENDOR_ID_MADGE
-#define PCI_VENDOR_ID_MADGE 0x10B6
-#endif
-#ifndef PCI_DEVICE_ID_MADGE_HORIZON
-#define PCI_DEVICE_ID_MADGE_HORIZON 0x1000
-#endif
-
-// diagnostic output
-
-#define PRINTK(severity,format,args...) \
- printk(severity DEV_LABEL ": " format "\n" , ## args)
-
-#ifdef DEBUG_HORIZON
-
-#define DBG_ERR 0x0001
-#define DBG_WARN 0x0002
-#define DBG_INFO 0x0004
-#define DBG_VCC 0x0008
-#define DBG_QOS 0x0010
-#define DBG_TX 0x0020
-#define DBG_RX 0x0040
-#define DBG_SKB 0x0080
-#define DBG_IRQ 0x0100
-#define DBG_FLOW 0x0200
-#define DBG_BUS 0x0400
-#define DBG_REGS 0x0800
-#define DBG_DATA 0x1000
-#define DBG_MASK 0x1fff
-
-/* the ## prevents the annoying double expansion of the macro arguments */
-/* KERN_INFO is used since KERN_DEBUG often does not make it to the console */
-#define PRINTDB(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format , ## args) : 1 )
-#define PRINTDM(bits,format,args...) \
- ( (debug & (bits)) ? printk (format , ## args) : 1 )
-#define PRINTDE(bits,format,args...) \
- ( (debug & (bits)) ? printk (format "\n" , ## args) : 1 )
-#define PRINTD(bits,format,args...) \
- ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format "\n" , ## args) : 1 )
-
-#else
-
-#define PRINTD(bits,format,args...)
-#define PRINTDB(bits,format,args...)
-#define PRINTDM(bits,format,args...)
-#define PRINTDE(bits,format,args...)
-
-#endif
-
-#define PRINTDD(sec,fmt,args...)
-#define PRINTDDB(sec,fmt,args...)
-#define PRINTDDM(sec,fmt,args...)
-#define PRINTDDE(sec,fmt,args...)
-
-// fixed constants
-
-#define SPARE_BUFFER_POOL_SIZE MAX_VCS
-#define HRZ_MAX_VPI 4
-#define MIN_PCI_LATENCY 48 // 24 IS TOO SMALL
-
-/* Horizon specific bits */
-/* Register offsets */
-
-#define HRZ_IO_EXTENT 0x80
-
-#define DATA_PORT_OFF 0x00
-#define TX_CHANNEL_PORT_OFF 0x04
-#define TX_DESCRIPTOR_PORT_OFF 0x08
-#define MEMORY_PORT_OFF 0x0C
-#define MEM_WR_ADDR_REG_OFF 0x14
-#define MEM_RD_ADDR_REG_OFF 0x18
-#define CONTROL_0_REG 0x1C
-#define INT_SOURCE_REG_OFF 0x20
-#define INT_ENABLE_REG_OFF 0x24
-#define MASTER_RX_ADDR_REG_OFF 0x28
-#define MASTER_RX_COUNT_REG_OFF 0x2C
-#define MASTER_TX_ADDR_REG_OFF 0x30
-#define MASTER_TX_COUNT_REG_OFF 0x34
-#define TX_DESCRIPTOR_REG_OFF 0x38
-#define TX_CHANNEL_CONFIG_COMMAND_OFF 0x40
-#define TX_CHANNEL_CONFIG_DATA_OFF 0x44
-#define TX_FREE_BUFFER_COUNT_OFF 0x48
-#define RX_FREE_BUFFER_COUNT_OFF 0x4C
-#define TX_CONFIG_OFF 0x50
-#define TX_STATUS_OFF 0x54
-#define RX_CONFIG_OFF 0x58
-#define RX_LINE_CONFIG_OFF 0x5C
-#define RX_QUEUE_RD_PTR_OFF 0x60
-#define RX_QUEUE_WR_PTR_OFF 0x64
-#define MAX_AAL5_CELL_COUNT_OFF 0x68
-#define RX_CHANNEL_PORT_OFF 0x6C
-#define TX_CELL_COUNT_OFF 0x70
-#define RX_CELL_COUNT_OFF 0x74
-#define HEC_ERROR_COUNT_OFF 0x78
-#define UNASSIGNED_CELL_COUNT_OFF 0x7C
-
-/* Register bit definitions */
-
-/* Control 0 register */
-
-#define SEEPROM_DO 0x00000001
-#define SEEPROM_DI 0x00000002
-#define SEEPROM_SK 0x00000004
-#define SEEPROM_CS 0x00000008
-#define DEBUG_BIT_0 0x00000010
-#define DEBUG_BIT_1 0x00000020
-#define DEBUG_BIT_2 0x00000040
-// RESERVED 0x00000080
-#define DEBUG_BIT_0_OE 0x00000100
-#define DEBUG_BIT_1_OE 0x00000200
-#define DEBUG_BIT_2_OE 0x00000400
-// RESERVED 0x00000800
-#define DEBUG_BIT_0_STATE 0x00001000
-#define DEBUG_BIT_1_STATE 0x00002000
-#define DEBUG_BIT_2_STATE 0x00004000
-// RESERVED 0x00008000
-#define GENERAL_BIT_0 0x00010000
-#define GENERAL_BIT_1 0x00020000
-#define GENERAL_BIT_2 0x00040000
-#define GENERAL_BIT_3 0x00080000
-#define RESET_HORIZON 0x00100000
-#define RESET_ATM 0x00200000
-#define RESET_RX 0x00400000
-#define RESET_TX 0x00800000
-#define RESET_HOST 0x01000000
-// RESERVED 0x02000000
-#define TARGET_RETRY_DISABLE 0x04000000
-#define ATM_LAYER_SELECT 0x08000000
-#define ATM_LAYER_STATUS 0x10000000
-// RESERVED 0xE0000000
-
-/* Interrupt source and enable registers */
-
-#define RX_DATA_AV 0x00000001
-#define RX_DISABLED 0x00000002
-#define TIMING_MARKER 0x00000004
-#define FORCED 0x00000008
-#define RX_BUS_MASTER_COMPLETE 0x00000010
-#define TX_BUS_MASTER_COMPLETE 0x00000020
-#define ABR_TX_CELL_COUNT_INT 0x00000040
-#define DEBUG_INT 0x00000080
-// RESERVED 0xFFFFFF00
-
-/* PIO and Bus Mastering */
-
-#define MAX_PIO_COUNT 0x000000ff // 255 - make tunable?
-// 8188 is a hard limit for bus mastering
-#define MAX_TRANSFER_COUNT 0x00001ffc // 8188
-#define MASTER_TX_AUTO_APPEND_DESC 0x80000000
-
-/* TX channel config command port */
-
-#define PCR_TIMER_ACCESS 0x0000
-#define SCR_TIMER_ACCESS 0x0001
-#define BUCKET_CAPACITY_ACCESS 0x0002
-#define BUCKET_FULLNESS_ACCESS 0x0003
-#define RATE_TYPE_ACCESS 0x0004
-// UNUSED 0x00F8
-#define TX_CHANNEL_CONFIG_MULT 0x0100
-// UNUSED 0xF800
-#define BUCKET_MAX_SIZE 0x003f
-
-/* TX channel config data port */
-
-#define CLOCK_SELECT_SHIFT 4
-#define CLOCK_DISABLE 0x00ff
-
-#define IDLE_RATE_TYPE 0x0
-#define ABR_RATE_TYPE 0x1
-#define VBR_RATE_TYPE 0x2
-#define CBR_RATE_TYPE 0x3
-
-/* TX config register */
-
-#define DRVR_DRVRBAR_ENABLE 0x0001
-#define TXCLK_MUX_SELECT_RCLK 0x0002
-#define TRANSMIT_TIMING_MARKER 0x0004
-#define LOOPBACK_TIMING_MARKER 0x0008
-#define TX_TEST_MODE_16MHz 0x0000
-#define TX_TEST_MODE_8MHz 0x0010
-#define TX_TEST_MODE_5_33MHz 0x0020
-#define TX_TEST_MODE_4MHz 0x0030
-#define TX_TEST_MODE_3_2MHz 0x0040
-#define TX_TEST_MODE_2_66MHz 0x0050
-#define TX_TEST_MODE_2_29MHz 0x0060
-#define TX_NORMAL_OPERATION 0x0070
-#define ABR_ROUND_ROBIN 0x0080
-
-/* TX status register */
-
-#define IDLE_CHANNELS_MASK 0x00FF
-#define ABR_CELL_COUNT_REACHED_MULT 0x0100
-#define ABR_CELL_COUNT_REACHED_MASK 0xFF
-
-/* RX config register */
-
-#define NON_USER_CELLS_IN_ONE_CHANNEL 0x0008
-#define RX_ENABLE 0x0010
-#define IGNORE_UNUSED_VPI_VCI_BITS_SET 0x0000
-#define NON_USER_UNUSED_VPI_VCI_BITS_SET 0x0020
-#define DISCARD_UNUSED_VPI_VCI_BITS_SET 0x0040
-
-/* RX line config register */
-
-#define SIGNAL_LOSS 0x0001
-#define FREQUENCY_DETECT_ERROR 0x0002
-#define LOCK_DETECT_ERROR 0x0004
-#define SELECT_INTERNAL_LOOPBACK 0x0008
-#define LOCK_DETECT_ENABLE 0x0010
-#define FREQUENCY_DETECT_ENABLE 0x0020
-#define USER_FRAQ 0x0040
-#define GXTALOUT_SELECT_DIV4 0x0080
-#define GXTALOUT_SELECT_NO_GATING 0x0100
-#define TIMING_MARKER_RECEIVED 0x0200
-
-/* RX channel port */
-
-#define RX_CHANNEL_MASK 0x03FF
-// UNUSED 0x3C00
-#define FLUSH_CHANNEL 0x4000
-#define RX_CHANNEL_UPDATE_IN_PROGRESS 0x8000
-
-/* Receive queue entry */
-
-#define RX_Q_ENTRY_LENGTH_MASK 0x0000FFFF
-#define RX_Q_ENTRY_CHANNEL_SHIFT 16
-#define SIMONS_DODGEY_MARKER 0x08000000
-#define RX_CONGESTION_EXPERIENCED 0x10000000
-#define RX_CRC_10_OK 0x20000000
-#define RX_CRC_32_OK 0x40000000
-#define RX_COMPLETE_FRAME 0x80000000
-
-/* Offsets and constants for use with the buffer memory */
-
-/* Buffer pointers and channel types */
-
-#define BUFFER_PTR_MASK 0x0000FFFF
-#define RX_INT_THRESHOLD_MULT 0x00010000
-#define RX_INT_THRESHOLD_MASK 0x07FF
-#define INT_EVERY_N_CELLS 0x08000000
-#define CONGESTION_EXPERIENCED 0x10000000
-#define FIRST_CELL_OF_AAL5_FRAME 0x20000000
-#define CHANNEL_TYPE_AAL5 0x00000000
-#define CHANNEL_TYPE_RAW_CELLS 0x40000000
-#define CHANNEL_TYPE_AAL3_4 0x80000000
-
-/* Buffer status stuff */
-
-#define BUFF_STATUS_MASK 0x00030000
-#define BUFF_STATUS_EMPTY 0x00000000
-#define BUFF_STATUS_CELL_AV 0x00010000
-#define BUFF_STATUS_LAST_CELL_AV 0x00020000
-
-/* Transmit channel stuff */
-
-/* Receive channel stuff */
-
-#define RX_CHANNEL_DISABLED 0x00000000
-#define RX_CHANNEL_IDLE 0x00000001
-
-/* General things */
-
-#define INITIAL_CRC 0xFFFFFFFF
-
-// A Horizon u32, a byte! Really nasty. Horizon pointers are (32 bit)
-// word addresses and so standard C pointer operations break (as they
-// assume byte addresses); so we pretend that Horizon words (and word
-// pointers) are bytes (and byte pointers) for the purposes of having
-// a memory map that works.
-
-typedef u8 HDW;
-
-typedef struct cell_buf {
- HDW payload[12];
- HDW next;
- HDW cell_count; // AAL5 rx bufs
- HDW res;
- union {
- HDW partial_crc; // AAL5 rx bufs
- HDW cell_header; // RAW bufs
- } u;
-} cell_buf;
-
-typedef struct tx_ch_desc {
- HDW rd_buf_type;
- HDW wr_buf_type;
- HDW partial_crc;
- HDW cell_header;
-} tx_ch_desc;
-
-typedef struct rx_ch_desc {
- HDW wr_buf_type;
- HDW rd_buf_type;
-} rx_ch_desc;
-
-typedef struct rx_q_entry {
- HDW entry;
-} rx_q_entry;
-
-#define TX_CHANS 8
-#define RX_CHANS 1024
-#define RX_QS 1024
-#define MAX_VCS RX_CHANS
-
-/* Horizon buffer memory map */
-
-// TX Channel Descriptors 2
-// TX Initial Buffers 8 // TX_CHANS
-#define BUFN1_SIZE 118 // (126 - TX_CHANS)
-// RX/TX Start/End Buffers 4
-#define BUFN2_SIZE 124
-// RX Queue Entries 64
-#define BUFN3_SIZE 192
-// RX Channel Descriptors 128
-#define BUFN4_SIZE 1408
-// TOTAL cell_buff chunks 2048
-
-// cell_buf bufs[2048];
-// HDW dws[32768];
-
-typedef struct MEMMAP {
- tx_ch_desc tx_descs[TX_CHANS]; // 8 * 4 = 32 , 0x0020
- cell_buf inittxbufs[TX_CHANS]; // these are really
- cell_buf bufn1[BUFN1_SIZE]; // part of this pool
- cell_buf txfreebufstart;
- cell_buf txfreebufend;
- cell_buf rxfreebufstart;
- cell_buf rxfreebufend; // 8+118+1+1+1+1+124 = 254
- cell_buf bufn2[BUFN2_SIZE]; // 16 * 254 = 4064 , 0x1000
- rx_q_entry rx_q_entries[RX_QS]; // 1 * 1024 = 1024 , 0x1400
- cell_buf bufn3[BUFN3_SIZE]; // 16 * 192 = 3072 , 0x2000
- rx_ch_desc rx_descs[MAX_VCS]; // 2 * 1024 = 2048 , 0x2800
- cell_buf bufn4[BUFN4_SIZE]; // 16 * 1408 = 22528 , 0x8000
-} MEMMAP;
-
-#define memmap ((MEMMAP *)0)
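-
-// Illustrative sanity check (not in the original header): because HDW
-// is a byte-sized stand-in for a 32-bit word, offsetof() on this fake
-// map yields word addresses directly, matching the comments above.
-#if 0
-static inline void hrz_memmap_check (void) {
- BUILD_BUG_ON (offsetof (MEMMAP, rx_q_entries) != 0x1000);
- BUILD_BUG_ON (offsetof (MEMMAP, bufn3) != 0x1400);
- BUILD_BUG_ON (offsetof (MEMMAP, rx_descs) != 0x2000);
- BUILD_BUG_ON (offsetof (MEMMAP, bufn4) != 0x2800);
- BUILD_BUG_ON (sizeof (MEMMAP) != 0x8000);
-}
-#endif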
-
-/* end horizon specific bits */
-
-typedef enum {
- aal0,
- aal34,
- aal5
-} hrz_aal;
-
-typedef enum {
- tx_busy,
- rx_busy,
- ultra
-} hrz_flags;
-
-// a single struct pointed to by atm_vcc->dev_data
-
-typedef struct {
- unsigned int tx_rate;
- unsigned int rx_rate;
- u16 channel;
- u16 tx_xbr_bits;
- u16 tx_pcr_bits;
-#if 0
- u16 tx_scr_bits;
- u16 tx_bucket_bits;
-#endif
- hrz_aal aal;
-} hrz_vcc;
-
-struct hrz_dev {
-
- u32 iobase;
- u32 * membase;
-
- struct sk_buff * rx_skb; // skb being RXed
- unsigned int rx_bytes; // bytes remaining to RX within region
- void * rx_addr; // addr to send bytes to (for PIO)
- unsigned int rx_channel; // channel that the skb is going out on
-
- struct sk_buff * tx_skb; // skb being TXed
- unsigned int tx_bytes; // bytes remaining to TX within region
- void * tx_addr; // addr to send bytes from (for PIO)
- struct iovec * tx_iovec; // remaining regions
- unsigned int tx_regions; // number of remaining regions
-
- spinlock_t mem_lock;
- wait_queue_head_t tx_queue;
-
- u8 irq;
- unsigned long flags;
- u8 tx_last;
- u8 tx_idle;
-
- rx_q_entry * rx_q_reset;
- rx_q_entry * rx_q_entry;
- rx_q_entry * rx_q_wrap;
-
- struct atm_dev * atm_dev;
-
- u32 last_vc;
-
- int noof_spare_buffers;
- u16 spare_buffers[SPARE_BUFFER_POOL_SIZE];
-
- u16 tx_channel_record[TX_CHANS];
-
- // this is what we follow when we get incoming data
- u32 txer[MAX_VCS/32];
- struct atm_vcc * rxer[MAX_VCS];
-
- // cell rate allocation
- spinlock_t rate_lock;
- unsigned int rx_avail;
- unsigned int tx_avail;
-
- // dev stats
- unsigned long tx_cell_count;
- unsigned long rx_cell_count;
- unsigned long hec_error_count;
- unsigned long unassigned_cell_count;
-
- struct pci_dev * pci_dev;
- struct timer_list housekeeping;
-};
-
-typedef struct hrz_dev hrz_dev;
-
-/* macros for use later */
-
-#define BUF_PTR(cbptr) ((cbptr) - (cell_buf *) 0)
-
-#define INTERESTING_INTERRUPTS \
- (RX_DATA_AV | RX_DISABLED | TX_BUS_MASTER_COMPLETE | RX_BUS_MASTER_COMPLETE)
-
-// 190 cells by default (192 TX buffers - 2 elbow room, see docs)
-#define TX_AAL5_LIMIT (190*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER) // 9112
-
-// Have enough RX buffers (unless we allow other buffer splits)
-#define RX_AAL5_LIMIT ATM_MAX_AAL5_PDU
-
-/* multi-statement macro protector */
-#define DW(x) do{ x } while(0)
-
-#define HRZ_DEV(atm_dev) ((hrz_dev *) (atm_dev)->dev_data)
-#define HRZ_VCC(atm_vcc) ((hrz_vcc *) (atm_vcc)->dev_data)
-
-/* Turn the LEDs on and off */
-// The LEDs bits are upside down in that setting the bit in the debug
-// register will turn the appropriate LED off.
-
-#define YELLOW_LED DEBUG_BIT_0
-#define GREEN_LED DEBUG_BIT_1
-#define YELLOW_LED_OE DEBUG_BIT_0_OE
-#define GREEN_LED_OE DEBUG_BIT_1_OE
-
-#define GREEN_LED_OFF(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | GREEN_LED)
-#define GREEN_LED_ON(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ GREEN_LED)
-#define YELLOW_LED_OFF(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | YELLOW_LED)
-#define YELLOW_LED_ON(dev) \
- wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ YELLOW_LED)
-
-typedef enum {
- round_up,
- round_down,
- round_nearest
-} rounding;
-
-#endif /* DRIVER_ATM_HORIZON_H */
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index e0dda9062e6b..791f69a07ddf 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -14,11 +14,6 @@ typedef void __iomem *virt_addr_t;
#define CYCLE_DELAY 5
-/*
- This was the original definition
-#define osp_MicroDelay(microsec) \
- do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
-*/
#define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \
udelay((useconds));}
/*
diff --git a/drivers/atm/uPD98401.h b/drivers/atm/uPD98401.h
deleted file mode 100644
index f766a5ef0c5d..000000000000
--- a/drivers/atm/uPD98401.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/uPD98401.h - NEC uPD98401 (SAR) declarations */
-
-/* Written 1995 by Werner Almesberger, EPFL LRC */
-
-
-#ifndef DRIVERS_ATM_uPD98401_H
-#define DRIVERS_ATM_uPD98401_H
-
-
-#define MAX_CRAM_SIZE (1 << 18) /* 2^18 words */
-#define RAM_INCREMENT 1024 /* check in 4 kB increments */
-
-#define uPD98401_PORTS 0x24 /* probably more ? */
-
-
-/*
- * Commands
- */
-
-#define uPD98401_OPEN_CHAN 0x20000000 /* open channel */
-#define uPD98401_CHAN_ADDR 0x0003fff8 /* channel address */
-#define uPD98401_CHAN_ADDR_SHIFT 3
-#define uPD98401_CLOSE_CHAN 0x24000000 /* close channel */
-#define uPD98401_CHAN_RT 0x02000000 /* RX/TX (0 TX, 1 RX) */
-#define uPD98401_DEACT_CHAN 0x28000000 /* deactivate channel */
-#define uPD98401_TX_READY 0x30000000 /* TX ready */
-#define uPD98401_ADD_BAT 0x34000000 /* add batches */
-#define uPD98401_POOL 0x000f0000 /* pool number */
-#define uPD98401_POOL_SHIFT 16
-#define uPD98401_POOL_NUMBAT 0x0000ffff /* number of batches */
-#define uPD98401_NOP 0x3f000000 /* NOP */
-#define uPD98401_IND_ACC 0x00000000 /* Indirect Access */
-#define uPD98401_IA_RW 0x10000000 /* Read/Write (0 W, 1 R) */
-#define uPD98401_IA_B3 0x08000000 /* Byte select, 1 enable */
-#define uPD98401_IA_B2 0x04000000
-#define uPD98401_IA_B1 0x02000000
-#define uPD98401_IA_B0 0x01000000
-#define uPD98401_IA_BALL 0x0f000000 /* whole longword */
-#define uPD98401_IA_TGT 0x000c0000 /* Target */
-#define uPD98401_IA_TGT_SHIFT 18
-#define uPD98401_IA_TGT_CM 0 /* - Control Memory */
-#define uPD98401_IA_TGT_SAR 1 /* - uPD98401 registers */
-#define uPD98401_IA_TGT_PHY 3 /* - PHY device */
-#define uPD98401_IA_ADDR 0x0003ffff
-
-/*
- * Command Register Status
- */
-
-#define uPD98401_BUSY 0x80000000 /* SAR is busy */
-#define uPD98401_LOCKED 0x40000000 /* SAR is locked by other CPU */
-
-/*
- * Indications
- */
-
-/* Normal (AAL5) Receive Indication */
-#define uPD98401_AAL5_UINFO 0xffff0000 /* user-supplied information */
-#define uPD98401_AAL5_UINFO_SHIFT 16
-#define uPD98401_AAL5_SIZE 0x0000ffff /* PDU size (in _CELLS_ !!) */
-#define uPD98401_AAL5_CHAN 0x7fff0000 /* Channel number */
-#define uPD98401_AAL5_CHAN_SHIFT 16
-#define uPD98401_AAL5_ERR 0x00008000 /* Error indication */
-#define uPD98401_AAL5_CI 0x00004000 /* Congestion Indication */
-#define uPD98401_AAL5_CLP 0x00002000 /* CLP (>= 1 cell had CLP=1) */
-#define uPD98401_AAL5_ES 0x00000f00 /* Error Status */
-#define uPD98401_AAL5_ES_SHIFT 8
-#define uPD98401_AAL5_ES_NONE 0 /* No error */
-#define uPD98401_AAL5_ES_FREE 1 /* Receiver free buf underflow */
-#define uPD98401_AAL5_ES_FIFO 2 /* Receiver FIFO overrun */
-#define uPD98401_AAL5_ES_TOOBIG 3 /* Maximum length violation */
-#define uPD98401_AAL5_ES_CRC 4 /* CRC error */
-#define uPD98401_AAL5_ES_ABORT 5 /* User abort */
-#define uPD98401_AAL5_ES_LENGTH 6 /* Length violation */
-#define uPD98401_AAL5_ES_T1 7 /* T1 error (timeout) */
-#define uPD98401_AAL5_ES_DEACT 8 /* Deactivated with DEACT_CHAN */
-#define uPD98401_AAL5_POOL 0x0000001f /* Free buffer pool number */
-
-/* Raw Cell Indication */
-#define uPD98401_RAW_UINFO uPD98401_AAL5_UINFO
-#define uPD98401_RAW_UINFO_SHIFT uPD98401_AAL5_UINFO_SHIFT
-#define uPD98401_RAW_HEC 0x000000ff /* HEC */
-#define uPD98401_RAW_CHAN uPD98401_AAL5_CHAN
-#define uPD98401_RAW_CHAN_SHIFT uPD98401_AAL5_CHAN_SHIFT
-
-/* Transmit Indication */
-#define uPD98401_TXI_CONN 0x7fff0000 /* Connection Number */
-#define uPD98401_TXI_CONN_SHIFT 16
-#define uPD98401_TXI_ACTIVE 0x00008000 /* Channel remains active */
-#define uPD98401_TXI_PQP 0x00007fff /* Packet Queue Pointer */
-
-/*
- * Directly Addressable Registers
- */
-
-#define uPD98401_GMR 0x00 /* General Mode Register */
-#define uPD98401_GSR 0x01 /* General Status Register */
-#define uPD98401_IMR 0x02 /* Interrupt Mask Register */
-#define uPD98401_RQU 0x03 /* Receive Queue Underrun */
-#define uPD98401_RQA 0x04 /* Receive Queue Alert */
-#define uPD98401_ADDR 0x05 /* Last Burst Address */
-#define uPD98401_VER 0x06 /* Version Number */
-#define uPD98401_SWR 0x07 /* Software Reset */
-#define uPD98401_CMR 0x08 /* Command Register */
-#define uPD98401_CMR_L 0x09 /* Command Register and Lock/Unlock */
-#define uPD98401_CER 0x0a /* Command Extension Register */
-#define uPD98401_CER_L 0x0b /* Command Ext Reg and Lock/Unlock */
-
-#define uPD98401_MSH(n) (0x10+(n)) /* Mailbox n Start Address High */
-#define uPD98401_MSL(n) (0x14+(n)) /* Mailbox n Start Address Low */
-#define uPD98401_MBA(n) (0x18+(n)) /* Mailbox n Bottom Address */
-#define uPD98401_MTA(n) (0x1c+(n)) /* Mailbox n Tail Address */
-#define uPD98401_MWA(n) (0x20+(n)) /* Mailbox n Write Address */
-
-/* GMR is at 0x00 */
-#define uPD98401_GMR_ONE 0x80000000 /* Must be set to one */
-#define uPD98401_GMR_SLM 0x40000000 /* Address mode (0 word, 1 byte) */
-#define uPD98401_GMR_CPE 0x00008000 /* Control Memory Parity Enable */
-#define uPD98401_GMR_LP 0x00004000 /* Loopback */
-#define uPD98401_GMR_WA 0x00002000 /* Early Bus Write Abort/RDY */
-#define uPD98401_GMR_RA 0x00001000 /* Early Read Abort/RDY */
-#define uPD98401_GMR_SZ 0x00000f00 /* Burst Size Enable */
-#define uPD98401_BURST16 0x00000800 /* 16-word burst */
-#define uPD98401_BURST8 0x00000400 /* 8-word burst */
-#define uPD98401_BURST4 0x00000200 /* 4-word burst */
-#define uPD98401_BURST2 0x00000100 /* 2-word burst */
-#define uPD98401_GMR_AD 0x00000080 /* Address (burst resolution) Disable */
-#define uPD98401_GMR_BO 0x00000040 /* Byte Order (0 little, 1 big) */
-#define uPD98401_GMR_PM 0x00000020 /* Bus Parity Mode (0 byte, 1 word)*/
-#define uPD98401_GMR_PC 0x00000010 /* Bus Parity Control (0even,1odd) */
-#define uPD98401_GMR_BPE 0x00000008 /* Bus Parity Enable */
-#define uPD98401_GMR_DR 0x00000004 /* Receive Drop Mode (0drop,1don't)*/
-#define uPD98401_GMR_SE 0x00000002 /* Shapers Enable */
-#define uPD98401_GMR_RE 0x00000001 /* Receiver Enable */
-
-/* GSR is at 0x01, IMR is at 0x02 */
-#define uPD98401_INT_PI 0x80000000 /* PHY interrupt */
-#define uPD98401_INT_RQA 0x40000000 /* Receive Queue Alert */
-#define uPD98401_INT_RQU 0x20000000 /* Receive Queue Underrun */
-#define uPD98401_INT_RD 0x10000000 /* Receiver Deactivated */
-#define uPD98401_INT_SPE 0x08000000 /* System Parity Error */
-#define uPD98401_INT_CPE 0x04000000 /* Control Memory Parity Error */
-#define uPD98401_INT_SBE 0x02000000 /* System Bus Error */
-#define uPD98401_INT_IND 0x01000000 /* Initialization Done */
-#define uPD98401_INT_RCR 0x0000ff00 /* Raw Cell Received */
-#define uPD98401_INT_RCR_SHIFT 8
-#define uPD98401_INT_MF 0x000000f0 /* Mailbox Full */
-#define uPD98401_INT_MF_SHIFT 4
-#define uPD98401_INT_MM 0x0000000f /* Mailbox Modified */
-
-/* VER is at 0x06 */
-#define uPD98401_MAJOR 0x0000ff00 /* Major revision */
-#define uPD98401_MAJOR_SHIFT 8
-#define uPD98401_MINOR 0x000000ff /* Minor revision */
-
-/*
- * Indirectly Addressable Registers
- */
-
-#define uPD98401_IM(n) (0x40000+(n)) /* Scheduler n I and M */
-#define uPD98401_X(n) (0x40010+(n)) /* Scheduler n X */
-#define uPD98401_Y(n) (0x40020+(n)) /* Scheduler n Y */
-#define uPD98401_PC(n) (0x40030+(n)) /* Scheduler n P, C, p and c */
-#define uPD98401_PS(n) (0x40040+(n)) /* Scheduler n priority and status */
-
-/* IM contents */
-#define uPD98401_IM_I 0xff000000 /* I */
-#define uPD98401_IM_I_SHIFT 24
-#define uPD98401_IM_M 0x00ffffff /* M */
-
-/* PC contents */
-#define uPD98401_PC_P 0xff000000 /* P */
-#define uPD98401_PC_P_SHIFT 24
-#define uPD98401_PC_C 0x00ff0000 /* C */
-#define uPD98401_PC_C_SHIFT 16
-#define uPD98401_PC_p 0x0000ff00 /* p */
-#define uPD98401_PC_p_SHIFT 8
-#define uPD98401_PC_c 0x000000ff /* c */
-
-/* PS contents */
-#define uPD98401_PS_PRIO 0xf0 /* Priority level (0 high, 15 low) */
-#define uPD98401_PS_PRIO_SHIFT 4
-#define uPD98401_PS_S 0x08 /* Scan - must be 0 (internal) */
-#define uPD98401_PS_R 0x04 /* Round Robin (internal) */
-#define uPD98401_PS_A 0x02 /* Active (internal) */
-#define uPD98401_PS_E 0x01 /* Enabled */
-
-#define uPD98401_TOS 0x40100 /* Top of Stack Control Memory Address */
-#define uPD98401_SMA 0x40200 /* Shapers Control Memory Start Address */
-#define uPD98401_PMA 0x40201 /* Receive Pool Control Memory Start Address */
-#define uPD98401_T1R 0x40300 /* T1 Register */
-#define uPD98401_VRR 0x40301 /* VPI/VCI Reduction Register/Recv. Shutdown */
-#define uPD98401_TSR 0x40302 /* Time-Stamp Register */
-
-/* VRR is at 0x40301 */
-#define uPD98401_VRR_SDM 0x80000000 /* Shutdown Mode */
-#define uPD98401_VRR_SHIFT 0x000f0000 /* VPI/VCI Shift */
-#define uPD98401_VRR_SHIFT_SHIFT 16
-#define uPD98401_VRR_MASK 0x0000ffff /* VPI/VCI mask */
-
-/*
- * TX packet descriptor
- */
-
-#define uPD98401_TXPD_SIZE 16 /* descriptor size (in bytes) */
-
-#define uPD98401_TXPD_V 0x80000000 /* Valid bit */
-#define uPD98401_TXPD_DP 0x40000000 /* Descriptor (1) or Pointer (0) */
-#define uPD98401_TXPD_SM 0x20000000 /* Single (1) or Multiple (0) */
-#define uPD98401_TXPD_CLPM 0x18000000 /* CLP mode */
-#define uPD98401_CLPM_0 0 /* 00 CLP = 0 */
-#define uPD98401_CLPM_1 3 /* 11 CLP = 1 */
-#define uPD98401_CLPM_LAST 1 /* 01 CLP unless last cell */
-#define uPD98401_TXPD_CLPM_SHIFT 27
-#define uPD98401_TXPD_PTI 0x07000000 /* PTI pattern */
-#define uPD98401_TXPD_PTI_SHIFT 24
-#define uPD98401_TXPD_GFC 0x00f00000 /* GFC pattern */
-#define uPD98401_TXPD_GFC_SHIFT 20
-#define uPD98401_TXPD_C10 0x00040000 /* insert CRC-10 */
-#define uPD98401_TXPD_AAL5 0x00020000 /* AAL5 processing */
-#define uPD98401_TXPD_MB 0x00010000 /* TX mailbox number */
-#define uPD98401_TXPD_UU 0x0000ff00 /* CPCS-UU */
-#define uPD98401_TXPD_UU_SHIFT 8
-#define uPD98401_TXPD_CPI 0x000000ff /* CPI */
-
-/*
- * TX buffer descriptor
- */
-
-#define uPD98401_TXBD_SIZE 8 /* descriptor size (in bytes) */
-
-#define uPD98401_TXBD_LAST 0x80000000 /* last buffer in packet */
-
-/*
- * TX VC table
- */
-
-/* 1st word has the same structure as in a TX packet descriptor */
-#define uPD98401_TXVC_L 0x80000000 /* last buffer */
-#define uPD98401_TXVC_SHP 0x0f000000 /* shaper number */
-#define uPD98401_TXVC_SHP_SHIFT 24
-#define uPD98401_TXVC_VPI 0x00ff0000 /* VPI */
-#define uPD98401_TXVC_VPI_SHIFT 16
-#define uPD98401_TXVC_VCI 0x0000ffff /* VCI */
-#define uPD98401_TXVC_QRP 6 /* Queue Read Pointer is in word 6 */
-
-/*
- * RX free buffer pools descriptor
- */
-
-#define uPD98401_RXFP_ALERT 0x70000000 /* low water mark */
-#define uPD98401_RXFP_ALERT_SHIFT 28
-#define uPD98401_RXFP_BFSZ 0x0f000000 /* buffer size, 64*2^n */
-#define uPD98401_RXFP_BFSZ_SHIFT 24
-#define uPD98401_RXFP_BTSZ 0x00ff0000 /* batch size, n+1 */
-#define uPD98401_RXFP_BTSZ_SHIFT 16
-#define uPD98401_RXFP_REMAIN 0x0000ffff /* remaining batches in pool */
-
-/*
- * RX VC table
- */
-
-#define uPD98401_RXVC_BTSZ 0xff000000 /* remaining free buffers in batch */
-#define uPD98401_RXVC_BTSZ_SHIFT 24
-#define uPD98401_RXVC_MB 0x00200000 /* RX mailbox number */
-#define uPD98401_RXVC_POOL 0x001f0000 /* free buffer pool number */
-#define uPD98401_RXVC_POOL_SHIFT 16
-#define uPD98401_RXVC_UINFO 0x0000ffff /* user-supplied information */
-#define uPD98401_RXVC_T1 0xffff0000 /* T1 timestamp */
-#define uPD98401_RXVC_T1_SHIFT 16
-#define uPD98401_RXVC_PR 0x00008000 /* Packet Reception, 1 if busy */
-#define uPD98401_RXVC_DR 0x00004000 /* FIFO Drop */
-#define uPD98401_RXVC_OD 0x00001000 /* Drop OAM cells */
-#define uPD98401_RXVC_AR 0x00000800 /* AAL5 or raw cell; 1 if AAL5 */
-#define uPD98401_RXVC_MAXSEG 0x000007ff /* max number of segments per PDU */
-#define uPD98401_RXVC_REM 0xfffe0000 /* remaining words in curr buffer */
-#define uPD98401_RXVC_REM_SHIFT 17
-#define uPD98401_RXVC_CLP 0x00010000 /* CLP received */
-#define uPD98401_RXVC_BFA 0x00008000 /* Buffer Assigned */
-#define uPD98401_RXVC_BTA 0x00004000 /* Batch Assigned */
-#define uPD98401_RXVC_CI 0x00002000 /* Congestion Indication */
-#define uPD98401_RXVC_DD 0x00001000 /* Dropping incoming cells */
-#define uPD98401_RXVC_DP 0x00000800 /* like PR ? */
-#define uPD98401_RXVC_CURSEG 0x000007ff /* Current Segment count */
-
-/*
- * RX lookup table
- */
-
-#define uPD98401_RXLT_ENBL 0x8000 /* Enable */
-
-#endif
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
deleted file mode 100644
index 239852d85558..000000000000
--- a/drivers/atm/uPD98402.c
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/atm/uPD98402.c - NEC uPD98402 (PHY) device driver */
-
-/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/atomic.h>
-
-#include "uPD98402.h"
-
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-
-struct uPD98402_priv {
- struct k_sonet_stats sonet_stats;/* link diagnostics */
- unsigned char framing; /* SONET/SDH framing */
- int loop_mode; /* loopback mode */
- spinlock_t lock;
-};
-
-
-#define PRIV(dev) ((struct uPD98402_priv *) dev->phy_data)
-
-#define PUT(val,reg) dev->ops->phy_put(dev,val,uPD98402_##reg)
-#define GET(reg) dev->ops->phy_get(dev,uPD98402_##reg)
-
-
-static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int zero)
-{
- struct sonet_stats tmp;
- int error = 0;
-
- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
- sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
- if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
- if (zero && !error) {
- /* unused fields are reported as -1, but we must not "adjust"
- them */
- tmp.corr_hcs = tmp.tx_cells = tmp.rx_cells = 0;
- sonet_subtract_stats(&PRIV(dev)->sonet_stats,&tmp);
- }
- return error ? -EFAULT : 0;
-}
-
-
-static int set_framing(struct atm_dev *dev,unsigned char framing)
-{
- static const unsigned char sonet[] = { 1,2,3,0 };
- static const unsigned char sdh[] = { 1,0,0,2 };
- const unsigned char *set;
- unsigned long flags;
-
- switch (framing) {
- case SONET_FRAME_SONET:
- set = sonet;
- break;
- case SONET_FRAME_SDH:
- set = sdh;
- break;
- default:
- return -EINVAL;
- }
- spin_lock_irqsave(&PRIV(dev)->lock, flags);
- PUT(set[0],C11T);
- PUT(set[1],C12T);
- PUT(set[2],C13T);
- PUT((GET(MDR) & ~uPD98402_MDR_SS_MASK) | (set[3] <<
- uPD98402_MDR_SS_SHIFT),MDR);
- spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
- /* remember the setting so SONET_GETFRAMING reports it back */
- PRIV(dev)->framing = framing;
- return 0;
-}
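-
-/* Illustrative: SONET_SETFRAMING with SONET_FRAME_SONET writes 1/2/3
- * to C11T/C12T/C13T and SS = 0 into MDR; SONET_FRAME_SDH writes 1/0/0
- * and SS = 2. */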
-
-
-static int get_sense(struct atm_dev *dev,u8 __user *arg)
-{
- unsigned long flags;
- unsigned char s[3];
-
- spin_lock_irqsave(&PRIV(dev)->lock, flags);
- s[0] = GET(C11R);
- s[1] = GET(C12R);
- s[2] = GET(C13R);
- spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
- return (put_user(s[0], arg) || put_user(s[1], arg+1) ||
- put_user(s[2], arg+2) || put_user(0xff, arg+3) ||
- put_user(0xff, arg+4) || put_user(0xff, arg+5)) ? -EFAULT : 0;
-}
-
-
-static int set_loopback(struct atm_dev *dev,int mode)
-{
- unsigned char mode_reg;
-
- mode_reg = GET(MDR) & ~(uPD98402_MDR_TPLP | uPD98402_MDR_ALP |
- uPD98402_MDR_RPLP);
- switch (__ATM_LM_XTLOC(mode)) {
- case __ATM_LM_NONE:
- break;
- case __ATM_LM_PHY:
- mode_reg |= uPD98402_MDR_TPLP;
- break;
- case __ATM_LM_ATM:
- mode_reg |= uPD98402_MDR_ALP;
- break;
- default:
- return -EINVAL;
- }
- switch (__ATM_LM_XTRMT(mode)) {
- case __ATM_LM_NONE:
- break;
- case __ATM_LM_PHY:
- mode_reg |= uPD98402_MDR_RPLP;
- break;
- default:
- return -EINVAL;
- }
- PUT(mode_reg,MDR);
- PRIV(dev)->loop_mode = mode;
- return 0;
-}
-
-
-static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- switch (cmd) {
-
- case SONET_GETSTATZ:
- case SONET_GETSTAT:
- return fetch_stats(dev,arg, cmd == SONET_GETSTATZ);
- case SONET_SETFRAMING:
- return set_framing(dev, (int)(unsigned long)arg);
- case SONET_GETFRAMING:
- return put_user(PRIV(dev)->framing,(int __user *)arg) ?
- -EFAULT : 0;
- case SONET_GETFRSENSE:
- return get_sense(dev,arg);
- case ATM_SETLOOP:
- return set_loopback(dev, (int)(unsigned long)arg);
- case ATM_GETLOOP:
- return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ?
- -EFAULT : 0;
- case ATM_QUERYLOOP:
- return put_user(ATM_LM_LOC_PHY | ATM_LM_LOC_ATM |
- ATM_LM_RMT_PHY,(int __user *)arg) ? -EFAULT : 0;
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-
-#define ADD_LIMITED(s,v) \
- do { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } while (0)
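-
-/* Illustrative: ADD_LIMITED(line_febe,LECCT) reads the on-chip LECCT
- * counter, adds it into sonet_stats.line_febe and pins the total at
- * INT_MAX if the signed atomic would otherwise go negative. */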
-
-
-static void stat_event(struct atm_dev *dev)
-{
- unsigned char events;
-
- events = GET(PCR);
- if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB);
- if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT);
- if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT);
- if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT);
- if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT);
-}
-
-
-#undef ADD_LIMITED
-
-
-static void uPD98402_int(struct atm_dev *dev)
-{
- static unsigned long silence = 0;
- unsigned char reason;
-
- while ((reason = GET(PICR))) {
- if (reason & uPD98402_INT_LOS)
- printk(KERN_NOTICE "%s(itf %d): signal lost\n",
- dev->type,dev->number);
- if (reason & uPD98402_INT_PFM) stat_event(dev);
- if (reason & uPD98402_INT_PCO) {
- (void) GET(PCOCR); /* clear interrupt cause */
- atomic_add(GET(HECCT),
- &PRIV(dev)->sonet_stats.uncorr_hcs);
- }
- if ((reason & uPD98402_INT_RFO) &&
- (time_after(jiffies, silence) || silence == 0)) {
- printk(KERN_WARNING "%s(itf %d): uPD98402 receive "
- "FIFO overflow\n",dev->type,dev->number);
- silence = (jiffies+HZ/2)|1;
- }
- }
-}
-
-
-static int uPD98402_start(struct atm_dev *dev)
-{
- DPRINTK("phy_start\n");
- if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
- return -ENOMEM;
- spin_lock_init(&PRIV(dev)->lock);
- memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
- (void) GET(PCR); /* clear performance events */
- PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */
- (void) GET(PCOCR); /* clear overflows */
- PUT(~uPD98402_PCO_HECC,PCOMR);
- (void) GET(PICR); /* clear interrupts */
- PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
- uPD98402_INT_LOS),PIMR); /* enable them */
- (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
- return 0;
-}
-
-
-static int uPD98402_stop(struct atm_dev *dev)
-{
- /* let SAR driver worry about stopping interrupts */
- kfree(PRIV(dev));
- return 0;
-}
-
-
-static const struct atmphy_ops uPD98402_ops = {
- .start = uPD98402_start,
- .ioctl = uPD98402_ioctl,
- .interrupt = uPD98402_int,
- .stop = uPD98402_stop,
-};
-
-
-int uPD98402_init(struct atm_dev *dev)
-{
- DPRINTK("phy_init\n");
- dev->phy = &uPD98402_ops;
- return 0;
-}
-
-
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(uPD98402_init);
-
-static __init int uPD98402_module_init(void)
-{
- return 0;
-}
-module_init(uPD98402_module_init);
-/* module_exit not defined so not unloadable */
diff --git a/drivers/atm/uPD98402.h b/drivers/atm/uPD98402.h
deleted file mode 100644
index 437cfaa20c96..000000000000
--- a/drivers/atm/uPD98402.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/uPD98402.h - NEC uPD98402 (PHY) declarations */
-
-/* Written 1995 by Werner Almesberger, EPFL LRC */
-
-
-#ifndef DRIVERS_ATM_uPD98402_H
-#define DRIVERS_ATM_uPD98402_H
-
-/*
- * Registers
- */
-
-#define uPD98402_CMR 0x00 /* Command Register */
-#define uPD98402_MDR 0x01 /* Mode Register */
-#define uPD98402_PICR 0x02 /* PHY Interrupt Cause Register */
-#define uPD98402_PIMR 0x03 /* PHY Interrupt Mask Register */
-#define uPD98402_ACR 0x04 /* Alarm Cause Register */
-#define uPD98402_ACMR 0x05 /* Alarm Cause Mask Register */
-#define uPD98402_PCR 0x06 /* Performance Cause Register */
-#define uPD98402_PCMR 0x07 /* Performance Cause Mask Register */
-#define uPD98402_IACM 0x08 /* Internal Alarm Cause Mask Register */
-#define uPD98402_B1ECT 0x09 /* B1 Error Count Register */
-#define uPD98402_B2ECT 0x0a /* B2 Error Count Register */
-#define uPD98402_B3ECT 0x0b /* B3 Error Count Register */
-#define uPD98402_PFECB 0x0c /* Path FEBE Count Register */
-#define uPD98402_LECCT 0x0d /* Line FEBE Count Register */
-#define uPD98402_HECCT 0x0e /* HEC Error Count Register */
-#define uPD98402_FJCT 0x0f /* Frequency Justification Count Reg */
-#define uPD98402_PCOCR 0x10 /* Perf. Counter Overflow Cause Reg */
-#define uPD98402_PCOMR 0x11 /* Perf. Counter Overflow Mask Reg */
-#define uPD98402_C11T 0x20 /* C11T Data Register */
-#define uPD98402_C12T 0x21 /* C12T Data Register */
-#define uPD98402_C13T 0x22 /* C13T Data Register */
-#define uPD98402_F1T 0x23 /* F1T Data Register */
-#define uPD98402_K2T 0x25 /* K2T Data Register */
-#define uPD98402_C2T 0x26 /* C2T Data Register */
-#define uPD98402_F2T 0x27 /* F2T Data Register */
-#define uPD98402_C11R 0x30 /* C11R Data Register */
-#define uPD98402_C12R 0x31 /* C12R Data Register */
-#define uPD98402_C13R 0x32 /* C13R Data Register */
-#define uPD98402_F1R 0x33 /* F1R Data Register */
-#define uPD98402_K2R 0x35 /* K2R Data Register */
-#define uPD98402_C2R 0x36 /* C2R Data Register */
-#define uPD98402_F2R 0x37 /* F2R Data Register */
-
-/* CMR is at 0x00 */
-#define uPD98402_CMR_PFRF 0x01 /* Send path FERF */
-#define uPD98402_CMR_LFRF 0x02 /* Send line FERF */
-#define uPD98402_CMR_PAIS 0x04 /* Send path AIS */
-#define uPD98402_CMR_LAIS 0x08 /* Send line AIS */
-
-/* MDR is at 0x01 */
-#define uPD98402_MDR_ALP 0x01 /* ATM layer loopback */
-#define uPD98402_MDR_TPLP 0x02 /* PMD loopback, to host */
-#define uPD98402_MDR_RPLP 0x04 /* PMD loopback, to network */
-#define uPD98402_MDR_SS0 0x08 /* SS0 */
-#define uPD98402_MDR_SS1 0x10 /* SS1 */
-#define uPD98402_MDR_SS_MASK 0x18 /* mask */
-#define uPD98402_MDR_SS_SHIFT 3 /* shift */
-#define uPD98402_MDR_HEC 0x20 /* disable HEC inbound processing */
-#define uPD98402_MDR_FSR 0x40 /* disable frame scrambler */
-#define uPD98402_MDR_CSR 0x80 /* disable cell scrambler */
-
-/* PICR is at 0x02, PIMR is at 0x03 */
-#define uPD98402_INT_PFM 0x01 /* performance counter has changed */
-#define uPD98402_INT_ALM 0x02 /* line fault */
-#define uPD98402_INT_RFO 0x04 /* receive FIFO overflow */
-#define uPD98402_INT_PCO 0x08 /* performance counter overflow */
-#define uPD98402_INT_OTD 0x20 /* OTD has occurred */
-#define uPD98402_INT_LOS 0x40 /* Loss Of Signal */
-#define uPD98402_INT_LOF 0x80 /* Loss Of Frame */
-
-/* ACR is at 0x04, ACMR is at 0x05 */
-#define uPD98402_ALM_PFRF 0x01 /* path FERF */
-#define uPD98402_ALM_LFRF 0x02 /* line FERF */
-#define uPD98402_ALM_PAIS 0x04 /* path AIS */
-#define uPD98402_ALM_LAIS 0x08 /* line AIS */
-#define uPD98402_ALM_LOD 0x10 /* loss of delineation */
-#define uPD98402_ALM_LOP 0x20 /* loss of pointer */
-#define uPD98402_ALM_OOF 0x40 /* out of frame */
-
-/* PCR is at 0x06, PCMR is at 0x07 */
-#define uPD98402_PFM_PFEB 0x01 /* path FEBE */
-#define uPD98402_PFM_LFEB 0x02 /* line FEBE */
-#define uPD98402_PFM_B3E 0x04 /* B3 error */
-#define uPD98402_PFM_B2E 0x08 /* B2 error */
-#define uPD98402_PFM_B1E 0x10 /* B1 error */
-#define uPD98402_PFM_FJ 0x20 /* frequency justification */
-
-/* IACM is at 0x08 */
-#define uPD98402_IACM_PFRF 0x01 /* don't generate path FERF */
-#define uPD98402_IACM_LFRF 0x02 /* don't generate line FERF */
-
-/* PCOCR is at 0x10, PCOMR is at 0x11 */
-#define uPD98402_PCO_B1EC 0x01 /* B1ECT overflow */
-#define uPD98402_PCO_B2EC 0x02 /* B2ECT overflow */
-#define uPD98402_PCO_B3EC 0x04 /* B3ECT overflow */
-#define uPD98402_PCO_PFBC 0x08 /* PFECB overflow */
-#define uPD98402_PCO_LFBC 0x10 /* LECCT overflow */
-#define uPD98402_PCO_HECC 0x20 /* HECCT overflow */
-#define uPD98402_PCO_FJC 0x40 /* FJCT overflow */
-
-
-int uPD98402_init(struct atm_dev *dev);
-
-#endif
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
deleted file mode 100644
index cf5fffcf98a1..000000000000
--- a/drivers/atm/zatm.c
+++ /dev/null
@@ -1,1652 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */
-
-/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/uio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/atm_zatm.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/nospec.h>
-
-#include "uPD98401.h"
-#include "uPD98402.h"
-#include "zeprom.h"
-#include "zatm.h"
-
-
-/*
- * TODO:
- *
- * Minor features
- * - support 64 kB SDUs (will have to use multibuffer batches then :-( )
- * - proper use of CDV, credit = max(1,CDVT*PCR)
- * - AAL0
- * - better receive timestamps
- * - OAM
- */
-
-#define ZATM_COPPER 1
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-#ifndef CONFIG_ATM_ZATM_DEBUG
-
-
-#define NULLCHECK(x)
-
-#define EVENT(s,a,b)
-
-
-static void event_dump(void)
-{
-}
-
-
-#else
-
-
-/*
- * NULL pointer checking
- */
-
-#define NULLCHECK(x) \
- if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x))
-
-/*
- * Very extensive activity logging. Greatly improves bug detection speed but
- * costs a few Mbps if enabled.
- */
-
-#define EV 64
-
-static const char *ev[EV];
-static unsigned long ev_a[EV],ev_b[EV];
-static int ec = 0;
-
-
-static void EVENT(const char *s,unsigned long a,unsigned long b)
-{
- ev[ec] = s;
- ev_a[ec] = a;
- ev_b[ec] = b;
- ec = (ec+1) % EV;
-}
-
-
-static void event_dump(void)
-{
- int n,i;
-
- printk(KERN_NOTICE "----- event dump follows -----\n");
- for (n = 0; n < EV; n++) {
- i = (ec+n) % EV;
- printk(KERN_NOTICE);
- printk(ev[i] ? ev[i] : "(null)",ev_a[i],ev_b[i]);
- }
- printk(KERN_NOTICE "----- event dump ends here -----\n");
-}
-
-
-#endif /* CONFIG_ATM_ZATM_DEBUG */
-
-
-#define RING_BUSY 1 /* indication from do_tx that PDU has to be
- backlogged */
-
-static struct atm_dev *zatm_boards = NULL;
-static unsigned long dummy[2] = {0,0};
-
-
-#define zin_n(r) inl(zatm_dev->base+r*4)
-#define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
-#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
-#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
-
-/* RX0, RX1, TX0, TX1 */
-static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
-static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
-
-#define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i])
-
-
-/*-------------------------------- utilities --------------------------------*/
-
-
-static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
-{
- zwait();
- zout(value,CER);
- zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
- (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-}
-
-
-static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
-{
- zwait();
- zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
- (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait();
- return zin(CER);
-}
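-
-
-/*
- * Illustrative sketch, not part of the original driver: the usual
- * read-modify-write pattern built from the two helpers above, as
- * open_rx_second() does by hand. Callers must serialize with
- * zatm_dev->lock, since CMR/CER are shared; each helper already
- * busy-waits on the BUSY bit via zwait().
- */
-
-static void zpokel_bits(struct zatm_dev *zatm_dev,u32 set,u32 addr)
-{
-	zpokel(zatm_dev,zpeekl(zatm_dev,addr) | set,addr);
-}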
-
-
-/*------------------------------- free lists --------------------------------*/
-
-
-/*
- * Free buffer head structure:
- * [0] pointer to buffer (for SAR)
- * [1] buffer descr link pointer (for SAR)
- * [2] back pointer to skb (for poll_rx)
- * [3] data
- * ...
- */
-
-struct rx_buffer_head {
- u32 buffer; /* pointer to buffer (for SAR) */
- u32 link; /* buffer descriptor link pointer (for SAR) */
- struct sk_buff *skb; /* back pointer to skb (for poll_rx) */
-};
-
-
-static void refill_pool(struct atm_dev *dev,int pool)
-{
- struct zatm_dev *zatm_dev;
- struct sk_buff *skb;
- struct rx_buffer_head *first;
- unsigned long flags;
- int align,offset,free,count,size;
-
- EVENT("refill_pool\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :
- pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);
- if (size < PAGE_SIZE) {
- align = 32; /* for 32 byte alignment */
- offset = sizeof(struct rx_buffer_head);
- }
- else {
- align = 4096;
- offset = zatm_dev->pool_info[pool].offset+
- sizeof(struct rx_buffer_head);
- }
- size += align;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &
- uPD98401_RXFP_REMAIN;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- if (free >= zatm_dev->pool_info[pool].low_water) return;
- EVENT("starting ... POOL: 0x%x, 0x%x\n",
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
- EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- count = 0;
- first = NULL;
- while (free < zatm_dev->pool_info[pool].high_water) {
- struct rx_buffer_head *head;
-
- skb = alloc_skb(size,GFP_ATOMIC);
- if (!skb) {
- printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new "
- "skb (%d) with %d free\n",dev->number,size,free);
- break;
- }
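-		/* advance skb->data just far enough that the payload
-		 * following the rx_buffer_head header lands on an "align"
-		 * boundary (32 bytes, or a page for the large pools) */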
- skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+
- align+offset-1) & ~(unsigned long) (align-1))-offset)-
- skb->data);
- head = (struct rx_buffer_head *) skb->data;
- skb_reserve(skb,sizeof(struct rx_buffer_head));
- if (!first) first = head;
- count++;
- head->buffer = virt_to_bus(skb->data);
- head->link = 0;
- head->skb = skb;
- EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb,
- (unsigned long) head);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- if (zatm_dev->last_free[pool])
- ((struct rx_buffer_head *) (zatm_dev->last_free[pool]->
- data))[-1].link = virt_to_bus(head);
- zatm_dev->last_free[pool] = skb;
- skb_queue_tail(&zatm_dev->pool[pool],skb);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- free++;
- }
- if (first) {
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(virt_to_bus(first),CER);
- zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
- CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- EVENT ("POOL: 0x%x, 0x%x\n",
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),
- zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));
- EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- }
-}
-
-
-static void drain_free(struct atm_dev *dev,int pool)
-{
- skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
-}
-
-
-static int pool_index(int max_pdu)
-{
- int i;
-
- if (max_pdu % ATM_CELL_PAYLOAD)
- printk(KERN_ERR DEV_LABEL ": driver error in pool_index: "
- "max_pdu is %d\n",max_pdu);
- if (max_pdu > 65536) return -1;
- for (i = 0; (64 << i) < max_pdu; i++);
- return i+ZATM_AAL5_POOL_BASE;
-}
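-
-/*
- * Worked example: AAL5 pool ZATM_AAL5_POOL_BASE+i holds buffers of
- * 64 << i bytes, so a PDU of up to 24 cells (24*48 = 1152 bytes)
- * needs the smallest i with 64 << i >= 1152, i.e. i = 5 (2048 bytes),
- * and pool_index() returns ZATM_AAL5_POOL_BASE+5.
- */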
-
-
-/* use_pool isn't reentrant */
-
-
-static void use_pool(struct atm_dev *dev,int pool)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
- int size;
-
- zatm_dev = ZATM_DEV(dev);
- if (!(zatm_dev->pool_info[pool].ref_count++)) {
- skb_queue_head_init(&zatm_dev->pool[pool]);
- size = pool-ZATM_AAL5_POOL_BASE;
- if (size < 0) size = 0; /* 64B... */
- else if (size > 10) size = 10; /* ... 64kB */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
- uPD98401_RXFP_ALERT_SHIFT) |
- (1 << uPD98401_RXFP_BTSZ_SHIFT) |
- (size << uPD98401_RXFP_BFSZ_SHIFT),
- zatm_dev->pool_base+pool*2);
- zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+
- pool*2+1);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->last_free[pool] = NULL;
- refill_pool(dev,pool);
- }
- DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
-}
-
-
-static void unuse_pool(struct atm_dev *dev,int pool)
-{
- if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count))
- drain_free(dev,pool);
-}
-
-/*----------------------------------- RX ------------------------------------*/
-
-
-#if 0
-static void exception(struct atm_vcc *vcc)
-{
- static int count = 0;
- struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev);
- struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc);
- unsigned long *qrp;
- int i;
-
- if (count++ > 2) return;
- for (i = 0; i < 8; i++)
- printk("TX%d: 0x%08lx\n",i,
- zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i));
- for (i = 0; i < 5; i++)
- printk("SH%d: 0x%08lx\n",i,
- zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i));
- qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
- uPD98401_TXVC_QRP);
- printk("qrp=0x%08lx\n",(unsigned long) qrp);
- for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]);
-}
-#endif
-
-
-static const char *err_txt[] = {
- "No error",
- "RX buf underflow",
- "RX FIFO overrun",
- "Maximum len violation",
- "CRC error",
- "User abort",
- "Length violation",
- "T1 error",
- "Deactivated",
- "???",
- "???",
- "???",
- "???",
- "???",
- "???",
- "???"
-};
-
-
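-/*
- * Mailbox convention, as used by poll_rx() and poll_tx() below: the
- * SAR appends indications at the write address (MWA) while the driver
- * consumes at the tail address (MTA). Both registers hold only the
- * low 16 bits, so the host keeps the upper address bits from
- * mbx_start[] and wraps back to mbx_start[] on reaching mbx_end[].
- */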
-static void poll_rx(struct atm_dev *dev,int mbx)
-{
- struct zatm_dev *zatm_dev;
- unsigned long pos;
- u32 x;
- int error;
-
- EVENT("poll_rx\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
- while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
- u32 *here;
- struct sk_buff *skb;
- struct atm_vcc *vcc;
- int cells,size,chan;
-
- EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
- here = (u32 *) pos;
- if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
- pos = zatm_dev->mbx_start[mbx];
- cells = here[0] & uPD98401_AAL5_SIZE;
-#if 0
-printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
-{
-unsigned long *x;
- printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
- zatm_dev->pool_base),
- zpeekl(zatm_dev,zatm_dev->pool_base+1));
- x = (unsigned long *) here[2];
- printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
- x[0],x[1],x[2],x[3]);
-}
-#endif
- error = 0;
- if (here[3] & uPD98401_AAL5_ERR) {
- error = (here[3] & uPD98401_AAL5_ES) >>
- uPD98401_AAL5_ES_SHIFT;
- if (error == uPD98401_AAL5_ES_DEACT ||
- error == uPD98401_AAL5_ES_FREE) continue;
- }
-EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
- uPD98401_AAL5_ES_SHIFT,error);
- skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
- __net_timestamp(skb);
-#if 0
-printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
- ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
- ((unsigned *) skb->data)[0]);
-#endif
- EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
- (unsigned long) here);
-#if 0
-printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
-#endif
- size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
- ATM_CELL_PAYLOAD/sizeof(u16)-3]);
- EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
- chan = (here[3] & uPD98401_AAL5_CHAN) >>
- uPD98401_AAL5_CHAN_SHIFT;
- if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
- int pos;
- vcc = zatm_dev->rx_map[chan];
- pos = ZATM_VCC(vcc)->pool;
- if (skb == zatm_dev->last_free[pos])
- zatm_dev->last_free[pos] = NULL;
- skb_unlink(skb, zatm_dev->pool + pos);
- }
- else {
- printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
- "for non-existing channel\n",dev->number);
- size = 0;
- vcc = NULL;
- event_dump();
- }
- if (error) {
- static unsigned long silence = 0;
- static int last_error = 0;
-
- if (error != last_error ||
- time_after(jiffies, silence) || silence == 0){
- printk(KERN_WARNING DEV_LABEL "(itf %d): "
- "chan %d error %s\n",dev->number,chan,
- err_txt[error]);
- last_error = error;
- silence = (jiffies+2*HZ)|1;
- }
- size = 0;
- }
- if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
- size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
- printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
- "cells\n",dev->number,size,cells);
- size = 0;
- event_dump();
- }
- if (size > ATM_MAX_AAL5_PDU) {
- printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
- "(%d)\n",dev->number,size);
- size = 0;
- event_dump();
- }
- if (!size) {
- dev_kfree_skb_irq(skb);
- if (vcc) atomic_inc(&vcc->stats->rx_err);
- continue;
- }
- if (!atm_charge(vcc,skb->truesize)) {
- dev_kfree_skb_irq(skb);
- continue;
- }
- skb->len = size;
- ATM_SKB(skb)->vcc = vcc;
- vcc->push(vcc,skb);
- atomic_inc(&vcc->stats->rx);
- }
- zout(pos & 0xffff,MTA(mbx));
-#if 0 /* probably a stupid idea */
- refill_pool(dev,zatm_vcc->pool);
- /* maybe this saves us a few interrupts */
-#endif
-}
-
-
-static int open_rx_first(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- unsigned short chan;
- int cells;
-
- DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053));
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- zatm_vcc->rx_chan = 0;
- if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
- if (vcc->qos.aal == ATM_AAL5) {
- if (vcc->qos.rxtp.max_sdu > 65464)
- vcc->qos.rxtp.max_sdu = 65464;
- /* fix this - we may want to receive 64kB SDUs
- later */
- cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
- ATM_CELL_PAYLOAD);
- zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
- }
- else {
- cells = 1;
- zatm_vcc->pool = ZATM_AAL0_POOL;
- }
- if (zatm_vcc->pool < 0) return -EMSGSIZE;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_OPEN_CHAN,CMR);
- zwait();
- DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
- chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- DPRINTK("chan is %d\n",chan);
- if (!chan) return -EAGAIN;
- use_pool(vcc->dev,zatm_vcc->pool);
- DPRINTK("pool %d\n",zatm_vcc->pool);
- /* set up VC descriptor */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT,
- chan*VC_SIZE/4);
- zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ?
- uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1);
- zpokel(zatm_dev,0,chan*VC_SIZE/4+2);
- zatm_vcc->rx_chan = chan;
- zatm_dev->rx_map[chan] = vcc;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
-}
-
-
-static int open_rx_second(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int pos,shift;
-
- DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053));
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- if (!zatm_vcc->rx_chan) return 0;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- /* should also handle VPI @@@ */
- pos = vcc->vci >> 1;
- shift = (1-(vcc->vci & 1)) << 4;
- zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) |
- ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
-}
-
-
-static void close_rx(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int pos,shift;
-
- zatm_vcc = ZATM_VCC(vcc);
- zatm_dev = ZATM_DEV(vcc->dev);
- if (!zatm_vcc->rx_chan) return;
- DPRINTK("close_rx\n");
- /* disable receiver */
- if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) {
- spin_lock_irqsave(&zatm_dev->lock, flags);
- pos = vcc->vci >> 1;
- shift = (1-(vcc->vci & 1)) << 4;
- zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
- zwait();
- zout(uPD98401_NOP,CMR);
- zwait();
- zout(uPD98401_NOP,CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- udelay(10); /* why oh why ... ? */
- zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- if (!(zin(CMR) & uPD98401_CHAN_ADDR))
- printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
- "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL;
- zatm_vcc->rx_chan = 0;
- unuse_pool(vcc->dev,zatm_vcc->pool);
-}
-
-
-static int start_rx(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- int i;
-
- DPRINTK("start_rx\n");
- zatm_dev = ZATM_DEV(dev);
- zatm_dev->rx_map = kcalloc(zatm_dev->chans,
- sizeof(*zatm_dev->rx_map),
- GFP_KERNEL);
- if (!zatm_dev->rx_map) return -ENOMEM;
- /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
- zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
- /* prepare free buffer pools */
- for (i = 0; i <= ZATM_LAST_POOL; i++) {
- zatm_dev->pool_info[i].ref_count = 0;
- zatm_dev->pool_info[i].rqa_count = 0;
- zatm_dev->pool_info[i].rqu_count = 0;
- zatm_dev->pool_info[i].low_water = LOW_MARK;
- zatm_dev->pool_info[i].high_water = HIGH_MARK;
- zatm_dev->pool_info[i].offset = 0;
- zatm_dev->pool_info[i].next_off = 0;
- zatm_dev->pool_info[i].next_cnt = 0;
- zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES;
- }
- return 0;
-}
-
-
-/*----------------------------------- TX ------------------------------------*/
-
-
-static int do_tx(struct sk_buff *skb)
-{
- struct atm_vcc *vcc;
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- u32 *dsc;
- unsigned long flags;
-
- EVENT("do_tx\n",0,0);
- DPRINTK("sending skb %p\n",skb);
- vcc = ATM_SKB(skb)->vcc;
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- if (!skb_shinfo(skb)->nr_frags) {
- if (zatm_vcc->txing == RING_ENTRIES-1) {
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return RING_BUSY;
- }
- zatm_vcc->txing++;
- dsc = zatm_vcc->ring+zatm_vcc->ring_curr;
- zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) &
- (RING_ENTRIES*RING_WORDS-1);
- dsc[1] = 0;
- dsc[2] = skb->len;
- dsc[3] = virt_to_bus(skb->data);
- mb();
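-		/* the barrier makes the descriptor body visible before the
-		 * Valid bit below hands it over to the SAR */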
- dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM
- | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0)
- | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
- uPD98401_CLPM_1 : uPD98401_CLPM_0);
- EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0);
- }
- else {
-printk("NONONONOO!!!!\n");
- dsc = NULL;
-#if 0
- u32 *put;
- int i;
-
- dsc = kmalloc(uPD98401_TXPD_SIZE * 2 +
- uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
- if (!dsc) {
- if (vcc->pop)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_irq(skb);
- return -EAGAIN;
- }
- /* @@@ should check alignment */
- put = dsc+8;
- dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP |
- (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 |
- (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
- uPD98401_CLPM_1 : uPD98401_CLPM_0));
- dsc[1] = 0;
- dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
- dsc[3] = virt_to_bus(put);
- for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
- *put++ = ((struct iovec *) skb->data)[i].iov_len;
- *put++ = virt_to_bus(((struct iovec *)
- skb->data)[i].iov_base);
- }
- put[-2] |= uPD98401_TXBD_LAST;
-#endif
- }
- ZATM_PRV_DSC(skb) = dsc;
- skb_queue_tail(&zatm_vcc->tx_queue,skb);
- DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
- uPD98401_TXVC_QRP));
- zwait();
- zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
- uPD98401_CHAN_ADDR_SHIFT),CMR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- EVENT("done\n",0,0);
- return 0;
-}
-
-
-static inline void dequeue_tx(struct atm_vcc *vcc)
-{
- struct zatm_vcc *zatm_vcc;
- struct sk_buff *skb;
-
- EVENT("dequeue_tx\n",0,0);
- zatm_vcc = ZATM_VCC(vcc);
- skb = skb_dequeue(&zatm_vcc->tx_queue);
- if (!skb) {
- printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not "
- "txing\n",vcc->dev->number);
- return;
- }
-#if 0 /* @@@ would fail on CLP */
-if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
- uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n",
- *ZATM_PRV_DSC(skb));
-#endif
- *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */
- zatm_vcc->txing--;
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb_irq(skb);
- while ((skb = skb_dequeue(&zatm_vcc->backlog)))
- if (do_tx(skb) == RING_BUSY) {
- skb_queue_head(&zatm_vcc->backlog,skb);
- break;
- }
- atomic_inc(&vcc->stats->tx);
- wake_up(&zatm_vcc->tx_wait);
-}
-
-
-static void poll_tx(struct atm_dev *dev,int mbx)
-{
- struct zatm_dev *zatm_dev;
- unsigned long pos;
- u32 x;
-
- EVENT("poll_tx\n",0,0);
- zatm_dev = ZATM_DEV(dev);
- pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
- while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
- int chan;
-
-#if 1
- u32 data,*addr;
-
- EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
- addr = (u32 *) pos;
- data = *addr;
- chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT;
- EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr,
- data);
- EVENT("chan = %d\n",chan,0);
-#else
-NO !
- chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN)
- >> uPD98401_TXI_CONN_SHIFT;
-#endif
- if (chan < zatm_dev->chans && zatm_dev->tx_map[chan])
- dequeue_tx(zatm_dev->tx_map[chan]);
- else {
- printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication "
- "for non-existing channel %d\n",dev->number,chan);
- event_dump();
- }
- if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx])
- pos = zatm_dev->mbx_start[mbx];
- }
- zout(pos & 0xffff,MTA(mbx));
-}
-
-
-/*
- * BUG BUG BUG: Doesn't handle "new-style" rate specification yet.
- */
-
-static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
- unsigned long i,m,c;
- int shaper;
-
- DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max);
- zatm_dev = ZATM_DEV(dev);
- if (!zatm_dev->free_shapers) return -EAGAIN;
- for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++);
- zatm_dev->free_shapers &= ~1 << shaper;
- if (ubr) {
- c = 5;
- i = m = 1;
- zatm_dev->ubr_ref_cnt++;
- zatm_dev->ubr = shaper;
- *pcr = 0;
- }
- else {
- if (min) {
- if (min <= 255) {
- i = min;
- m = ATM_OC3_PCR;
- }
- else {
- i = 255;
- m = ATM_OC3_PCR*255/min;
- }
- }
- else {
- if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw;
- if (max <= 255) {
- i = max;
- m = ATM_OC3_PCR;
- }
- else {
- i = 255;
- m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
- }
- }
- if (i > m) {
- printk(KERN_CRIT DEV_LABEL "shaper algorithm botched "
- "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m);
- m = i;
- }
- *pcr = i*ATM_OC3_PCR/m;
- c = 20; /* @@@ should use max_cdv ! */
- if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL;
- if (zatm_dev->tx_bw < *pcr) return -EAGAIN;
- zatm_dev->tx_bw -= *pcr;
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr);
- zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper));
- zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper));
- zpokel(zatm_dev,0,uPD98401_X(shaper));
- zpokel(zatm_dev,0,uPD98401_Y(shaper));
- zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper));
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return shaper;
-}
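-
-/*
- * Worked example for the i/m pair: the resulting rate is
- * *pcr = i*ATM_OC3_PCR/m with i <= 255. For max = 100000 cells/s,
- * max > 255 gives i = 255 and m = DIV_ROUND_UP(353207*255, 100000)
- * = 901, so *pcr = 255*353207/901 = 99964, just under the requested
- * ceiling.
- */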
-
-
-static void dealloc_shaper(struct atm_dev *dev,int shaper)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
-
- zatm_dev = ZATM_DEV(dev);
- if (shaper == zatm_dev->ubr) {
- if (--zatm_dev->ubr_ref_cnt) return;
- zatm_dev->ubr = -1;
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E,
- uPD98401_PS(shaper));
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->free_shapers |= 1 << shaper;
-}
-
-
-static void close_tx(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- int chan;
-
- zatm_vcc = ZATM_VCC(vcc);
- zatm_dev = ZATM_DEV(vcc->dev);
- chan = zatm_vcc->tx_chan;
- if (!chan) return;
- DPRINTK("close_tx\n");
- if (skb_peek(&zatm_vcc->backlog)) {
- printk("waiting for backlog to drain ...\n");
- event_dump();
- wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));
- }
- if (skb_peek(&zatm_vcc->tx_queue)) {
- printk("waiting for TX queue to drain ...\n");
- event_dump();
- wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue));
- }
- spin_lock_irqsave(&zatm_dev->lock, flags);
-#if 0
- zwait();
- zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
-#endif
- zwait();
- zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait();
- if (!(zin(CMR) & uPD98401_CHAN_ADDR))
- printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
- "%d\n",vcc->dev->number,chan);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_vcc->tx_chan = 0;
- zatm_dev->tx_map[chan] = NULL;
- if (zatm_vcc->shaper != zatm_dev->ubr) {
- zatm_dev->tx_bw += vcc->qos.txtp.min_pcr;
- dealloc_shaper(vcc->dev,zatm_vcc->shaper);
- }
- kfree(zatm_vcc->ring);
-}
-
-
-static int open_tx_first(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
- u32 *loop;
- unsigned short chan;
- int unlimited;
-
- DPRINTK("open_tx_first\n");
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- zatm_vcc->tx_chan = 0;
- if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait();
- zout(uPD98401_OPEN_CHAN,CMR);
- zwait();
- DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
- chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- DPRINTK("chan is %d\n",chan);
- if (!chan) return -EAGAIN;
- unlimited = vcc->qos.txtp.traffic_class == ATM_UBR &&
- (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR ||
- vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
- if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
- else {
- int pcr;
-
- if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
- if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
- vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
- < 0) {
- close_tx(vcc);
- return zatm_vcc->shaper;
- }
- if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR;
- vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr;
- }
- zatm_vcc->tx_chan = chan;
- skb_queue_head_init(&zatm_vcc->tx_queue);
- init_waitqueue_head(&zatm_vcc->tx_wait);
- /* initialize ring */
- zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
- if (!zatm_vcc->ring) return -ENOMEM;
- loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
- loop[0] = uPD98401_TXPD_V;
- loop[1] = loop[2] = 0;
- loop[3] = virt_to_bus(zatm_vcc->ring);
- zatm_vcc->ring_curr = 0;
- zatm_vcc->txing = 0;
- skb_queue_head_init(&zatm_vcc->backlog);
- zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring),
- chan*VC_SIZE/4+uPD98401_TXVC_QRP);
- return 0;
-}
-
-
-static int open_tx_second(struct atm_vcc *vcc)
-{
- struct zatm_dev *zatm_dev;
- struct zatm_vcc *zatm_vcc;
- unsigned long flags;
-
- DPRINTK("open_tx_second\n");
- zatm_dev = ZATM_DEV(vcc->dev);
- zatm_vcc = ZATM_VCC(vcc);
- if (!zatm_vcc->tx_chan) return 0;
- /* set up VC descriptor */
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4);
- zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper <<
- uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) |
- vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1);
- zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc;
- return 0;
-}
-
-
-static int start_tx(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- int i;
-
- DPRINTK("start_tx\n");
- zatm_dev = ZATM_DEV(dev);
- zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
- sizeof(*zatm_dev->tx_map),
- GFP_KERNEL);
- if (!zatm_dev->tx_map) return -ENOMEM;
- zatm_dev->tx_bw = ATM_OC3_PCR;
- zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
- zatm_dev->ubr = -1;
- zatm_dev->ubr_ref_cnt = 0;
- /* initialize shapers */
- for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i));
- return 0;
-}
-
-
-/*------------------------------- interrupts --------------------------------*/
-
-
-static irqreturn_t zatm_int(int irq,void *dev_id)
-{
- struct atm_dev *dev;
- struct zatm_dev *zatm_dev;
- u32 reason;
- int handled = 0;
-
- dev = dev_id;
- zatm_dev = ZATM_DEV(dev);
- while ((reason = zin(GSR))) {
- handled = 1;
- EVENT("reason 0x%x\n",reason,0);
- if (reason & uPD98401_INT_PI) {
- EVENT("PHY int\n",0,0);
- dev->phy->interrupt(dev);
- }
- if (reason & uPD98401_INT_RQA) {
- unsigned long pools;
- int i;
-
- pools = zin(RQA);
- EVENT("RQA (0x%08x)\n",pools,0);
- for (i = 0; pools; i++) {
- if (pools & 1) {
- refill_pool(dev,i);
- zatm_dev->pool_info[i].rqa_count++;
- }
- pools >>= 1;
- }
- }
- if (reason & uPD98401_INT_RQU) {
- unsigned long pools;
- int i;
- pools = zin(RQU);
- printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n",
- dev->number,pools);
- event_dump();
- for (i = 0; pools; i++) {
- if (pools & 1) {
- refill_pool(dev,i);
- zatm_dev->pool_info[i].rqu_count++;
- }
- pools >>= 1;
- }
- }
- /* don't handle RD */
- if (reason & uPD98401_INT_SPE)
- printk(KERN_ALERT DEV_LABEL "(itf %d): system parity "
- "error at 0x%08x\n",dev->number,zin(ADDR));
- if (reason & uPD98401_INT_CPE)
- printk(KERN_ALERT DEV_LABEL "(itf %d): control memory "
- "parity error at 0x%08x\n",dev->number,zin(ADDR));
- if (reason & uPD98401_INT_SBE) {
- printk(KERN_ALERT DEV_LABEL "(itf %d): system bus "
- "error at 0x%08x\n",dev->number,zin(ADDR));
- event_dump();
- }
- /* don't handle IND */
- if (reason & uPD98401_INT_MF) {
- printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full "
- "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF)
- >> uPD98401_INT_MF_SHIFT);
- event_dump();
- /* @@@ should try to recover */
- }
- if (reason & uPD98401_INT_MM) {
- if (reason & 1) poll_rx(dev,0);
- if (reason & 2) poll_rx(dev,1);
- if (reason & 4) poll_tx(dev,2);
- if (reason & 8) poll_tx(dev,3);
- }
- /* @@@ handle RCRn */
- }
- return IRQ_RETVAL(handled);
-}
-
-
-/*----------------------------- (E)EPROM access -----------------------------*/
-
-
-static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
- unsigned short cmd)
-{
- int error;
-
- if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value)))
- printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n",
- error);
-}
-
-
-static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
-{
- unsigned int value;
- int error;
-
- if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value)))
- printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n",
- error);
- return value;
-}
-
-
-static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
- int bits, unsigned short cmd)
-{
- unsigned long value;
- int i;
-
- for (i = bits-1; i >= 0; i--) {
- value = ZEPROM_CS | (((data >> i) & 1) ? ZEPROM_DI : 0);
- eprom_set(zatm_dev,value,cmd);
- eprom_set(zatm_dev,value | ZEPROM_SK,cmd);
- eprom_set(zatm_dev,value,cmd);
- }
-}
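-
-/*
- * The EPROM hangs off a PCI config-space register and is driven as a
- * three-wire serial device: each bit is presented on ZEPROM_DI with
- * chip select (ZEPROM_CS) held, then latched by pulsing the clock
- * (ZEPROM_SK) high and low again; reads sample ZEPROM_DO after the
- * clock pulse, as eprom_get_byte() below shows.
- */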
-
-
-static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
- unsigned short cmd)
-{
- int i;
-
- *byte = 0;
- for (i = 8; i; i--) {
- eprom_set(zatm_dev,ZEPROM_CS,cmd);
- eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd);
- *byte <<= 1;
- if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1;
- eprom_set(zatm_dev,ZEPROM_CS,cmd);
- }
-}
-
-
-static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
- int swap)
-{
- unsigned char buf[ZEPROM_SIZE];
- struct zatm_dev *zatm_dev;
- int i;
-
- zatm_dev = ZATM_DEV(dev);
- for (i = 0; i < ZEPROM_SIZE; i += 2) {
- eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */
- eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd);
- eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd);
- eprom_get_byte(zatm_dev,buf+i+swap,cmd);
- eprom_get_byte(zatm_dev,buf+i+1-swap,cmd);
- eprom_set(zatm_dev,0,cmd); /* deselect EPROM */
- }
- memcpy(dev->esi,buf+offset,ESI_LEN);
- return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */
-}
-
-
-static void eprom_get_esi(struct atm_dev *dev)
-{
- if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
- (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
-}
-
-
-/*--------------------------------- entries ---------------------------------*/
-
-
-static int zatm_init(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev;
- struct pci_dev *pci_dev;
- unsigned short command;
- int error,i,last;
- unsigned long t0,t1,t2;
-
- DPRINTK(">zatm_init\n");
- zatm_dev = ZATM_DEV(dev);
- spin_lock_init(&zatm_dev->lock);
- pci_dev = zatm_dev->pci_dev;
- zatm_dev->base = pci_resource_start(pci_dev, 0);
- zatm_dev->irq = pci_dev->irq;
- if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
- printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
- dev->number,error);
- return -EINVAL;
- }
- if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
- command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) {
- printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)"
- "\n",dev->number,error);
- return -EIO;
- }
- eprom_get_esi(dev);
- printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
- dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
- /* reset uPD98401 */
- zout(0,SWR);
- while (!(zin(GSR) & uPD98401_INT_IND));
- zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR);
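-	/* probe control-memory size: from the top down, check that each
-	 * RAM_INCREMENT step holds 0x55/0xAA test patterns, then store
-	 * each word's own address and scan upward for the first mismatch */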
- last = MAX_CRAM_SIZE;
- for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) {
- zpokel(zatm_dev,0x55555555,i);
- if (zpeekl(zatm_dev,i) != 0x55555555) last = i;
- else {
- zpokel(zatm_dev,0xAAAAAAAA,i);
- if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i;
- else zpokel(zatm_dev,i,i);
- }
- }
- for (i = 0; i < last; i += RAM_INCREMENT)
- if (zpeekl(zatm_dev,i) != i) break;
- zatm_dev->mem = i << 2;
- while (i) zpokel(zatm_dev,0,--i);
- /* reset again to rebuild memory pointers */
- zout(0,SWR);
- while (!(zin(GSR) & uPD98401_INT_IND));
- zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 |
- uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR);
- /* TODO: should shrink allocation now */
- printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" :
- "MMF");
- for (i = 0; i < ESI_LEN; i++)
- printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? ")\n" : "-");
- do {
- unsigned long flags;
-
- spin_lock_irqsave(&zatm_dev->lock, flags);
- t0 = zpeekl(zatm_dev,uPD98401_TSR);
- udelay(10);
- t1 = zpeekl(zatm_dev,uPD98401_TSR);
- udelay(1010);
- t2 = zpeekl(zatm_dev,uPD98401_TSR);
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- }
- while (t0 > t1 || t1 > t2); /* loop if wrapping ... */
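-	/* t1-t0 counts ticks over ~10 us plus the fixed cost of the two
-	 * zpeekl() reads; t2-t1 covers ~1010 us plus the same fixed cost.
-	 * The difference t2-2*t1+t0 is thus the tick count per 1000 us,
-	 * i.e. the timer clock in kHz. */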
- zatm_dev->khz = t2-2*t1+t0;
- printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d "
- "MHz\n",dev->number,
- (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT,
- zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000);
- return uPD98402_init(dev);
-}
-
-
-static int zatm_start(struct atm_dev *dev)
-{
- struct zatm_dev *zatm_dev = ZATM_DEV(dev);
- struct pci_dev *pdev = zatm_dev->pci_dev;
- unsigned long curr;
- int pools,vccs,rx;
- int error, i, ld;
-
- DPRINTK("zatm_start\n");
- zatm_dev->rx_map = zatm_dev->tx_map = NULL;
- for (i = 0; i < NR_MBX; i++)
- zatm_dev->mbx_start[i] = 0;
- error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev);
- if (error < 0) {
- printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
- dev->number,zatm_dev->irq);
- goto done;
- }
- /* define memory regions */
- pools = NR_POOLS;
- if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
- pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE;
- vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/
- (2*VC_SIZE+RX_SIZE);
- ld = -1;
- for (rx = 1; rx < vccs; rx <<= 1) ld++;
- dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */
- dev->ci_range.vci_bits = ld;
- dev->link_rate = ATM_OC3_PCR;
- zatm_dev->chans = vccs; /* ??? */
- curr = rx*RX_SIZE/4;
- DPRINTK("RX pool 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */
- zatm_dev->pool_base = curr;
- curr += pools*POOL_SIZE/4;
- DPRINTK("Shapers 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */
- curr += NR_SHAPERS*SHAPER_SIZE/4;
- DPRINTK("Free 0x%08lx\n",curr);
- zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */
- printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, "
- "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
- (zatm_dev->mem-curr*4)/VC_SIZE);
- /* create mailboxes */
- for (i = 0; i < NR_MBX; i++) {
- void *mbx;
- dma_addr_t mbx_dma;
-
- if (!mbx_entries[i])
- continue;
- mbx = dma_alloc_coherent(&pdev->dev,
- 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
- if (!mbx) {
- error = -ENOMEM;
- goto out;
- }
- /*
- * Alignment provided by dma_alloc_coherent() isn't enough
- * for this device.
- */
- if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
- printk(KERN_ERR DEV_LABEL "(itf %d): system "
- "bus incompatible with driver\n", dev->number);
- dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
- error = -ENODEV;
- goto out;
- }
- DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
- zatm_dev->mbx_start[i] = (unsigned long)mbx;
- zatm_dev->mbx_dma[i] = mbx_dma;
- zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
- 0xffff;
- zout(mbx_dma >> 16, MSH(i));
- zout(mbx_dma, MSL(i));
- zout(zatm_dev->mbx_end[i], MBA(i));
- zout((unsigned long)mbx & 0xffff, MTA(i));
- zout((unsigned long)mbx & 0xffff, MWA(i));
- }
- error = start_tx(dev);
- if (error)
- goto out;
- error = start_rx(dev);
- if (error)
- goto out_tx;
- error = dev->phy->start(dev);
- if (error)
- goto out_rx;
- zout(0xffffffff,IMR); /* enable interrupts */
- /* enable TX & RX */
- zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
-done:
- return error;
-
-out_rx:
- kfree(zatm_dev->rx_map);
-out_tx:
- kfree(zatm_dev->tx_map);
-out:
- while (i-- > 0) {
- dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
- (void *)zatm_dev->mbx_start[i],
- zatm_dev->mbx_dma[i]);
- }
- free_irq(zatm_dev->irq, dev);
- goto done;
-}
-
-
-static void zatm_close(struct atm_vcc *vcc)
-{
- DPRINTK(">zatm_close\n");
- if (!ZATM_VCC(vcc)) return;
- clear_bit(ATM_VF_READY,&vcc->flags);
- close_rx(vcc);
- EVENT("close_tx\n",0,0);
- close_tx(vcc);
- DPRINTK("zatm_close: done waiting\n");
- /* deallocate memory */
- kfree(ZATM_VCC(vcc));
- vcc->dev_data = NULL;
- clear_bit(ATM_VF_ADDR,&vcc->flags);
-}
-
-
-static int zatm_open(struct atm_vcc *vcc)
-{
- struct zatm_vcc *zatm_vcc;
- short vpi = vcc->vpi;
- int vci = vcc->vci;
- int error;
-
- DPRINTK(">zatm_open\n");
- if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
- vcc->dev_data = NULL;
- if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
- set_bit(ATM_VF_ADDR,&vcc->flags);
- if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */
- DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
- vcc->vci);
- if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
- zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
- if (!zatm_vcc) {
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -ENOMEM;
- }
- vcc->dev_data = zatm_vcc;
- ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */
- if ((error = open_rx_first(vcc))) {
- zatm_close(vcc);
- return error;
- }
- if ((error = open_tx_first(vcc))) {
- zatm_close(vcc);
- return error;
- }
- }
- if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) return 0;
- if ((error = open_rx_second(vcc))) {
- zatm_close(vcc);
- return error;
- }
- if ((error = open_tx_second(vcc))) {
- zatm_close(vcc);
- return error;
- }
- set_bit(ATM_VF_READY,&vcc->flags);
- return 0;
-}
-
-
-static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags)
-{
- printk("Not yet implemented\n");
- return -ENOSYS;
- /* @@@ */
-}
-
-
-static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-{
- struct zatm_dev *zatm_dev;
- unsigned long flags;
-
- zatm_dev = ZATM_DEV(dev);
- switch (cmd) {
- case ZATM_GETPOOLZ:
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- fallthrough;
- case ZATM_GETPOOL:
- {
- struct zatm_pool_info info;
- int pool;
-
- if (get_user(pool,
- &((struct zatm_pool_req __user *) arg)->pool_num))
- return -EFAULT;
- if (pool < 0 || pool > ZATM_LAST_POOL)
- return -EINVAL;
- pool = array_index_nospec(pool,
- ZATM_LAST_POOL + 1);
- spin_lock_irqsave(&zatm_dev->lock, flags);
- info = zatm_dev->pool_info[pool];
- if (cmd == ZATM_GETPOOLZ) {
- zatm_dev->pool_info[pool].rqa_count = 0;
- zatm_dev->pool_info[pool].rqu_count = 0;
- }
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return copy_to_user(
- &((struct zatm_pool_req __user *) arg)->info,
- &info,sizeof(info)) ? -EFAULT : 0;
- }
- case ZATM_SETPOOL:
- {
- struct zatm_pool_info info;
- int pool;
-
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (get_user(pool,
- &((struct zatm_pool_req __user *) arg)->pool_num))
- return -EFAULT;
- if (pool < 0 || pool > ZATM_LAST_POOL)
- return -EINVAL;
- pool = array_index_nospec(pool,
- ZATM_LAST_POOL + 1);
- if (copy_from_user(&info,
- &((struct zatm_pool_req __user *) arg)->info,
- sizeof(info))) return -EFAULT;
- if (!info.low_water)
- info.low_water = zatm_dev->
- pool_info[pool].low_water;
- if (!info.high_water)
- info.high_water = zatm_dev->
- pool_info[pool].high_water;
- if (!info.next_thres)
- info.next_thres = zatm_dev->
- pool_info[pool].next_thres;
- if (info.low_water >= info.high_water ||
- info.low_water < 0)
- return -EINVAL;
- spin_lock_irqsave(&zatm_dev->lock, flags);
- zatm_dev->pool_info[pool].low_water =
- info.low_water;
- zatm_dev->pool_info[pool].high_water =
- info.high_water;
- zatm_dev->pool_info[pool].next_thres =
- info.next_thres;
- spin_unlock_irqrestore(&zatm_dev->lock, flags);
- return 0;
- }
- default:
- if (!dev->phy->ioctl) return -ENOIOCTLCMD;
- return dev->phy->ioctl(dev,cmd,arg);
- }
-}
-
-static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
-{
- int error;
-
- EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0);
- if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) {
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb(skb);
- return -EINVAL;
- }
- if (!skb) {
- printk(KERN_CRIT "!skb in zatm_send ?\n");
- if (vcc->pop) vcc->pop(vcc,skb);
- return -EINVAL;
- }
- ATM_SKB(skb)->vcc = vcc;
- error = do_tx(skb);
- if (error != RING_BUSY) return error;
- skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
- return 0;
-}
-
-
-static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
- unsigned long addr)
-{
- struct zatm_dev *zatm_dev;
-
- zatm_dev = ZATM_DEV(dev);
- zwait();
- zout(value,CER);
- zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
- (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
-}
-
-
-static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
-{
- struct zatm_dev *zatm_dev;
-
- zatm_dev = ZATM_DEV(dev);
- zwait();
- zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
- (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait();
- return zin(CER) & 0xff;
-}
-
-
-static const struct atmdev_ops ops = {
- .open = zatm_open,
- .close = zatm_close,
- .ioctl = zatm_ioctl,
- .send = zatm_send,
- .phy_put = zatm_phy_put,
- .phy_get = zatm_phy_get,
- .change_qos = zatm_change_qos,
-};
-
-static int zatm_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
-{
- struct atm_dev *dev;
- struct zatm_dev *zatm_dev;
- int ret = -ENOMEM;
-
- zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL);
- if (!zatm_dev) {
- printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL);
- goto out;
- }
-
- dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL);
- if (!dev)
- goto out_free;
-
- ret = pci_enable_device(pci_dev);
- if (ret < 0)
- goto out_deregister;
-
- ret = pci_request_regions(pci_dev, DEV_LABEL);
- if (ret < 0)
- goto out_disable;
-
- ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
- if (ret < 0)
- goto out_release;
-
- zatm_dev->pci_dev = pci_dev;
- dev->dev_data = zatm_dev;
- zatm_dev->copper = (int)ent->driver_data;
- if ((ret = zatm_init(dev)) || (ret = zatm_start(dev)))
- goto out_release;
-
- pci_set_drvdata(pci_dev, dev);
- zatm_dev->more = zatm_boards;
- zatm_boards = dev;
- ret = 0;
-out:
- return ret;
-
-out_release:
- pci_release_regions(pci_dev);
-out_disable:
- pci_disable_device(pci_dev);
-out_deregister:
- atm_dev_deregister(dev);
-out_free:
- kfree(zatm_dev);
- goto out;
-}
-
-
-MODULE_LICENSE("GPL");
-
-static const struct pci_device_id zatm_pci_tbl[] = {
- { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
- { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, zatm_pci_tbl);
-
-static struct pci_driver zatm_driver = {
- .name = DEV_LABEL,
- .id_table = zatm_pci_tbl,
- .probe = zatm_init_one,
-};
-
-static int __init zatm_init_module(void)
-{
- return pci_register_driver(&zatm_driver);
-}
-
-module_init(zatm_init_module);
-/* module_exit not defined so not unloadable */
diff --git a/drivers/atm/zatm.h b/drivers/atm/zatm.h
deleted file mode 100644
index 8204369fe825..000000000000
--- a/drivers/atm/zatm.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* drivers/atm/zatm.h - ZeitNet ZN122x device driver declarations */
-
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#ifndef DRIVER_ATM_ZATM_H
-#define DRIVER_ATM_ZATM_H
-
-#include <linux/skbuff.h>
-#include <linux/atm.h>
-#include <linux/atmdev.h>
-#include <linux/sonet.h>
-#include <linux/pci.h>
-
-
-#define DEV_LABEL "zatm"
-
-#define MAX_AAL5_PDU 10240 /* allocate for AAL5 PDUs of this size */
-#define MAX_RX_SIZE_LD 14 /* ceil(log2((MAX_AAL5_PDU+47)/48)) */
-
-#define LOW_MARK 12 /* start adding new buffers if less than 12 */
-#define HIGH_MARK 30 /* stop adding buffers after reaching 30 */
-#define OFF_CNG_THRES 5 /* threshold for offset changes */
-
-#define RX_SIZE 2 /* RX lookup entry size (in bytes) */
-#define NR_POOLS 32 /* number of free buffer pointers */
-#define POOL_SIZE 8 /* buffer entry size (in bytes) */
-#define NR_SHAPERS 16 /* number of shapers */
-#define SHAPER_SIZE 4 /* shaper entry size (in bytes) */
-#define VC_SIZE 32 /* VC dsc (TX or RX) size (in bytes) */
-
-#define RING_ENTRIES 32 /* ring entries (without back pointer) */
-#define RING_WORDS 4 /* ring element size */
-#define RING_SIZE (sizeof(unsigned long)*(RING_ENTRIES+1)*RING_WORDS)
-
-#define NR_MBX 4 /* four mailboxes */
-#define MBX_RX_0 0 /* mailbox indices */
-#define MBX_RX_1 1
-#define MBX_TX_0 2
-#define MBX_TX_1 3
-
-struct zatm_vcc {
- /*-------------------------------- RX part */
- int rx_chan; /* RX channel, 0 if none */
- int pool; /* free buffer pool */
- /*-------------------------------- TX part */
- int tx_chan; /* TX channel, 0 if none */
- int shaper; /* shaper, <0 if none */
- struct sk_buff_head tx_queue; /* list of buffers in transit */
- wait_queue_head_t tx_wait; /* for close */
- u32 *ring; /* transmit ring */
- int ring_curr; /* current write position */
- int txing; /* number of transmits in progress */
- struct sk_buff_head backlog; /* list of buffers waiting for ring */
-};
-
-struct zatm_dev {
- /*-------------------------------- TX part */
- int tx_bw; /* remaining bandwidth */
- u32 free_shapers; /* bit set */
- int ubr; /* UBR shaper; -1 if none */
- int ubr_ref_cnt; /* number of VCs using UBR shaper */
- /*-------------------------------- RX part */
- int pool_ref[NR_POOLS]; /* free buffer pool usage counters */
- volatile struct sk_buff *last_free[NR_POOLS];
- /* last entry in respective pool */
- struct sk_buff_head pool[NR_POOLS];/* free buffer pools */
- struct zatm_pool_info pool_info[NR_POOLS]; /* pool information */
- /*-------------------------------- maps */
- struct atm_vcc **tx_map; /* TX VCCs */
- struct atm_vcc **rx_map; /* RX VCCs */
- int chans; /* map size, must be 2^n */
- /*-------------------------------- mailboxes */
- unsigned long mbx_start[NR_MBX];/* start addresses */
- dma_addr_t mbx_dma[NR_MBX];
- u16 mbx_end[NR_MBX]; /* end offset (in bytes) */
- /*-------------------------------- other pointers */
- u32 pool_base; /* Free buffer pool dsc (word addr) */
- /*-------------------------------- ZATM links */
- struct atm_dev *more; /* other ZATM devices */
- /*-------------------------------- general information */
- int mem; /* RAM on board (in bytes) */
- int khz; /* timer clock */
- int copper; /* PHY type */
- unsigned char irq; /* IRQ */
- unsigned int base; /* IO base address */
- struct pci_dev *pci_dev; /* PCI stuff */
- spinlock_t lock;
-};
-
-
-#define ZATM_DEV(d) ((struct zatm_dev *) (d)->dev_data)
-#define ZATM_VCC(d) ((struct zatm_vcc *) (d)->dev_data)
-
-
-struct zatm_skb_prv {
- struct atm_skb_data _; /* reserved */
- u32 *dsc; /* pointer to skb's descriptor */
-};
-
-#define ZATM_PRV_DSC(skb) (((struct zatm_skb_prv *) (skb)->cb)->dsc)
-
-#endif
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a311df07b1bd..4deb60a3b43f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2613,7 +2613,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_counters);
SET_DEVICE_OP(dev_ops, create_cq);
SET_DEVICE_OP(dev_ops, create_flow);
- SET_DEVICE_OP(dev_ops, create_flow_action_esp);
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
@@ -2676,7 +2675,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
- SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
SET_DEVICE_OP(dev_ops, modify_hw_stat);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index d42ed7ff223e..0ddcf6da66c4 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -46,385 +46,6 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
return action->device->ops.destroy_flow_action(action);
}
-static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
- u32 flags, bool is_modify)
-{
- u64 verbs_flags = flags;
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN))
- verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED;
-
- if (is_modify && uverbs_attr_is_valid(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS))
- verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS;
-
- return verbs_flags;
-};
-
-static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat)
-{
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm =
- &keymat->keymat.aes_gcm;
-
- if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
- return -EOPNOTSUPP;
-
- if (aes_gcm->key_len != 32 &&
- aes_gcm->key_len != 24 &&
- aes_gcm->key_len != 16)
- return -EINVAL;
-
- if (aes_gcm->icv_len != 16 &&
- aes_gcm->icv_len != 8 &&
- aes_gcm->icv_len != 12)
- return -EINVAL;
-
- return 0;
-}
-
-static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = {
- [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm,
-};
-
-static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify)
-{
- /* This is used in order to modify an esp flow action with an enabled
- * replay protection to a disabled one. This is only supported via
- * modify, as in create verb we can simply drop the REPLAY attribute and
- * achieve the same thing.
- */
- return is_modify ? 0 : -EINVAL;
-}
-
-static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify)
-{
- /* Some replay protections could always be enabled without validating
- * anything.
- */
- return 0;
-}
-
-static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay,
- bool is_modify) = {
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none,
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok,
-};
-
-static int parse_esp_ip(enum ib_flow_spec_type proto,
- const void __user *val_ptr,
- size_t len, union ib_flow_spec *out)
-{
- int ret;
- const struct ib_uverbs_flow_ipv4_filter ipv4 = {
- .src_ip = cpu_to_be32(0xffffffffUL),
- .dst_ip = cpu_to_be32(0xffffffffUL),
- .proto = 0xff,
- .tos = 0xff,
- .ttl = 0xff,
- .flags = 0xff,
- };
- const struct ib_uverbs_flow_ipv6_filter ipv6 = {
- .src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .flow_label = cpu_to_be32(0xffffffffUL),
- .next_hdr = 0xff,
- .traffic_class = 0xff,
- .hop_limit = 0xff,
- };
- union {
- struct ib_uverbs_flow_ipv4_filter ipv4;
- struct ib_uverbs_flow_ipv6_filter ipv6;
- } user_val = {};
- const void *user_pmask;
- size_t val_len;
-
- /* If the flow IPv4/IPv6 flow specifications are extended, the mask
- * should be changed as well.
- */
- BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) +
- sizeof(ipv4.flags) != sizeof(ipv4));
- BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) +
- sizeof(ipv6.reserved) != sizeof(ipv6));
-
- switch (proto) {
- case IB_FLOW_SPEC_IPV4:
- if (len > sizeof(user_val.ipv4) &&
- !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4),
- len - sizeof(user_val.ipv4)))
- return -EOPNOTSUPP;
-
- val_len = min_t(size_t, len, sizeof(user_val.ipv4));
- ret = copy_from_user(&user_val.ipv4, val_ptr,
- val_len);
- if (ret)
- return -EFAULT;
-
- user_pmask = &ipv4;
- break;
- case IB_FLOW_SPEC_IPV6:
- if (len > sizeof(user_val.ipv6) &&
- !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6),
- len - sizeof(user_val.ipv6)))
- return -EOPNOTSUPP;
-
- val_len = min_t(size_t, len, sizeof(user_val.ipv6));
- ret = copy_from_user(&user_val.ipv6, val_ptr,
- val_len);
- if (ret)
- return -EFAULT;
-
- user_pmask = &ipv6;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask,
- &user_val,
- val_len, out);
-}
-
-static int flow_action_esp_get_encap(struct ib_flow_spec_list *out,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uverbs_flow_action_esp_encap uverbs_encap;
- int ret;
-
- ret = uverbs_copy_from(&uverbs_encap, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP);
- if (ret)
- return ret;
-
- /* We currently support only one encap */
- if (uverbs_encap.next_ptr)
- return -EOPNOTSUPP;
-
- if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 &&
- uverbs_encap.type != IB_FLOW_SPEC_IPV6)
- return -EOPNOTSUPP;
-
- return parse_esp_ip(uverbs_encap.type,
- u64_to_user_ptr(uverbs_encap.val_ptr),
- uverbs_encap.len,
- &out->spec);
-}
-
-struct ib_flow_action_esp_attr {
- struct ib_flow_action_attrs_esp hdr;
- struct ib_flow_action_attrs_esp_keymats keymat;
- struct ib_flow_action_attrs_esp_replays replay;
- /* We currently support only one spec */
- struct ib_flow_spec_list encap;
-};
-
-#define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW
-static int parse_flow_action_esp(struct ib_device *ib_dev,
- struct uverbs_attr_bundle *attrs,
- struct ib_flow_action_esp_attr *esp_attr,
- bool is_modify)
-{
- struct ib_uverbs_flow_action_esp uverbs_esp = {};
- int ret;
-
- /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
- ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ESN);
- if (IS_UVERBS_COPY_ERR(ret))
- return ret;
-
- /* This can be called from FLOW_ACTION_ESP_MODIFY where
- * UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional
- */
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) {
- ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS);
- if (ret)
- return ret;
-
- if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1))
- return -EOPNOTSUPP;
-
- esp_attr->hdr.spi = uverbs_esp.spi;
- esp_attr->hdr.seq = uverbs_esp.seq;
- esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad;
- esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts;
- }
- esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags,
- is_modify);
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) {
- esp_attr->keymat.protocol =
- uverbs_attr_get_enum_id(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
- ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat,
- attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT);
- if (ret)
- return ret;
-
- ret = flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat);
- if (ret)
- return ret;
-
- esp_attr->hdr.keymat = &esp_attr->keymat;
- }
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) {
- esp_attr->replay.protocol =
- uverbs_attr_get_enum_id(attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
-
- ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay,
- attrs,
- UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY);
- if (ret)
- return ret;
-
- ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay,
- is_modify);
- if (ret)
- return ret;
-
- esp_attr->hdr.replay = &esp_attr->replay;
- }
-
- if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) {
- ret = flow_action_esp_get_encap(&esp_attr->encap, attrs);
- if (ret)
- return ret;
-
- esp_attr->hdr.encap = &esp_attr->encap;
- }
-
- return 0;
-}
-
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj = uverbs_attr_get_uobject(
- attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE);
- struct ib_device *ib_dev = attrs->context->device;
- int ret;
- struct ib_flow_action *action;
- struct ib_flow_action_esp_attr esp_attr = {};
-
- if (!ib_dev->ops.create_flow_action_esp)
- return -EOPNOTSUPP;
-
- ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
- if (ret)
- return ret;
-
- /* No need to check as this attribute is marked as MANDATORY */
- action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
- attrs);
- if (IS_ERR(action))
- return PTR_ERR(action);
-
- uverbs_flow_action_fill_action(action, uobj, ib_dev,
- IB_FLOW_ACTION_ESP);
-
- return 0;
-}
-
-static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_uobject *uobj = uverbs_attr_get_uobject(
- attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE);
- struct ib_flow_action *action = uobj->object;
- int ret;
- struct ib_flow_action_esp_attr esp_attr = {};
-
- if (!action->device->ops.modify_flow_action_esp)
- return -EOPNOTSUPP;
-
- ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
- if (ret)
- return ret;
-
- if (action->type != IB_FLOW_ACTION_ESP)
- return -EINVAL;
-
- return action->device->ops.modify_flow_action_esp(action,
- &esp_attr.hdr,
- attrs);
-}
-
-static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
- [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm,
- aes_key),
- },
-};
-
-static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_NO_DATA(),
- },
- [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
- .type = UVERBS_ATTR_TYPE_PTR_IN,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp,
- size),
- },
-};
-
-DECLARE_UVERBS_NAMED_METHOD(
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_NEW,
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
- hard_limit_pkts),
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
- UVERBS_ATTR_TYPE(__u32),
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_MANDATORY),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay,
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
- UA_OPTIONAL));
-
-DECLARE_UVERBS_NAMED_METHOD(
- UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
- UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_ACCESS_WRITE,
- UA_MANDATORY),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
- UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp,
- hard_limit_pkts),
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
- UVERBS_ATTR_TYPE(__u32),
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
- uverbs_flow_action_esp_keymat,
- UA_OPTIONAL),
- UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
- uverbs_flow_action_esp_replay,
- UA_OPTIONAL),
- UVERBS_ATTR_PTR_IN(
- UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
- UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap),
- UA_OPTIONAL));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_FLOW_ACTION_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
@@ -435,9 +56,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
DECLARE_UVERBS_NAMED_OBJECT(
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY),
- &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY));
+ &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY));
const struct uapi_definition uverbs_def_obj_flow_action[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 661ed2b44508..9c2886bc72cb 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -15,7 +15,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
-#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
#include <net/inet_ecn.h>
#include "mlx5_ib.h"
@@ -148,16 +147,6 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
{
switch (maction->ib_action.type) {
- case IB_FLOW_ACTION_ESP:
- if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
- return -EINVAL;
- /* Currently only AES_GCM keymat is supported by the driver */
- action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
- action->action |= is_egress ?
- MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
- return 0;
case IB_FLOW_ACTION_UNSPECIFIED:
if (maction->flow_action_raw.sub_type ==
MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
@@ -368,14 +357,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev,
ib_spec->type & IB_FLOW_SPEC_INNER);
break;
case IB_FLOW_SPEC_ESP:
- if (ib_spec->esp.mask.seq)
- return -EOPNOTSUPP;
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
- ntohl(ib_spec->esp.mask.spi));
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
- ntohl(ib_spec->esp.val.spi));
- break;
+ return -EOPNOTSUPP;
case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD))
@@ -587,47 +569,6 @@ static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
return false;
}
-enum valid_spec {
- VALID_SPEC_INVALID,
- VALID_SPEC_VALID,
- VALID_SPEC_NA,
-};
-
-static enum valid_spec
-is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
- const struct mlx5_flow_spec *spec,
- const struct mlx5_flow_act *flow_act,
- bool egress)
-{
- const u32 *match_c = spec->match_criteria;
- bool is_crypto =
- (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
- bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
- bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
-
- /*
- * Currently only crypto is supported in egress, when regular egress
- * rules would be supported, always return VALID_SPEC_NA.
- */
- if (!is_crypto)
- return VALID_SPEC_NA;
-
- return is_crypto && is_ipsec &&
- (!egress || (!is_drop &&
- !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
- VALID_SPEC_VALID : VALID_SPEC_INVALID;
-}
-
-static bool is_valid_spec(struct mlx5_core_dev *mdev,
- const struct mlx5_flow_spec *spec,
- const struct mlx5_flow_act *flow_act,
- bool egress)
-{
- /* We curretly only support ipsec egress flow */
- return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
-}
-
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
const struct ib_flow_attr *flow_attr,
bool check_inner)
@@ -1154,8 +1095,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
- if (is_egress &&
- !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
+ if (is_egress) {
err = -EINVAL;
goto free;
}
@@ -1740,149 +1680,6 @@ unlock:
return ERR_PTR(err);
}
-static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
-{
- u32 flags = 0;
-
- if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
- flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
-
- return flags;
-}
-
-#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \
- MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
-static struct ib_flow_action *
-mlx5_ib_create_flow_action_esp(struct ib_device *device,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_dev *mdev = to_mdev(device);
- struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
- struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
- struct mlx5_ib_flow_action *action;
- u64 action_flags;
- u64 flags;
- int err = 0;
-
- err = uverbs_get_flags64(
- &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
- if (err)
- return ERR_PTR(err);
-
- flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
-
- /* We current only support a subset of the standard features. Only a
- * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
- * (with overlap). Full offload mode isn't supported.
- */
- if (!attr->keymat || attr->replay || attr->encap ||
- attr->spi || attr->seq || attr->tfc_pad ||
- attr->hard_limit_pkts ||
- (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
- return ERR_PTR(-EOPNOTSUPP);
-
- if (attr->keymat->protocol !=
- IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
- return ERR_PTR(-EOPNOTSUPP);
-
- aes_gcm = &attr->keymat->keymat.aes_gcm;
-
- if (aes_gcm->icv_len != 16 ||
- aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
- return ERR_PTR(-EOPNOTSUPP);
-
- action = kmalloc(sizeof(*action), GFP_KERNEL);
- if (!action)
- return ERR_PTR(-ENOMEM);
-
- action->esp_aes_gcm.ib_flags = attr->flags;
- memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
- sizeof(accel_attrs.keymat.aes_gcm.aes_key));
- accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
- memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
- sizeof(accel_attrs.keymat.aes_gcm.salt));
- memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
- sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
- accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
- accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
- accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
-
- accel_attrs.esn = attr->esn;
- if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
-
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
- accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
-
- action->esp_aes_gcm.ctx =
- mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
- if (IS_ERR(action->esp_aes_gcm.ctx)) {
- err = PTR_ERR(action->esp_aes_gcm.ctx);
- goto err_parse;
- }
-
- action->esp_aes_gcm.ib_flags = attr->flags;
-
- return &action->ib_action;
-
-err_parse:
- kfree(action);
- return ERR_PTR(err);
-}
-
-static int
-mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_flow_action *maction = to_mflow_act(action);
- struct mlx5_accel_esp_xfrm_attrs accel_attrs;
- int err = 0;
-
- if (attr->keymat || attr->replay || attr->encap ||
- attr->spi || attr->seq || attr->tfc_pad ||
- attr->hard_limit_pkts ||
- (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
- return -EOPNOTSUPP;
-
- /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
- * be modified.
- */
- if (!(maction->esp_aes_gcm.ib_flags &
- IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
- attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
- IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
- return -EINVAL;
-
- memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
- sizeof(accel_attrs));
-
- accel_attrs.esn = attr->esn;
- if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
- accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
- else
- accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
-
- err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
- &accel_attrs);
- if (err)
- return err;
-
- maction->esp_aes_gcm.ib_flags &=
- ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
- maction->esp_aes_gcm.ib_flags |=
- attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
-
- return 0;
-}
-
static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
switch (maction->flow_action_raw.sub_type) {
@@ -1906,13 +1703,6 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
struct mlx5_ib_flow_action *maction = to_mflow_act(action);
switch (action->type) {
- case IB_FLOW_ACTION_ESP:
- /*
- * We only support aes_gcm by now, so we implicitly know this is
- * the underline crypto.
- */
- mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
- break;
case IB_FLOW_ACTION_UNSPECIFIED:
destroy_flow_action_raw(maction);
break;
@@ -2709,11 +2499,6 @@ static const struct ib_device_ops flow_ops = {
.destroy_flow_action = mlx5_ib_destroy_flow_action,
};
-static const struct ib_device_ops flow_ipsec_ops = {
- .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
- .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
-};
-
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
@@ -2724,9 +2509,5 @@ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops);
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
- ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 32a0ea820573..61aa196d6484 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -41,7 +41,6 @@
#include "wr.h"
#include "restrack.h"
#include "counters.h"
-#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
@@ -906,10 +905,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_UDP |
MLX5_RX_HASH_DST_PORT_UDP |
MLX5_RX_HASH_INNER;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
- resp.rss_caps.rx_hash_fields_mask |=
- MLX5_RX_HASH_IPSEC_SPI;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -1791,23 +1786,6 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
MLX5_CAP_GEN(dev->mdev,
num_of_uars_per_page) : 1;
-
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_DEVICE) {
- if (mlx5_get_flow_namespace(dev->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS))
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
- if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
- if (mlx5_accel_ipsec_device_caps(dev->mdev) &
- MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
- resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
- /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
- }
-
resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
resp->num_ports = dev->num_ports;
@@ -3605,13 +3583,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
- mlx5_ib_flow_action,
- UVERBS_OBJECT_FLOW_ACTION,
- UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
- UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
- enum mlx5_ib_uapi_flow_action_flags));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_query_context,
UVERBS_OBJECT_DEVICE,
UVERBS_METHOD_QUERY_CONTEXT,
@@ -3628,8 +3599,6 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
- UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
- &mlx5_ib_flow_action),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index a6606736d8c5..2776ca5fc33f 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -121,7 +121,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (sk->sk_state == MISDN_CLOSED)
return 0;
- skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
return err;
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 3eff08d7b8e5..fe17c7f98e81 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -216,8 +216,12 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
raw->bpf_sample = sample;
- if (raw->progs)
- BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, bpf_prog_run);
+ if (raw->progs) {
+ rcu_read_lock();
+ bpf_prog_run_array(rcu_dereference(raw->progs),
+ &raw->bpf_sample, bpf_prog_run);
+ rcu_read_unlock();
+ }
}
/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 38e152548126..c9e75a9de282 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5226,7 +5226,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index fff259247d52..ac760fd39282 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -170,6 +170,7 @@ config PCH_CAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/ctucanfd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 1e660afcb61b..0af85983634c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -16,6 +16,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig
new file mode 100644
index 000000000000..48963efc7f19
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Kconfig
@@ -0,0 +1,34 @@
+config CAN_CTUCANFD
+ tristate "CTU CAN-FD IP core"
+ help
+	  This driver adds support for the CTU CAN FD open-source IP core.
+	  More documentation and the core sources are available at the
+	  project page (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core).
+	  An integration of the core into a Xilinx Zynq system as a platform
+	  driver is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top).
+	  Implementations on an Intel FPGA-based PCI Express board and on an
+	  Intel SoC are available from the projects
+	  https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd and
+	  https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd respectively.
+	  An overview of the CTU FEE CAN bus projects is provided at
+	  https://canbus.pages.fel.cvut.cz/ .
+
+config CAN_CTUCANFD_PCI
+ tristate "CTU CAN-FD IP core PCI/PCIe driver"
+ depends on CAN_CTUCANFD
+ depends on PCI
+ help
+	  This driver adds PCI/PCIe support for the CTU CAN-FD IP core.
+	  The project providing the FPGA design for the Intel EP4CGX15 based
+	  DB4CGX15 PCIe board, with a transceiver riser shield designed by
+	  PiKRON.com, is available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd .
+
+config CAN_CTUCANFD_PLATFORM
+ tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver"
+ depends on CAN_CTUCANFD
+ depends on OF || COMPILE_TEST
+ help
+	  The core has been tested together with an OpenCores SJA1000
+	  modified to tolerate CAN FD frames on MicroZed Zynq based
+	  MZ_APO education kits designed by Petr Porazil from the PiKRON.com
+	  company. The FPGA design is available at
+	  https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top.
+	  The kit is described at the Computer Architectures course pages,
+	  https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start .
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile
new file mode 100644
index 000000000000..8078f1f2c30f
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for the CTU CAN-FD IP module drivers
+#
+
+obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o
+ctucanfd-y := ctucanfd_base.o
+
+obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o
+obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o
diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h
new file mode 100644
index 000000000000..0e9904f6a05d
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#ifndef __CTUCANFD__
+#define __CTUCANFD__
+
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+#include <linux/list.h>
+
+enum ctu_can_fd_can_registers;
+
+struct ctucan_priv {
+ struct can_priv can; /* must be first member! */
+
+ void __iomem *mem_base;
+ u32 (*read_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg);
+ void (*write_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val);
+
+ unsigned int txb_head;
+ unsigned int txb_tail;
+ u32 txb_prio;
+ unsigned int ntxbufs;
+ spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct clk *can_clk;
+
+ int irq_flags;
+ unsigned long drv_flags;
+
+ u32 rxfrm_first_word;
+
+ struct list_head peers_on_pdev;
+};
+
+/**
+ * ctucan_probe_common - Device type independent registration call
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * @dev: Handle to the generic device structure
+ * @addr: Base address of CTU CAN FD core address
+ * @irq: Interrupt number
+ * @ntxbufs: Number of implemented Tx buffers
+ * @can_clk_rate: Clock rate; if 0, the clock is taken from the device node
+ * @pm_enable_call: Whether pm_runtime_enable should be called
+ * @set_drvdata_fnc: Function to set network driver data for physical device
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ctucan_probe_common(struct device *dev, void __iomem *addr,
+ int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate,
+ int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev,
+ struct net_device *ndev));
+
+int ctucan_suspend(struct device *dev) __maybe_unused;
+int ctucan_resume(struct device *dev) __maybe_unused;
+
+#endif /*__CTUCANFD__*/
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
new file mode 100644
index 000000000000..7a4550f60abb
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -0,0 +1,1490 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+
+#include "ctucanfd.h"
+#include "ctucanfd_kregs.h"
+#include "ctucanfd_kframe.h"
+
+#ifdef DEBUG
+#define ctucan_netdev_dbg(ndev, args...) \
+ netdev_dbg(ndev, args)
+#else
+#define ctucan_netdev_dbg(...) do { } while (0)
+#endif
+
+#define CTUCANFD_ID 0xCAFD
+
+/* TX buffer rotation:
+ * - when a buffer transitions to empty state, rotate order and priorities
+ * - if more buffers seem to transition at the same time, rotate by the number of buffers
+ * - it may be assumed that buffers transition to empty state in FIFO order (because we manage
+ * priorities that way)
+ * - at frame filling, do not rotate anything, just increment buffer modulo counter
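+ * - illustrative example with 4 buffers: priorities 0x1234 (one nibble per buffer, buffer 0
+ *   holding 4) rotate to 0x2341, so the just-freed buffer 0 drops to the lowest priority and
+ *   buffer 1 becomes the highest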
+ */
+
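+/* First word of a received frame was already read, but skb allocation was deferred */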
+#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1
+
+#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \
+ [st] = #st
+
+enum ctucan_txtb_status {
+ TXT_NOT_EXIST = 0x0,
+ TXT_RDY = 0x1,
+ TXT_TRAN = 0x2,
+ TXT_ABTP = 0x3,
+ TXT_TOK = 0x4,
+ TXT_ERR = 0x6,
+ TXT_ABT = 0x7,
+ TXT_ETY = 0x8,
+};
+
+enum ctucan_txtb_command {
+ TXT_CMD_SET_EMPTY = 0x01,
+ TXT_CMD_SET_READY = 0x02,
+ TXT_CMD_SET_ABORT = 0x04
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 190,
+ .tseg2_min = 1,
+ .tseg2_max = 63,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 8,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 94,
+ .tseg2_min = 1,
+ .tseg2_max = 31,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 2,
+ .brp_inc = 1,
+};
+
+static const char * const ctucan_state_strings[CAN_STATE_MAX] = {
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING)
+};
+
+static void ctucan_write32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32(val, priv->mem_base + reg);
+}
+
+static void ctucan_write32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32be(val, priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32(priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32be(priv->mem_base + reg);
+}
+
+static inline void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg,
+ u32 val)
+{
+ priv->write_reg(priv, reg, val);
+}
+
+static inline u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg)
+{
+ return priv->read_reg(priv, reg);
+}
+
+static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base,
+ u32 offset, u32 val)
+{
+ priv->write_reg(priv, buf_base + offset, val);
+}
+
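+/* TXTNF - at least one TXT buffer is free; ENABLED - the ENA bit of the MODE register is set */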
+#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS)))
+#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE)))
+
+/**
+ * ctucan_state_to_str() - Converts CAN controller state code to corresponding text
+ * @state: CAN controller state code
+ *
+ * Return: Pointer to string representation of the error state
+ */
+static const char *ctucan_state_to_str(enum can_state state)
+{
+ const char *txt = NULL;
+
+ if (state >= 0 && state < CAN_STATE_MAX)
+ txt = ctucan_state_strings[state];
+ return txt ? txt : "UNKNOWN";
+}
+
+/**
+ * ctucan_reset() - Issues software reset request to CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset
+ */
+static int ctucan_reset(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int i = 100;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST);
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+
+ do {
+ u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID,
+ ctucan_read32(priv, CTUCANFD_DEVICE_ID));
+
+		if (device_id == CTUCANFD_ID)
+ return 0;
+ if (!i--) {
+ netdev_warn(ndev, "device did not leave reset\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(100, 200);
+ } while (1);
+}
+
+/**
+ * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ * @bt: Pointer to Bit timing structure
+ * @nominal: True - Nominal bit timing, False - Data bit timing
+ *
+ * Return: 0 - OK, -%EPERM if controller is enabled
+ */
+static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int max_ph1_len = 31;
+ u32 btr = 0;
+ u32 prop_seg = bt->prop_seg;
+ u32 phase_seg1 = bt->phase_seg1;
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ if (nominal)
+ max_ph1_len = 63;
+
+	/* The timing calculation functions have only constraints on tseg1, which is prop_seg +
+	 * phase_seg1 combined. tseg1 is then split in half and stored into prop_seg and phase_seg1.
+ * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the
+ * values here.
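+	 * For example, with nominal timing and tseg1 = 140 split in half as prop_seg = 70 and
+	 * phase_seg1 = 70, max_ph1_len is 63, so the 7 excess quanta are moved into prop_seg,
+	 * giving prop_seg = 77 and phase_seg1 = 63.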
+ */
+ if (phase_seg1 > max_ph1_len) {
+ prop_seg += phase_seg1 - max_ph1_len;
+ phase_seg1 = max_ph1_len;
+ bt->prop_seg = prop_seg;
+ bt->phase_seg1 = phase_seg1;
+ }
+
+ if (nominal) {
+ btr = FIELD_PREP(REG_BTR_PROP, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_BRP, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR, btr);
+ } else {
+ btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR_FD, btr);
+ }
+
+ return 0;
+}
+
+/**
+ * ctucan_set_bittiming() - CAN set nominal bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ /* Note that bt may be modified here */
+ return ctucan_set_btr(ndev, bt, true);
+}
+
+/**
+ * ctucan_set_data_bittiming() - CAN set data bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_data_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ /* Note that dbt may be modified here */
+ return ctucan_set_btr(ndev, dbt, false);
+}
+
+/**
+ * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM if controller is enabled
+ */
+static int ctucan_set_secondary_sample_point(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ int ssp_offset = 0;
+ u32 ssp_cfg = 0; /* No SSP by default */
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n");
+ return -EPERM;
+ }
+
+	/* Use SSP for bit rates above 1 Mbit/s */
+ if (dbt->bitrate > 1000000) {
+ /* Calculate SSP in minimal time quanta */
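+		/* sample_point is in tenths of a percent, so this computes
+		 * (clock cycles per bit) * sample_point / 1000
+		 */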
+ ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate;
+
+ if (ssp_offset > 127) {
+ netdev_warn(ndev, "SSP offset saturated to 127\n");
+ ssp_offset = 127;
+ }
+
+ ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ }
+
+ ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
+
+ return 0;
+}
+
+/**
+ * ctucan_set_mode() - Sets CTU CAN FD's mode
+ * @priv: Pointer to private data
+ * @mode: Pointer to controller modes to be set
+ */
+static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode)
+{
+ u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ?
+ (mode_reg | REG_MODE_ILBP) :
+ (mode_reg & ~REG_MODE_ILBP);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ?
+ (mode_reg | REG_MODE_BMM) :
+ (mode_reg & ~REG_MODE_BMM);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD) ?
+ (mode_reg | REG_MODE_FDE) :
+ (mode_reg & ~REG_MODE_FDE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ?
+ (mode_reg | REG_MODE_ACF) :
+ (mode_reg & ~REG_MODE_ACF);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ?
+ (mode_reg | REG_MODE_NISOFD) :
+ (mode_reg & ~REG_MODE_NISOFD);
+
+ /* One shot mode supported indirectly via Retransmit limit */
+ mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF);
+ mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ?
+ (mode_reg | REG_MODE_RTRLE) :
+ (mode_reg & ~REG_MODE_RTRLE);
+
+ /* Some bits fixed:
+ * TSTM - Off, User shall not be able to change REC/TEC by hand during operation
+ */
+ mode_reg &= ~REG_MODE_TSTM;
+
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+}
+
+/**
+ * ctucan_chip_start() - This routine starts the driver
+ * @ndev: Pointer to net_device structure
+ *
+ * The routine expects the chip to be in reset state. It sets up the initial
+ * Tx buffer FIFO priorities, sets the bit timing, enables interrupts,
+ * switches the core to operational mode and changes the controller
+ * state to %CAN_STATE_STOPPED.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_chip_start(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 int_ena, int_msk;
+ u32 mode_reg;
+ int err;
+ struct can_ctrlmode mode;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
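+	/* One 4-bit priority per TXT buffer; buffer 0 starts with the highest value, which
+	 * combined with the rotation below keeps FIFO ordering
+	 */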
+ priv->txb_prio = 0x01234567;
+ priv->txb_head = 0;
+ priv->txb_tail = 0;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio);
+
+ /* Configure bit-rates and ssp */
+ err = ctucan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_data_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_secondary_sample_point(ndev);
+ if (err < 0)
+ return err;
+
+ /* Configure modes */
+ mode.flags = priv->can.ctrlmode;
+ mode.mask = 0xFFFFFFFF;
+ ctucan_set_mode(priv, &mode);
+
+ /* Configure interrupts */
+ int_ena = REG_INT_STAT_RBNEI |
+ REG_INT_STAT_TXBHCI |
+ REG_INT_STAT_EWLI |
+ REG_INT_STAT_FCSI;
+
+ /* Bus error reporting -> Allow Error/Arb.lost interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ int_ena |= REG_INT_STAT_ALI |
+ REG_INT_STAT_BEI;
+ }
+
+ int_msk = ~int_ena; /* Mask all disabled interrupts */
+
+ /* It's after reset, so there is no need to clear anything */
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
+ ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);
+
+ /* Controller enters ERROR_ACTIVE on initial FCSI */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Enable the controller */
+ mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+ mode_reg |= REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+
+ return 0;
+}
+
+/**
+ * ctucan_do_set_mode() - Sets mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: Tells the mode of the driver
+ *
+ * This checks the requested mode and calls the corresponding routine to apply it.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ return ret;
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ctucan_get_tx_status() - Gets status of TXT buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: Status of TXT buffer
+ */
+static inline enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf)
+{
+ u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS);
+ enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7;
+
+ return status;
+}
+
+/**
+ * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be
+ * inserted to TXT Buffer
+ */
+static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
+{
+ enum ctucan_txtb_status buf_status;
+
+ buf_status = ctucan_get_tx_status(priv, buf);
+ if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP)
+ return false;
+
+ return true;
+}
+
+/**
+ * ctucan_insert_frame() - Inserts frame to TXT buffer
+ * @priv: Pointer to private data
+ * @cf: Pointer to CAN frame to be inserted
+ * @buf: TXT Buffer index to which frame is inserted (0-based)
+ * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
+ *
+ * Return: True - Frame inserted successfully
+ * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
+ */
+static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
+ bool isfdf)
+{
+ u32 buf_base;
+ u32 ffw = 0;
+ u32 idw = 0;
+ unsigned int i;
+
+ if (buf >= priv->ntxbufs)
+ return false;
+
+ if (!ctucan_is_txt_buf_writable(priv, buf))
+ return false;
+
+ if (cf->len > CANFD_MAX_DLEN)
+ return false;
+
+ /* Prepare Frame format */
+ if (cf->can_id & CAN_RTR_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_IDE;
+
+ if (isfdf) {
+ ffw |= REG_FRAME_FORMAT_W_FDF;
+ if (cf->flags & CANFD_BRS)
+ ffw |= REG_FRAME_FORMAT_W_BRS;
+ }
+
+ ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len));
+
+ /* Prepare identifier */
+ if (cf->can_id & CAN_EFF_FLAG)
+ idw = cf->can_id & CAN_EFF_MASK;
+ else
+ idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK);
+
+ /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */
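+	/* TXT buffer n occupies a 0x100 byte window starting at offset (n + 1) * 0x100 */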
+ buf_base = (buf + 1) * 0x100;
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw);
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw);
+
+ /* Write Data payload */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i += 4) {
+ u32 data = le32_to_cpu(*(__le32 *)(cf->data + i));
+
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ctucan_give_txtb_cmd() - Applies command on TXT buffer
+ * @priv: Pointer to private data
+ * @cmd: Command to give
+ * @buf: Buffer index (0-based)
+ */
+static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf)
+{
+ u32 tx_cmd = cmd;
+
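+	/* Buffer select bits start at bit 8 of TX_COMMAND, one bit per TXT buffer */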
+ tx_cmd |= 1 << (buf + 8);
+ ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd);
+}
+
+/**
+ * ctucan_start_xmit() - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
+ * populates its fields to start the transmission.
+ *
+ * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
+ * negative return values reserved for error cases
+ */
+static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 txtb_id;
+ bool ok;
+ unsigned long flags;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
+ netif_stop_queue(ndev);
+		netdev_err(ndev, "BUG! no TXB free when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ txtb_id = priv->txb_head % priv->ntxbufs;
+ ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
+ ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));
+
+ if (!ok) {
+ netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
+ kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ can_put_echo_skb(skb, ndev, txtb_id, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id);
+ priv->txb_head++;
+
+ /* Check if all TX buffers are full */
+ if (!CTU_CAN_FD_TXTNF(priv))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ctucan_read_rx_frame() - Reads frame from RX FIFO
+ * @priv: Pointer to CTU CAN FD's private data
+ * @cf: Pointer to CAN frame struct
+ * @ffw: Previously read frame format word
+ *
+ * Note: Frame format word must be read separately and provided in 'ffw'.
+ */
+static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
+{
+ u32 idw;
+ unsigned int i;
+ unsigned int wc;
+ unsigned int len;
+
+ idw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw))
+ cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (idw >> 18) & CAN_SFF_MASK;
+
+ /* BRS, ESI, RTR Flags */
+ cf->flags = 0;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
+ cf->flags |= CANFD_BRS;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw))
+ cf->flags |= CANFD_ESI;
+ } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
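+	/* RWCNT counts the words following FRAME_FORMAT_W; the identifier word and the two
+	 * timestamp words are subtracted to get the number of data words
+	 */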
+ wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3;
+
+ /* DLC */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) {
+ len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw);
+ } else {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ len = wc << 2;
+ else
+ len = 8;
+ }
+ cf->len = len;
+ if (unlikely(len > wc * 4))
+ len = wc * 4;
+
+ /* Timestamp - Read and throw away */
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+
+ /* Data */
+ for (i = 0; i < len; i += 4) {
+ u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ *(__le32 *)(cf->data + i) = cpu_to_le32(data);
+ }
+ while (unlikely(i < wc * 4)) {
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ i += 4;
+ }
+}
+
+/**
+ * ctucan_rx() - Called from CAN ISR to complete the received frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (NAPI poll) to process Rx frames. It does minimal
+ * processing and invokes "netif_receive_skb" to complete further processing.
+ * Return: 1 when a frame is passed to the network layer, 0 when the first frame word was read
+ *         but the system is temporarily out of free SKBs and the allocation is left to be
+ *         resolved later, -%EAGAIN in case of an empty Rx FIFO.
+ */
+static int ctucan_rx(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 ffw;
+
+ if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) {
+ ffw = priv->rxfrm_first_word;
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ } else {
+ ffw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ }
+
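+	/* A zero read word count means there is no frame in the RX FIFO */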
+ if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw))
+ return -EAGAIN;
+
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ priv->rxfrm_first_word = ffw;
+ set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ return 0;
+ }
+
+ ctucan_read_rx_frame(priv, cf, ffw);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * ctucan_read_fault_state() - Reads CTU CAN FD's fault confinement state.
+ * @priv: Pointer to private data
+ *
+ * Returns: Fault confinement state of controller
+ */
+static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv)
+{
+ u32 fs;
+ u32 rec_tec;
+ u32 ewl;
+
+ fs = ctucan_read32(priv, CTUCANFD_EWL);
+ rec_tec = ctucan_read32(priv, CTUCANFD_REC);
+ ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs);
+
+ if (FIELD_GET(REG_EWL_ERA, fs)) {
+ if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) &&
+ ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec))
+ return CAN_STATE_ERROR_ACTIVE;
+ else
+ return CAN_STATE_ERROR_WARNING;
+ } else if (FIELD_GET(REG_EWL_ERP, fs)) {
+ return CAN_STATE_ERROR_PASSIVE;
+ } else if (FIELD_GET(REG_EWL_BOF, fs)) {
+ return CAN_STATE_BUS_OFF;
+ }
+
+ WARN(true, "Invalid error state");
+ return CAN_STATE_ERROR_PASSIVE;
+}
+
+/**
+ * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller
+ * @priv: Pointer to private data
+ * @bec: Pointer to Error counter structure
+ */
+static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec)
+{
+ u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC);
+
+ bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs);
+ bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs);
+}
+
+/**
+ * ctucan_err_interrupt() - Error frame ISR
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt and it will check the type of error and forward the error
+ * frame to upper layers.
+ */
+static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state;
+ struct can_berr_counter bec;
+ u32 err_capt_alc;
+ int dologerr = net_ratelimit();
+
+ ctucan_get_rec_tec(priv, &bec);
+ state = ctucan_read_fault_state(priv);
+ err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);
+
+ if (dologerr)
+ netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n",
+ __func__, isr, bec.rxerr, bec.txerr,
+ FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc));
+
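+	/* State changes and error statistics below are accounted even when the
+	 * error skb allocation fails
+	 */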
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ /* EWLI: error warning limit condition met
+ * FCSI: fault confinement state changed
+ * ALI: arbitration lost (just informative)
+ * BEI: bus error interrupt
+ */
+ if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) {
+ netdev_info(ndev, "state changes from %s to %s\n",
+ ctucan_state_to_str(priv->can.state),
+ ctucan_state_to_str(state));
+
+ if (priv->can.state == state)
+ netdev_warn(ndev,
+ "current and previous state is the same! (missed interrupt?)\n");
+
+ priv->can.state = state;
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+			if (skb) {
+				cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+				cf->data[6] = bec.txerr;
+				cf->data[7] = bec.rxerr;
+			}
+ break;
+ default:
+ netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+ state, ctucan_state_to_str(state));
+ break;
+ }
+ }
+
+ /* Check for Arbitration Lost interrupt */
+ if (FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ if (dologerr)
+ netdev_info(ndev, "arbitration lost\n");
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for Bus Error interrupt */
+ if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
+ netdev_info(ndev, "bus error\n");
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/**
+ * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
+ * @napi: NAPI structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the poll routine for the rx part. It processes received packets up to the quota value.
+ *
+ * Return: Number of packets received
+ */
+static int ctucan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int work_done = 0;
+ u32 status;
+ u32 framecnt;
+ int res = 1;
+
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ while (framecnt && work_done < quota && res > 0) {
+ res = ctucan_rx(ndev);
+ work_done++;
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ }
+
+ /* Check for RX FIFO Overflow */
+ status = ctucan_read32(priv, CTUCANFD_STATUS);
+ if (FIELD_GET(REG_STATUS_DOR, status)) {
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_info(ndev, "rx_poll: rx fifo overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+
+ /* Clear Data Overrun */
+ ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
+ }
+
+ if (work_done)
+ can_led_event(ndev, CAN_LED_EVENT_RX);
+
+ if (!framecnt && res != 0) {
+ if (napi_complete_done(napi, work_done)) {
+ /* Clear and enable RBNEI. It is level-triggered, so
+ * there is no race condition.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI);
+ }
+ }
+
+ return work_done;
+}
+
+/**
+ * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers
+ * @ndev: net_device pointer
+ */
+static void ctucan_rotate_txb_prio(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 prio = priv->txb_prio;
+
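+	/* Rotate left by one 4-bit nibble across the ntxbufs priority fields */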
+ prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF);
+ ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio);
+ priv->txb_prio = prio;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio);
+}
+
+/**
+ * ctucan_tx_interrupt() - Tx done Isr
+ * @ndev: net_device pointer
+ */
+static void ctucan_tx_interrupt(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ bool first = true;
+ bool some_buffers_processed;
+ unsigned long flags;
+ enum ctucan_txtb_status txtb_status;
+ u32 txtb_id;
+
+ /* read tx_status
+ * if txb[n].finished (bit 2)
+ * if ok -> echo
+ * if error / aborted -> ?? (find how to handle oneshot mode)
+ * txb_tail++
+ */
+ do {
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ some_buffers_processed = false;
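+		/* The cast to int keeps the head/tail comparison valid across u32 counter wraparound */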
+ while ((int)(priv->txb_head - priv->txb_tail) > 0) {
+ txtb_id = priv->txb_tail % priv->ntxbufs;
+ txtb_status = ctucan_get_tx_status(priv, txtb_id);
+
+ ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status);
+
+ switch (txtb_status) {
+ case TXT_TOK:
+ ctucan_netdev_dbg(ndev, "TXT_OK\n");
+ stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_packets++;
+ break;
+ case TXT_ERR:
+			/* This indicates that the retransmit limit has been reached. Obviously
+			 * we should not echo the frame, but also not indicate any kind of
+			 * error. If desired, it was already reported (possibly multiple
+			 * times) on each arbitration loss.
+ */
+ netdev_warn(ndev, "TXB in Error state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ case TXT_ABT:
+ /* Same as for TXT_ERR, only with different cause. We *could*
+ * re-queue the frame, but multiqueue/abort is not supported yet
+ * anyway.
+ */
+ netdev_warn(ndev, "TXB in Aborted state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ default:
+			/* This is a bug only if the first buffer is not finished;
+			 * otherwise it is pretty much expected.
+			 */
+ if (first) {
+ netdev_err(ndev,
+ "BUG: TXB#%u not in a finished state (0x%x)!\n",
+ txtb_id, txtb_status);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ /* do not clear nor wake */
+ return;
+ }
+ goto clear;
+ }
+ priv->txb_tail++;
+ first = false;
+ some_buffers_processed = true;
+ /* Adjust priorities *before* marking the buffer as empty. */
+ ctucan_rotate_txb_prio(ndev);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id);
+ }
+clear:
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* If no buffers were processed this time, we cannot clear - that would introduce
+ * a race condition.
+ */
+ if (some_buffers_processed) {
+			/* Clear the interrupt again. We do not want to receive the interrupt
+			 * again for a buffer already handled. If it is the last finished one,
+			 * a spurious interrupt would otherwise be logged.
+			 */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI);
+ }
+ } while (some_buffers_processed);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* Check if at least one TX buffer is free */
+ if (CTU_CAN_FD_TXTNF(priv))
+ netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
+
+/**
+ * ctucan_interrupt() - CAN interrupt service routine
+ * @irq: irq number
+ * @dev_id: device ID pointer
+ *
+ * This is the CTU CAN FD ISR. It checks for the type of interrupt
+ * and invokes the corresponding handler.
+ *
+ * Return:
+ * IRQ_NONE if no interrupt was pending, IRQ_HANDLED otherwise
+ */
+static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 isr, icr;
+ u32 imask;
+ int irq_loops;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
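+	/* Handle all pending interrupt sources; bail out after many iterations
+	 * so a misbehaving device cannot keep the CPU in this loop forever.
+	 */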
+ for (irq_loops = 0; irq_loops < 10000; irq_loops++) {
+ /* Get the interrupt status */
+ isr = ctucan_read32(priv, CTUCANFD_INT_STAT);
+
+ if (!isr)
+ return irq_loops ? IRQ_HANDLED : IRQ_NONE;
+
+ /* Receive Buffer Not Empty Interrupt */
+ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) {
+ ctucan_netdev_dbg(ndev, "RXBNEI\n");
+			/* Mask RXBNEI first, then clear the interrupt and schedule NAPI. Even if
+ * another IRQ fires, RBNEI will always be 0 (masked).
+ */
+ icr = REG_INT_STAT_RBNEI;
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ napi_schedule(&priv->napi);
+ }
+
+ /* TXT Buffer HW Command Interrupt */
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ ctucan_netdev_dbg(ndev, "TXBHCI\n");
+ /* Cleared inside */
+ ctucan_tx_interrupt(ndev);
+ }
+
+ /* Error interrupts */
+ if (FIELD_GET(REG_INT_STAT_EWLI, isr) ||
+ FIELD_GET(REG_INT_STAT_FCSI, isr) ||
+ FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI);
+
+ ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ ctucan_err_interrupt(ndev, isr);
+ }
+ /* Ignore RI, TI, LFI, RFI, BSI */
+ }
+
+ netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr);
+
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ int i;
+
+ netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n",
+ priv->txb_head, priv->txb_tail);
+ for (i = 0; i < priv->ntxbufs; i++) {
+ u32 status = ctucan_get_tx_status(priv, i);
+
+ netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status);
+ }
+ }
+
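+	/* Disable and mask all interrupts to silence the stuck device */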
+ imask = 0xffffffff;
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ctucan_chip_stop() - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It disables all interrupts
+ * and disables the controller.
+ */
+static void ctucan_chip_stop(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 mask = 0xffffffff;
+ u32 mode;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ /* Disable interrupts and disable CAN */
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask);
+ mode = ctucan_read32(priv, CTUCANFD_MODE);
+ mode &= ~REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * ctucan_open() - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_open(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+		netdev_err(ndev, "%s: pm_runtime_get failed (%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_reset;
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret) {
+ netdev_warn(ndev, "open_candev failed!\n");
+ goto err_open;
+ }
+
+ ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err_irq;
+ }
+
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ goto err_chip_start;
+ }
+
+ netdev_info(ndev, "ctu_can_fd device registered\n");
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_chip_start:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ close_candev(ndev);
+err_open:
+err_reset:
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+/**
+ * ctucan_close() - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int ctucan_close(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ ctucan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+/**
+ * ctucan_get_berr_counter() - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+		netdev_err(ndev, "%s: pm_runtime_get failed (%d)\n", __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ctucan_get_rec_tec(priv, bec);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+static const struct net_device_ops ctucan_netdev_ops = {
+ .ndo_open = ctucan_open,
+ .ndo_stop = ctucan_close,
+ .ndo_start_xmit = ctucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+int ctucan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_suspend);
+
+int ctucan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ ctucan_netdev_dbg(ndev, "%s\n", __func__);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_resume);
+
+int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate, int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev))
+{
+ struct ctucan_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ spin_lock_init(&priv->tx_lock);
+ INIT_LIST_HEAD(&priv->peers_on_pdev);
+ priv->ntxbufs = ntxbufs;
+ priv->dev = dev;
+ priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
+ priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.do_set_mode = ctucan_do_set_mode;
+
+ /* Needed for timing adjustment to be performed as soon as possible */
+ priv->can.do_set_bittiming = ctucan_set_bittiming;
+ priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+
+ priv->can.do_get_berr_counter = ctucan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
+ | CAN_CTRLMODE_LISTENONLY
+ | CAN_CTRLMODE_FD
+ | CAN_CTRLMODE_PRESUME_ACK
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_FD_NON_ISO
+ | CAN_CTRLMODE_ONE_SHOT;
+ priv->mem_base = addr;
+
+ /* Get IRQ for the device */
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ if (set_drvdata_fnc)
+ set_drvdata_fnc(dev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &ctucan_netdev_ops;
+
+ /* Getting the can_clk info */
+ if (!can_clk_rate) {
+ priv->can_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ can_clk_rate = clk_get_rate(priv->can_clk);
+ }
+
+ priv->write_reg = ctucan_write32_le;
+ priv->read_reg = ctucan_read32_le;
+
+ if (pm_enable_call)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+		netdev_err(ndev, "%s: pm_runtime_get failed (%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ goto err_pmdisable;
+ }
+
+	/* Check for big endianness and set the matching IO accessors */
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ priv->write_reg = ctucan_write32_be;
+ priv->read_reg = ctucan_read32_be;
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ netdev_err(ndev, "CTU_CAN_FD signature not found\n");
+ ret = -ENODEV;
+ goto err_deviceoff;
+ }
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_deviceoff;
+
+ priv->can.clock.freq = can_clk_rate;
+
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "fail to register failed (err=%d)\n", ret);
+ goto err_deviceoff;
+ }
+
+ devm_can_led_init(ndev);
+
+ pm_runtime_put(dev);
+
+	netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, txt buffers: %d\n",
+ priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs);
+
+ return 0;
+
+err_deviceoff:
+ pm_runtime_put(priv->dev);
+err_pmdisable:
+ if (pm_enable_call)
+ pm_runtime_disable(dev);
+err_free:
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ return ret;
+}
+EXPORT_SYMBOL(ctucan_probe_common);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>");
+MODULE_DESCRIPTION("CTU CAN FD interface");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
new file mode 100644
index 000000000000..3491299eaac2
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+
+#include <linux/bits.h>
+
+/* CAN_Frame_format memory map */
+enum ctu_can_fd_can_frame_format {
+ CTUCANFD_FRAME_FORMAT_W = 0x0,
+ CTUCANFD_IDENTIFIER_W = 0x4,
+ CTUCANFD_TIMESTAMP_L_W = 0x8,
+ CTUCANFD_TIMESTAMP_U_W = 0xc,
+ CTUCANFD_DATA_1_4_W = 0x10,
+ CTUCANFD_DATA_5_8_W = 0x14,
+ CTUCANFD_DATA_61_64_W = 0x4c,
+};
+
+/* CAN_FD_Frame_format memory region */
+
+/* FRAME_FORMAT_W registers */
+#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0)
+#define REG_FRAME_FORMAT_W_RTR BIT(5)
+#define REG_FRAME_FORMAT_W_IDE BIT(6)
+#define REG_FRAME_FORMAT_W_FDF BIT(7)
+#define REG_FRAME_FORMAT_W_BRS BIT(9)
+#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10)
+#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11)
+
+/* IDENTIFIER_W registers */
+#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0)
+#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18)
+
+/* TIMESTAMP_L_W registers */
+#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0)
+
+/* TIMESTAMP_U_W registers */
+#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0)
+
+/* DATA_1_4_W registers */
+#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0)
+#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8)
+#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16)
+#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24)
+
+/* DATA_5_8_W registers */
+#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0)
+#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8)
+#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16)
+#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24)
+
+/* DATA_61_64_W registers */
+#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0)
+#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8)
+#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16)
+#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
new file mode 100644
index 000000000000..edc1c1a24348
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+
+#include <linux/bits.h>
+
+/* CAN_Registers memory map */
+enum ctu_can_fd_can_registers {
+ CTUCANFD_DEVICE_ID = 0x0,
+ CTUCANFD_VERSION = 0x2,
+ CTUCANFD_MODE = 0x4,
+ CTUCANFD_SETTINGS = 0x6,
+ CTUCANFD_STATUS = 0x8,
+ CTUCANFD_COMMAND = 0xc,
+ CTUCANFD_INT_STAT = 0x10,
+ CTUCANFD_INT_ENA_SET = 0x14,
+ CTUCANFD_INT_ENA_CLR = 0x18,
+ CTUCANFD_INT_MASK_SET = 0x1c,
+ CTUCANFD_INT_MASK_CLR = 0x20,
+ CTUCANFD_BTR = 0x24,
+ CTUCANFD_BTR_FD = 0x28,
+ CTUCANFD_EWL = 0x2c,
+ CTUCANFD_ERP = 0x2d,
+ CTUCANFD_FAULT_STATE = 0x2e,
+ CTUCANFD_REC = 0x30,
+ CTUCANFD_TEC = 0x32,
+ CTUCANFD_ERR_NORM = 0x34,
+ CTUCANFD_ERR_FD = 0x36,
+ CTUCANFD_CTR_PRES = 0x38,
+ CTUCANFD_FILTER_A_MASK = 0x3c,
+ CTUCANFD_FILTER_A_VAL = 0x40,
+ CTUCANFD_FILTER_B_MASK = 0x44,
+ CTUCANFD_FILTER_B_VAL = 0x48,
+ CTUCANFD_FILTER_C_MASK = 0x4c,
+ CTUCANFD_FILTER_C_VAL = 0x50,
+ CTUCANFD_FILTER_RAN_LOW = 0x54,
+ CTUCANFD_FILTER_RAN_HIGH = 0x58,
+ CTUCANFD_FILTER_CONTROL = 0x5c,
+ CTUCANFD_FILTER_STATUS = 0x5e,
+ CTUCANFD_RX_MEM_INFO = 0x60,
+ CTUCANFD_RX_POINTERS = 0x64,
+ CTUCANFD_RX_STATUS = 0x68,
+ CTUCANFD_RX_SETTINGS = 0x6a,
+ CTUCANFD_RX_DATA = 0x6c,
+ CTUCANFD_TX_STATUS = 0x70,
+ CTUCANFD_TX_COMMAND = 0x74,
+ CTUCANFD_TX_PRIORITY = 0x78,
+ CTUCANFD_ERR_CAPT = 0x7c,
+ CTUCANFD_ALC = 0x7e,
+ CTUCANFD_TRV_DELAY = 0x80,
+ CTUCANFD_SSP_CFG = 0x82,
+ CTUCANFD_RX_FR_CTR = 0x84,
+ CTUCANFD_TX_FR_CTR = 0x88,
+ CTUCANFD_DEBUG_REGISTER = 0x8c,
+ CTUCANFD_YOLO_REG = 0x90,
+ CTUCANFD_TIMESTAMP_LOW = 0x94,
+ CTUCANFD_TIMESTAMP_HIGH = 0x98,
+ CTUCANFD_TXTB1_DATA_1 = 0x100,
+ CTUCANFD_TXTB1_DATA_2 = 0x104,
+ CTUCANFD_TXTB1_DATA_20 = 0x14c,
+ CTUCANFD_TXTB2_DATA_1 = 0x200,
+ CTUCANFD_TXTB2_DATA_2 = 0x204,
+ CTUCANFD_TXTB2_DATA_20 = 0x24c,
+ CTUCANFD_TXTB3_DATA_1 = 0x300,
+ CTUCANFD_TXTB3_DATA_2 = 0x304,
+ CTUCANFD_TXTB3_DATA_20 = 0x34c,
+ CTUCANFD_TXTB4_DATA_1 = 0x400,
+ CTUCANFD_TXTB4_DATA_2 = 0x404,
+ CTUCANFD_TXTB4_DATA_20 = 0x44c,
+};
+
+/* Control_registers memory region */
+
+/* DEVICE_ID VERSION registers */
+#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0)
+#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16)
+#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24)
+
+/* MODE SETTINGS registers */
+#define REG_MODE_RST BIT(0)
+#define REG_MODE_BMM BIT(1)
+#define REG_MODE_STM BIT(2)
+#define REG_MODE_AFM BIT(3)
+#define REG_MODE_FDE BIT(4)
+#define REG_MODE_ACF BIT(7)
+#define REG_MODE_TSTM BIT(8)
+#define REG_MODE_RTRLE BIT(16)
+#define REG_MODE_RTRTH GENMASK(20, 17)
+#define REG_MODE_ILBP BIT(21)
+#define REG_MODE_ENA BIT(22)
+#define REG_MODE_NISOFD BIT(23)
+#define REG_MODE_PEX BIT(24)
+#define REG_MODE_TBFBO BIT(25)
+#define REG_MODE_FDRF BIT(26)
+
+/* STATUS registers */
+#define REG_STATUS_RXNE BIT(0)
+#define REG_STATUS_DOR BIT(1)
+#define REG_STATUS_TXNF BIT(2)
+#define REG_STATUS_EFT BIT(3)
+#define REG_STATUS_RXS BIT(4)
+#define REG_STATUS_TXS BIT(5)
+#define REG_STATUS_EWL BIT(6)
+#define REG_STATUS_IDLE BIT(7)
+#define REG_STATUS_PEXS BIT(8)
+
+/* COMMAND registers */
+#define REG_COMMAND_RRB BIT(2)
+#define REG_COMMAND_CDO BIT(3)
+#define REG_COMMAND_ERCRST BIT(4)
+#define REG_COMMAND_RXFCRST BIT(5)
+#define REG_COMMAND_TXFCRST BIT(6)
+#define REG_COMMAND_CPEXS BIT(7)
+
+/* INT_STAT registers */
+#define REG_INT_STAT_RXI BIT(0)
+#define REG_INT_STAT_TXI BIT(1)
+#define REG_INT_STAT_EWLI BIT(2)
+#define REG_INT_STAT_DOI BIT(3)
+#define REG_INT_STAT_FCSI BIT(4)
+#define REG_INT_STAT_ALI BIT(5)
+#define REG_INT_STAT_BEI BIT(6)
+#define REG_INT_STAT_OFI BIT(7)
+#define REG_INT_STAT_RXFI BIT(8)
+#define REG_INT_STAT_BSI BIT(9)
+#define REG_INT_STAT_RBNEI BIT(10)
+#define REG_INT_STAT_TXBHCI BIT(11)
+
+/* INT_ENA_SET registers */
+#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0)
+
+/* INT_ENA_CLR registers */
+#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0)
+
+/* INT_MASK_SET registers */
+#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0)
+
+/* INT_MASK_CLR registers */
+#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0)
+
+/* BTR registers */
+#define REG_BTR_PROP GENMASK(6, 0)
+#define REG_BTR_PH1 GENMASK(12, 7)
+#define REG_BTR_PH2 GENMASK(18, 13)
+#define REG_BTR_BRP GENMASK(26, 19)
+#define REG_BTR_SJW GENMASK(31, 27)
+
+/* BTR_FD registers */
+#define REG_BTR_FD_PROP_FD GENMASK(5, 0)
+#define REG_BTR_FD_PH1_FD GENMASK(11, 7)
+#define REG_BTR_FD_PH2_FD GENMASK(17, 13)
+#define REG_BTR_FD_BRP_FD GENMASK(26, 19)
+#define REG_BTR_FD_SJW_FD GENMASK(31, 27)
+
+/* EWL ERP FAULT_STATE registers */
+#define REG_EWL_EW_LIMIT GENMASK(7, 0)
+#define REG_EWL_ERP_LIMIT GENMASK(15, 8)
+#define REG_EWL_ERA BIT(16)
+#define REG_EWL_ERP BIT(17)
+#define REG_EWL_BOF BIT(18)
+
+/* REC TEC registers */
+#define REG_REC_REC_VAL GENMASK(8, 0)
+#define REG_REC_TEC_VAL GENMASK(24, 16)
+
+/* ERR_NORM ERR_FD registers */
+#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0)
+#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16)
+
+/* CTR_PRES registers */
+#define REG_CTR_PRES_CTPV GENMASK(8, 0)
+#define REG_CTR_PRES_PTX BIT(9)
+#define REG_CTR_PRES_PRX BIT(10)
+#define REG_CTR_PRES_ENORM BIT(11)
+#define REG_CTR_PRES_EFD BIT(12)
+
+/* FILTER_A_MASK registers */
+#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0)
+
+/* FILTER_A_VAL registers */
+#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0)
+
+/* FILTER_B_MASK registers */
+#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0)
+
+/* FILTER_B_VAL registers */
+#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0)
+
+/* FILTER_C_MASK registers */
+#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0)
+
+/* FILTER_C_VAL registers */
+#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_LOW registers */
+#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_HIGH registers */
+#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0)
+
+/* FILTER_CONTROL FILTER_STATUS registers */
+#define REG_FILTER_CONTROL_FANB BIT(0)
+#define REG_FILTER_CONTROL_FANE BIT(1)
+#define REG_FILTER_CONTROL_FAFB BIT(2)
+#define REG_FILTER_CONTROL_FAFE BIT(3)
+#define REG_FILTER_CONTROL_FBNB BIT(4)
+#define REG_FILTER_CONTROL_FBNE BIT(5)
+#define REG_FILTER_CONTROL_FBFB BIT(6)
+#define REG_FILTER_CONTROL_FBFE BIT(7)
+#define REG_FILTER_CONTROL_FCNB BIT(8)
+#define REG_FILTER_CONTROL_FCNE BIT(9)
+#define REG_FILTER_CONTROL_FCFB BIT(10)
+#define REG_FILTER_CONTROL_FCFE BIT(11)
+#define REG_FILTER_CONTROL_FRNB BIT(12)
+#define REG_FILTER_CONTROL_FRNE BIT(13)
+#define REG_FILTER_CONTROL_FRFB BIT(14)
+#define REG_FILTER_CONTROL_FRFE BIT(15)
+#define REG_FILTER_CONTROL_SFA BIT(16)
+#define REG_FILTER_CONTROL_SFB BIT(17)
+#define REG_FILTER_CONTROL_SFC BIT(18)
+#define REG_FILTER_CONTROL_SFR BIT(19)
+
+/* RX_MEM_INFO registers */
+#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0)
+#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16)
+
+/* RX_POINTERS registers */
+#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0)
+#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16)
+
+/* RX_STATUS RX_SETTINGS registers */
+#define REG_RX_STATUS_RXE BIT(0)
+#define REG_RX_STATUS_RXF BIT(1)
+#define REG_RX_STATUS_RXMOF BIT(2)
+#define REG_RX_STATUS_RXFRC GENMASK(14, 4)
+#define REG_RX_STATUS_RTSOP BIT(16)
+
+/* RX_DATA registers */
+#define REG_RX_DATA_RX_DATA GENMASK(31, 0)
+
+/* TX_STATUS registers */
+#define REG_TX_STATUS_TX1S GENMASK(3, 0)
+#define REG_TX_STATUS_TX2S GENMASK(7, 4)
+#define REG_TX_STATUS_TX3S GENMASK(11, 8)
+#define REG_TX_STATUS_TX4S GENMASK(15, 12)
+
+/* TX_COMMAND registers */
+#define REG_TX_COMMAND_TXCE BIT(0)
+#define REG_TX_COMMAND_TXCR BIT(1)
+#define REG_TX_COMMAND_TXCA BIT(2)
+#define REG_TX_COMMAND_TXB1 BIT(8)
+#define REG_TX_COMMAND_TXB2 BIT(9)
+#define REG_TX_COMMAND_TXB3 BIT(10)
+#define REG_TX_COMMAND_TXB4 BIT(11)
+
+/* TX_PRIORITY registers */
+#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0)
+#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4)
+#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8)
+#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12)
+
+/* ERR_CAPT ALC registers */
+#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0)
+#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5)
+#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16)
+#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21)
+
+/* TRV_DELAY SSP_CFG registers */
+#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0)
+#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16)
+#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24)
+
+/* RX_FR_CTR registers */
+#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0)
+
+/* TX_FR_CTR registers */
+#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0)
+
+/* DEBUG_REGISTER registers */
+#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0)
+#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3)
+#define REG_DEBUG_REGISTER_PC_ARB BIT(6)
+#define REG_DEBUG_REGISTER_PC_CON BIT(7)
+#define REG_DEBUG_REGISTER_PC_DAT BIT(8)
+#define REG_DEBUG_REGISTER_PC_STC BIT(9)
+#define REG_DEBUG_REGISTER_PC_CRC BIT(10)
+#define REG_DEBUG_REGISTER_PC_CRCD BIT(11)
+#define REG_DEBUG_REGISTER_PC_ACK BIT(12)
+#define REG_DEBUG_REGISTER_PC_ACKD BIT(13)
+#define REG_DEBUG_REGISTER_PC_EOF BIT(14)
+#define REG_DEBUG_REGISTER_PC_INT BIT(15)
+#define REG_DEBUG_REGISTER_PC_SUSP BIT(16)
+#define REG_DEBUG_REGISTER_PC_OVR BIT(17)
+#define REG_DEBUG_REGISTER_PC_SOF BIT(18)
+
+/* YOLO_REG registers */
+#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0)
+
+/* TIMESTAMP_LOW registers */
+#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0)
+
+/* TIMESTAMP_HIGH registers */
+#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c
new file mode 100644
index 000000000000..c37a42480533
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ctucanfd.h"
+
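+/* Fallback definitions for kernel headers that do not provide them yet */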
+#ifndef PCI_DEVICE_DATA
+#define PCI_DEVICE_DATA(vend, dev, data) \
+.vendor = PCI_VENDOR_ID_##vend, \
+.device = PCI_DEVICE_ID_##vend##_##dev, \
+.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+.driver_data = (kernel_ulong_t)(data)
+#endif
+
+#ifndef PCI_VENDOR_ID_TEDIA
+#define PCI_VENDOR_ID_TEDIA 0x1760
+#endif
+
+#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21
+#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00
+#endif
+
+#define CTUCAN_BAR0_CTUCAN_ID 0x0000
+#define CTUCAN_BAR0_CRA_BASE 0x4000
+#define CYCLONE_IV_CRA_A2P_IE (0x0050)
+
+#define CTUCAN_WITHOUT_CTUCAN_ID 0
+#define CTUCAN_WITH_CTUCAN_ID 1
+
+static bool use_msi = true;
+module_param(use_msi, bool, 0444);
+MODULE_PARM_DESC(use_msi, "Use MSI interrupts for the PCIe implementation. Default: 1 (yes)");
+
+static bool pci_use_second = true;
+module_param(pci_use_second, bool, 0444);
+MODULE_PARM_DESC(pci_use_second, "Use the second CAN core on PCIe card. Default: 1 (yes)");
+
+struct ctucan_pci_board_data {
+ void __iomem *bar0_base;
+ void __iomem *cra_base;
+ void __iomem *bar1_base;
+ struct list_head ndev_list_head;
+ int use_msi;
+};
+
+static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev)
+{
+ return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev);
+}
+
+static void ctucan_pci_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ list_add(&priv->peers_on_pdev, &bdata->ndev_list_head);
+ priv->irq_flags = IRQF_SHARED;
+}
+
+/**
+ * ctucan_pci_probe - PCI registration call
+ * @pdev: Handle to the pci device structure
+ * @ent: Pointer to the entry from ctucan_pci_tbl
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long driver_data = ent->driver_data;
+ struct ctucan_pci_board_data *bdata;
+ void __iomem *addr;
+ void __iomem *cra_addr;
+ void __iomem *bar0_base;
+ u32 cra_a2p_ie;
+ u32 ctucan_id = 0;
+ int ret;
+ unsigned int ntxbufs;
+ unsigned int num_cores = 1;
+ unsigned int core_i = 0;
+ int irq;
+ int msi_ok = 0;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device FAILED\n");
+ goto err;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(dev, "pci_request_regions FAILED\n");
+ goto err_disable_device;
+ }
+
+ if (use_msi) {
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ dev_info(dev, "MSI enabled\n");
+ pci_set_master(pdev);
+ msi_ok = 1;
+ }
+ }
+
+ dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 0),
+ (long long)pci_resource_len(pdev, 0));
+
+ dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 1),
+ (long long)pci_resource_len(pdev, 1));
+
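+	/* BAR 1 contains the CTU CAN FD core registers */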
+ addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!addr) {
+ dev_err(dev, "PCI BAR 1 cannot be mapped\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Cyclone IV PCI Express Control Registers Area */
+ bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!bar0_base) {
+ dev_err(dev, "PCI BAR 0 cannot be mapped\n");
+ ret = -EIO;
+ goto err_pci_iounmap_bar1;
+ }
+
+ if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) {
+ cra_addr = bar0_base;
+ num_cores = 2;
+ } else {
+ cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE;
+ ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID);
+ dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id);
+ num_cores = ctucan_id & 0xf;
+ }
+
+ irq = pdev->irq;
+
+ ntxbufs = 4;
+
+ bdata = kzalloc(sizeof(*bdata), GFP_KERNEL);
+ if (!bdata) {
+ ret = -ENOMEM;
+ goto err_pci_iounmap_bar0;
+ }
+
+ INIT_LIST_HEAD(&bdata->ndev_list_head);
+ bdata->bar0_base = bar0_base;
+ bdata->cra_base = cra_addr;
+ bdata->bar1_base = addr;
+ bdata->use_msi = msi_ok;
+
+ pci_set_drvdata(pdev, bdata);
+
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0)
+ goto err_free_board;
+
+ core_i++;
+
+ while (pci_use_second && (core_i < num_cores)) {
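+		/* Each additional core's registers follow at a 0x4000 offset in BAR 1 */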
+ addr += 0x4000;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0) {
+ dev_info(dev, "CTU CAN FD core %d initialization failed\n",
+ core_i);
+ break;
+ }
+ core_i++;
+ }
+
+ /* enable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+ cra_a2p_ie |= 1;
+ iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+
+ return 0;
+
+err_free_board:
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+err_pci_iounmap_bar0:
+	pci_iounmap(pdev, bar0_base);
+err_pci_iounmap_bar1:
+ pci_iounmap(pdev, addr);
+err_release_regions:
+ if (msi_ok) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
+
+/**
+ * ctucan_pci_remove - Unregister the device and release its resources
+ * @pdev: Handle to the pci device structure
+ *
+ * This function frees all the resources allocated to the device.
+ */
+static void ctucan_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ctucan_priv *priv = NULL;
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+	dev_dbg(&pdev->dev, "ctucan_remove\n");
+
+ if (!bdata) {
+ dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
+ return;
+ }
+
+ /* disable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ if (bdata->cra_base)
+ iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);
+
+ while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv,
+ peers_on_pdev)) != NULL) {
+ ndev = priv->can.dev;
+
+ unregister_candev(ndev);
+
+ netif_napi_del(&priv->napi);
+
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ }
+
+ pci_iounmap(pdev, bdata->bar1_base);
+
+ if (bdata->use_msi) {
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ pci_iounmap(pdev, bdata->bar0_base);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);
+
+static const struct pci_device_id ctucan_pci_tbl[] = {
+ {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21,
+ CTUCAN_WITH_CTUCAN_ID)},
+ {},
+};
+
+static struct pci_driver ctucan_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ctucan_pci_tbl,
+ .probe = ctucan_pci_probe,
+ .remove = ctucan_pci_remove,
+ .driver.pm = &ctucan_pci_pm_ops,
+};
+
+module_pci_driver(ctucan_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
new file mode 100644
index 000000000000..5e4806068662
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+
+#define DRV_NAME "ctucanfd"
+
+static void ctucan_platform_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+
+ platform_set_drvdata(pdev, ndev);
+}
+
+/**
+ * ctucan_platform_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_platform_probe(struct platform_device *pdev)
+{
+ struct resource *res; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ int ret;
+ unsigned int ntxbufs;
+ int irq;
+
+ /* Get the virtual base address for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr)) {
+ dev_err(dev, "Cannot remap address.\n");
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot find interrupt.\n");
+ ret = irq;
+ goto err;
+ }
+
+	/* The number of TX buffers might change in future HW revisions. If so,
+	 * it will be passed as a property via the device tree.
+	 */
+ ntxbufs = 4;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
+ 1, ctucan_platform_set_drvdata);
+
+ if (ret < 0)
+ platform_set_drvdata(pdev, NULL);
+
+err:
+ return ret;
+}
+
+/**
+ * ctucan_platform_remove - Unregister the device and release its resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int ctucan_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "ctucan_remove");
+
+ unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume);
+
+/* Match table for OF platform binding */
+static const struct of_device_id ctucan_of_match[] = {
+ { .compatible = "ctu,ctucanfd-2", },
+ { .compatible = "ctu,ctucanfd", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, ctucan_of_match);
+
+static struct platform_driver ctucanfd_driver = {
+ .probe = ctucan_platform_probe,
+ .remove = ctucan_platform_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &ctucan_platform_pm_ops,
+ .of_match_table = ctucan_of_match,
+ },
+};
+
+module_platform_driver(ctucanfd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek");
+MODULE_DESCRIPTION("CTU CAN FD for platform");
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 2103bcca9012..c1e76f0a5064 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -116,7 +116,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
can_update_sample_point(btc, sample_point_nominal, tseg / 2,
&tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
+ if (sample_point_error >= best_sample_point_error)
continue;
best_sample_point_error = sample_point_error;
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 7f80d8e1e750..6d0dc18c03e7 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -221,7 +221,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
@@ -240,7 +240,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
return 0;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp,
@@ -256,7 +256,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
if (!skb)
return 0;
- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 74d7fcbfd065..fe9bda0f5ec4 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -723,11 +723,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = __flexcan_get_berr_counter(dev, bec);
@@ -845,7 +843,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -892,7 +890,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -1700,11 +1698,9 @@ static int flexcan_open(struct net_device *dev)
return -EINVAL;
}
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = open_candev(dev);
if (err)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b3b5bc1c803b..2779bba390f2 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -464,7 +464,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
struct net_device_stats *stats = &cdev->net->stats;
int err;
- err = can_rx_offload_queue_sorted(&cdev->offload, skb,
+ err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
timestamp);
if (err)
stats->rx_fifo_errors++;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index de4ddf79ba9b..65ba6697bd7d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <linux/clk.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 110071b26921..4b2f9cb17fc3 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -107,7 +107,7 @@ config CAN_TSCAN1
depends on ISA
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ https://www.embeddedts.com/products/TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 3dbba8d61afb..f3862bed3d40 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -5,10 +5,9 @@
* Copyright 2010 Andre B. Oliveira
*/
-/*
- * References:
- * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
- * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+/* References:
+ * - Getting started with TS-CAN1, Technologic Systems, Feb 2022
+ * https://docs.embeddedts.com/TS-CAN1
*/
#include <linux/init.h>
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index f9dd8fdba12b..b21252390216 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -37,6 +37,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
.model = MCP251XFD_MODEL_MCP2518FD,
};
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251863,
+};
+
/* Autodetect model, start with CRC enabled. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
@@ -75,6 +81,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251863:
+ return "MCP251863";
case MCP251XFD_MODEL_MCP251XFD:
return "MCP251xFD";
}
@@ -916,7 +924,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1021,7 +1029,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
return 0;
mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1094,7 +1102,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1259,7 +1267,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
* - for mcp2518fd: offset not 0 or 1
*/
if (chip_tx_tail != tx_tail ||
- !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) ||
+ mcp251xfd_is_251863(priv))))) {
netdev_err(priv->ndev,
"ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
@@ -1697,7 +1706,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
else
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
- if (!mcp251xfd_is_251X(priv) &&
+ if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
netdev_info(ndev,
"Detected %s, but firmware specifies a %s. Fixing up.\n",
@@ -1930,6 +1939,9 @@ static const struct of_device_id mcp251xfd_of_match[] = {
.compatible = "microchip,mcp2518fd",
.data = &mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .compatible = "microchip,mcp251863",
+ .data = &mcp251xfd_devtype_data_mcp251863,
+ }, {
.compatible = "microchip,mcp251xfd",
.data = &mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -1946,6 +1958,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = {
.name = "mcp2518fd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .name = "mcp251863",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863,
+ }, {
.name = "mcp251xfd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
}, {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index d09f7fbf2ba7..ced8d9c81f8c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -173,7 +173,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
}
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
stats->rx_fifo_errors++;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 9cb6b5ad8dda..1d43bccc29bf 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -586,7 +586,8 @@ struct mcp251xfd_regs_status {
enum mcp251xfd_model {
MCP251XFD_MODEL_MCP2517FD = 0x2517,
MCP251XFD_MODEL_MCP2518FD = 0x2518,
- MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+ MCP251XFD_MODEL_MCP251863 = 0x251863,
+ MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */
};
struct mcp251xfd_devtype_data {
@@ -659,12 +660,13 @@ struct mcp251xfd_priv {
static inline bool \
mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
{ \
- return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \
}
-MCP251XFD_IS(2517);
-MCP251XFD_IS(2518);
-MCP251XFD_IS(251X);
+MCP251XFD_IS(2517FD);
+MCP251XFD_IS(2518FD);
+MCP251XFD_IS(251863);
+MCP251XFD_IS(251XFD);
static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
{
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index ff31b993ab17..bb3f2e3b004c 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -633,7 +633,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb,
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
@@ -668,7 +668,7 @@ static void ti_hecc_change_state(struct net_device *ndev,
}
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index e562c5ab1149..43f0c6a064ba 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -239,7 +239,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -265,7 +265,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index b2752978cb09..f91deea9368e 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1027,40 +1027,7 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p;
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
-
- p = &dev->ports[port];
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
+ ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
}
static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index d74defcd86b4..4109433b6b6c 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -160,9 +160,6 @@
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
#define PORT_BACK_PRESSURE BIT(3)
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
#define REG_PORT_1_CTRL_3 0x13
#define REG_PORT_2_CTRL_3 0x23
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 8222c8a6c5ec..48c90e4cda30 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -65,100 +65,6 @@ static const struct {
{ 0x83, "tx_discards" },
};
-struct ksz9477_stats_raw {
- u64 rx_hi;
- u64 rx_undersize;
- u64 rx_fragments;
- u64 rx_oversize;
- u64 rx_jabbers;
- u64 rx_symbol_err;
- u64 rx_crc_err;
- u64 rx_align_err;
- u64 rx_mac_ctrl;
- u64 rx_pause;
- u64 rx_bcast;
- u64 rx_mcast;
- u64 rx_ucast;
- u64 rx_64_or_less;
- u64 rx_65_127;
- u64 rx_128_255;
- u64 rx_256_511;
- u64 rx_512_1023;
- u64 rx_1024_1522;
- u64 rx_1523_2000;
- u64 rx_2001;
- u64 tx_hi;
- u64 tx_late_col;
- u64 tx_pause;
- u64 tx_bcast;
- u64 tx_mcast;
- u64 tx_ucast;
- u64 tx_deferred;
- u64 tx_total_col;
- u64 tx_exc_col;
- u64 tx_single_col;
- u64 tx_mult_col;
- u64 rx_total;
- u64 tx_total;
- u64 rx_discards;
- u64 tx_discards;
-};
-
-static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port)
-{
- struct rtnl_link_stats64 *stats;
- struct ksz9477_stats_raw *raw;
- struct ksz_port_mib *mib;
-
- mib = &dev->ports[port].mib;
- stats = &mib->stats64;
- raw = (struct ksz9477_stats_raw *)mib->counters;
-
- spin_lock(&mib->stats64_lock);
-
- stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
- stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
-
- /* HW counters are counting bytes + FCS which is not acceptable
- * for rtnl_link_stats64 interface
- */
- stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
- stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
-
- stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
- raw->rx_oversize;
-
- stats->rx_crc_errors = raw->rx_crc_err;
- stats->rx_frame_errors = raw->rx_align_err;
- stats->rx_dropped = raw->rx_discards;
- stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_dropped;
-
- stats->tx_window_errors = raw->tx_late_col;
- stats->tx_fifo_errors = raw->tx_discards;
- stats->tx_aborted_errors = raw->tx_exc_col;
- stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
- stats->tx_aborted_errors;
-
- stats->multicast = raw->rx_mcast;
- stats->collisions = raw->tx_total_col;
-
- spin_unlock(&mib->stats64_lock);
-}
-
-static void ksz9477_get_stats64(struct dsa_switch *ds, int port,
- struct rtnl_link_stats64 *s)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz_port_mib *mib;
-
- mib = &dev->ports[port].mib;
-
- spin_lock(&mib->stats64_lock);
- memcpy(s, &mib->stats64, sizeof(*s));
- spin_unlock(&mib->stats64_lock);
-}
-
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -517,38 +423,7 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
+ ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
}
static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1493,7 +1368,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
.port_mirror_del = ksz9477_port_mirror_del,
- .get_stats64 = ksz9477_get_stats64,
+ .get_stats64 = ksz_get_stats64,
.port_change_mtu = ksz9477_change_mtu,
.port_max_mtu = ksz9477_max_mtu,
};
@@ -1684,7 +1559,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
- .r_mib_stat64 = ksz9477_r_mib_stats64,
+ .r_mib_stat64 = ksz_r_mib_stats64,
.freeze_mib = ksz9477_freeze_mib,
.port_init_cnt = ksz9477_port_init_cnt,
.shutdown = ksz9477_reset_switch,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 0bd58467181f..7a2c8d4767af 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1586,10 +1586,6 @@
#define REG_PORT_LUE_MSTP_STATE 0x0B04
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
-
/* C - PTP */
#define REG_PTP_PORT_RX_DELAY__2 0x0C00
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8014b18d9391..10f127b09e58 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -20,6 +20,102 @@
#include "ksz_common.h"
+struct ksz_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+void ksz_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct rtnl_link_stats64 *stats;
+ struct ksz_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ raw = (struct ksz_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
+
+	/* HW counters count bytes + FCS, which is not acceptable
+	 * for the rtnl_link_stats64 interface.
+	 */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ spin_unlock(&mib->stats64_lock);
+}
+EXPORT_SYMBOL_GPL(ksz_r_mib_stats64);
+
+void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+EXPORT_SYMBOL_GPL(ksz_get_stats64);
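The cast of mib->counters to struct ksz_stats_raw above is only valid if the
hardware MIB counters are read back in exactly the order the struct declares
its 36 u64 fields. A minimal sketch of a build-time guard for that assumption
(hypothetical, not part of this patch):

/* Hypothetical guard: ksz_stats_raw must stay in lockstep with the
 * per-port MIB counter layout for the cast in ksz_r_mib_stats64().
 * BUILD_BUG_ON comes from <linux/build_bug.h>.
 */
static inline void ksz_stats_raw_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct ksz_stats_raw) != 36 * sizeof(u64));
}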
+
void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -372,6 +468,46 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL_GPL(ksz_enable_port);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state, int reg)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ u8 data;
+
+ ksz_pread8(dev, port, reg, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, reg, data);
+
+ p = &dev->ports[port];
+ p->stp_state = state;
+
+ ksz_update_port_member(dev, port);
+}
+EXPORT_SYMBOL_GPL(ksz_port_stp_state_set);
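Because the STP bit definitions now live in ksz_common.h, any chip driver can
reuse this helper by passing its own STP control register. A sketch of what
another, hypothetical chip wrapper would look like, mirroring the real
ksz9477 wrapper above:

/* Hypothetical wrapper for another KSZ variant: only the per-chip
 * register offset differs; the bit manipulation is fully shared.
 */
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	/* P_STP_CTRL here stands for that chip's own STP control register */
	ksz_port_stp_state_set(ds, port, state, P_STP_CTRL);
}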
+
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
struct dsa_switch *ds;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 485d4a948c38..28cda79b090f 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -151,6 +151,9 @@ int ksz9477_switch_register(struct ksz_device *dev);
void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
+void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s);
/* Common DSA access functions */
@@ -165,6 +168,8 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state, int reg);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
@@ -292,6 +297,11 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+/* STP State Defines */
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 19f0035d4410..e55d962fef9f 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -24,6 +24,11 @@
#include "mt7530.h"
+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mt753x_pcs, pcs);
+}
+
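pcs_to_mt753x_pcs() is the standard container_of() pattern: given a pointer
to an embedded member, recover the enclosing structure. A self-contained
sketch with made-up names:

/* container_of() is provided by <linux/container_of.h> */
struct member_type {
	int dummy;
};

struct wrapper {
	int port;
	struct member_type member;	/* embedded by value, not a pointer */
};

static struct wrapper *to_wrapper(struct member_type *m)
{
	/* Computes (struct wrapper *)((char *)m - offsetof(struct wrapper, member)) */
	return container_of(m, struct wrapper, member);
}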
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0x00, "TxDrop"),
@@ -2389,35 +2394,30 @@ mt7531_setup(struct dsa_switch *ds)
return 0;
}
-static bool
-mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
-
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ config->supported_interfaces);
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
}
static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
@@ -2425,42 +2425,35 @@ static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}
-static bool
-mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port))
- return phy_interface_mode_is_rgmii(state->interface);
+ if (mt7531_is_rgmii_port(priv, port)) {
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
fallthrough;
+
case 6: /* 1st cpu port supports sgmii/8023z only */
- if (state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface))
- return false;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_2500FD;
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
-}
-
-static bool
-mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->phy_mode_supported(ds, port, state);
}
static int
@@ -2533,30 +2526,11 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
return 0;
}
-static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
- unsigned long *supported)
-{
- /* Port5 supports ethier RGMII or SGMII.
- * Port6 supports SGMII only.
- */
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
- phylink_set(supported, 2500baseX_Full);
- phylink_set(supported, 2500baseT_Full);
- }
-}
-
-static void
-mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
unsigned int val;
/* For adjusting speed and duplex of SGMII force mode. */
@@ -2582,6 +2556,9 @@ mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
	/* MT7531 SGMII 1G force mode can only work in full duplex mode,
	 * no matter whether MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
+ *
+ * The speed check is unnecessary as the MAC capabilities apply
+ * this restriction. --rmk
*/
if ((speed == SPEED_10 || speed == SPEED_100) &&
duplex != DUPLEX_FULL)
@@ -2657,9 +2634,10 @@ static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
return 0;
}
-static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+static void mt7531_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 val;
/* Only restart AN when AN is enabled */
@@ -2716,6 +2694,24 @@ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return priv->info->mac_port_config(ds, port, mode, state->interface);
}
+static struct phylink_pcs *
+mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &priv->pcs[port].pcs;
+
+ default:
+ return NULL;
+ }
+}
+
static void
mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
@@ -2723,9 +2719,6 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct mt7530_priv *priv = ds->priv;
u32 mcr_cur, mcr_new;
- if (!mt753x_phy_mode_supported(ds, port, state))
- goto unsupported;
-
switch (port) {
case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII)
@@ -2780,17 +2773,6 @@ unsupported:
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void
-mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_an_restart)
- return;
-
- priv->info->mac_pcs_an_restart(ds, port);
-}
-
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -2800,16 +2782,13 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_link_up)
- return;
-
- priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+ if (pcs->ops->pcs_link_up)
+ pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
}
static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -2822,8 +2801,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
- mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
-
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
/* MT753x MAC works in 1G full duplex mode for all up-clocked
@@ -2903,81 +2880,51 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
return ret;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
+ interface, speed, DUPLEX_FULL);
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
speed, DUPLEX_FULL, true, true);
return 0;
}
-static void
-mt7530_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
-}
-
-static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7531_sgmii_validate(priv, port, supported);
-}
-
-static void
-mt753x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mt7530_priv *priv = ds->priv;
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !mt753x_phy_mode_supported(ds, port, state)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
- !phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, Autoneg);
- }
-
- /* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
- phylink_set(mask, 1000baseT_Full);
+ /* This switch only supports full-duplex at 1Gbps */
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- priv->info->mac_port_validate(ds, port, mask);
+	/* This driver does not make use of the speed, duplex, pause or
+	 * advertisement settings in its mac_config, so it is safe to mark
+	 * this driver as non-legacy.
+	 */
+ config->legacy_pre_march2020 = false;
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ priv->info->mac_port_get_caps(ds, port, config);
+}
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+static int mt753x_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* Autonegotiation is not supported in TRGMII nor 802.3z modes */
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ phylink_clear(supported, Autoneg);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ return 0;
}
-static int
-mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 pmsr;
- if (port < 0 || port >= MT7530_NUM_PORTS)
- return -EINVAL;
-
pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
state->link = (pmsr & PMSR_LINK);
@@ -3004,8 +2951,6 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
state->pause |= MLO_PAUSE_RX;
if (pmsr & PMSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
static int
@@ -3047,33 +2992,59 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
return 0;
}
-static int
-mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
if (state->interface == PHY_INTERFACE_MODE_SGMII)
- return mt7531_sgmii_pcs_get_state_an(priv, port, state);
-
- return -EOPNOTSUPP;
+ mt7531_sgmii_pcs_get_state_an(priv, port, state);
+ else
+ state->link = false;
}
-static int
-mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
- return priv->info->mac_port_get_state(ds, port, state);
+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
+{
}
+static const struct phylink_pcs_ops mt7530_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7530_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7530_pcs_an_restart,
+};
+
+static const struct phylink_pcs_ops mt7531_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7531_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7531_pcs_an_restart,
+ .pcs_link_up = mt7531_pcs_link_up,
+};
+
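The empty mt7530_pcs_an_restart() above is deliberate: phylink may invoke the
pcs_an_restart hook unconditionally once a PCS is attached, and the MT7530's
TRGMII link has no autonegotiation to restart, so a no-op is the correct
implementation for that chip.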
static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int ret = priv->info->sw_setup(ds);
+ int i, ret;
+
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+ ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3144,10 +3115,9 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
- .phylink_validate = mt753x_phylink_validate,
- .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_get_caps = mt753x_phylink_get_caps,
+ .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
.phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
.phylink_mac_link_down = mt753x_phylink_mac_link_down,
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
@@ -3157,39 +3127,34 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
static const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7530] = {
.id = ID_MT7530,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
.phy_read = mt7530_phy_read,
.phy_write = mt7530_phy_write,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7531] = {
.id = ID_MT7531,
+ .pcs_ops = &mt7531_pcs_ops,
.sw_setup = mt7531_setup,
.phy_read = mt7531_ind_phy_read,
.phy_write = mt7531_ind_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
- .phy_mode_supported = mt7531_phy_mode_supported,
- .mac_port_validate = mt7531_mac_port_validate,
- .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
- .mac_pcs_an_restart = mt7531_sgmii_restart_an,
- .mac_pcs_link_up = mt7531_sgmii_link_up_force,
},
};
@@ -3246,9 +3211,8 @@ mt7530_probe(struct mdio_device *mdiodev)
*/
if (!priv->info->sw_setup || !priv->info->pad_setup ||
!priv->info->phy_read || !priv->info->phy_write ||
- !priv->info->phy_mode_supported ||
- !priv->info->mac_port_validate ||
- !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ !priv->info->mac_port_get_caps ||
+ !priv->info->mac_port_config)
return -EINVAL;
priv->id = priv->info->id;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 91508e2feef9..71e36b69b96d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -741,6 +741,12 @@ static const char *p5_intf_modes(unsigned int p5_interface)
struct mt7530_priv;
+struct mt753x_pcs {
+ struct phylink_pcs pcs;
+ struct mt7530_priv *priv;
+ int port;
+};
+
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
@@ -752,36 +758,27 @@ struct mt7530_priv;
* port
 * @mac_port_validate: Holding the way to set an additional validate type for a
 * certain MAC port
- * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain
- * MAC port
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
- * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a
- * certain MAC port
- * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs
- * of the certain MAC port
*/
struct mt753x_info {
enum mt753x_id id;
+ const struct phylink_pcs_ops *pcs_ops;
+
int (*sw_setup)(struct dsa_switch *ds);
int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
- bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state);
+ void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_get_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
int (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
- void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
- void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -823,6 +820,7 @@ struct mt7530_priv {
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
+ struct mt753x_pcs pcs[MT7530_NUM_PORTS];
	/* protect register accesses among processes */
struct mutex reg_mutex;
int irq;
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index d3ed0a7f8077..2727d3169c25 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1287,87 +1287,71 @@ qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
if (ret >= 0)
return ret;
- return qca8k_mdio_read(priv, phy, regnum);
+ ret = qca8k_mdio_read(priv, phy, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
}
static int
-qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- /* Use mdio Ethernet when available, fallback to legacy one on error */
- ret = qca8k_phy_eth_command(priv, false, port, regnum, 0);
- if (!ret)
- return ret;
-
- return qca8k_mdio_write(priv, port, regnum, data);
+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
}
static int
-qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- /* Use mdio Ethernet when available, fallback to legacy one on error */
- ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
- if (ret >= 0)
- return ret;
-
- ret = qca8k_mdio_read(priv, port, regnum);
-
- if (ret < 0)
- return 0xffff;
-
- return ret;
+ return qca8k_internal_mdio_read(slave_bus, port, regnum);
}
static int
-qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
+qca8k_mdio_register(struct qca8k_priv *priv)
{
struct dsa_switch *ds = priv->ds;
+ struct device_node *mdio;
	struct mii_bus *bus;
+	int err;
bus = devm_mdiobus_alloc(ds->dev);
-
if (!bus)
return -ENOMEM;
bus->priv = (void *)priv;
- bus->name = "qca8k slave mii";
- bus->read = qca8k_internal_mdio_read;
- bus->write = qca8k_internal_mdio_write;
- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
- ds->index);
-
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+ ds->dst->index, ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
-
ds->slave_mii_bus = bus;
- return devm_of_mdiobus_register(priv->dev, bus, mdio);
+	/* Check if the devicetree declares the port:phy mapping */
+	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+	if (of_device_is_available(mdio)) {
+		bus->name = "qca8k slave mii";
+		bus->read = qca8k_internal_mdio_read;
+		bus->write = qca8k_internal_mdio_write;
+		err = devm_of_mdiobus_register(priv->dev, bus, mdio);
+		of_node_put(mdio);
+		return err;
+	}
+	of_node_put(mdio);
+
+	/* If a mapping can't be found, the legacy mapping is used via
+	 * the qca8k_port_to_phy() helper.
+	 */
+	bus->name = "qca8k-legacy slave mii";
+	bus->read = qca8k_legacy_mdio_read;
+	bus->write = qca8k_legacy_mdio_write;
+	return devm_mdiobus_register(priv->dev, bus);
}
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
- struct device_node *ports, *port, *mdio;
+ struct device_node *ports, *port;
phy_interface_t mode;
int err;
@@ -1429,24 +1413,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
QCA8K_MDIO_MASTER_EN);
}
- /* Check if the devicetree declare the port:phy mapping */
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
- err = qca8k_mdio_register(priv, mdio);
- if (err)
- of_node_put(mdio);
-
- return err;
- }
-
- /* If a mapping can't be found the legacy mapping is used,
- * using the qca8k_port_to_phy function
- */
- priv->legacy_phy_port_mapping = true;
- priv->ops.phy_read = qca8k_phy_read;
- priv->ops.phy_write = qca8k_phy_write;
-
- return 0;
+ return qca8k_mdio_register(priv);
}
static int
@@ -2346,7 +2313,7 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 1);
- priv->port_sts[port].enabled = 1;
+ priv->port_enabled_map |= BIT(port);
if (dsa_is_user_port(ds, port))
phy_support_asym_pause(phy);
@@ -2360,23 +2327,25 @@ qca8k_port_disable(struct dsa_switch *ds, int port)
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 0);
- priv->port_sts[port].enabled = 0;
+ priv->port_enabled_map &= ~BIT(port);
}
static int
qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct qca8k_priv *priv = ds->priv;
- int i, mtu = 0;
- priv->port_mtu[port] = new_mtu;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[i] > mtu)
- mtu = priv->port_mtu[i];
+	/* We only have a general MTU setting.
+	 * DSA always sets the CPU port's MTU to the largest MTU of the
+	 * slave ports.
+	 * Setting the MTU on the CPU port alone is therefore sufficient
+	 * to program a correct value for every port.
+	 */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
/* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
+ return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
}
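For the default 1500-byte MTU this writes 1500 + 14 (ETH_HLEN) + 4
(ETH_FCS_LEN) = 1518 to QCA8K_MAX_FRAME_SIZE, the same value the removed
per-port tracking converged on when every port sat at ETH_DATA_LEN.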
static int
@@ -3033,16 +3002,6 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
-
- /* Set initial MTU for every port.
- * We have only have a general MTU setting. So track
- * every port and set the max across all port.
- * Set per port MTU to 1500 as the MTU change function
- * will add the overhead and if its set to 1518 then it
- * will apply the overhead again and we will end up with
- * MTU of 1536 instead of 1518
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -3202,8 +3161,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
- priv->ops = qca8k_switch_ops;
- priv->ds->ops = &priv->ops;
+ priv->ds->ops = &qca8k_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
@@ -3243,13 +3201,16 @@ static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
- int i;
+ int port;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (!priv->port_sts[i].enabled)
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Do not enable on resume if the port was
+ * disabled before.
+ */
+ if (!(priv->port_enabled_map & BIT(port)))
continue;
- qca8k_port_set_status(priv, i, enable);
+ qca8k_port_set_status(priv, port, enable);
}
}
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index f375627174c8..04408e11402a 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -324,10 +324,6 @@ enum qca8k_mid_cmd {
QCA8K_MIB_CAST = 3,
};
-struct ar8xxx_port_status {
- int enabled;
-};
-
struct qca8k_match_data {
u8 id;
bool reduced_package;
@@ -388,17 +384,17 @@ struct qca8k_priv {
u8 mirror_rx;
u8 mirror_tx;
u8 lag_hash_mode;
- bool legacy_phy_port_mapping;
+	/* Each bit corresponds to a port. This switch supports a max of
+	 * 7 ports. A set bit means the port is enabled, a cleared bit
+	 * that it is disabled.
+	 */
+ u8 port_enabled_map;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
- struct dsa_switch_ops ops;
struct gpio_desc *reset_gpio;
- unsigned int port_mtu[QCA8K_NUM_PORTS];
struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
struct qca8k_mgmt_eth_data mgmt_eth_data;
struct qca8k_mib_eth_data mib_eth_data;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 1111d1f33865..557ca8ff9dec 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if ((master_dev->flags & IFF_UP) == IFF_UP) {
/* slave is not a master & not already a slave: */
if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
equalizer_t *eql = netdev_priv(master_dev);
int ret;
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof(*s));
s->dev = slave_dev;
s->priority = srq.priority;
s->priority_bps = srq.priority;
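kzalloc(n, flags) is defined as kmalloc(n, flags | __GFP_ZERO), so dropping
the explicit memset() here is behaviour-preserving.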
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 52b9833fda99..8bcda2cb3a2e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -40,6 +40,7 @@
#define AQ_CFG_RX_HDR_SIZE 256U
#define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a418238f6309..1daecd483b8d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
"%sQueue[%d] AllocFails",
"%sQueue[%d] SkbAllocFails",
"%sQueue[%d] Polls",
+ "%sQueue[%d] PageFlips",
+ "%sQueue[%d] PageReuses",
+ "%sQueue[%d] PageFrees",
+ "%sQueue[%d] XdpAbort",
+ "%sQueue[%d] XdpDrop",
+ "%sQueue[%d] XdpPass",
+ "%sQueue[%d] XdpTx",
+ "%sQueue[%d] XdpInvalid",
+ "%sQueue[%d] XdpRedirect",
};
static const char * const aq_ethtool_queue_tx_stat_names[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e65ce7199dac..88595863d8bc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -14,17 +14,22 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
+#include "aq_vec.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
static const struct net_device_ops aq_ndev_ops;
@@ -126,9 +131,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
int err;
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+ netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+			   new_mtu);
+ return -EOPNOTSUPP;
+ }
+
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
@@ -204,6 +219,25 @@ err_exit:
return err;
}
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+	if (prog && !prog->aux->xdp_has_frags &&
+	    (features & NETIF_F_LRO)) {
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -410,6 +444,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
mqprio->qopt.prio_tc_map);
}
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(ndev);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+		if (ndev->features & NETIF_F_LRO) {
+ netdev_err(ndev,
+ "LRO is not supported with single buffer XDP, disabling\n");
+ ndev->features &= ~NETIF_F_LRO;
+ }
+ }
+
+ need_update = !!aq_nic->xdp_prog != !!prog;
+ if (running && need_update)
+ aq_ndev_close(ndev);
+
+ old_prog = xchg(&aq_nic->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (!old_prog && prog)
+ static_branch_inc(&aq_xdp_locking_key);
+ else if (old_prog && !prog)
+ static_branch_dec(&aq_xdp_locking_key);
+
+ if (running && need_update)
+ return aq_ndev_open(ndev);
+
+ return 0;
+}
+
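The xchg() above swaps the program pointer in atomically, so datapath readers
doing READ_ONCE(aq_nic->xdp_prog) always observe either the old or the new
program, never a torn value; the old program's reference is only dropped
afterwards via bpf_prog_put().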
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -418,10 +502,13 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_fix_features = aq_ndev_fix_features,
.ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
+ .ndo_bpf = aq_xdp,
+ .ndo_xdp_xmit = aq_xdp_xmit,
};
static int __init aq_ndev_init_module(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce73..99870865f66d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -12,6 +12,8 @@
#include "aq_common.h"
#include "aq_nic.h"
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 24d715c28a35..e11cc29d3264 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -569,6 +569,103 @@ err_exit:
return err;
}
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+ struct xdp_frame *xdpf,
+ struct aq_ring_s *ring)
+{
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff;
+ struct skb_shared_info *sinfo;
+ unsigned int frag_count = 0U;
+ unsigned int nr_frags = 0U;
+ unsigned int ret = 0U;
+ u16 total_len;
+
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ total_len = xdpf->len;
+ dx_buff->len = total_len;
+ if (xdp_frame_has_frags(xdpf)) {
+ nr_frags = sinfo->nr_frags;
+ total_len += sinfo->xdp_frags_size;
+ }
+ dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+ goto exit;
+
+ first = dx_buff;
+ dx_buff->len_pkt = total_len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ for (; nr_frags--; ++frag_count) {
+ skb_frag_t *frag = &sinfo->frags[frag_count];
+ unsigned int frag_len = skb_frag_size(frag);
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
+ buff_size, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = NULL;
+ dx_buff->xdpf = xdpf;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->pa)
+ continue;
+ if (unlikely(dx_buff->is_sop))
+ dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+
+exit:
+ return ret;
+}
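On a DMA mapping failure the unwind loop walks back from sw_tail over every
descriptor mapped so far, pairing dma_unmap_single() with the
dma_map_single() used for the head fragment and dma_unmap_page() with the
skb_frag_dma_map() used for the rest.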
+
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
@@ -697,6 +794,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
+ dx_buff->xdpf = NULL;
goto exit;
mapping_error:
@@ -725,6 +823,44 @@ exit:
return ret;
}
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
+ struct net_device *ndev = aq_nic_get_ndev(aq_nic);
+ struct skb_shared_info *sinfo;
+ int cpu = smp_processor_id();
+ int err = NETDEV_TX_BUSY;
+ struct netdev_queue *nq;
+ unsigned int frags = 1;
+
+ if (xdp_frame_has_frags(xdpf)) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ frags += sinfo->nr_frags;
+ }
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX)
+ return err;
+
+ nq = netdev_get_tx_queue(ndev, tx_ring->idx);
+ __netif_tx_lock(nq, cpu);
+
+ aq_ring_update_queue_state(tx_ring);
+
+	/* The above status update may have stopped the queue; check it. */
+ if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+ goto out;
+
+ frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+ if (likely(frags))
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+ frags);
+out:
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 1a7148041e3d..935ba889bd9a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -11,6 +11,8 @@
#define AQ_NIC_H
#include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/bpf.h>
#include "aq_common.h"
#include "aq_rss.h"
@@ -128,6 +130,7 @@ struct aq_nic_s {
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
+ struct bpf_prog *xdp_prog;
struct net_device *ndev;
unsigned int aq_vecs;
unsigned int packet_filter;
@@ -177,6 +180,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 77e76c9efd32..ea740210803f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -7,15 +7,37 @@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
-#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
@@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
rxpage->page = NULL;
}
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
- struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
@@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
- rxpage->pg_off = 0;
+ rxpage->pg_off = rx_ring->page_offset;
return 0;
@@ -58,21 +81,26 @@ err_exit:
return ret;
}
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
- int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
/* One means ring is the only user and can reuse */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
- rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
- if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
- (PAGE_SIZE << order)) {
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
@@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
- rxbuf->rxdata.pg_off = 0;
+ rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
@@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
}
if (!rxbuf->rxdata.page) {
- ret = aq_get_rxpage(&rxbuf->rxdata, order,
- aq_nic_get_dev(self->aq_nic));
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
@@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
@@ -172,11 +200,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
- self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
-
- if (aq_nic_cfg->rxpageorder > self->page_order)
- self->page_order = aq_nic_cfg->rxpageorder;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
+ }
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@ -298,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop && buff->skb)) {
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
-
dev_kfree_skb_any(buff->skb);
- buff->skb = NULL;
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
}
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@ -339,11 +389,159 @@ static void aq_rx_checksum(struct aq_ring_s *self,
__skb_incr_checksum_unnecessary(skb);
}
-#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-int aq_ring_rx_clean(struct aq_ring_s *self,
- struct napi_struct *napi,
- int *work_done,
- int budget)
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
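Per the ndo_xdp_xmit contract this returns the number of frames the driver
accepted; the XDP core frees any frames beyond that count, which is why
NETDEV_TX_BUSY results are only tallied in drop rather than freed here.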
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ goto pass;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+	/* Single buffer XDP program but a multi buffer packet: abort */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+pass:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
+
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_frag_off_set(frag, buff_->rxdata.pg_off);
+ skb_frag_size_set(frag, buff_->len);
+ sinfo->xdp_frags_size += buff_->len;
+ __skb_frag_set_page(frag, buff_->rxdata.page);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
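aq_add_rx_fragment() returns true only when the descriptor chain would
overflow MAX_SKB_FRAGS, which the caller below counts as an aborted packet;
on the false path the xdp_buff owns the frag list and is flagged as
multi-buffer via xdp_buff_set_frags_flag().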
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
bool is_rsc_completed = true;
@@ -449,7 +647,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_add_rx_frag(skb, 0, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff->rxdata.page);
}
@@ -469,7 +667,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
@@ -510,6 +708,149 @@ err_exit:
return err;
}
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to 0 queue */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
+
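Note: aq_ring_rx_clean() dispatches through a static branch, so the non-XDP fast path costs only a patched jump. A minimal sketch of the assumed key management; the real key definition and its flip sites live elsewhere in this series:

    /* Sketch, assuming the key is toggled at XDP attach/detach time. */
    DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);

    static void aq_sketch_set_xdp(struct bpf_prog *prog)
    {
            if (prog)
                    static_branch_inc(&aq_xdp_locking_key);
            else
                    static_branch_dec(&aq_xdp_locking_key);
    }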
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
@@ -529,7 +870,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
@@ -543,9 +883,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
- buff->len = AQ_CFG_RX_FRAME_MAX;
+ buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order);
+ err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
@@ -600,6 +940,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 93659e58f1ce..0a6c34438c1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -11,6 +11,10 @@
#define AQ_RING_H
#include "aq_common.h"
+#include "aq_vec.h"
+
+#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
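Note: these two macros fix the buffer geometry the XDP path relies on; the frame_sz handed to xdp_init_buff() in __aq_ring_xdp_clean() is the sum of the three regions. As a comment-only sketch:

    /*
     * Per-buffer layout assumed by the XDP receive path:
     *
     *   page_offset         frame_max        tail_size
     *   (AQ_XDP_HEADROOM) | (packet data) | (AQ_XDP_TAILROOM)
     *
     * On x86-64 the headroom resolves to 256 bytes (XDP_PACKET_HEADROOM
     * dominates NET_SKB_PAD) and the tailroom to the cache-aligned size of
     * struct skb_shared_info; exact values are config-dependent.
     */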
struct page;
struct aq_nic_cfg_s;
@@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s {
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
+ struct xdp_frame *xdpf;
};
/* TxC */
struct {
@@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s {
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct aq_ring_stats_tx_s {
@@ -132,10 +143,15 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
- unsigned int page_order;
+ u16 page_order;
+ u16 page_offset;
+ u16 frame_max;
+ u16 tail_size;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
+ struct bpf_prog *xdp_prog;
enum atl_ring_type ring_type;
+ struct xdp_rxq_info xdp_rxq;
};
struct aq_ring_param_s {
@@ -175,6 +191,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
+
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
@@ -182,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 6ab1f3212d24..e839e1002ec7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -10,11 +10,6 @@
*/
#include "aq_vec.h"
-#include "aq_nic.h"
-#include "aq_ring.h"
-#include "aq_hw.h"
-
-#include <linux/netdevice.h>
struct aq_vec_s {
const struct aq_hw_ops *aq_hw_ops;
@@ -153,9 +148,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+ if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
idx_ring, aq_nic_cfg);
if (!ring) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
err = -ENOMEM;
goto err_exit;
}
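Note: the registration order above follows the canonical xdp_rxq_info pattern — register the queue info first, then its memory model, and unwind in reverse on failure. Reduced to a hedged fragment (this driver maps both failures to -ENOMEM; the helpers' own error codes could be propagated instead):

    /* Sketch of the register/unwind ordering used above. */
    err = xdp_rxq_info_reg(&rxq, ndev, queue_idx, napi_id);
    if (err < 0)
            goto fail;
    err = xdp_rxq_info_reg_mem_model(&rxq, MEM_TYPE_PAGE_SHARED, NULL);
    if (err < 0) {
            xdp_rxq_info_unreg(&rxq);
            goto fail;
    }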
@@ -300,8 +309,10 @@ void aq_vec_ring_free(struct aq_vec_s *self)
for (i = 0U; self->tx_rings > i; ++i) {
ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- if (i < self->rx_rings)
+ if (i < self->rx_rings) {
+ xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
}
self->tx_rings = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 567f3d4b79a2..78fac609b71d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -13,7 +13,13 @@
#define AQ_VEC_H
#include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
#include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
struct aq_hw_s;
struct aq_hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4625ccb79499..9dfd68f0fda9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759..878a53abec33 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -969,15 +969,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
buff->len =
- rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
if (buff->is_lro) {
/* LRO */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5caa75b41b73..4e9215bce4ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2212,7 +2212,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2381,7 +2381,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2493,7 +2493,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2529,7 +2529,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -6218,7 +6218,7 @@
#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 874fad0a5cf8..0489c1c2e7dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -56,6 +56,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
+#include <linux/align.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -738,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
- *mapping += bp->rx_dma_offset;
return page;
}
@@ -780,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (!page)
return -ENOMEM;
+ mapping += bp->rx_dma_offset;
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
} else {
@@ -840,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- page = rxr->rx_page;
- if (!page) {
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+ if (!page)
+ return -ENOMEM;
+
+ } else {
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
- rxr->rx_page = page;
- rxr->rx_page_offset = 0;
}
- offset = rxr->rx_page_offset;
- rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
- if (rxr->rx_page_offset == PAGE_SIZE)
- rxr->rx_page = NULL;
- else
- get_page(page);
- } else {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- }
- mapping = dma_map_page_attrs(&pdev->dev, page, offset,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- __free_page(page);
- return -EIO;
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
}
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -962,6 +971,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int len = offset_and_len & 0xffff;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+ bp->rx_dma_offset);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bp->rx_dma_offset);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
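Note: bnxt_rx_multi_page_skb() builds the skb directly over the receive page instead of copying. Step by step, as an illustrative comment sketch:

    /*
     * Construction performed above (illustrative):
     *   base = page_address(page);            whole rx page backs the skb
     *   skb  = build_skb(base, size);         skb_shared_info is carved
     *                                         from the tail of that region
     *   skb_mark_for_recycle(skb);            page returns to the page_pool
     *   skb_reserve(skb, bp->rx_dma_offset);  skip NIC/XDP headroom
     *   __skb_put(skb, len);                  expose the received bytes
     */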
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 cons, void *data, u8 *data_ptr,
@@ -984,7 +1026,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -995,6 +1036,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
+ skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1038,22 +1080,24 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct skb_shared_info *shinfo,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ u32 i, total_frag_len = 0;
bool p5_tpa = false;
- u32 i;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
+ skb_frag_t *frag = &shinfo->frags[i];
u16 cons, frag_len;
struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1069,8 +1113,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
+ skb_frag_off_set(frag, cons_rx_buf->offset);
+ skb_frag_size_set(frag, frag_len);
+ __skb_frag_set_page(frag, cons_rx_buf->page);
+ shinfo->nr_frags = i + 1;
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1081,16 +1127,14 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
+ if (xdp && page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
- struct skb_shared_info *shinfo;
unsigned int nr_frags;
- shinfo = skb_shinfo(skb);
nr_frags = --shinfo->nr_frags;
__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
- dev_kfree_skb(skb);
-
cons_rx_buf->page = page;
/* Update prod since possibly some pages have been
@@ -1098,23 +1142,62 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
*/
rxr->rx_agg_prod = prod;
bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
- return NULL;
+ return 0;
}
dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- DMA_FROM_DEVICE,
+ bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- skb->data_len += frag_len;
- skb->len += frag_len;
- skb->truesize += PAGE_SIZE;
-
+ total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 total_frag_len = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
+ agg_bufs, tpa, NULL);
+ if (!total_frag_len) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb->data_len += total_frag_len;
+ skb->len += total_frag_len;
+ skb->truesize += PAGE_SIZE * agg_bufs;
return skb;
}
+static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 total_frag_len = 0;
+
+ if (!xdp_buff_has_frags(xdp))
+ shinfo->nr_frags = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
+ idx, agg_bufs, tpa, xdp);
+ if (total_frag_len) {
+ xdp_buff_set_frags_flag(xdp);
+ shinfo->nr_frags = agg_bufs;
+ shinfo->xdp_frags_size = total_frag_len;
+ }
+ return total_frag_len;
+}
+
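Note: on the consumer side, a multi-buffer xdp_buff's total length is the linear part plus shinfo->xdp_frags_size; the core provides xdp_get_buff_len() for exactly this. A hedged open-coded equivalent:

    /* Sketch: equivalent of xdp_get_buff_len() for a frags-bearing buff. */
    static u32 sketch_xdp_total_len(struct xdp_buff *xdp)
    {
            u32 len = xdp->data_end - xdp->data;

            if (xdp_buff_has_frags(xdp))
                    len += xdp_get_shared_info_from_buff(xdp)->xdp_frags_size;
            return len;
    }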
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 agg_bufs, u32 *raw_cons)
{
@@ -1644,7 +1727,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1729,8 +1812,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
+ bool xdp_active = false;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 flags, misc;
void *data;
int rc = 0;
@@ -1839,18 +1924,39 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;
- if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
- rc = 1;
- goto next_rx;
+ if (bnxt_xdp_attached(bp, rxr)) {
+ bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+ if (agg_bufs) {
+ u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ cp_cons, agg_bufs,
+ false);
+ if (!frag_len) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+ xdp_active = true;
+ }
+
+ if (xdp_active) {
+ if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
}
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
- if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
- agg_bufs, false);
+ if (agg_bufs) {
+ if (!xdp_active)
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
@@ -1872,11 +1978,22 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ if (!skb) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
}
}
@@ -2492,10 +2609,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- if (bnapi->events & BNXT_AGG_EVENT)
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
+ if (bnapi->events & BNXT_AGG_EVENT) {
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ }
bnapi->events = 0;
}
@@ -2872,14 +2992,23 @@ skip_rx_buf_free:
if (!page)
continue;
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- rx_agg_buf->page = NULL;
- __clear_bit(i, rxr->rx_agg_bmap);
+ page_pool_recycle_direct(rxr->page_pool, page);
+ } else {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
+ __free_page(page);
+ }
}
skip_rx_agg_free:
@@ -3793,7 +3922,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* 8 for CRC and VLAN */
rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
- rx_space = rx_size + NET_SKB_PAD +
+ rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
@@ -3834,9 +3963,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
}
bp->rx_agg_ring_size = agg_ring_size;
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
- rx_space = rx_size + NET_SKB_PAD +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ rx_space = BNXT_PAGE_MODE_BUF_SIZE;
+ rx_size = BNXT_MAX_PAGE_MODE_MTU;
+ } else {
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
}
bp->rx_buf_use_size = rx_size;
@@ -3877,14 +4012,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
- if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
- return -EOPNOTSUPP;
- bp->dev->max_mtu =
- min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
- bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ bp->rx_skb_func = bnxt_rx_multi_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+ } else {
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+ }
bp->rx_dir = DMA_BIDIRECTIONAL;
- bp->rx_skb_func = bnxt_rx_page_skb;
/* Disable LRO or GRO_HW */
netdev_update_features(bp->dev);
} else {
@@ -5226,12 +5368,15 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
- req->enables =
- cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
- VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+
+ if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ }
/* thresholds not implemented in firmware yet */
req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
@@ -11031,6 +11176,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ if (!(bp->flags & BNXT_FLAG_TPA))
+ features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
if (!(features & NETIF_F_GRO))
features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 98453a78cbd0..a498ee297946 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -591,10 +591,12 @@ struct nqe_cn {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
#define BNXT_MAX_MTU 9500
-#define BNXT_MAX_PAGE_MODE_MTU \
+#define BNXT_PAGE_MODE_BUF_SIZE \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
- XDP_PACKET_HEADROOM - \
- SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
+ XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU \
+ (BNXT_PAGE_MODE_BUF_SIZE - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
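Note: a worked instance of the arithmetic, assuming 4 KiB pages, VLAN_ETH_HLEN of 18, NET_IP_ALIGN of 2 (the generic default; x86 uses 0), XDP_PACKET_HEADROOM of 256 and an skb_shared_info of roughly 320 bytes after alignment — all config-dependent, illustrative numbers only:

    /*
     *   BNXT_PAGE_MODE_BUF_SIZE = 4096 - 18 - 2 - 256 = 3820
     *   BNXT_MAX_PAGE_MODE_MTU  = 3820 - 320          = 3500
     */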
#define BNXT_MIN_PKT_SIZE 52
@@ -699,13 +701,12 @@ struct bnxt_sw_tx_bd {
};
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
u8 is_gso;
u8 is_push;
u8 action;
- union {
- unsigned short nr_frags;
- u16 rx_prod;
- };
+ unsigned short nr_frags;
+ u16 rx_prod;
};
struct bnxt_sw_rx_bd {
@@ -1817,6 +1818,7 @@ struct bnxt {
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
+#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
#define BNXT_CHIP_SR2(bp) \
((bp)->chip_num == CHIP_NUM_58818)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 22e965e18fbc..b3a48d6675fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -3491,7 +3491,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
dev_kfree_skb(skb);
return -EIO;
}
- bnxt_xmit_bd(bp, txr, map, pkt_size);
+ bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
/* Sync BD data before updating doorbell */
wmb();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 03b1d6c04504..f02fe906dedb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -24,36 +24,91 @@ DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len)
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp)
{
- struct bnxt_sw_tx_bd *tx_buf;
+ struct skb_shared_info *sinfo;
+ struct bnxt_sw_tx_bd *tx_buf, *first_buf;
struct tx_bd *txbd;
+ int num_frags = 0;
u32 flags;
u16 prod;
+ int i;
+
+ if (xdp && xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ num_frags = sinfo->nr_frags;
+ }
+ /* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
+ first_buf = tx_buf;
+ tx_buf->nr_frags = num_frags;
+ if (xdp)
+ tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
- TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ /* now let us fill up the frags into the next buffers */
+ for (i = 0; i < num_frags ; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct bnxt_sw_tx_bd *frag_tx_buf;
+ struct pci_dev *pdev = bp->pdev;
+ dma_addr_t frag_mapping;
+ int frag_len;
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
+ /* fill up the next buffer with this frag */
+ frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf->page = skb_frag_page(frag);
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ frag_len = skb_frag_size(frag);
+ frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+ frag_len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+ return NULL;
+
+ dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+
+ flags = frag_len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+ len = frag_len;
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+ /* Sync TX BD */
+ wmb();
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- return tx_buf;
+
+ return first_buf;
}
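Note: for a frags-bearing xdp_buff the function now emits one BD per fragment; the head BD advertises the chain length, and the final rewrite stamps PACKET_END on the last BD before the producer index moves on. The resulting descriptor chain, sketched:

    /*
     * BD0    : head length, BD_CNT = nr_frags + 1
     * BD1..n : one per fragment, length only
     * BDn    : length rewritten with TX_BD_FLAGS_PACKET_END as the
     *          last step, then the producer index advances past the chain
     */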
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+ dma_addr_t mapping, u32 len, u16 rx_prod,
+ struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
tx_buf->rx_prod = rx_prod;
tx_buf->action = XDP_TX;
}
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
@@ -63,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
@@ -78,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i;
+ int i, j, frags;
for (i = 0; i < nr_pkts; i++) {
tx_buf = &txr->tx_buf_ring[tx_cons];
@@ -96,6 +151,13 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
} else if (tx_buf->action == XDP_TX) {
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
+
+ frags = tx_buf->nr_frags;
+ for (j = 0; j < frags; j++) {
+ tx_cons = NEXT_TX(tx_cons);
+ tx_buf = &txr->tx_buf_ring[tx_cons];
+ page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+ }
}
tx_cons = NEXT_TX(tx_cons);
}
@@ -103,7 +165,52 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
if (rx_doorbell_needed) {
tx_buf = &txr->tx_buf_ring[last_tx_cons];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+
+ }
+}
+
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+ return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp)
+{
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct pci_dev *pdev;
+ dma_addr_t mapping;
+ u32 offset;
+
+ pdev = bp->pdev;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+ xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
+ xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+}
+
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *shinfo;
+ int i;
+
+ if (!xdp || !xdp_buff_has_frags(xdp))
+ return;
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+ page_pool_recycle_direct(rxr->page_pool, page);
}
+ shinfo->nr_frags = 0;
}
/* returns the following:
@@ -111,14 +218,14 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+ struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
- struct xdp_buff xdp;
dma_addr_t mapping;
+ u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
@@ -128,16 +235,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
pdev = bp->pdev;
- rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
txr = rxr->bnapi->tx_ring;
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
- xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -150,26 +251,38 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event &= ~BNXT_RX_EVENT;
*len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
+ if (orig_data != xdp.data)
offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
- }
+
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
- if (tx_avail < 1) {
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ *event = 0;
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+
+ tx_needed += sinfo->nr_frags;
+ *event = BNXT_AGG_EVENT;
+ }
+
+ if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- *event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
+
+ *event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod));
+ NEXT_RX(rxr->rx_prod), &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -177,6 +290,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
@@ -184,6 +299,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -203,6 +319,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -270,8 +387,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
int tx_xdp = 0, rc, tc;
struct bpf_prog *old;
- if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
- netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ if (prog && !prog->aux->xdp_has_frags &&
+ bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
@@ -337,3 +455,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
return rc;
}
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+ struct page_pool *pool, struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!skb)
+ return NULL;
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ }
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ PAGE_SIZE * sinfo->nr_frags,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+ return skb;
+}
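Note: xdp_update_skb_shared_info() is what folds the fragments back into the skb accounting. Roughly, its effect (a sketch of the core helper's behaviour, not its exact body):

    /*
     *   skb_shinfo(skb)->nr_frags = num_frags;
     *   skb->len        += sinfo->xdp_frags_size;
     *   skb->data_len   += sinfo->xdp_frags_size;
     *   skb->truesize   += PAGE_SIZE * sinfo->nr_frags;
     *   skb->pfmemalloc |= xdp_buff_is_frag_pfmemalloc(xdp);
     */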
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 067bb5e821f5..505911ae095d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -14,13 +14,25 @@ DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len);
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff xdp, struct page *page, unsigned int *len,
u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 **data_ptr, unsigned int *len,
+ struct xdp_buff *xdp);
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+ u8 num_frags, struct page_pool *pool,
+ struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1);
#endif
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index a1a38456c9a3..5d5f10180158 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2534,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev)
int err;
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- BUG_ON(!res);
+ if (!res) {
+ printk(KERN_ERR "%s: failed to get resource\n",
+ dev_name(&pldev->dev));
+ err = -EINVAL;
+ goto out_out;
+ }
sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e475be29845c..6434e74c04f1 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -337,11 +337,9 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -391,11 +389,9 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
- if (status < 0) {
- pm_runtime_put_noidle(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
goto mdio_pm_exit;
- }
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -2753,9 +2749,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
- err = pm_runtime_get_sync(&bp->pdev->dev);
+ err = pm_runtime_resume_and_get(&bp->pdev->dev);
if (err < 0)
- goto pm_exit;
+ return err;
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -4142,11 +4138,9 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
- ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&lp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&lp->pdev->dev);
+ if (ret < 0)
return ret;
- }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
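Note: pm_runtime_resume_and_get() folds the error-path put_noidle into the getter, which is what lets all three call sites above drop their unwind code. Its behaviour, as a simplified sketch of the PM core helper:

    /* Simplified equivalent of pm_runtime_resume_and_get(). */
    static inline int sketch_resume_and_get(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);

            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }
            return 0;
    }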
@@ -4594,7 +4588,7 @@ static int zynqmp_init(struct platform_device *pdev)
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
/* Ensure PS-GTR PHY device used in SGMII mode is ready */
- bp->sgmii_phy = devm_phy_get(&pdev->dev, "sgmii-phy");
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
if (IS_ERR(bp->sgmii_phy)) {
ret = PTR_ERR(bp->sgmii_phy);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 457cb7121000..1281d1565ef8 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
- * This function implements the the reception process.
+ * This function implements the reception process.
* Also it runs the TX completion thread
*/
static int xgmac_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 59683f79959c..60b648b46f75 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -483,7 +483,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
tx_info->ip_family = AF_INET;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 9e2378013642..41714203ace8 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -567,7 +567,7 @@ void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+ size_t len, int flags, int *addr_len);
int chtls_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index c320cc8ca68d..539992dad8ba 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1426,7 +1426,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied)
}
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
@@ -1441,7 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
@@ -1616,7 +1616,7 @@ skip_copy:
* Peek at data in a socket's receive buffer.
*/
static int peekmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags)
+ size_t len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 peek_seq, offset;
@@ -1626,7 +1626,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
long timeo;
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
peek_seq = tp->copied_seq;
do {
@@ -1737,7 +1737,7 @@ found_ok_skb:
}
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
@@ -1750,25 +1750,23 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
if (unlikely(flags & MSG_OOB))
- return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
- addr_len);
+ return tcp_prot.recvmsg(sk, msg, len, flags, addr_len);
if (unlikely(flags & MSG_PEEK))
- return peekmsg(sk, msg, len, nonblock, flags);
+ return peekmsg(sk, msg, len, flags);
if (sk_can_busy_loop(sk) &&
skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
- sk_busy_loop(sk, nonblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
if (is_tls_rx(csk))
- return chtls_pt_recvmsg(sk, msg, len, nonblock,
- flags, addr_len);
+ return chtls_pt_recvmsg(sk, msg, len, flags, addr_len);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 86b1d23eba83..1db19463fd46 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -474,8 +474,6 @@ err_out_netdev:
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) ioread32(ee_addr)
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 904f3304727e..49c93aa38862 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1091,8 +1091,7 @@ static int tsnep_mdio_init(struct tsnep_adapter *adapter)
retval = of_mdiobus_register(adapter->mdiobus, np);
out:
- if (np)
- of_node_put(np);
+ of_node_put(np);
return retval;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index b668df6193be..8c7fadf2b734 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg {
struct hclge_pf_to_vf_msg {
u16 code;
- u16 vf_mbx_msg_code;
- u16 vf_mbx_msg_subcode;
- u16 resp_status;
- u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ union {
+ /* used for mbx response */
+ struct {
+ u16 vf_mbx_msg_code;
+ u16 vf_mbx_msg_subcode;
+ u16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ };
+ /* used for general mbx */
+ struct {
+ u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ };
};
struct hclge_mbx_vf_to_pf_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 79c64f4e67d2..8a3a446219f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
+ HNAE3_DEV_SUPPORT_CQ_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+#define hnae3_ae_dev_cq_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c15ca710dabb..c8b151d29f53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static void
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 876650eddac4..7a7d4cf9bf35 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_PAUSE_B = 14,
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+ HCLGE_COMM_CAP_CQ_B = 18,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a3ee7875d6a7..ae56306400b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
priv->tqp_vector[i].rx_group.dim.mode = mode;
}
- /* only device version above V3(include V3), GL can switch CQ/EQ
- * period mode.
- */
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ if (hnae3_ae_dev_cq_supported(ae_dev)) {
u32 new_mode;
u64 reg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index f4da77452126..1db8a86f046d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -664,6 +664,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->tx_pending = priv->ring[0].desc_num;
param->rx_pending = priv->ring[rx_queue_index].desc_num;
kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+ kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+ &priv->state);
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -1104,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
+static bool
+hns3_is_ringparam_changed(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct hns3_ring_param *old_ringparam,
+ struct hns3_ring_param *new_ringparam)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ old_ringparam->tx_desc_num = priv->ring[0].desc_num;
+ old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
+ old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
+ new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
+
+ if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
+ old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
+ old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
+ netdev_info(ndev, "ringparam not changed\n");
+ return false;
+ }
+
+ return true;
+}
+
static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -1120,62 +1152,80 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
return 0;
}
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+ return -EOPNOTSUPP;
+
+ if (tx_push == old_state)
+ return 0;
+
+ netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+ old_state ? "on" : "off", tx_push ? "on" : "off");
+
+ if (tx_push)
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+ else
+ clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ return 0;
+}
+
static int hns3_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
+ struct hns3_ring_param old_ringparam, new_ringparam;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_ring *tmp_rings;
bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- u16 queue_num = h->kinfo.num_tqps;
- u32 old_rx_buf_len;
int ret, i;
ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
- /* Hardware requires that its descriptors must be multiple of eight */
- new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
- new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring[0].desc_num;
- old_rx_desc_num = priv->ring[queue_num].desc_num;
- old_rx_buf_len = priv->ring[queue_num].buf_size;
- if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num &&
- kernel_param->rx_buf_len == old_rx_buf_len)
+ ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+ if (ret)
+ return ret;
+
+ if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
+ &old_ringparam, &new_ringparam))
return 0;
tmp_rings = hns3_backup_ringparam(priv);
if (!tmp_rings) {
- netdev_err(ndev,
- "backup ring param failed by allocating memory fail\n");
+ netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
return -ENOMEM;
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n",
- old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num,
- old_rx_buf_len, kernel_param->rx_buf_len);
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
+ old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
+ new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
+ old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
- hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
+ new_ringparam.rx_desc_num);
+ hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret);
- hns3_change_rx_buf_len(ndev, old_rx_buf_len);
- hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
+ old_ringparam.rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
@@ -1385,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
return 0;
}
-static int hns3_check_coalesce_para(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
+static int
+hns3_check_cqe_coalesce_param(struct net_device *netdev,
+ struct kernel_ethtool_coalesce *kernel_coal)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
+ !hnae3_ae_dev_cq_supported(ae_dev)) {
+ netdev_err(netdev, "coalesced cqe mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
int ret;
+ ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
+ if (ret)
+ return ret;
+
ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
@@ -1464,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
if (hns3_nic_resetting(netdev))
return -EBUSY;
- ret = hns3_check_coalesce_para(netdev, cmd);
+ ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
if (ret)
return ret;
@@ -1825,23 +1897,27 @@ static int hns3_set_tunable(struct net_device *netdev,
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
+ netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
+ old_tx_spare_buf_size, new_tx_spare_buf_size);
ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
if (ret ||
(!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
int ret1;
- netdev_warn(netdev,
- "change tx spare buf size fail, revert to old value\n");
+ netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
ret1 = hns3_set_tx_spare_buf_size(netdev,
old_tx_spare_buf_size);
if (ret1) {
- netdev_err(netdev,
- "revert to old tx spare buf size fail\n");
+ netdev_err(netdev, "revert to old tx spare buf size fail\n");
return ret1;
}
return ret;
}
+
+ netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+ priv->ring->tx_spare->len);
+
break;
default:
ret = -EOPNOTSUPP;
@@ -1858,7 +1934,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
-#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN
+#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \
+ ETHTOOL_RING_USE_TX_PUSH)
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
index 822d6fcbc73b..da207d1d9aa9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
u8 link_ext_substate;
};
+struct hns3_ring_param {
+ u32 tx_desc_num;
+ u32 rx_desc_num;
+ u32 rx_buf_len;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 42a9e73d8588..6efd768cc07c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures
*
* This function handles all the PF RAS errors in the
- * hw register/s using command.
+ * hw registers using command.
*/
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8cebb180c812..a5dd2c8c244a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set pf mtu for less than vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7998ca617a92..49c40744cda5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -109,7 +109,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
resp_pf_to_vf->msg_len = msg_len;
resp_pf_to_vf->msg.code = mbx_opcode;
- memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 342d7cdf6285..e13d71abd9f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* ensure vf tbl list as empty before init*/
+	/* ensure vf tbl list is empty before init */
ret = hclgevf_clear_vport_list(hdev);
if (ret) {
dev_err(&pdev->dev,
@@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGEVF_TQP_REG_SIZE * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index d5e0a3f762f7..c8055d69255c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
- * to prtect the received_response from race condition
+ * to protect the received_response from race condition
*/
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
* message to PF.
* @hdev: pointer to struct hclgevf_dev
- * @resp_msg: pointer to store the original message type and response status
- * @len: the resp_msg data array length.
+ * @code0: the message opcode VF sends to PF.
+ * @code1: the message sub-opcode VF sends to PF.
+ * @resp_data: pointer to store response data from PF to VF.
+ * @resp_len: the length of resp_data from PF to VF.
*/
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5c5931dba51d..3699c5435396 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -257,12 +257,14 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
+ u64 prev = 0;
int rc;
if (!reuse_ltb(ltb, size)) {
dev_dbg(dev,
"LTB size changed from 0x%llx to 0x%x, reallocating\n",
ltb->size, size);
+ prev = ltb->size;
free_long_term_buff(adapter, ltb);
}
@@ -283,8 +285,8 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
bitmap_set(adapter->map_ids, ltb->map_id, 1);
dev_dbg(dev,
- "Allocated new LTB [map %d, size 0x%llx]\n",
- ltb->map_id, ltb->size);
+ "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
+ ltb->map_id, ltb->size, prev);
}
/* Ensure ltb is zeroed - specially when reusing it. */
@@ -345,6 +347,208 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = 0;
}
+/**
+ * free_ltb_set - free the given set of long term buffers (LTBs)
+ * @adapter: The ibmvnic adapter containing this ltb set
+ * @ltb_set: The ltb_set to be freed
+ *
+ * Free each LTB in the given set and release the set itself.
+ */
+
+static void free_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set)
+{
+ int i;
+
+ for (i = 0; i < ltb_set->num_ltbs; i++)
+ free_long_term_buff(adapter, &ltb_set->ltbs[i]);
+
+ kfree(ltb_set->ltbs);
+ ltb_set->ltbs = NULL;
+ ltb_set->num_ltbs = 0;
+}
+
+/**
+ * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
+ *
+ * @adapter: ibmvnic adapter associated with the LTB set
+ * @ltb_set: container object for the set of LTBs
+ * @num_buffs: Number of buffers in the LTB
+ * @buff_size: Size of each buffer in the LTB
+ *
+ * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
+ * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
+ * the new set needs fewer LTBs than the old set, free the excess LTBs;
+ * if it needs more, allocate the remaining ones. Try to reuse as many
+ * LTBs as possible to avoid reallocation.
+ *
+ * Any changes to this allocation strategy must be reflected in
+ * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
+ */
+static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set, int num_buffs,
+ int buff_size)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ltb_set old_set;
+ struct ibmvnic_ltb_set new_set;
+ int rem_size;
+ int tot_size; /* size of all ltbs */
+ int ltb_size; /* size of one ltb */
+ int nltbs;
+ int rc;
+ int n;
+ int i;
+
+ dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
+ buff_size);
+
+ ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
+ tot_size = num_buffs * buff_size;
+
+ if (ltb_size > tot_size)
+ ltb_size = tot_size;
+
+ nltbs = tot_size / ltb_size;
+ if (tot_size % ltb_size)
+ nltbs++;
+
+ old_set = *ltb_set;
+
+ if (old_set.num_ltbs == nltbs) {
+ new_set = old_set;
+ } else {
+ int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
+
+ new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
+ if (!new_set.ltbs)
+ return -ENOMEM;
+
+ new_set.num_ltbs = nltbs;
+
+ /* Free any excess ltbs in old set */
+ for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
+ free_long_term_buff(adapter, &old_set.ltbs[i]);
+
+ /* Copy remaining ltbs to new set. All LTBs except the
+ * last one are of the same size. alloc_long_term_buff()
+ * will realloc if the size changes.
+ */
+ n = min(old_set.num_ltbs, new_set.num_ltbs);
+ for (i = 0; i < n; i++)
+ new_set.ltbs[i] = old_set.ltbs[i];
+
+		/* Any additional entries in the new set are zeroed for
+		 * now and will be allocated in alloc_long_term_buff().
+		 */
+
+		/* We no longer need old_set, so free it. Note that we
+		 * may have reused some ltbs from the old set and freed
+		 * the excess ones above, so only the container needs
+		 * freeing now, not the LTBs themselves (i.e. don't call
+		 * free_ltb_set()!).
+		 */
+ kfree(old_set.ltbs);
+ old_set.ltbs = NULL;
+ old_set.num_ltbs = 0;
+
+ /* Install the new set. If allocations fail below, we will
+ * retry later and know what size LTBs we need.
+ */
+ *ltb_set = new_set;
+ }
+
+ i = 0;
+ rem_size = tot_size;
+ while (rem_size) {
+ if (ltb_size > rem_size)
+ ltb_size = rem_size;
+
+ rem_size -= ltb_size;
+
+ rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
+ if (rc)
+ goto out;
+ i++;
+ }
+
+ WARN_ON(i != new_set.num_ltbs);
+
+ return 0;
+out:
+	/* We may have allocated one or more LTBs before failing and
+	 * want to try to reuse them on the next reset, so don't free
+	 * the ltb set.
+	 */
+ return rc;
+}
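
A worked example of the sizing logic above (my arithmetic, using the
constants documented in ibmvnic.h later in this patch):

/* Illustrative numbers for an rx pool of 4096 jumbo buffers of
 * 9588 bytes each, with IBMVNIC_ONE_LTB_SIZE = 8MB (8388608 bytes):
 *
 *   ltb_size = rounddown(8388608, 9588) = 8379912 bytes
 *   tot_size = 4096 * 9588             = 39272448 bytes
 *   nltbs    = 39272448 / 8379912 (+1 for the remainder) = 5
 *
 * i.e. four full LTBs plus one smaller trailing LTB per pool, matching
 * the "5 LTBs per Rx and Tx pool" figure in the ibmvnic.h comment.
 */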
+
+/**
+ * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
+ * @rxpool: The receive buffer pool containing buffer
+ * @bufidx: Index of buffer in rxpool
+ * @ltbp: (Output) pointer to the long term buffer containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer, identified by [rxpool, bufidx], to an LTB in the
+ * pool and the corresponding offset within that LTB. For now, assume each
+ * LTB may be a different size and walk the set linearly; this could be
+ * optimized based on the allocation strategy in alloc_ltb_set().
+ */
+static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON(bufidx >= rxpool->size);
+
+ for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
+ ltb = &rxpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / rxpool->buff_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * rxpool->buff_size;
+}
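
For example, under the same illustrative layout as above:

/* With full LTBs holding 8379912 / 9588 = 874 buffers each,
 * bufidx = 2000 walks past LTBs 0 and 1 (2000 - 874 - 874 = 252)
 * and resolves to LTB 2 at offset 252 * 9588 = 2416176 bytes.
 */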
+
+/**
+ * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
+ * @txpool: The transmit buffer pool containing buffer
+ * @bufidx: Index of buffer in txpool
+ * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [txpool, bufidx] to an LTB in the
+ * pool and its corresponding offset.
+ */
+static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON_ONCE(bufidx >= txpool->num_buffers);
+
+ for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
+ ltb = &txpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / txpool->buf_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * txpool->buf_size;
+}
+
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -361,6 +565,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_sub_crq_queue *rx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -369,7 +574,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
dma_addr_t dma_addr;
unsigned char *dst;
int shift = 0;
- int index;
+ int bufidx;
int i;
if (!pool->active)
@@ -385,14 +590,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- index = pool->free_map[pool->next_free];
+ bufidx = pool->free_map[pool->next_free];
/* We may be reusing the skb from earlier resets. Allocate
* only if necessary. But since the LTB may have changed
* during reset (see init_rx_pools()), update LTB below
* even if reusing skb.
*/
- skb = pool->rx_buff[index].skb;
+ skb = pool->rx_buff[bufidx].skb;
if (!skb) {
skb = netdev_alloc_skb(adapter->netdev,
pool->buff_size);
@@ -407,26 +612,26 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
- offset = index * pool->buff_size;
- dst = pool->long_term_buff.buff + offset;
+ map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
+ dst = ltb->buff + offset;
memset(dst, 0, pool->buff_size);
- dma_addr = pool->long_term_buff.addr + offset;
+ dma_addr = ltb->addr + offset;
/* add the skb to an rx_buff in the pool */
- pool->rx_buff[index].data = dst;
- pool->rx_buff[index].dma = dma_addr;
- pool->rx_buff[index].skb = skb;
- pool->rx_buff[index].pool_index = pool->index;
- pool->rx_buff[index].size = pool->buff_size;
+ pool->rx_buff[bufidx].data = dst;
+ pool->rx_buff[bufidx].dma = dma_addr;
+ pool->rx_buff[bufidx].skb = skb;
+ pool->rx_buff[bufidx].pool_index = pool->index;
+ pool->rx_buff[bufidx].size = pool->buff_size;
/* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
sub_crq->rx_add.correlator =
- cpu_to_be64((u64)&pool->rx_buff[index]);
+ cpu_to_be64((u64)&pool->rx_buff[bufidx]);
sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.map_id = ltb->map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -466,10 +671,10 @@ failure:
sub_crq = &ind_bufp->indir_arr[i];
rx_buff = (struct ibmvnic_rx_buff *)
be64_to_cpu(sub_crq->rx_add.correlator);
- index = (int)(rx_buff - pool->rx_buff);
- pool->free_map[pool->next_free] = index;
- dev_kfree_skb_any(pool->rx_buff[index].skb);
- pool->rx_buff[index].skb = NULL;
+ bufidx = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = bufidx;
+ dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+ pool->rx_buff[bufidx].skb = NULL;
}
adapter->replenish_add_buff_failure += ind_bufp->index;
atomic_add(buffers_added, &pool->available);
@@ -579,7 +784,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(rx_pool->free_map);
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ free_ltb_set(adapter, &rx_pool->ltb_set);
if (!rx_pool->rx_buff)
continue;
@@ -724,8 +929,8 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size);
+ rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
+ rx_pool->size, rx_pool->buff_size);
if (rc)
goto out;
@@ -782,7 +987,7 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
{
kfree(tx_pool->tx_buff);
kfree(tx_pool->free_map);
- free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ free_ltb_set(adapter, &tx_pool->ltb_set);
}
/**
@@ -972,17 +1177,16 @@ update_ltb:
for (i = 0; i < num_pools; i++) {
struct ibmvnic_tx_pool *tso_pool;
struct ibmvnic_tx_pool *tx_pool;
- u32 ltb_size;
tx_pool = &adapter->tx_pool[i];
- ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
- i, tx_pool->long_term_buff.buff,
- tx_pool->num_buffers, tx_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
+ i, tx_pool->num_buffers, tx_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
+ tx_pool->num_buffers, tx_pool->buf_size);
+ if (rc)
+ goto out;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
@@ -991,14 +1195,14 @@ update_ltb:
tx_pool->free_map[j] = j;
tso_pool = &adapter->tso_pool[i];
- ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
- i, tso_pool->long_term_buff.buff,
- tso_pool->num_buffers, tso_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
+ i, tso_pool->num_buffers, tso_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
+ tso_pool->num_buffers, tso_pool->buf_size);
+ if (rc)
+ goto out;
tso_pool->consumer_index = 0;
tso_pool->producer_index = 0;
@@ -1911,6 +2115,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -1926,7 +2131,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- int index = 0;
+ int bufidx = 0;
u8 proto = 0;
/* If a reset is in progress, drop the packet since
@@ -1960,9 +2165,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
else
tx_pool = &adapter->tx_pool[queue_num];
- index = tx_pool->free_map[tx_pool->consumer_index];
+ bufidx = tx_pool->free_map[tx_pool->consumer_index];
- if (index == IBMVNIC_INVALID_MAP) {
+ if (bufidx == IBMVNIC_INVALID_MAP) {
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
@@ -1973,10 +2178,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
- offset = index * tx_pool->buf_size;
- dst = tx_pool->long_term_buff.buff + offset;
+ map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);
+
+ dst = ltb->buff + offset;
memset(dst, 0, tx_pool->buf_size);
- data_dma_addr = tx_pool->long_term_buff.addr + offset;
+ data_dma_addr = ltb->addr + offset;
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -2003,9 +2209,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
- tx_buff = &tx_pool->tx_buff[index];
+ tx_buff = &tx_pool->tx_buff[bufidx];
tx_buff->skb = skb;
- tx_buff->index = index;
+ tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
memset(&tx_crq, 0, sizeof(tx_crq));
@@ -2017,10 +2223,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_is_gso(skb))
tx_crq.v1.correlator =
- cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+ cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
else
- tx_crq.v1.correlator = cpu_to_be32(index);
- tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+ tx_crq.v1.correlator = cpu_to_be32(bufidx);
+ tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
@@ -3972,16 +4178,16 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->desired.rx_entries =
adapter->max_rx_add_entries_per_subcrq;
- max_entries = IBMVNIC_MAX_LTB_SIZE /
+ max_entries = IBMVNIC_LTB_SET_SIZE /
(adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.tx_entries = max_entries;
}
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.rx_entries = max_entries;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1310c861bf83..e5c6ff3d0c47 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -36,9 +36,50 @@
#define IBMVNIC_TSO_BUFS 64
#define IBMVNIC_TSO_POOL_MASK 0x80000000
-#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
-#define IBMVNIC_BUFFER_HLEN 500
+/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx pool
+ * has a set of buffers. The size of each buffer is determined by the MTU.
+ *
+ * Each Rx/Tx pool is also associated with a DMA region that is shared
+ * with the "hardware" (VIOS) and used to send/receive packets. The DMA
+ * region is also referred to as a Long Term Buffer or LTB.
+ *
+ * The size of the DMA region required for an Rx/Tx pool depends on the
+ * number and size (MTU) of the buffers in the pool. At the max of
+ * 4096 jumbo frames (MTU=9000) we will need about 9K * 4K = 36MB plus
+ * some padding.
+ *
+ * But the size of a single DMA region is limited by MAX_ORDER in the
+ * kernel (about 16MB currently). To support, say, 4K jumbo frames, we
+ * use a set of LTBs (struct ltb_set) per pool.
+ *
+ * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel
+ * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
+ * (must be <= IBMVNIC_ONE_LTB_MAX)
+ * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
+ *
+ * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
+ * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
+ *
+ * The Rx and Tx pools can have up to 4096 buffers. The max size of
+ * these buffers is about 9588 bytes (for jumbo frames, including
+ * IBMVNIC_BUFFER_HLEN), so we set IBMVNIC_LTB_SET_SIZE for a pool to
+ * 4096 * 9588 ~= 38MB.
+ *
+ * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
+ * allocating an LTB can fail when the system is low on memory. If it is
+ * too small, we would need several mappings for each of the Rx/Tx/TSO
+ * pools, but the VNIC protocol limits us to 255 mappings per vnic.
+ *
+ * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
+ * to 38MB, we need 5 LTBs per Rx and Tx pool and 1 LTB per 4MB TSO
+ * pool. Thus the 16 Rx and 16 Tx queues require 32 * 5 = 160 mappings,
+ * plus 16 for the TSO pools, for a total of 176 LTB mappings per VNIC.
+ */
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
+#define IBMVNIC_LTB_SET_SIZE (38 << 20)
+#define IBMVNIC_BUFFER_HLEN 500
#define IBMVNIC_RESET_DELAY 100
struct ibmvnic_login_buffer {
@@ -793,6 +834,11 @@ struct ibmvnic_long_term_buff {
u8 map_id;
};
+struct ibmvnic_ltb_set {
+ int num_ltbs;
+ struct ibmvnic_long_term_buff *ltbs;
+};
+
struct ibmvnic_tx_buff {
struct sk_buff *skb;
int index;
@@ -805,7 +851,7 @@ struct ibmvnic_tx_pool {
int *free_map;
int consumer_index;
int producer_index;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
int num_buffers;
int buf_size;
} ____cacheline_aligned;
@@ -828,7 +874,7 @@ struct ibmvnic_rx_pool {
int next_free;
int next_alloc;
int active;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;
struct ibmvnic_vpd {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 55c6bce5da61..18558a019353 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -852,6 +852,7 @@ struct i40e_vsi {
u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_buf_failed;
u64 rx_page_failed;
u64 rx_page_reuse;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6aefffd83615..2819e261a126 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -47,6 +47,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_SFP_X722_A:
hw->mac.type = I40E_MAC_X722;
break;
default:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index be7c6f34d45c..c9dcd6d92c83 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -309,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->stats.bytes,
tx_ring->tx_stats.restart_queue);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
i,
tx_ring->tx_stats.tx_busy,
- tx_ring->tx_stats.tx_done_old);
+ tx_ring->tx_stats.tx_done_old,
+ tx_ring->tx_stats.tx_stopped);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i\n",
i, tx_ring->size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 1bcb0ec0f0c0..2610338002fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -33,6 +33,7 @@
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_SFP_X722_A 0x0DDA
#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e48499624d22..610f00cbaff9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -293,12 +293,14 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
I40E_VSI_STAT("tx_busy", tx_busy),
+ I40E_VSI_STAT("tx_stopped", tx_stopped),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
+ I40E_VSI_STAT("tx_restart", tx_restart),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6778df2177a1..358c2edc118d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -77,6 +77,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@ -785,6 +786,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
unsigned int start;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -804,6 +806,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_stopped = 0;
rx_page = 0;
rx_buf = 0;
rx_reuse = 0;
@@ -828,6 +831,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_stopped += p->tx_stats.tx_stopped;
/* locate Rx ring */
p = READ_ONCE(vsi->rx_rings[q]);
@@ -872,6 +876,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_stopped = tx_stopped;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
vsi->rx_page_reuse = rx_reuse;
@@ -13436,8 +13441,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np->vsi = vsi;
hw_enc_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_SOFT_FEATURES |
NETIF_F_TSO |
@@ -13468,6 +13472,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ I40E_GSO_PARTIAL_FEATURES;
+
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+ netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
/* enable macvlan offloads */
netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0eae5858f2fe..7bc1174edf6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3,6 +3,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
@@ -3015,6 +3016,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
{
struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
+ __be16 protocol;
union {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -3026,7 +3028,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
- u16 gso_segs, gso_size;
+ u16 gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3039,15 +3041,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
ip.v4->tot_len = 0;
ip.v4->check = 0;
+
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
} else {
ip.v6->payload_len = 0;
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
}
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
@@ -3100,10 +3110,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
- gso_segs = skb_shinfo(skb)->gso_segs;
/* update GSO size and bytecount with header size */
- first->gso_segs = gso_segs;
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */
@@ -3187,13 +3196,27 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *exthdr;
u32 offset, cmd = 0;
__be16 frag_off;
+ __be16 protocol;
u8 l4_proto = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
+	/* Set the tx_flags to indicate the IP protocol type. This is
+	 * required so that the checksum header computation below is
+	 * accurate.
+	 */
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ else
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
/* compute outer L2 header size */
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
@@ -3373,6 +3396,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
/* Memory barrier before checking head and tail */
smp_mb();
+ ++tx_ring->tx_stats.tx_stopped;
+
/* Check again in a case another CPU has just made room available. */
if (likely(I40E_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3749,7 +3774,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
struct i40e_tx_buffer *first;
u32 td_offset = 0;
u32 tx_flags = 0;
- __be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso, count;
@@ -3791,15 +3815,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
- /* obtain protocol of skb */
- protocol = vlan_get_protocol(skb);
-
- /* setup IPv4/IPv6 offloads */
- if (protocol == htons(ETH_P_IP))
- tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
- tx_flags |= I40E_TX_FLAGS_IPV6;
-
tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index c471c2da313c..41f86e9535a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -290,6 +290,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
int prev_pkt_ctr;
};
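
A hedged reading of the new counter (my interpretation of the hunk in
__i40e_maybe_stop_tx() above, not wording from the patch):

/* tx_stopped counts how often __i40e_maybe_stop_tx() stopped the queue
 * because the ring was full; it roughly complements restart_queue,
 * which counts how often a stopped queue was started again.
 */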
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 19da3b22160f..8c5118c8baaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -20,6 +20,7 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_CONSUMED BIT(0)
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
+#define I40E_XDP_EXIT BIT(3)
/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index c1d25b0b0ca2..af3e7e6afc85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -161,9 +161,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return I40E_XDP_REDIR;
+ if (!err)
+ return I40E_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = I40E_XDP_EXIT;
+ else
+ result = I40E_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -175,16 +179,16 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (result == I40E_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = I40E_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = I40E_XDP_CONSUMED;
- break;
}
return result;
}
@@ -271,7 +275,8 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
unsigned int *rx_packets,
unsigned int *rx_bytes,
unsigned int size,
- unsigned int xdp_res)
+ unsigned int xdp_res,
+ bool *failure)
{
struct sk_buff *skb;
@@ -281,11 +286,15 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
return;
+ if (xdp_res == I40E_XDP_EXIT) {
+ *failure = true;
+ return;
+ }
+
if (xdp_res == I40E_XDP_CONSUMED) {
xsk_buff_free(xdp_buff);
return;
}
-
if (xdp_res == I40E_XDP_PASS) {
/* NB! We are not checking for errors using
* i40e_test_staterr with
@@ -371,7 +380,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, bi);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
- &rx_bytes, size, xdp_res);
+ &rx_bytes, size, xdp_res, &failure);
+ if (failure)
+ break;
total_rx_packets += rx_packets;
total_rx_bytes += rx_bytes;
xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
@@ -382,7 +393,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
- failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -594,13 +605,13 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return -ENETDOWN;
if (!i40e_enabled_xdp_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
if (queue_id >= vsi->num_queue_pairs)
- return -ENXIO;
+ return -EINVAL;
if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
ring = vsi->xdp_rings[queue_id];
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 9a0a358a15c2..6d8beb84d852 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3336,7 +3336,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
- netdev->mpls_features = NETIF_F_HW_CSUM;
+ netdev->mpls_features = NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
/* enable features */
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 25b8f6f726eb..496250f9f8fc 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -30,12 +30,46 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+enum {
+ ICE_PKT_VLAN = BIT(0),
+ ICE_PKT_OUTER_IPV6 = BIT(1),
+ ICE_PKT_TUN_GTPC = BIT(2),
+ ICE_PKT_TUN_GTPU = BIT(3),
+ ICE_PKT_TUN_NVGRE = BIT(4),
+ ICE_PKT_TUN_UDP = BIT(5),
+ ICE_PKT_INNER_IPV6 = BIT(6),
+ ICE_PKT_INNER_TCP = BIT(7),
+ ICE_PKT_INNER_UDP = BIT(8),
+ ICE_PKT_GTP_NOPAY = BIT(9),
+};
+
struct ice_dummy_pkt_offsets {
enum ice_protocol_type type;
u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
-static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+struct ice_dummy_pkt_profile {
+ const struct ice_dummy_pkt_offsets *offsets;
+ const u8 *pkt;
+ u32 match;
+ u16 pkt_len;
+};
+
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
+ ice_dummy_##type##_packet_offsets[]
+
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
+ static const u8 ice_dummy_##type##_packet[]
+
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+}
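
Hand-expanded for one template, to show what the macros generate (the
preprocessor output is equivalent):

/* ICE_DECLARE_PKT_TEMPLATE(udp) declares
 *
 *   static const u8 ice_dummy_udp_packet[]
 *
 * and ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP) expands to the initializer
 *
 *   {
 *       .match   = (ICE_PKT_INNER_UDP),
 *       .pkt     = ice_dummy_udp_packet,
 *       .pkt_len = sizeof(ice_dummy_udp_packet),
 *       .offsets = ice_dummy_udp_packet_offsets,
 *   }
 */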
+
+ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -47,7 +81,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -82,7 +116,7 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -94,7 +128,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -126,7 +160,7 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -141,7 +175,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -179,7 +213,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -194,7 +228,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -229,8 +263,7 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets
-dummy_gre_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -242,7 +275,7 @@ dummy_gre_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -282,8 +315,7 @@ static const u8 dummy_gre_ipv6_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets
-dummy_gre_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -295,7 +327,7 @@ dummy_gre_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -332,8 +364,7 @@ static const u8 dummy_gre_ipv6_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets
-dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -348,7 +379,7 @@ dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -391,8 +422,7 @@ static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets
-dummy_udp_tun_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -407,7 +437,7 @@ dummy_udp_tun_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -448,7 +478,7 @@ static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
};
/* offset info for MAC + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -457,7 +487,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + UDP */
-static const u8 dummy_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -477,7 +507,7 @@ static const u8 dummy_udp_packet[] = {
};
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -487,7 +517,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv4:UDP dummy packet */
-static const u8 dummy_vlan_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -509,7 +539,7 @@ static const u8 dummy_vlan_udp_packet[] = {
};
/* offset info for MAC + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -518,7 +548,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + TCP */
-static const u8 dummy_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -541,7 +571,7 @@ static const u8 dummy_tcp_packet[] = {
};
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -551,7 +581,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv4:TCP dummy packet */
-static const u8 dummy_vlan_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -575,7 +605,7 @@ static const u8 dummy_vlan_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
@@ -583,7 +613,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_tcp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -611,8 +641,7 @@ static const u8 dummy_tcp_ipv6_packet[] = {
};
/* C-tag (802.1Q): IPv6 + TCP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_tcp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -622,7 +651,7 @@ dummy_vlan_tcp_ipv6_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -652,7 +681,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
};
/* IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
@@ -661,7 +690,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
};
/* IPv6 + UDP dummy packet */
-static const u8 dummy_udp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -689,8 +718,7 @@ static const u8 dummy_udp_ipv6_packet[] = {
};
/* C-tag (802.1Q): IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_VLAN_OFOS, 12 },
{ ICE_ETYPE_OL, 16 },
@@ -700,7 +728,7 @@ dummy_vlan_udp_ipv6_packet_offsets[] = {
};
/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-static const u8 dummy_vlan_udp_ipv6_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -727,8 +755,7 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -738,7 +765,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -776,8 +803,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
};
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -787,7 +813,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -822,8 +848,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
};
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -833,7 +858,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -875,8 +900,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
@@ -886,7 +910,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -925,8 +949,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -936,7 +959,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -978,8 +1001,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -989,7 +1011,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1028,8 +1050,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1039,7 +1060,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1086,8 +1107,7 @@ static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1097,7 +1117,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1141,7 +1161,15 @@ static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP_NO_PAY, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1171,17 +1199,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
0x00, 0x00,
};
-static const
-struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_IPV4_OFOS, 14 },
- { ICE_UDP_OF, 34 },
- { ICE_GTP_NO_PAY, 42 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_OF, 54 },
@@ -1189,7 +1207,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1215,6 +1233,55 @@ static const u8 dummy_ipv6_gtp_packet[] = {
0x00, 0x00,
};
+static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
+ ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
+ ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
+ ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
+ ICE_PKT_PROFILE(tcp, 0),
+};
+
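The ICE_DECLARE_PKT_OFFSETS/ICE_DECLARE_PKT_TEMPLATE/ICE_PKT_PROFILE macros are defined earlier in ice_switch.c, outside this hunk. A plausible sketch of their expansion, assuming the profile struct carries the template, its length and the offsets:

    /* Sketch only -- the real definitions are not part of this hunk. */
    #define ICE_DECLARE_PKT_OFFSETS(type)				\
    	static const struct ice_dummy_pkt_offsets		\
    	ice_dummy_##type##_packet_offsets[]

    #define ICE_DECLARE_PKT_TEMPLATE(type)				\
    	static const u8 ice_dummy_##type##_packet[]

    #define ICE_PKT_PROFILE(type, m) {				\
    	.match		= (m),					\
    	.pkt		= ice_dummy_##type##_packet,		\
    	.pkt_len	= sizeof(ice_dummy_##type##_packet),	\
    	.offsets	= ice_dummy_##type##_packet_offsets,	\
    }

Capturing sizeof() inside the initializer is what lets ice_find_dummy_packet() below return a single profile pointer instead of filling three out-parameters.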
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -5501,212 +5568,66 @@ err_free_lkup_exts:
* structure per protocol header
* @lkups_cnt: number of protocols
* @tun_type: tunnel type
- * @pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: pointer to receive the pointer to the offsets for the packet
+ *
+ * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
*/
-static void
+static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- enum ice_sw_tunnel_type tun_type,
- const u8 **pkt, u16 *pkt_len,
- const struct ice_dummy_pkt_offsets **offsets)
+ enum ice_sw_tunnel_type tun_type)
{
- bool inner_tcp = false, inner_udp = false, outer_ipv6 = false;
- bool vlan = false, inner_ipv6 = false, gtp_no_pay = false;
+ const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
+ u32 match = 0;
u16 i;
+ switch (tun_type) {
+ case ICE_SW_TUN_GTPC:
+ match |= ICE_PKT_TUN_GTPC;
+ break;
+ case ICE_SW_TUN_GTPU:
+ match |= ICE_PKT_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_NVGRE:
+ match |= ICE_PKT_TUN_NVGRE;
+ break;
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ match |= ICE_PKT_TUN_UDP;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < lkups_cnt; i++) {
if (lkups[i].type == ICE_UDP_ILOS)
- inner_udp = true;
+ match |= ICE_PKT_INNER_UDP;
else if (lkups[i].type == ICE_TCP_IL)
- inner_tcp = true;
+ match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
- outer_ipv6 = true;
+ match |= ICE_PKT_OUTER_IPV6;
else if (lkups[i].type == ICE_VLAN_OFOS)
- vlan = true;
+ match |= ICE_PKT_VLAN;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
cpu_to_be16(0xFFFF))
- outer_ipv6 = true;
+ match |= ICE_PKT_OUTER_IPV6;
else if (lkups[i].type == ICE_ETYPE_IL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
cpu_to_be16(0xFFFF))
- inner_ipv6 = true;
+ match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_IPV6_IL)
- inner_ipv6 = true;
+ match |= ICE_PKT_INNER_IPV6;
else if (lkups[i].type == ICE_GTP_NO_PAY)
- gtp_no_pay = true;
+ match |= ICE_PKT_GTP_NOPAY;
}
- if (tun_type == ICE_SW_TUN_GTPU) {
- if (outer_ipv6) {
- if (gtp_no_pay) {
- *pkt = dummy_ipv6_gtp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtp_packet);
- *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
- } else if (inner_ipv6) {
- if (inner_udp) {
- *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
- *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
- *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
- }
- } else {
- if (inner_udp) {
- *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
- *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
- *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
- }
- }
- } else {
- if (gtp_no_pay) {
- *pkt = dummy_ipv4_gtpu_ipv4_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
- *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
- } else if (inner_ipv6) {
- if (inner_udp) {
- *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
- *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
- *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
- }
- } else {
- if (inner_udp) {
- *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
- *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
- *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
- }
- }
- }
- return;
- }
-
- if (tun_type == ICE_SW_TUN_GTPC) {
- if (outer_ipv6) {
- *pkt = dummy_ipv6_gtp_packet;
- *pkt_len = sizeof(dummy_ipv6_gtp_packet);
- *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
- } else {
- *pkt = dummy_ipv4_gtpu_ipv4_packet;
- *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
- *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
- }
- return;
- }
-
- if (tun_type == ICE_SW_TUN_NVGRE) {
- if (inner_tcp && inner_ipv6) {
- *pkt = dummy_gre_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet);
- *offsets = dummy_gre_ipv6_tcp_packet_offsets;
- return;
- }
- if (inner_tcp) {
- *pkt = dummy_gre_tcp_packet;
- *pkt_len = sizeof(dummy_gre_tcp_packet);
- *offsets = dummy_gre_tcp_packet_offsets;
- return;
- }
- if (inner_ipv6) {
- *pkt = dummy_gre_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_gre_ipv6_udp_packet);
- *offsets = dummy_gre_ipv6_udp_packet_offsets;
- return;
- }
- *pkt = dummy_gre_udp_packet;
- *pkt_len = sizeof(dummy_gre_udp_packet);
- *offsets = dummy_gre_udp_packet_offsets;
- return;
- }
-
- if (tun_type == ICE_SW_TUN_VXLAN ||
- tun_type == ICE_SW_TUN_GENEVE) {
- if (inner_tcp && inner_ipv6) {
- *pkt = dummy_udp_tun_ipv6_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet);
- *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets;
- return;
- }
- if (inner_tcp) {
- *pkt = dummy_udp_tun_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
- *offsets = dummy_udp_tun_tcp_packet_offsets;
- return;
- }
- if (inner_ipv6) {
- *pkt = dummy_udp_tun_ipv6_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet);
- *offsets = dummy_udp_tun_ipv6_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_tun_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_udp_packet);
- *offsets = dummy_udp_tun_udp_packet_offsets;
- return;
- }
+ while (ret->match && (match & ret->match) != ret->match)
+ ret++;
- if (inner_udp && !outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_packet;
- *pkt_len = sizeof(dummy_vlan_udp_packet);
- *offsets = dummy_vlan_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_packet;
- *pkt_len = sizeof(dummy_udp_packet);
- *offsets = dummy_udp_packet_offsets;
- return;
- } else if (inner_udp && outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
- *offsets = dummy_vlan_udp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_udp_ipv6_packet);
- *offsets = dummy_udp_ipv6_packet_offsets;
- return;
- } else if ((inner_tcp && outer_ipv6) || outer_ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
- *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_tcp_ipv6_packet);
- *offsets = dummy_tcp_ipv6_packet_offsets;
- return;
- }
-
- if (vlan) {
- *pkt = dummy_vlan_tcp_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_packet);
- *offsets = dummy_vlan_tcp_packet_offsets;
- } else {
- *pkt = dummy_tcp_packet;
- *pkt_len = sizeof(dummy_tcp_packet);
- *offsets = dummy_tcp_packet_offsets;
- }
+ return ret;
}
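The profile table is ordered most-specific-first and ends with the catch-all ICE_PKT_PROFILE(tcp, 0), so the walk always terminates: it returns the first entry whose match bits are a subset of the flags derived from the lookups. A minimal standalone demo of the subset test (flag values invented for illustration):

    #include <stdio.h>

    #define PKT_OUTER_IPV6	0x1
    #define PKT_INNER_UDP	0x2

    /* true when every bit required by the profile is present in match */
    static int profile_fits(unsigned int match, unsigned int profile)
    {
    	return (match & profile) == profile;
    }

    int main(void)
    {
    	unsigned int match = PKT_OUTER_IPV6 | PKT_INNER_UDP;

    	printf("%d\n", profile_fits(match, PKT_OUTER_IPV6 | PKT_INNER_UDP)); /* 1 */
    	printf("%d\n", profile_fits(match, PKT_OUTER_IPV6));                 /* 1 */
    	printf("%d\n", profile_fits(PKT_OUTER_IPV6, PKT_INNER_UDP));         /* 0 */
    	return 0;
    }

Because more specific entries come first, a VXLAN filter with inner IPv6 + UDP lands on udp_tun_ipv6_udp rather than the looser udp_tun_udp.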
/**
@@ -5716,15 +5637,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* structure per protocol header
* @lkups_cnt: number of protocols
* @s_rule: stores rule information from the match criteria
- * @dummy_pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: offset info for the dummy packet
+ * @profile: dummy packet profile (the template, its size and header offsets)
*/
static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
- const u8 *dummy_pkt, u16 pkt_len,
- const struct ice_dummy_pkt_offsets *offsets)
+ const struct ice_dummy_pkt_profile *profile)
{
u8 *pkt;
u16 i;
@@ -5734,9 +5652,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*/
pkt = s_rule->pdata.lkup_tx_rx.hdr;
- memcpy(pkt, dummy_pkt, pkt_len);
+ memcpy(pkt, profile->pkt, profile->pkt_len);
for (i = 0; i < lkups_cnt; i++) {
+ const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
enum ice_protocol_type type;
u16 offset = 0, len = 0, j;
bool found = false;
@@ -5810,16 +5729,18 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* indicated by the mask to make sure we don't improperly write
* over any significant packet data.
*/
- for (j = 0; j < len / sizeof(u16); j++)
- if (((u16 *)&lkups[i].m_u)[j])
- ((u16 *)(pkt + offset))[j] =
- (((u16 *)(pkt + offset))[j] &
- ~((u16 *)&lkups[i].m_u)[j]) |
- (((u16 *)&lkups[i].h_u)[j] &
- ((u16 *)&lkups[i].m_u)[j]);
+ for (j = 0; j < len / sizeof(u16); j++) {
+ u16 *ptr = (u16 *)(pkt + offset);
+ u16 mask = lkups[i].m_raw[j];
+
+ if (!mask)
+ continue;
+
+ ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
+ }
}
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len);
return 0;
}
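The rewritten inner loop is a plain masked read-modify-write over 16-bit words: template bits survive where the mask is clear, the filter's header bits win where it is set. In miniature (values invented):

    #include <stdint.h>

    /* Mirrors the driver's per-word merge over the dummy packet. */
    static uint16_t merge_word(uint16_t tmpl, uint16_t hdr, uint16_t mask)
    {
    	return (tmpl & ~mask) | (hdr & mask);
    }
    /* merge_word(0x1234, 0xabcd, 0x00ff) == 0x12cd */

Words with an all-zero mask are skipped entirely, which is what keeps significant template bytes from being overwritten.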
@@ -6042,12 +5963,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_rule_query_data *added_entry)
{
struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
- u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
- const struct ice_dummy_pkt_offsets *pkt_offsets;
struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ const struct ice_dummy_pkt_profile *profile;
+ u16 rid = 0, i, rule_buf_sz, vsi_handle;
struct list_head *rule_head;
struct ice_switch_info *sw;
- const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
int status;
@@ -6065,24 +5985,18 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* get # of words we need to match */
word_cnt = 0;
for (i = 0; i < lkups_cnt; i++) {
- u16 j, *ptr;
+ u16 j;
- ptr = (u16 *)&lkups[i].m_u;
- for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
- if (ptr[j] != 0)
+ for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
+ if (lkups[i].m_raw[j])
word_cnt++;
}
if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
return -EINVAL;
- /* make sure that we can locate a dummy packet */
- ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
- &pkt_offsets);
- if (!pkt) {
- status = -EINVAL;
- goto err_ice_add_adv_rule;
- }
+ /* locate a dummy packet */
+ profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
@@ -6123,7 +6037,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
return status;
}
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
return -ENOMEM;
@@ -6183,8 +6097,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
- status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
- pkt_len, pkt_offsets);
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
if (status)
goto err_ice_add_adv_rule;
@@ -6192,7 +6105,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
s_rule->pdata.lkup_tx_rx.hdr,
- pkt_offsets);
+ profile->offsets);
if (status)
goto err_ice_add_adv_rule;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index ed3d1d03befa..ecac75e71395 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -138,8 +138,16 @@ struct ice_update_recipe_lkup_idx_params {
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
- union ice_prot_hdr h_u; /* Header values */
- union ice_prot_hdr m_u; /* Mask of header values to match */
+ union {
+ union ice_prot_hdr h_u; /* Header values */
+ /* Used to iterate over the headers */
+ u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
+ union {
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+ /* Used to iterate over header mask */
+ u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
};
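The anonymous unions give each header a same-sized u16 overlay, so both ice_fill_adv_dummy_packet() and the word-count loop in ice_add_adv_rule() can iterate m_raw/h_raw directly instead of casting &m_u/&h_u to u16 *. The aliasing idea in miniature (types invented for the sketch):

    union hdr {
    	unsigned int   ipv4[5];
    	unsigned short vlan[2];
    };

    struct elem {
    	union {
    		union hdr      h_u;				/* typed view */
    		unsigned short h_raw[sizeof(union hdr) / 2];	/* word view  */
    	};
    };

Both views occupy the same storage, so the struct layout (and any code that still uses h_u/m_u) is unchanged.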
struct ice_sw_act_ctrl {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f9bf008471c9..3f8b7274ed2f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -8,6 +8,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -1748,18 +1749,24 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- protocol = vlan_get_protocol(skb);
-
- if (protocol == htons(ETH_P_IP))
+ /* Set the tx_flags to indicate the IP protocol type. This is
+ * required so that the checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
first->tx_flags |= ICE_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
+ else if (ip.v6->version == 6)
first->tx_flags |= ICE_TX_FLAGS_IPV6;
if (skb->encapsulation) {
@@ -1957,6 +1964,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
+ __be16 protocol;
u32 paylen;
u8 l4_start;
int err;
@@ -1972,8 +1980,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
return err;
/* cppcheck-suppress unreadVariable */
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
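For MPLS-encapsulated traffic the IP header to offload is the inner network header, and skb_checksum_start() points at the L4 header wherever it lives, so the outer ethertype can no longer be trusted to distinguish v4 from v6; the code therefore reads ip.v4->version from the header itself. The resolution pattern, pulled out into a hypothetical helper for clarity:

    /* Hypothetical helper; mirrors the inline logic above. */
    static unsigned char *ice_tx_ip_hdr(struct sk_buff *skb)
    {
    	__be16 protocol = vlan_get_protocol(skb);

    	/* MPLS pushes the IP header one level deeper */
    	if (eth_p_mpls(protocol))
    		return skb_inner_network_header(skb);
    	return skb_network_header(skb);
    }

vlan_get_protocol(), eth_p_mpls() and the skb header accessors are existing kernel helpers; only the wrapper function is invented here.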
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cead3eb149bd..f5a906c03669 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -133,6 +133,7 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_CONSUMED BIT(0)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
+#define ICE_XDP_EXIT BIT(3)
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 9dd38f667059..49ba8bfdbf04 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -545,9 +545,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return ICE_XDP_REDIR;
+ if (!err)
+ return ICE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = ICE_XDP_EXIT;
+ else
+ result = ICE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -558,15 +562,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = ICE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- result = ICE_XDP_CONSUMED;
break;
}
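When xdp_do_redirect() fails with -ENOBUFS on a pool that uses need_wakeup, the AF_XDP Rx ring is full and user space will be woken to drain it; retrying within this NAPI pass would only drop frames. The new ICE_XDP_EXIT verdict encodes exactly that, and the caller branches on it (see ice_clean_rx_irq_zc() below). The convention, in sketch form:

    if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
    	xdp_xmit |= xdp_res;	/* handed off; flush doorbells later  */
    else if (xdp_res == ICE_XDP_EXIT)
    	/* XSK ring full: stop the loop, keep the remaining budget */;
    else if (xdp_res == ICE_XDP_CONSUMED)
    	xsk_buff_free(xdp);	/* dropped: recycle the buffer        */

The ixgbe hunks later in this diff apply the identical IXGBE_XDP_EXIT scheme.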
@@ -587,6 +592,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false;
+ int entries_to_alloc;
/* ZC path is enabled only when XDP program is set,
* so here it cannot be NULL
@@ -634,18 +640,23 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
- if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(xdp);
+ if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == ICE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == ICE_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ } else if (xdp_res == ICE_XDP_PASS) {
+ goto construct_skb;
+ }
- total_rx_bytes += size;
- total_rx_packets++;
+ total_rx_bytes += size;
+ total_rx_packets++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
- ice_bump_ntc(rx_ring);
- continue;
- }
construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
@@ -673,7 +684,9 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
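Refilling only once at least a quarter of the ring is free batches the xsk buffer allocations instead of topping up after every packet. ICE_RING_QUARTER is defined elsewhere in this series; presumably something like:

    /* Assumed shape of the threshold macro (definition not in this hunk): */
    #define ICE_RING_QUARTER(R)	((R)->count >> 2)

failure is OR-ed rather than assigned so an earlier ICE_XDP_EXIT early break is not masked by a later successful refill.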
@@ -929,13 +942,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
if (queue_id >= vsi->num_txq)
- return -ENXIO;
+ return -EINVAL;
if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
ring = vsi->xdp_rings[queue_id];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index bba3feaf3318..f1f69ce67420 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -8,6 +8,7 @@
#define IXGBE_XDP_CONSUMED BIT(0)
#define IXGBE_XDP_TX BIT(1)
#define IXGBE_XDP_REDIR BIT(2)
+#define IXGBE_XDP_EXIT BIT(3)
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index dd7ff66d422f..1703c640a434 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return IXGBE_XDP_REDIR;
+ if (!err)
+ return IXGBE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = IXGBE_XDP_EXIT;
+ else
+ result = IXGBE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -130,16 +134,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = IXGBE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = IXGBE_XDP_CONSUMED;
- break;
}
return result;
}
@@ -303,21 +307,26 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
- if (xdp_res) {
- if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(bi->xdp);
+ if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IXGBE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IXGBE_XDP_CONSUMED) {
+ xsk_buff_free(bi->xdp);
+ } else if (xdp_res == IXGBE_XDP_PASS) {
+ goto construct_skb;
+ }
- bi->xdp = NULL;
- total_rx_packets++;
- total_rx_bytes += size;
+ bi->xdp = NULL;
+ total_rx_packets++;
+ total_rx_bytes += size;
- cleaned_count++;
- ixgbe_inc_ntc(rx_ring);
- continue;
- }
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+construct_skb:
/* XDP_PASS path */
skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
if (!skb) {
@@ -516,10 +525,10 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!READ_ONCE(adapter->xdp_prog))
- return -ENXIO;
+ return -EINVAL;
if (qid >= adapter->num_xdp_queues)
- return -ENXIO;
+ return -EINVAL;
ring = adapter->xdp_ring[qid];
@@ -527,7 +536,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!ring->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
u64 eics = BIT_ULL(ring->q_vector->v_idx);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fe0989c0fc25..f58a1c0144ba 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -62,6 +62,7 @@ config MVNETA
select MVMDIO
select PHYLINK
select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
@@ -177,6 +178,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 9f88fe822555..ceba4aa4f026 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeon_ep/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 934f6dd90992..f6a54c7f0c69 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4735,6 +4735,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+ page_pool_ethtool_stats_get_strings(data);
}
}
@@ -4847,6 +4850,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
}
}
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < rxq_number; i++)
+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mvneta_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -4857,12 +4871,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
+
+ mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvneta_statistics);
+ return ARRAY_SIZE(mvneta_statistics) +
+ page_pool_ethtool_stats_get_count();
+
return -EOPNOTSUPP;
}
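The three ethtool callbacks must stay in lockstep: get_sset_count() reserves the slots, get_strings() names them in order, and get_stats() fills them in the same order, with the page-pool entries appended after the driver's own array in each. A minimal model of the contract (names invented):

    #define N_DRV	3	/* driver's own counters */

    static int my_sset_count(void)
    {
    	return N_DRV + page_pool_ethtool_stats_get_count();
    }

    static void my_get_strings(u8 *data)
    {
    	/* ... write N_DRV names, ETH_GSTRING_LEN bytes each ... */
    	data += N_DRV * ETH_GSTRING_LEN;
    	page_pool_ethtool_stats_get_strings(data);	/* remaining slots */
    }

mvneta_ethtool_pp_stats() above aggregates the per-rxq pools into one struct page_pool_stats before calling page_pool_ethtool_stats_get(), so the slot count stays independent of rxq_number.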
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
new file mode 100644
index 000000000000..0d7db815340e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC Driver Configuration
+#
+
+config OCTEON_EP
+ tristate "Marvell Octeon PCI Endpoint NIC Driver"
+ depends on 64BIT
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports the networking functionality of Marvell's
+ Octeon PCI Endpoint NIC.
+
+ For the list of devices supported by this driver, refer to the
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon_ep.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
new file mode 100644
index 000000000000..2026c8118158
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC
+#
+
+obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
+
+octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
new file mode 100644
index 000000000000..6ad88d0fe43f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cn9k_pf.h"
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cn93_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint0",
+ "epf_vfire_rint1",
+ "epf_vfore_rint0",
+ "epf_vfore_rint1",
+ "epf_mbox_rint0",
+ "epf_mbox_rint1",
+ "epf_oei_rint",
+ "epf_dma_rint",
+ "epf_dma_vf_rint0",
+ "epf_dma_vf_rint1",
+ "epf_pp_vf_rint0",
+ "epf_pp_vf_rint1",
+ "epf_misc_rint",
+ "epf_rsvd",
+};
+
+/* Dump useful hardware CSRs for debugging */
+static void cn93_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cn93_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cn93_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_reset_iq(oct, q);
+ cn93_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CN93_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config 93xx PF. */
+static void octep_init_config_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 val;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.pkind = 0;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;
+
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7);
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CN93_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_R_IN_CTL_IS_64B;
+ reg_val |= CN93_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to the max value (0xFFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ oq_ctl &= ~0x7fffffULL; /* clear the ISIZE and BSIZE (bits 22-0) */
+ oq_ctl |= (oq->buffer_size & 0xffff); /* populate the BSIZE (bits 15-0) */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ mbox->q_no = q_no;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_pf_mbox_intr(struct octep_device *oct)
+{
+ u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL;
+
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+ for (qno = 0; qno < OCTEP_MAX_VF; qno++) {
+ val = readq(oct->mbox[qno]->mbox_read_reg);
+ dev_dbg(&oct->pdev->dev,
+ "PF MBOX READ: val:%llx from VF:%llx\n", val, qno);
+ }
+
+ writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+}
+
+/* Interrupts handler for all non-queue generic interrupts. */
+static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ goto irq_handled;
+ }
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+
+ goto irq_handled;
+ }
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MBOX INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MBOX_RINT intr: 0x%llx\n", reg_val);
+ cn93_handle_pf_mbox_intr(oct);
+ goto irq_handled;
+ }
+
+ /* Check for OEI INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received OEI_EINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val);
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ goto irq_handled;
+ }
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
+ if (reg_val) {
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ dev_info(&pdev->dev, "Reserved interrupt raised; ignoring\n");
+irq_handled:
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset of 93xx */
+static int octep_soft_reset_cn93_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Set core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+ /* clear core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cn93_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+}
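Both enable and disable build the same contiguous mask covering rings srn .. srn + num_rings - 1; the loop is equivalent to the following sketch (valid for num_rings > 0):

    u64 intr_mask = GENMASK_ULL(srn + num_rings - 1, srn);

The W1S/W1C register pairs then set or clear exactly those per-ring enable bits, while the OEI enables are driven with -1ULL (all ones) since they are not per-ring.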
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
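The hardware counter and the driver's snapshot are both u32, so the subtraction yields the number of newly completed instructions even across counter wrap-around, and the modulo folds the sum back into the ring. Worked example (values invented):

    #include <stdint.h>

    static uint32_t new_read_idx(uint32_t prev, uint32_t now,
    			     uint32_t old_idx, uint32_t max_count)
    {
    	uint32_t done = now - prev;	/* correct across u32 overflow */

    	return (old_idx + done) % max_count;
    }
    /* new_read_idx(0xfffffff0, 0x10, 5, 1024) == 37: 32 new completions */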
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cn93_pf(oct, q);
+ octep_enable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cn93_pf(oct, q);
+ octep_disable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cn93_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cn93_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cn93_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cn93_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+
+ oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf;
+
+ octep_setup_pci_window_regs_cn93_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cn93_pf(oct);
+ octep_configure_ring_mapping_cn93_pf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
new file mode 100644
index 000000000000..f208f3f9a447
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_CONFIG_H_
+#define _OCTEP_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_32BYTE_INSTR 32
+#define OCTEP_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_IQ_MAX_DESCRIPTORS 1024
+/* Minimum input (Tx) requests to be enqueued to ring doorbell */
+#define OCTEP_DB_MIN 1
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_IQ_INTR_THRESHOLD 0x0
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build skb from allocated page buffer once the packet is received.
+ * When a gathered packet is received, use the head page as the skb head
+ * and the page buffers in consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_OQ_PKTS_PER_INTR 128
+#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold
+ * wake up a stopped Tx queue if a minimum of 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
+
+#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs)
+#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs)
+#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf)
+#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf)
+#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names)
+
+#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr)
+
+/* Hardware Tx Queue configuration. */
+struct octep_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* pkind for packets sent to Octeon */
+ u16 pkind;
+
+ /* Minimum number of pending commands to be posted to Octeon before the
+ * driver rings the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+};
+
+/* Tx/Rx configuration */
+struct octep_pf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+
+ /* Starting IOQ number: this changes based on which PEM is used */
+ u16 srn;
+};
+
+/* Octeon Hardware SRIOV config */
+struct octep_sriov_config {
+ /* Max number of VF devices supported */
+ u16 max_vfs;
+
+ /* Number of VF devices enabled */
+ u16 active_vfs;
+
+ /* Max number of rings assigned to VF */
+ u8 max_rings_per_vf;
+
+ /* Number of rings enabled per VF */
+ u8 active_rings_per_vf;
+
+ /* starting ring number of VF's: ring-0 of VF-0 of the PF */
+ u16 vf_srn;
+};
+
+/* Octeon MSI-x config. */
+struct octep_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+
+ /* Number of Non IOQ interrupts */
+ u16 non_ioq_msix;
+
+ /* Names of Non IOQ interrupts */
+ char **non_ioq_msix_names;
+};
+
+struct octep_ctrl_mbox_config {
+ /* Barmem address for control mbox */
+ void __iomem *barmem_addr;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_config {
+ /* Input Queue attributes. */
+ struct octep_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_oq_config oq;
+
+ /* NIC Port Configuration */
+ struct octep_pf_ring_config pf_ring_cfg;
+
+ /* SRIOV configuration of the PF */
+ struct octep_sriov_config sriov_cfg;
+
+ /* MSI-X interrupt config */
+ struct octep_msix_config msix_cfg;
+
+ /* ctrl mbox config */
+ struct octep_ctrl_mbox_config ctrl_mbox_cfg;
+};
+#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
new file mode 100644
index 000000000000..39322e4dd100
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_ctrl_mbox.h"
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Timeout in msecs for message response */
+#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100
+/* Poll interval in msecs while waiting for message response */
+#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10
+
+#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m)
+#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144)
+
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
+#define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \
+ OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
+#define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m))
+#define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8)
+#define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12)
+
+#define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \
+ (sizeof(struct octep_ctrl_mbox_msg) * (i)))
+
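+/* The mbox queues are power-of-2 sized rings, so index arithmetic reduces
+ * to masking.  Worked example with elem_cnt = 16 (mask = 15), pi = 3 after
+ * wrap-around and ci = 14:
+ *   depth = (3 - 14) & 15 = 5 occupied slots
+ *   space = 15 - 5 = 10 free slots
+ * One slot is always left unused so a full ring is distinguishable from an
+ * empty one.
+ */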
+static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask)
+{
+ return (index + 1) & mask;
+}
+
+static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask)
+{
+ return mask - ((pi - ci) & mask);
+}
+
+static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask)
+{
+ return ((pi - ci) & mask);
+}
+
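+/* Usage sketch: the probe path is expected to map the mailbox window of a
+ * BAR, point mbox->barmem at it and then call octep_ctrl_mbox_init(), which
+ * validates the firmware magic number and status and reads back the queue
+ * geometry that the firmware laid out in BAR memory.
+ */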
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
+{
+ u64 magic_num, status;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox->barmem) {
+ pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem);
+ return -EINVAL;
+ }
+
+ magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem));
+ if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) {
+ pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num);
+ return -EINVAL;
+ }
+
+ status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem));
+ if (status != OCTEP_CTRL_MBOX_STATUS_READY) {
+ pr_info("octep_ctrl_mbox : Firmware is not ready.\n");
+ return -EINVAL;
+ }
+
+ mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem));
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->h2fq.elem_sz = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1);
+ mutex_init(&mbox->h2fq_lock);
+
+ mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem));
+ mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem));
+ mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1);
+ mutex_init(&mbox->f2hq_lock);
+
+ mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem);
+ mbox->h2fq.hw_q = mbox->barmem +
+ OCTEP_CTRL_MBOX_INFO_SZ +
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ +
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ;
+
+ mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem);
+ mbox->f2hq.hw_q = mbox->h2fq.hw_q +
+ ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) *
+ mbox->h2fq.elem_cnt);
+
+ /* ensure ready state is seen after everything is initialized */
+ wmb();
+ writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Init successful.\n");
+
+ return 0;
+}
+
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ unsigned long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS);
+ unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS);
+ struct octep_ctrl_mbox_q *q;
+ unsigned long expire;
+ u64 *mbuf, *word0;
+ u8 __iomem *qidx;
+ u16 pi, ci;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->h2fq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask))
+ return -ENOMEM;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi);
+ mbuf = (u64 *)msg->msg;
+ word0 = &msg->hdr.word0;
+
+ mutex_lock(&mbox->h2fq_lock);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(*word0, qidx);
+
+ pi = octep_ctrl_mbox_circq_inc(pi, q->mask);
+ writel(pi, q->hw_prod);
+ mutex_unlock(&mbox->h2fq_lock);
+
+ /* don't check for notification response */
+ if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
+ return 0;
+
+ expire = jiffies + timeout;
+ while (true) {
+ *word0 = readq(qidx);
+ if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
+ break;
+ schedule_timeout_interruptible(period);
+ if (signal_pending(current) || time_after(jiffies, expire)) {
+ pr_info("octep_ctrl_mbox: Timed out\n");
+ return -EBUSY;
+ }
+ }
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ return 0;
+}
+
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_q *q;
+ u32 count, pi, ci;
+ u8 __iomem *qidx;
+ u64 *mbuf;
+ int i;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ q = &mbox->f2hq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+ count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask);
+ if (!count)
+ return -EAGAIN;
+
+ qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci);
+ mbuf = (u64 *)msg->msg;
+
+ mutex_lock(&mbox->f2hq_lock);
+
+ msg->hdr.word0 = readq(qidx);
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ *mbuf++ = readq(qidx + (i * 8));
+
+ ci = octep_ctrl_mbox_circq_inc(ci, q->mask);
+ writel(ci, q->hw_cons);
+
+ mutex_unlock(&mbox->f2hq_lock);
+
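+	/* Firmware requests are answered in place: the callback rewrites the
+	 * message buffer and the updated words are posted back into the same
+	 * queue slot, with word0 written last to publish the response.
+	 */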
+ if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req)
+ return 0;
+
+ mbox->process_req(mbox->user_ctx, msg);
+ mbuf = (u64 *)msg->msg;
+ for (i = 1; i <= msg->hdr.sizew; i++)
+ writeq(*mbuf++, (qidx + (i * 8)));
+
+ writeq(msg->hdr.word0, qidx);
+
+ return 0;
+}
+
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
+{
+ if (!mbox)
+ return -EINVAL;
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+	/* ensure uninit status is written before the mbox is torn down */
+ wmb();
+
+ mutex_destroy(&mbox->h2fq_lock);
+ mutex_destroy(&mbox->f2hq_lock);
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Uninit successful.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
new file mode 100644
index 000000000000..2dc5753cfec6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_MBOX_H__
+#define __OCTEP_CTRL_MBOX_H__
+
+/* barmem structure
+ * |===========================================|
+ * |Info (16 + 120 + 120 = 256 bytes) |
+ * |-------------------------------------------|
+ * |magic number (8 bytes) |
+ * |bar memory size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |-------------------------------------------|
+ * |host version (8 bytes) |
+ * |host status (8 bytes) |
+ * |host reserved (104 bytes) |
+ * |-------------------------------------------|
+ * |fw version (8 bytes) |
+ * |fw status (8 bytes) |
+ * |fw reserved (104 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Fw to Host Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |element size (4 bytes) |
+ * |element count (4 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ * |===========================================|
+ * |Fw to Host Queue |
+ * |-------------------------------------------|
+ * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes|
+ * |===========================================|
+ */
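+/* From the sizes above the fixed offsets follow directly: the info block
+ * occupies bytes [0, 256), the host-to-fw queue info starts at offset 256,
+ * the fw-to-host queue info at 256 + 16 = 272, and the two element rings
+ * follow the info blocks back to back.
+ */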
+
+#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull
+
+/* Size of mbox info in bytes */
+#define OCTEP_CTRL_MBOX_INFO_SZ 256
+/* Size of mbox host to target queue info in bytes */
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
+/* Size of mbox target to host queue info in bytes */
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
+/* Size of mbox queue in bytes */
+#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt) (((sz) + 8) * (cnt))
+/* Size of mbox in bytes */
+#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt) (OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \
+ OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt))
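+/* Worked example with illustrative values: two rings of 16 entries of
+ * 64-byte elements give
+ *   OCTEP_CTRL_MBOX_SZ(64, 16, 64, 16)
+ *     = 256 + 16 + 16 + (64 + 8) * 16 + (64 + 8) * 16 = 2592 bytes.
+ */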
+
+/* Valid request message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0)
+/* Valid response message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1)
+/* Valid notification, no response required */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2)
+
+enum octep_ctrl_mbox_status {
+ OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
+ OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_STATUS_UNINIT
+};
+
+/* mbox message */
+union octep_ctrl_mbox_msg_hdr {
+ u64 word0;
+ struct {
+ /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
+ u32 flags;
+ /* size of message in words excluding header */
+ u32 sizew;
+ };
+};
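+/* word0 overlays the flags/sizew pair so that a header can be moved with a
+ * single 64-bit BAR access; on a little-endian host, flags sits in the low
+ * 32 bits and sizew in the high 32 bits.
+ */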
+
+/* mbox message */
+struct octep_ctrl_mbox_msg {
+ /* mbox transaction header */
+ union octep_ctrl_mbox_msg_hdr hdr;
+ /* pointer to message buffer */
+ void *msg;
+};
+
+/* Mbox queue */
+struct octep_ctrl_mbox_q {
+ /* q element size, should be aligned to unsigned long */
+ u16 elem_sz;
+ /* q element count, should be power of 2 */
+ u16 elem_cnt;
+ /* q mask */
+ u16 mask;
+ /* producer address in bar mem */
+ u8 __iomem *hw_prod;
+ /* consumer address in bar mem */
+ u8 __iomem *hw_cons;
+ /* q base address in bar mem */
+ u8 __iomem *hw_q;
+};
+
+struct octep_ctrl_mbox {
+ /* host driver version */
+ u64 version;
+ /* size of bar memory */
+ u32 barmem_sz;
+ /* pointer to BAR memory */
+ u8 __iomem *barmem;
+ /* user context for callback, can be null */
+ void *user_ctx;
+ /* callback handler for processing request, called from octep_ctrl_mbox_recv */
+ int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg);
+ /* host-to-fw queue */
+ struct octep_ctrl_mbox_q h2fq;
+ /* fw-to-host queue */
+ struct octep_ctrl_mbox_q f2hq;
+ /* lock for h2fq */
+ struct mutex h2fq_lock;
+ /* lock for f2hq */
+ struct mutex f2hq_lock;
+};
+
+/* Initialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
+
+/* Send mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Retrieve mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Uninitialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);
+
+#endif /* __OCTEP_CTRL_MBOX_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
new file mode 100644
index 000000000000..7c00c896ab98
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+int octep_get_link_status(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
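+	/* octep_ctrl_mbox_send() copies the firmware's response back into the
+	 * request buffer, so req can be reinterpreted as the response here.
+	 */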
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ return resp->link.state;
+}
+
+void octep_set_link_status(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+void octep_set_rx_state(struct octep_device *oct, bool up)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW;
+ msg.msg = &req;
+ octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.link.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ memcpy(addr, resp->mac.addr, ETH_ALEN);
+
+ return err;
+}
+
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_SET;
+ memcpy(&req.mac.addr, addr, ETH_ALEN);
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_set_mtu(struct octep_device *oct, int mtu)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.mtu.val = mtu;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
+
+int octep_get_if_stats(struct octep_device *oct)
+{
+ void __iomem *iface_rx_stats;
+ void __iomem *iface_tx_stats;
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+ req.get_stats.offset = oct->ctrl_mbox_ifstats_offset;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset;
+ iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset +
+ sizeof(struct octep_iface_rx_stats);
+ memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats));
+ memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats));
+
+ return err;
+}
+
+int octep_get_link_info(struct octep_device *oct)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_mbox_msg msg = {};
+ int err;
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.mac.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+ if (err)
+ return err;
+
+ resp = (struct octep_ctrl_net_h2f_resp *)&req;
+ oct->link_info.supported_modes = resp->link_info.supported_modes;
+ oct->link_info.advertised_modes = resp->link_info.advertised_modes;
+ oct->link_info.autoneg = resp->link_info.autoneg;
+ oct->link_info.pause = resp->link_info.pause;
+ oct->link_info.speed = resp->link_info.speed;
+
+ return err;
+}
+
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info)
+{
+ struct octep_ctrl_net_h2f_req req = {};
+ struct octep_ctrl_mbox_msg msg = {};
+
+ req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req.link_info.info.advertised_modes = link_info->advertised_modes;
+ req.link_info.info.autoneg = link_info->autoneg;
+ req.link_info.info.pause = link_info->pause;
+ req.link_info.info.speed = link_info->speed;
+
+ msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW;
+ msg.msg = &req;
+
+ return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
new file mode 100644
index 000000000000..f23b58381322
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_NET_H__
+#define __OCTEP_CTRL_NET_H__
+
+/* Supported commands */
+enum octep_ctrl_net_cmd {
+ OCTEP_CTRL_NET_CMD_GET = 0,
+ OCTEP_CTRL_NET_CMD_SET,
+};
+
+/* Supported states */
+enum octep_ctrl_net_state {
+ OCTEP_CTRL_NET_STATE_DOWN = 0,
+ OCTEP_CTRL_NET_STATE_UP,
+};
+
+/* Supported replies */
+enum octep_ctrl_net_reply {
+ OCTEP_CTRL_NET_REPLY_OK = 0,
+ OCTEP_CTRL_NET_REPLY_GENERIC_FAIL,
+ OCTEP_CTRL_NET_REPLY_INVALID_PARAM,
+};
+
+/* Supported host to fw commands */
+enum octep_ctrl_net_h2f_cmd {
+ OCTEP_CTRL_NET_H2F_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_H2F_CMD_MTU,
+ OCTEP_CTRL_NET_H2F_CMD_MAC,
+ OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+};
+
+/* Supported fw to host commands */
+enum octep_ctrl_net_f2h_cmd {
+ OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+};
+
+struct octep_ctrl_net_req_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+	/* enum octep_ctrl_net_h2f_cmd or octep_ctrl_net_f2h_cmd */
+ u16 cmd;
+ /* reserved */
+ u16 rsvd0;
+};
+
+/* get/set mtu request */
+struct octep_ctrl_net_h2f_req_cmd_mtu {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get/set mac request */
+struct octep_ctrl_net_h2f_req_cmd_mac {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get if_stats, xstats, q_stats request */
+struct octep_ctrl_net_h2f_req_cmd_get_stats {
+ /* offset into barmem where fw should copy over stats */
+ u32 offset;
+};
+
+/* get/set link state, rx state */
+struct octep_ctrl_net_h2f_req_cmd_state {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* link info */
+struct octep_ctrl_net_link_info {
+ /* Bitmap of Supported link speeds/modes */
+ u64 supported_modes;
+ /* Bitmap of Advertised link speeds/modes */
+ u64 advertised_modes;
+	/* Autonegotiation state; bit 0=disabled; bit 1=enabled */
+ u8 autoneg;
+ /* Pause frames setting. bit 0=disabled; bit 1=enabled */
+ u8 pause;
+ /* Negotiated link speed in Mbps */
+ u32 speed;
+};
+
+/* get/set link info */
+struct octep_ctrl_net_h2f_req_cmd_link_info {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_link_info */
+ struct octep_ctrl_net_link_info info;
+};
+
+/* Host to fw request data */
+struct octep_ctrl_net_h2f_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_req_cmd_mac mac;
+ struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats;
+ struct octep_ctrl_net_h2f_req_cmd_state link;
+ struct octep_ctrl_net_h2f_req_cmd_state rx;
+ struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ };
+} __packed;
+
+struct octep_ctrl_net_resp_hdr {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+	/* enum octep_ctrl_net_h2f_cmd or octep_ctrl_net_f2h_cmd */
+ u16 cmd;
+ /* octep_ctrl_net_reply */
+ u16 reply;
+};
+
+/* get mtu response */
+struct octep_ctrl_net_h2f_resp_cmd_mtu {
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get mac response */
+struct octep_ctrl_net_h2f_resp_cmd_mac {
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get link state, rx state response */
+struct octep_ctrl_net_h2f_resp_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Host to fw response data */
+struct octep_ctrl_net_h2f_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+ struct octep_ctrl_net_h2f_resp_cmd_state link;
+ struct octep_ctrl_net_h2f_resp_cmd_state rx;
+ struct octep_ctrl_net_link_info link_info;
+ };
+} __packed;
+
+/* link state notification */
+struct octep_ctrl_net_f2h_req_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Fw to host request data */
+struct octep_ctrl_net_f2h_req {
+ struct octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_f2h_req_cmd_state link;
+ };
+};
+
+/* Fw to host response data */
+struct octep_ctrl_net_f2h_resp {
+ struct octep_ctrl_net_resp_hdr hdr;
+};
+
+/* Size of host to fw octep_ctrl_mbox queue element */
+union octep_ctrl_net_h2f_data_sz {
+ struct octep_ctrl_net_h2f_req h2f_req;
+ struct octep_ctrl_net_h2f_resp h2f_resp;
+};
+
+/* Size of fw to host octep_ctrl_mbox queue element */
+union octep_ctrl_net_f2h_data_sz {
+ struct octep_ctrl_net_f2h_req f2h_req;
+ struct octep_ctrl_net_f2h_resp f2h_resp;
+};
+
+/* size of host to fw data in words */
+#define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size of fw to host data in words */
+#define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \
+ (sizeof(unsigned long)))
+
+/* size in words of get/set mtu request */
+#define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2
+/* size in words of get/set mac request */
+#define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2
+/* size in words of get stats request */
+#define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2
+/* size in words of get/set state request */
+#define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2
+/* size in words of get/set link info request */
+#define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4
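+/* A "word" here is sizeof(unsigned long), i.e. 8 bytes on a typical 64-bit
+ * host.  For example, a get/set MTU request is the 8-byte request header
+ * plus a 4-byte mtu command, which rounds up to the 2 words declared above.
+ */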
+
+/* size in words of get mtu response */
+#define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2
+/* size in words of set mtu response */
+#define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1
+/* size in words of get mac response */
+#define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2
+/* size in words of set mac response */
+#define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1
+/* size in words of get state response */
+#define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2
+/* size in words of set state response */
+#define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1
+/* size in words of get link info response */
+#define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4
+/* size in words of set link info response */
+#define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1
+
+/** Get link status from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: link state on success (0=down, 1=up), -errno on failure.
+ */
+int octep_get_link_status(struct octep_device *oct);
+
+/** Set link status in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_link_status(struct octep_device *oct, bool up);
+
+/** Set rx state in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param up: boolean status.
+ */
+void octep_set_rx_state(struct octep_device *oct, bool up);
+
+/** Get mac address from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mac address in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mac_addr(struct octep_device *oct, u8 *addr);
+
+/** Set mtu in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param mtu: new MTU value.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_mtu(struct octep_device *oct, int mtu);
+
+/** Get interface statistics from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_if_stats(struct octep_device *oct);
+
+/** Get link info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_get_link_info(struct octep_device *oct);
+
+/** Set link info in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info);
+
+#endif /* __OCTEP_CTRL_NET_H__ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
new file mode 100644
index 000000000000..87ef129b269a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "tx_hw_underflow",
+ "tx_hw_control",
+ "tx_less_than_64",
+ "tx_equal_64",
+ "tx_equal_65_to_127",
+ "tx_equal_128_to_255",
+ "tx_equal_256_to_511",
+ "tx_equal_512_to_1023",
+ "tx_equal_1024_to_1518",
+ "tx_greater_than_1518",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_hw_mcast",
+ "rx_pause_pkts",
+ "rx_pause_bytes",
+ "rx_dropped_pkts_fifo_full",
+ "rx_dropped_bytes_fifo_full",
+ "rx_err_pkts",
+};
+
+#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
+			snprintf(strings, ETH_GSTRING_LEN, "%s",
+				 octep_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+		return OCTEP_GLOBAL_STATS_CNT + (num_queues *
+			(OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+octep_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_tx_stats *iface_tx_stats;
+ struct octep_iface_rx_stats *iface_rx_stats;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+	rx_packets = 0;
+	rx_bytes = 0;
+	tx_packets = 0;
+	tx_bytes = 0;
+	rx_alloc_errors = 0;
+	tx_busy_errors = 0;
+
+ octep_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ tx_busy_errors += iq->stats.tx_busy;
+
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_packets;
+ data[i++] = tx_packets;
+ data[i++] = rx_bytes;
+ data[i++] = tx_bytes;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full +
+ iface_rx_stats->err_pkts;
+ data[i++] = iface_tx_stats->xscol +
+ iface_tx_stats->xsdef;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_tx_stats->undflw;
+ data[i++] = iface_tx_stats->ctl;
+ data[i++] = iface_tx_stats->hist_lt64;
+ data[i++] = iface_tx_stats->hist_eq64;
+ data[i++] = iface_tx_stats->hist_65to127;
+ data[i++] = iface_tx_stats->hist_128to255;
+ data[i++] = iface_tx_stats->hist_256to511;
+ data[i++] = iface_tx_stats->hist_512to1023;
+ data[i++] = iface_tx_stats->hist_1024to1518;
+ data[i++] = iface_tx_stats->hist_gt1518;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+	data[i++] = iface_rx_stats->bcast_pkts;
+	data[i++] = iface_rx_stats->mcast_pkts;
+ data[i++] = iface_rx_stats->pause_pkts;
+ data[i++] = iface_rx_stats->pause_octets;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ data[i++] = iface_rx_stats->err_pkts;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \
+{ \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ if (link_info->pause) {
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static int octep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info link_info_new;
+ struct octep_iface_link_info *link_info;
+ u64 advertised = 0;
+ u8 autoneg = 0;
+ int err;
+
+ link_info = &oct->link_info;
+ memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info));
+
+ /* Only Full duplex is supported;
+ * Assume full duplex when duplex is unknown.
+ */
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_UNKNOWN)
+ return -EOPNOTSUPP;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED))
+ return -EOPNOTSUPP;
+ autoneg = 1;
+ }
+
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseR_FEC))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseLR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseLR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseLR_ER_FR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseLR4_ER4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4);
+
+ if (advertised == link_info->advertised_modes &&
+ cmd->base.speed == link_info->speed &&
+ cmd->base.autoneg == link_info->autoneg)
+ return 0;
+
+ link_info_new.advertised_modes = advertised;
+ link_info_new.speed = cmd->base.speed;
+ link_info_new.autoneg = autoneg;
+
+ err = octep_set_link_info(oct, &link_info_new);
+ if (err)
+ return err;
+
+ memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info));
+ return 0;
+}
+
+static const struct ethtool_ops octep_ethtool_ops = {
+ .get_drvinfo = octep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_get_strings,
+ .get_sset_count = octep_get_sset_count,
+ .get_ethtool_stats = octep_get_ethtool_stats,
+ .get_link_ksettings = octep_get_link_ksettings,
+ .set_link_ksettings = octep_set_link_ksettings,
+};
+
+void octep_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
new file mode 100644
index 000000000000..e020c81f3455
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+struct workqueue_struct *octep_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to the interrupt handler; from it the
+ * napi poll is scheduled, and it gives quick access to the private data of
+ * the Tx/Rx queue corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_alloc_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+ struct octep_ioq_vector *ioq_vector;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_free_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_enable_msix_range(struct octep_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+	/* One MSI-X vector per queue plus the generic (non-queue) interrupts */
+ num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
+ oct->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_disable_msix(struct octep_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all non-queue (generic) interrupts.
+ */
+static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.non_ioq_intr_handler(oct);
+}
+
+/**
+ * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data containing pointers to the Tx/Rx queue private
+ *        data and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_ioq_vector *ioq_vector = data;
+ struct octep_device *oct = ioq_vector->octep_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_request_irqs(struct octep_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ char **non_ioq_msix_names;
+ int num_non_ioq_msix;
+ int ret, i;
+
+ num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
+ non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
+
+ oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
+ OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
+ if (!oct->non_ioq_irq_names)
+ goto alloc_err;
+
+ /* First few MSI-X interrupts are non-queue interrupts */
+ for (i = 0; i < num_non_ioq_msix; i++) {
+ char *irq_name;
+
+ irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
+ "%s-%s", netdev->name, non_ioq_msix_names[i]);
+ ret = request_irq(msix_entry->vector,
+ octep_non_ioq_intr_handler, 0,
+ irq_name, oct);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for %s; err=%d",
+ irq_name, ret);
+ goto non_ioq_irq_err;
+ }
+ }
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i + num_non_ioq_msix];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (i > num_non_ioq_msix) {
+ --i;
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+non_ioq_irq_err:
+ while (i) {
+ --i;
+ free_irq(oct->msix_entries[i].vector, oct);
+ }
+alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_free_irqs(struct octep_device *oct)
+{
+ int i;
+
+ /* First few MSI-X interrupts are non queue interrupts; free them */
+ for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
+ free_irq(oct->msix_entries[i].vector, oct);
+ kfree(oct->non_ioq_irq_names);
+
+ /* Free IRQs for Input/Output (Tx/Rx) queues */
+ for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector,
+ oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_setup_irqs(struct octep_device *oct)
+{
+ if (octep_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_disable_msix(oct);
+enable_msix_err:
+ octep_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_clean_irqs(struct octep_device *oct)
+{
+ octep_free_irqs(oct);
+ octep_disable_msix(oct);
+ octep_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+	/* Flush the previous writes before writing to the RESEND bit */
+ wmb();
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
+ rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
+
+ /* need more polling if tx completion processing is still pending or
+ * processed at least 'budget' number of rx packets.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ napi_complete(napi);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+}
+
+/**
+ * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_add(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
+ octep_napi_poll, 64);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_delete(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_enable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_disable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * octep_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_open(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_setup_irqs(oct))
+ goto setup_irq_err;
+
+ /* Tx queues map to IQs (instruction queues), Rx queues to OQs */
+ err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_napi_add(oct);
+ octep_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_set_rx_state(oct, true);
+
+ ret = octep_get_link_status(oct);
+ if (!ret)
+ octep_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_oq_dbell_init(oct);
+
+ ret = octep_get_link_status(oct);
+ if (ret)
+ octep_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+ octep_clean_irqs(oct);
+setup_irq_err:
+ octep_free_oqs(oct);
+setup_oq_err:
+ octep_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ *
+ * Return: 0, always.
+ */
+static int octep_stop(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_set_link_status(oct, false);
+ octep_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+
+ octep_clean_irqs(oct);
+ octep_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_free_oqs(oct);
+ octep_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static inline int octep_iq_full_check(struct octep_iq *iq)
+{
+ if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ return 0;
+
+ /* Stop the queue if unable to send */
+ netif_stop_subqueue(iq->netdev, iq->q_no);
+
+ /* check again and restart the queue, in case NAPI has just freed
+ * enough Tx ring entries.
+ */
+ if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ netif_start_subqueue(iq->netdev, iq->q_no);
+ iq->stats.restart_cnt++;
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_tx_sglist_desc *sglist;
+ struct octep_tx_buffer *tx_buffer;
+ struct octep_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_instr_hdr *ih;
+ struct octep_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ u16 q_no, wi;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+ if (octep_iq_full_check(iq)) {
+ iq->stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->tlen = skb->len;
+ ih->pkind = oct->pkind;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+ memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
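+ /* Each sglist entry packs four (length, DMA address) pairs; the four
+ * 16-bit lengths are stored most-significant-first, so buffer 'i' keeps
+ * its length in sglist[i >> 2].len[3 - (i & 3)] and its address in
+ * sglist[i >> 2].dma_ptr[i & 3].
+ */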
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
+
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
+ atomic_inc(&iq->instr_pending);
+ wi++;
+ if (wi == iq->max_count)
+ wi = 0;
+ iq->host_write_index = wi;
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ iq->stats.instr_posted++;
+ skb_tx_timestamp(skb);
+ return NETDEV_TX_OK;
+
+dma_map_sg_err:
+ if (si > 0) {
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ }
+ /* Entries 1..si-1 were mapped; entry 'si' failed and was never written
+ * to the sglist, so step back before unmapping. Lengths must be read
+ * from the same len[3 - (si & 3)] slots they were stored in above.
+ */
+ while (si > 1) {
+ si--;
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ }
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * octep_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ struct octep_device *oct = netdev_priv(netdev);
+ int q;
+
+ octep_get_if_stats(oct);
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->collisions = oct->iface_tx_stats.xscol;
+ stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+}
+
+/**
+ * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, that potentially clears a Tx queue timeout
+ * condition.
+ **/
+static void octep_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_stop(netdev);
+ octep_open(netdev);
+ }
+ rtnl_unlock();
+}
+
+/**
+ * octep_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ queue_work(octep_wq, &oct->tx_timeout_task);
+}
+
+static int octep_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ int err = 0;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+
+ return err;
+}
+
+static const struct net_device_ops octep_netdev_ops = {
+ .ndo_open = octep_open,
+ .ndo_stop = octep_stop,
+ .ndo_start_xmit = octep_start_xmit,
+ .ndo_get_stats64 = octep_get_stats64,
+ .ndo_tx_timeout = octep_tx_timeout,
+ .ndo_set_mac_address = octep_set_mac,
+ .ndo_change_mtu = octep_change_mtu,
+};
+
+/**
+ * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
+ *
+ * @work: pointer to ctrl mbox work_struct
+ *
+ * Poll ctrl mbox message queue and handle control messages from firmware.
+ **/
+static void octep_ctrl_mbox_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ ctrl_mbox_task);
+ struct net_device *netdev = oct->netdev;
+ struct octep_ctrl_net_f2h_req req = {};
+ struct octep_ctrl_mbox_msg msg;
+ int ret = 0;
+
+ msg.msg = &req;
+ while (true) {
+ ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg);
+ if (ret)
+ break;
+
+ switch (req.hdr.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ if (netif_running(netdev)) {
+ if (req.link.state) {
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ netif_carrier_on(netdev);
+ } else {
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ netif_carrier_off(netdev);
+ }
+ }
+ break;
+ default:
+ pr_info("Unknown mbox req : %u\n", req.hdr.cmd);
+ break;
+ }
+ }
+}
+
+/**
+ * octep_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Set up Octeon device hardware operations, configuration, etc.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int octep_device_setup(struct octep_device *oct)
+{
+ struct octep_ctrl_mbox *ctrl_mbox;
+ struct pci_dev *pdev = oct->pdev;
+ int i, ret;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR regions */
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ oct->mmio[i].hw_addr =
+ ioremap(pci_resource_start(oct->pdev, i * 2),
+ pci_resource_len(oct->pdev, i * 2));
+ if (!oct->mmio[i].hw_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR region %d\n", i * 2);
+ goto unsupported_dev;
+ }
+ oct->mmio[i].mapped = 1;
+ }
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ dev_info(&pdev->dev,
+ "Setting up OCTEON CN93XX PF PASS%d.%d\n",
+ OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ octep_device_setup_cn93_pf(oct);
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "%s: unsupported device\n", __func__);
+ goto unsupported_dev;
+ }
+
+ oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
+
+ /* Initialize control mbox */
+ ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+ ret = octep_ctrl_mbox_init(ctrl_mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+ goto unsupported_dev;
+ }
+ oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
+ ctrl_mbox->h2fq.elem_cnt,
+ ctrl_mbox->f2hq.elem_sz,
+ ctrl_mbox->f2hq.elem_cnt);
+
+ return 0;
+
+unsupported_dev:
+ /* Unwind whatever BAR mappings were set up and free the config */
+ while (i--)
+ iounmap(oct->mmio[i].hw_addr);
+ kfree(oct->conf);
+ oct->conf = NULL;
+ return -1;
+}
+
+/**
+ * octep_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_device_cleanup(struct octep_device *oct)
+{
+ int i;
+
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ for (i = 0; i < OCTEP_MAX_VF; i++) {
+ vfree(oct->mbox[i]);
+ oct->mbox[i] = NULL;
+ }
+
+ octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+
+ oct->hw_ops.soft_reset(oct);
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ if (oct->mmio[i].mapped)
+ iounmap(oct->mmio[i].hw_addr);
+ }
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+/**
+ * octep_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_device *octep_dev = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto err_dma_mask;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto err_pci_regions;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_device),
+ OCTEP_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_dev = netdev_priv(netdev);
+ octep_dev->netdev = netdev;
+ octep_dev->pdev = pdev;
+ octep_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_dev);
+
+ err = octep_device_setup(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto err_octep_config;
+ }
+ INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
+
+ netdev->netdev_ops = &octep_netdev_ops;
+ octep_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ netdev->hw_features = NETIF_F_SG;
+ netdev->features |= netdev->hw_features;
+ netdev->min_mtu = OCTEP_MIN_MTU;
+ netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->mtu = OCTEP_DEFAULT_MTU;
+
+ octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_dev->mac_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto register_dev_err;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+register_dev_err:
+ octep_device_cleanup(octep_dev);
+err_octep_config:
+ free_netdev(netdev);
+err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
+err_pci_regions:
+err_dma_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * octep_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_remove(struct pci_dev *pdev)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ cancel_work_sync(&oct->tx_timeout_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_driver = {
+ .name = OCTEP_DRV_NAME,
+ .id_table = octep_pci_id_tbl,
+ .probe = octep_probe,
+ .remove = octep_remove,
+};
+
+/**
+ * octep_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);
+
+ /* work queue for all deferred tasks */
+ octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
+ if (!octep_wq) {
+ pr_err("%s: Failed to create common workqueue\n",
+ OCTEP_DRV_NAME);
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&octep_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_DRV_NAME, ret);
+ destroy_workqueue(octep_wq);
+ return ret;
+ }
+
+ pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);
+
+ return ret;
+}
+
+/**
+ * octep_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);
+
+ pci_unregister_driver(&octep_driver);
+ destroy_workqueue(octep_wq);
+
+ pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
+}
+
+module_init(octep_init_module);
+module_exit(octep_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
new file mode 100644
index 000000000000..025626a61383
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_MAIN_H_
+#define _OCTEP_MAIN_H_
+
+#include "octep_tx.h"
+#include "octep_rx.h"
+#include "octep_ctrl_mbox.h"
+
+#define OCTEP_DRV_NAME "octeon_ep"
+#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"
+
+#define OCTEP_PCIID_CN93_PF 0xB200177d
+#define OCTEP_PCIID_CN93_VF 0xB203177d
+
+#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+
+#define OCTEP_MAX_QUEUES 63
+#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_VF 64
+
+#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ
+
+/* Flags to disable and enable Interrupts */
+#define OCTEP_INPUT_INTR (1)
+#define OCTEP_OUTPUT_INTR (2)
+#define OCTEP_MBOX_INTR (4)
+#define OCTEP_ALL_INTR 0xff
+
+#define OCTEP_IQ_INTR_RESEND_BIT 59
+#define OCTEP_OQ_INTR_RESEND_BIT 59
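+
+/* Writing 1 to bit 59 of a queue's instruction-count/packets-sent register
+ * re-arms (resends) the MSI-X interrupt for that queue; see
+ * octep_enable_ioq_irq().
+ */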
+
+#define OCTEP_MMIO_REGIONS 3
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of the
+ * Octeon device gets mapped into the kernel virtual address space.
+ */
+struct octep_mmio {
+ /* Kernel virtual address the PCI address space is mapped to. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_pci_win_regs {
+ u8 __iomem *pci_win_wr_addr;
+ u8 __iomem *pci_win_rd_addr;
+ u8 __iomem *pci_win_wr_data;
+ u8 __iomem *pci_win_rd_data;
+};
+
+struct octep_hw_ops {
+ void (*setup_iq_regs)(struct octep_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ int (*soft_reset)(struct octep_device *oct);
+ void (*reinit_regs)(struct octep_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_iq *iq);
+
+ void (*enable_interrupts)(struct octep_device *oct);
+ void (*disable_interrupts)(struct octep_device *oct);
+
+ void (*enable_io_queues)(struct octep_device *oct);
+ void (*disable_io_queues)(struct octep_device *oct);
+ void (*enable_iq)(struct octep_device *oct, int q);
+ void (*disable_iq)(struct octep_device *oct, int q);
+ void (*enable_oq)(struct octep_device *oct, int q);
+ void (*disable_oq)(struct octep_device *oct, int q);
+ void (*reset_io_queues)(struct octep_device *oct);
+ void (*dump_registers)(struct octep_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_mbox_data {
+ u32 cmd;
+ u32 total_len;
+ u32 recv_len;
+ u32 rsvd;
+ u64 *data;
+};
+
+/* Octeon device mailbox */
+struct octep_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ u32 q_no;
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ struct octep_mbox_data mbox_data;
+};
+
+/* Tx/Rx queue vector per interrupt. */
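+/* One such vector exists per MSI-X queue interrupt; it ties together the
+ * Tx queue, the Rx queue and the NAPI context serviced from that
+ * interrupt (see octep_napi_poll()).
+ */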
+struct octep_ioq_vector {
+ char name[OCTEP_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_device *octep_dev;
+ struct octep_iq *iq;
+ struct octep_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_link_mode_bit_indices {
+ OCTEP_LINK_MODE_10GBASE_T = 0,
+ OCTEP_LINK_MODE_10GBASE_R,
+ OCTEP_LINK_MODE_10GBASE_CR,
+ OCTEP_LINK_MODE_10GBASE_KR,
+ OCTEP_LINK_MODE_10GBASE_LR,
+ OCTEP_LINK_MODE_10GBASE_SR,
+ OCTEP_LINK_MODE_25GBASE_CR,
+ OCTEP_LINK_MODE_25GBASE_KR,
+ OCTEP_LINK_MODE_25GBASE_SR,
+ OCTEP_LINK_MODE_40GBASE_CR4,
+ OCTEP_LINK_MODE_40GBASE_KR4,
+ OCTEP_LINK_MODE_40GBASE_LR4,
+ OCTEP_LINK_MODE_40GBASE_SR4,
+ OCTEP_LINK_MODE_50GBASE_CR2,
+ OCTEP_LINK_MODE_50GBASE_KR2,
+ OCTEP_LINK_MODE_50GBASE_SR2,
+ OCTEP_LINK_MODE_50GBASE_CR,
+ OCTEP_LINK_MODE_50GBASE_KR,
+ OCTEP_LINK_MODE_50GBASE_LR,
+ OCTEP_LINK_MODE_50GBASE_SR,
+ OCTEP_LINK_MODE_100GBASE_CR4,
+ OCTEP_LINK_MODE_100GBASE_KR4,
+ OCTEP_LINK_MODE_100GBASE_LR4,
+ OCTEP_LINK_MODE_100GBASE_SR4,
+ OCTEP_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: whether the physical link is up. */
+ u8 oper_up;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_device {
+ struct octep_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_mmio mmio[OCTEP_MMIO_REGIONS];
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* Pointers to Octeon Tx queues */
+ struct octep_iq *iq[OCTEP_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_oq *oq[OCTEP_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* PCI Window registers to access some hardware CSRs */
+ struct octep_pci_win_regs pci_win_regs;
+ /* Hardware operations */
+ struct octep_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* I/O queue information for its corresponding MSI-X interrupt. */
+ struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_mbox *mbox[OCTEP_MAX_VF];
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* control mbox over pf */
+ struct octep_ctrl_mbox ctrl_mbox;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Work entry to handle ctrl mbox interrupt */
+ struct work_struct ctrl_mbox_task;
+
+};
+
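+/* The chip pass is encoded in the PCI revision ID: bits [3:2] hold the
+ * major pass (0 is also reported as PASS1) and bits [1:0] the minor pass.
+ */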
+static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_MINOR_REV(struct octep_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
+
+/* Octeon CSR read/write access APIs */
+#define octep_write_csr(octep_dev, reg_off, value) \
+ writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_write_csr64(octep_dev, reg_off, val64) \
+ writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr(octep_dev, reg_off) \
+ readl((octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr64(octep_dev, reg_off) \
+ readq((octep_dev)->mmio[0].hw_addr + (reg_off))
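+
+/* Usage sketch: BAR0 (mmio[0]) carries the SDP CSRs, so hardware-specific
+ * code can, e.g., read the packet count of Rx ring 'q' on CN93 with:
+ * u64 cnts = octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(q));
+ */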
+
+/* Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+static inline u64
+OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr)
+{
+ u64 val64;
+
+ addr |= 1ull << 53; /* read 8 bytes */
+ writeq(addr, oct->pci_win_regs.pci_win_rd_addr);
+ val64 = readq(oct->pci_win_regs.pci_win_rd_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64);
+
+ return val64;
+}
+
+/* Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to write
+ * @param val - Value to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+static inline void
+OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val)
+{
+ writeq(addr, oct->pci_win_regs.pci_win_wr_addr);
+ writeq(val, oct->pci_win_regs.pci_win_wr_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val);
+}
+
+extern struct workqueue_struct *octep_wq;
+
+int octep_device_setup(struct octep_device *oct);
+int octep_setup_iqs(struct octep_device *oct);
+void octep_free_iqs(struct octep_device *oct);
+void octep_clean_iqs(struct octep_device *oct);
+int octep_setup_oqs(struct octep_device *oct);
+void octep_free_oqs(struct octep_device *oct);
+void octep_oq_dbell_init(struct octep_device *oct);
+void octep_device_setup_cn93_pf(struct octep_device *oct);
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
+int octep_oq_process_rx(struct octep_oq *oq, int budget);
+void octep_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _OCTEP_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
new file mode 100644
index 000000000000..cc51149790ff
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT 0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR 0x38
+#define CN93_CONFIG_PCIE_CAP 0x70
+#define CN93_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN93_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET (0x1ULL << 17)
+#define CN93_EPF_OFFSET (0x1ULL << 25)
+#define CN93_MAC_OFFSET (0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET (0x1ULL << 4)
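+
+/* Per-ring CSR blocks repeat every 128 KB (1 << 17) and per-EPF blocks
+ * every 32 MB (1 << 25); the ring/index macros below add the matching
+ * multiple of these strides to each block's START offset.
+ */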
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH 0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64 0x20000
+#define CN93_SDP_WIN_RD_ADDR64 0x20010
+#define CN93_SDP_WIN_WR_DATA64 0x20020
+#define CN93_SDP_WIN_WR_MASK_REG 0x20030
+#define CN93_SDP_WIN_RD_DATA64 0x20040
+
+#define CN93_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO 0x205F0
+
+#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
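+
+/* SDP_EPF_RINFO layout: starting ring number in bits [7:0], rings per VF
+ * in bits [35:32], number of VFs in bits [55:48].
+ */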
+
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_SDP_R_IN_CNTS_START 0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring) \
+ (CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring) \
+ (CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring) \
+ (CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CN93_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CN93_R_IN_CTL_NSR (0x1ULL << 3)
+#define CN93_R_IN_CTL_ESR (0x1ULL << 1)
+#define CN93_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CN93_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_SDP_R_OUT_CONTROL(ring) \
+ (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_ENABLE(ring) \
+ (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS(ring) \
+ (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When a MBOX write from the PF happens for a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is a RO register; the interrupt can be cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* PF_VF_DATA and PF_VF_INT signal from PF to VF; VF_PF_DATA carries data
+ * from VF to PF.
+ */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START 0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring) \
+ (CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START 0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring) \
+ (CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring) \
+ (CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring) \
+ (CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START 0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT 0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START 0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT 0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CN93_SDP_EPF_OEI_RINT 0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390
+
+#define CN93_SDP_EPF_DMA_RINT 0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440
+#define CN93_SDP_EPF_DMA_CNT_START 0x20460
+#define CN93_SDP_EPF_DMA_TIM_START 0x20480
+
+#define CN93_SDP_EPF_MISC_RINT 0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0
+
+#define CN93_SDP_EPF_MBOX_RINT(index) \
+ (CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index) \
+ (CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT BIT_ULL(62)
+#define CN93_INTR_R_IN_INT BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT BIT_ULL(60)
+#define CN93_INTR_R_RESEND BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START 0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START 0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CN93_SDP_EPVF_RING(ring) \
+ (CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring) \
+ (CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring) \
+ (CN93_SDP_IN_RATE_LIMIT_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_MAC_PF_RING_CTL(mac) \
+ (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET))
+
+#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF)
+#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
+#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+
+/* Number of non-queue interrupts in CN93xx */
+#define CN93_NUM_NON_IOQ_INTR 16
+#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
new file mode 100644
index 000000000000..945947ec7723
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+static void octep_oq_reset_indices(struct octep_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -1, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -1;
+}
+
+/**
+ * octep_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int octep_setup_oq(struct octep_device *oct, int q_no)
+{
+ struct octep_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities, an
+ * additional header is filled in by Octeon after the length field of
+ * Rx packets; this header carries extra packet metadata.
+ */
+ if (oct->caps_enabled)
+ oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -1;
+}
+
+/**
+ * octep_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_oq_free_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_oq_reset_indices(oq);
+}
+
+/**
+ * octep_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_free_oq(struct octep_oq *oq)
+{
+ struct octep_device *oct = oq->octep_dev;
+ int q_no = oq->q_no;
+
+ octep_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int octep_setup_oqs(struct octep_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_free_oq(oct->oq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_oq_dbell_init(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_oqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter has crossed half of its max value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the count returned by the hardware only accounts for full packets,
+ * not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_oq_process_rx(struct octep_device *oct,
+ struct octep_oq *oq, u16 pkts_to_process)
+{
+ struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ struct octep_rx_buffer *buff_info;
+ struct octep_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ struct sk_buff *skb;
+ u16 data_offset;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Swap the length field that is in Big-Endian to CPU */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE +
+ OCTEP_OQ_RESP_HW_EXT_SIZE;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contains only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (resp_hw_ext &&
+ resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_oq_process_rx(struct octep_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
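
As an illustration (not part of this patch), octep_oq_process_rx() is shaped to be driven from a NAPI poll callback. A minimal sketch follows; the octep_ioq_vector wrapper and octep_enable_ioq_irq() helper are assumptions made for the example:

	/* Hypothetical NAPI poll callback (sketch, assumed names) */
	static int octep_napi_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct octep_ioq_vector *v;	/* assumed per-queue wrapper */
		int rx_done;

		v = container_of(napi, struct octep_ioq_vector, napi);
		rx_done = octep_oq_process_rx(v->oq, budget);
		if (rx_done < budget && napi_complete_done(napi, rx_done))
			octep_enable_ioq_irq(v);	/* assumed: re-arm queue IRQ */
		return rx_done;
	}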
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
new file mode 100644
index 000000000000..782a24f27f3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_RX_H_
+#define _OCTEP_RX_H_
+
+/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the skb->data
+ * @info_ptr: DMA address of host memory, used by hardware to update the
+ * packet count. This is currently left unused to save PCI writes.
+ */
+struct octep_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
+
+#define OCTEP_CSUM_L4_VERIFIED 0x1
+#define OCTEP_CSUM_IP_VERIFIED 0x2
+#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has published support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 reserved:62;
+
+ /* checksum verified. */
+ u64 csum_verified:2;
+};
+
+#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian and needs to be converted to CPU endianness.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer))
+
+/* Output Queue statistics. Each output queue has three stats fields. */
+struct octep_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon OQ.
+ */
+struct octep_oq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_OQ_SIZE (sizeof(struct octep_oq))
+#endif /* _OCTEP_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
new file mode 100644
index 000000000000..511552bc3e87
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Reset the various indices of the Tx queue data structure. */
+static void octep_iq_reset_indices(struct octep_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+ atomic_set(&iq->instr_pending, 0);
+}
+
+/**
+ * octep_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ *
+ * Return: non-zero if the budget was exhausted, zero otherwise.
+ */
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_device *oct = iq->octep_dev;
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ atomic_sub(compl_pkts, &iq->instr_pending);
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+
+ if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
+ ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ netif_wake_subqueue(iq->netdev, iq->q_no);
+ return !budget;
+}
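
A usage note (editor's sketch, not part of this patch): the return value above is a "budget exhausted" flag, so a non-zero result tells the poller that more completions may still be pending. The surrounding poll loop here is assumed:

	if (octep_iq_process_completions(iq, budget))
		return budget;	/* budget used up: ask NAPI to poll again */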
+
+/**
+ * octep_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_iq_free_pending(struct octep_iq *iq)
+{
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ atomic_set(&iq->instr_pending, 0);
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset queue indices
+ */
+void octep_clean_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_iq_free_pending(oct->iq[i]);
+ octep_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_setup_iq(struct octep_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_free_iq(struct octep_iq *iq)
+{
+ struct octep_device *oct = iq->octep_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ if (iq->buff_info)
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_iqs(struct octep_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
new file mode 100644
index 000000000000..2ef57980eb47
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_TX_H_
+#define _OCTEP_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list */
+struct octep_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb, which also goes to Octeon hardware as a gather buffer.
+ * To allocate sufficient SGLIST entries for a packet with max fragments,
+ * round up by adding 3 before calculating the max SGLIST entries per packet.
+ */
+#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_SGLIST_SIZE_PER_PKT \
+ (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc))
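
To make the packing concrete (an editorial sketch, not part of this patch): each octep_tx_sglist_desc carries four (len, dma_ptr) pairs, which is why the completion paths in octep_tx.c index buffers with sglist[i >> 2].dma_ptr[i & 3]. With the default MAX_SKB_FRAGS of 17 this works out to (17 + 1 + 3) / 4 == 5 descriptors per packet. The helper name below is hypothetical:

	static inline void octep_sglist_set(struct octep_tx_sglist_desc *sglist,
					    int i, dma_addr_t dma, u16 len)
	{
		sglist[i >> 2].dma_ptr[i & 3] = dma;	/* descriptor i/4, slot i%4 */
		sglist[i >> 2].len[i & 3] = len;
	}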
+
+struct octep_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer))
+
+/* Hardware interface Tx statistics */
+struct octep_iface_tx_stats {
+ /* Packets dropped due to excessive collisions */
+ u64 xscol;
+
+ /* Packets dropped due to excessive deferral */
+ u64 xsdef;
+
+ /* Packets sent that experienced multiple collisions before successful
+ * transmission
+ */
+ u64 mcol;
+
+ /* Packets sent that experienced a single collision before successful
+ * transmission
+ */
+ u64 scol;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Packets sent with an octet count < 64 */
+ u64 hist_lt64;
+
+ /* Packets sent with an octet count == 64 */
+ u64 hist_eq64;
+
+ /* Packets sent with an octet count of 65–127 */
+ u64 hist_65to127;
+
+ /* Packets sent with an octet count of 128–255 */
+ u64 hist_128to255;
+
+ /* Packets sent with an octet count of 256–511 */
+ u64 hist_256to511;
+
+ /* Packets sent with an octet count of 512–1023 */
+ u64 hist_512to1023;
+
+ /* Packets sent with an octet count of 1024–1518 */
+ u64 hist_1024to1518;
+
+ /* Packets sent with an octet count of > 1518 */
+ u64 hist_gt1518;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets sent that experienced a transmit underflow and were
+ * truncated
+ */
+ u64 undflw;
+
+ /* Control/PAUSE packets sent */
+ u64 ctl;
+};
+
+/* Input Queue statistics. Each input queue has seven stats fields. */
+struct octep_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data from the host to the Octeon device. Each input queue (up to 4) of
+ * an Octeon device is represented by one such structure.
+ */
+struct octep_iq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_iq_stats stats;
+
+ /* This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator; 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+/* Hardware Tx completion response header */
+struct octep_instr_resp_hdr {
+ /* Request ID */
+ u64 rid:16;
+
+ /* PCIe port to use for response */
+ u64 pcie_port:3;
+
+ /* Scatter indicator 1=scatter */
+ u64 scatter:1;
+
+ /* Size of Expected result OR no. of entries in scatter list */
+ u64 rlenssz:14;
+
+ /* Desired destination port for result */
+ u64 dport:6;
+
+ /* Opcode Specific parameters */
+ u64 param:8;
+
+ /* Opcode for the return packet */
+ u64 opcode:16;
+};
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are optional
+ * and are filled by the driver based on firmware/hardware capabilities.
+ * These optional headers are collectively called Front Data, and their size
+ * is described by ih->fsz.
+ */
+struct octep_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_instr_hdr ih;
+ u64 ih64;
+ };
+
+ /* Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /* Input Instruction Response Header. */
+ struct octep_instr_resp_hdr irh;
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
+#endif /* _OCTEP_TX_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 47c899c08951..e5627782fac6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -421,13 +421,6 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
rule->re_arg.vtcam_id = ruleset->vtcam_id;
rule->re_key.prio = rule->priority;
- /* setup counter */
- rule->re_arg.count.valid = true;
- err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index,
- &rule->re_arg.count.client);
- if (err)
- goto err_rule_add;
-
rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
err = WARN_ON(rule->re) ? -EEXIST : 0;
if (err)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 921959a980ee..c12b09ac6559 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -70,6 +70,24 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
if (!flow_action_has_entries(flow_action))
return 0;
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
+ /* setup counter first */
+ rule->re_arg.count.valid = true;
+ err = prestera_acl_chain_to_client(chain_index,
+ &rule->re_arg.count.client);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 6c5618cf4f08..3754d8aec76d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/inetdevice.h>
+#include <net/inet_dscp.h>
#include <net/switchdev.h>
#include <linux/rhashtable.h>
@@ -26,7 +27,7 @@ struct prestera_kern_fib_cache {
/* Indicate if route is not overlapped by another table */
struct rhash_head ht_node; /* node of prestera_router */
struct fib_info *fi;
- u8 kern_tos;
+ dscp_t kern_dscp;
u8 kern_type;
bool reachable;
};
@@ -88,7 +89,7 @@ prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_create(struct prestera_switch *sw,
struct prestera_kern_fib_cache_key *key,
- struct fib_info *fi, u8 tos, u8 type)
+ struct fib_info *fi, dscp_t dscp, u8 type)
{
struct prestera_kern_fib_cache *fib_cache;
int err;
@@ -100,7 +101,7 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw,
memcpy(&fib_cache->key, key, sizeof(*key));
fib_info_hold(fi);
fib_cache->fi = fi;
- fib_cache->kern_tos = tos;
+ fib_cache->kern_dscp = dscp;
fib_cache->kern_type = type;
err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
@@ -132,7 +133,7 @@ __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
fri.tb_id = fc->key.kern_tb_id;
fri.dst = fc->key.addr.u.ipv4;
fri.dst_len = fc->key.prefix_len;
- fri.tos = fc->kern_tos;
+ fri.dscp = fc->kern_dscp;
fri.type = fc->kern_type;
/* flags begin */
fri.offload = offload;
@@ -305,7 +306,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
if (replace) {
fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
fen_info->fi,
- fen_info->tos,
+ fen_info->dscp,
fen_info->type);
if (!fib_cache) {
dev_err(sw->dev->dev, "fib_cache == NULL");
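
For context on the conversion above (an editorial aside, not part of this patch): dscp_t from <net/inet_dscp.h> is a type-safe wrapper that the compiler will not silently mix with plain integers. Conversions go through the header's helpers, for example:

	dscp_t dscp = inet_dsfield_to_dscp(tos);	/* masks off the two ECN bits */
	u8 dsfield = inet_dscp_to_dsfield(dscp);	/* back to a raw DS field */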
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 86d356b4388d..da4ec235d146 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
+config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 79d4cdbbcbf5..45ba0970504a 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,4 +5,9 @@
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f02d07ec5ccb..18eebcaa6a76 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
@@ -20,9 +21,11 @@
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/jhash.h>
+#include <linux/bitfield.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
@@ -786,7 +789,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
cnt * sizeof(struct mtk_tx_dma),
&eth->phy_scratch_ring,
GFP_ATOMIC);
@@ -798,10 +801,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(!eth->scratch_head))
return -ENOMEM;
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
phy_ring_tail = eth->phy_scratch_ring +
@@ -855,26 +858,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
} else {
if (dma_unmap_len(tx_buf, dma_len0)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
if (dma_unmap_len(tx_buf, dma_len1)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr1),
dma_unmap_len(tx_buf, dma_len1),
DMA_TO_DEVICE);
@@ -952,9 +955,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
- mapped_addr = dma_map_single(eth->dev, skb->data,
+ mapped_addr = dma_map_single(eth->dma_dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -993,10 +996,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
frag_map_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
goto err_dma;
if (i == nr_frags - 1 &&
@@ -1237,7 +1240,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
- u32 hash;
+ u32 hash, reason;
int mac;
ring = mtk_get_rx_ring(eth);
@@ -1274,18 +1277,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++;
goto release_desc;
}
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
new_data + NET_SKB_PAD +
eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
- dma_unmap_single(eth->dev, trxd.rxd1,
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
/* receive data */
@@ -1313,6 +1316,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
}
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe, skb,
+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
(trxd.rxd2 & RX_DMA_VTAG))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1558,7 +1566,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
@@ -1576,7 +1584,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys_pdma,
GFP_ATOMIC);
if (!ring->dma_pdma)
@@ -1635,7 +1643,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * sizeof(*ring->dma),
ring->dma,
ring->phys);
@@ -1643,7 +1651,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (ring->dma_pdma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
ring->dma_pdma,
ring->phys_pdma);
@@ -1688,18 +1696,18 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_alloc_coherent(eth->dev,
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
rx_dma_size * sizeof(*ring->dma),
&ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
ring->dma[i].rxd1 = (unsigned int)dma_addr;
@@ -1735,7 +1743,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
continue;
if (!ring->dma[i].rxd1)
continue;
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
ring->dma[i].rxd1,
ring->buf_size,
DMA_FROM_DEVICE);
@@ -1746,7 +1754,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
@@ -2099,7 +2107,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
eth->scratch_ring,
eth->phy_scratch_ring);
@@ -2267,7 +2275,7 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
+ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
gdm_config = MTK_GDMA_TO_PPE;
mtk_gdm_config(eth, gdm_config);
@@ -2341,7 +2349,7 @@ static int mtk_stop(struct net_device *dev)
mtk_dma_free(eth);
if (eth->soc->offload_version)
- mtk_ppe_stop(&eth->ppe);
+ mtk_ppe_stop(eth->ppe);
return 0;
}
@@ -2448,6 +2456,8 @@ static void mtk_dim_tx(struct work_struct *work)
static int mtk_hw_init(struct mtk_eth *eth)
{
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
int i, val, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2460,6 +2470,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
if (ret)
goto err_disable_pm;
+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
if (ret) {
@@ -3040,6 +3054,35 @@ free_netdev:
return err;
}
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
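
A usage sketch (editorial, not part of this patch): mtk_eth_set_dma_device() closes every running netdev, swaps the device used for DMA mapping/allocation, and reopens them. A hypothetical caller, e.g. the WED code once offload attaches, where "wed_dev" is an assumed device handle:

	mtk_eth_set_dma_device(eth, wed_dev);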
+
static int mtk_probe(struct platform_device *pdev)
{
struct device_node *mac_np;
@@ -3053,6 +3096,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
@@ -3101,6 +3145,16 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
+
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "cci-control-port");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
GFP_KERNEL);
@@ -3123,6 +3177,22 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ for (i = 0;; i++) {
+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ static const u32 wdma_regs[] = {
+ MTK_WDMA0_BASE,
+ MTK_WDMA1_BASE
+ };
+ void __iomem *wdma;
+
+ if (!np || i >= ARRAY_SIZE(wdma_regs))
+ break;
+
+ wdma = eth->base + wdma_regs[i];
+ mtk_wed_add_hw(np, eth, wdma, i);
+ }
+
for (i = 0; i < 3; i++) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
eth->irq[i] = eth->irq[0];
@@ -3198,10 +3268,11 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- err = mtk_ppe_init(&eth->ppe, eth->dev,
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
+ if (!eth->ppe) {
+ err = -ENOMEM;
goto err_free_dev;
+ }
err = mtk_eth_offload_init(eth);
if (err)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index c9d42be314b5..c98c7ee42c6f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -295,6 +295,9 @@
#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
+#define MTK_WDMA0_BASE 0x2800
+#define MTK_WDMA1_BASE 0x2c00
+
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
#define TX_DMA_TSO BIT(28)
@@ -465,6 +468,12 @@
#define RSTCTRL_FE BIT(6)
#define RSTCTRL_PPE BIT(31)
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
+
/* SGMII subsystem config registers */
/* Register to auto-negotiation restart */
#define SGMSYS_PCS_CONTROL_1 0x0
@@ -882,6 +891,7 @@ struct mtk_sgmii {
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
* @dev: The device pointer
+ * @dma_dev: The device pointer used for DMA mapping/alloc
* @base: The mapped register i/o base
* @page_lock: Make sure that register operations are atomic
 * @tx_irq_lock: Make sure that IRQ register operations are atomic
@@ -925,6 +935,7 @@ struct mtk_sgmii {
struct mtk_eth {
struct device *dev;
+ struct device *dma_dev;
void __iomem *base;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
@@ -974,7 +985,7 @@ struct mtk_eth {
u32 rx_dma_l4_valid;
int ip_align;
- struct mtk_ppe ppe;
+ struct mtk_ppe *ppe;
struct rhashtable flow_table;
};
@@ -1023,6 +1034,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 3ad10c793308..683f89f8e3b2 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
return ppe_m32(ppe, reg, val, 0);
}
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
int ret;
@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
u32 hash;
switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
- case MTK_PPE_PKT_TYPE_BRIDGE:
- hv1 = e->bridge.src_mac_lo;
- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
- hv2 = e->bridge.src_mac_hi >> 16;
- hv2 ^= e->bridge.dest_mac_lo;
- hv3 = e->bridge.dest_mac_hi;
- break;
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
+
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.l2;
@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
+
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.ib2;
@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
entry->ipv6.ports = ports_pad;
- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
entry->ipv6.ib2 = val;
l2 = &entry->ipv6.l2;
} else {
@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
return 0;
}
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+ return 0;
+}
+
static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_UNBIND);
+ dma_wmb();
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u16 timestamp;
+ u16 now;
+
+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+ if (timestamp > now)
+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
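
A worked example for the wraparound handling above (editorial, not part of this patch): MTK_FOE_IB1_BIND_TIMESTAMP is a 15-bit field, so the hardware counter wraps at 0x8000 and the "timestamp > now" branch accounts for exactly one wrap:

	u16 timestamp = 0x7ffe, now = 0x0005;	/* counter wrapped once */
	int idle = MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;	/* == 7 */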
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
- u32 hash;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
+
+ hwe = &ppe->foe_table[cur->hash];
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry *hwe;
+ struct mtk_foe_entry foe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
+
+ if (entry->hash == 0xffff)
+ goto out;
+
+ hwe = &ppe->foe_table[entry->hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ if (!mtk_flow_entry_match(entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_foe_entry *hwe;
+ u16 timestamp;
+ timestamp = mtk_eth_timestamp(ppe->eth);
timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
- hash = mtk_ppe_hash_entry(entry);
hwe = &ppe->foe_table[hash];
- if (!mtk_foe_entry_usable(hwe)) {
- hwe++;
- hash++;
-
- if (!mtk_foe_entry_usable(hwe))
- return -ENOSPC;
- }
-
memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
wmb();
hwe->ib1 = entry->ib1;
@@ -362,32 +519,195 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
dma_wmb();
mtk_ppe_cache_clear(ppe);
+}
- return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(&entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
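
A note on the bucket index above (editorial sketch; the rationale and helper name are the editor's inference, not stated in the patch): foe_flow[] is indexed by hash / 2 because the PPE may bind a flow into either of the two adjacent hardware FOE slots of a hash pair, so both slots map onto one software bucket:

	static inline struct hlist_head *
	mtk_foe_flow_bucket(struct mtk_ppe *ppe, u16 hash)
	{
		return &ppe->foe_flow[hash / 2];	/* one bucket per 2 hw slots */
	}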
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+ GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = &ppe->foe_table[hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(&foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
}
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ struct hlist_head *head = &ppe->foe_flow[hash / 2];
+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ tag += 4;
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version)
{
+ struct device *dev = eth->dev;
struct mtk_foe_entry *foe;
+ struct mtk_ppe *ppe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
/* need to allocate a separate device, since the PPE DMA access is
* not coherent.
*/
ppe->base = base;
+ ppe->eth = eth;
ppe->dev = dev;
ppe->version = version;
foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
- return -ENOMEM;
+ return NULL;
ppe->foe_table = foe;
mtk_ppe_debugfs_init(ppe);
- return 0;
+ return ppe;
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -443,7 +763,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
- MTK_PPE_FLOW_CFG_L2_BRIDGE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2ae65..1f5cf1c9a947 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/bitfield.h>
+#include <linux/rhashtable.h>
#define MTK_ETH_PPE_BASE 0xc00
@@ -48,9 +49,9 @@ enum {
#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
#define MTK_FOE_IB2_MULTICAST BIT(8)
-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
@@ -58,9 +59,9 @@ enum {
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
enum {
MTK_FOE_STATE_INVALID,
@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
u16 src_mac_lo;
};
+/* software-only entry type */
struct mtk_foe_bridge {
- u32 dest_mac_hi;
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 vlan;
- u16 src_mac_lo;
- u16 dest_mac_lo;
-
- u32 src_mac_hi;
+ struct {} key_end;
u32 ib2;
- u32 _rsv[5];
-
- u32 udf_tsid;
struct mtk_foe_mac_info l2;
};
@@ -235,7 +233,37 @@ enum {
MTK_PPE_CPU_REASON_INVALID = 0x1f,
};
+enum {
+ MTK_FLOW_TYPE_L4,
+ MTK_FLOW_TYPE_L2,
+ MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+ union {
+ struct hlist_node list;
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+ };
+ };
+ u8 type;
+ s8 wed_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+ struct {
+ struct mtk_flow_entry *base_flow;
+ struct hlist_node list;
+ struct {} end;
+ } l2_data;
+ };
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
struct mtk_ppe {
+ struct mtk_eth *eth;
struct device *dev;
void __iomem *base;
int version;
@@ -243,19 +271,35 @@ struct mtk_ppe {
struct mtk_foe_entry *foe_table;
dma_addr_t foe_phys;
+ u16 foe_check_time[MTK_PPE_ENTRIES];
+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
+ struct rhashtable l2_flows;
+
void *acct_table;
};
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
int mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
- ppe->foe_table[hash].ib1 = 0;
- dma_wmb();
+ u16 now, diff;
+
+ if (!ppe)
+ return;
+
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ __mtk_ppe_check_skb(ppe, skb, hash);
}
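
To connect this to the receive side (editorial note): mtk_ppe_check_skb() rate-limits the more expensive __mtk_ppe_check_skb() to once per HZ/10 per hash, and the u16 subtraction stays correct across jiffies wraparound since it is computed modulo 2^16. Its caller is the rx path added in the mtk_eth_soc.c hunk above:

	reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
	if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
		mtk_ppe_check_skb(eth->ppe, skb,
				  trxd.rxd4 & MTK_RXD4_FOE_ENTRY);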
static inline int
@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp);
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b482340cb9..eb0b598f14e4 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
static const char * const type_str[] = {
[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 7bb1f20002b5..1fe31058b0f2 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -6,10 +6,12 @@
#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
struct mtk_flow_data {
struct ethhdr eth;
@@ -19,11 +21,18 @@ struct mtk_flow_data {
__be32 src_addr;
__be32 dst_addr;
} v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
};
__be16 src_port;
__be16 dst_port;
+ u16 vlan_in;
+
struct {
u16 id;
__be16 proto;
@@ -35,12 +44,6 @@ struct mtk_flow_data {
} pppoe;
};
-struct mtk_flow_entry {
- struct rhash_head node;
- unsigned long cookie;
- u16 hash;
-};
-
static const struct rhashtable_params mtk_flow_ht_params = {
.head_offset = offsetof(struct mtk_flow_entry, node),
.key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -48,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
.automatic_shrinking = true,
};
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
bool egress)
@@ -63,6 +60,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
data->v4.dst_addr, data->dst_port);
}
+static int
+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
+{
+ return mtk_foe_entry_set_ipv6_tuple(foe,
+ data->v6.src_addr.s6_addr32, data->src_port,
+ data->v6.dst_addr.s6_addr32, data->dst_port);
+}
+
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
@@ -80,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+ struct net_device_path_ctx ctx = {
+ .dev = dev,
+ .daddr = addr,
+ };
+ struct net_device_path path = {};
+
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
+ if (!dev->netdev_ops->ndo_fill_forward_path)
+ return -1;
+
+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+ return -1;
+
+ if (path.type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->wdma_idx = path.mtk_wdma.wdma_idx;
+ info->queue = path.mtk_wdma.queue;
+ info->bss = path.mtk_wdma.bss;
+ info->wcid = path.mtk_wdma.wcid;
+
+ return 0;
+}
+
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -149,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev)
+ struct net_device *dev, const u8 *dest_mac,
+ int *wed_index)
{
+ struct mtk_wdma_info info = {};
int pse_port, dsa_port;
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+ info.wcid);
+ pse_port = 3;
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
+
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dsa_port >= 0)
mtk_foe_entry_set_dsa(foe, dsa_port);
@@ -164,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
else
return -EOPNOTSUPP;
+out:
mtk_foe_entry_set_pse_port(foe, pse_port);
return 0;
@@ -179,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
+ int wed_index = -1;
u16 addr_type = 0;
- u32 timestamp;
u8 l4proto = 0;
int err = 0;
- int hash;
int i;
if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -215,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return -EOPNOTSUPP;
}
+ switch (addr_type) {
+ case 0:
+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan_in = match.key->vlan_id;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_MANGLE:
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
mtk_flow_offload_mangle_eth(act, &data.eth);
break;
@@ -249,14 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
}
- switch (addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
if (!is_valid_ether_addr(data.eth.h_source) ||
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
@@ -270,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports ports;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
flow_rule_match_ports(rule, &ports);
data.src_port = ports.key->src;
data.dst_port = ports.key->dst;
- } else {
+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
return -EOPNOTSUPP;
}
@@ -288,10 +363,24 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
mtk_flow_set_ipv4_addr(&foe, &data, false);
}
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+
+ mtk_flow_set_ipv6_addr(&foe, &data);
+ }
+
flow_action_for_each(i, act, &rule->action) {
if (act->id != FLOW_ACTION_MANGLE)
continue;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
switch (act->mangle.htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -317,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return err;
}
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ foe.bridge.vlan = data.vlan_in;
+
if (data.vlan.num == 1) {
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
@@ -326,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
- err = mtk_flow_set_output_device(eth, &foe, odev);
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
if (err)
return err;
+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->cookie = f->cookie;
- timestamp = mtk_eth_timestamp(eth);
- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
- if (hash < 0) {
- err = hash;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+
+	err = mtk_foe_entry_commit(eth->ppe, entry);
+	if (err < 0)
goto free;
- }
- entry->hash = hash;
err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (err < 0)
- goto clear_flow;
+ goto clear;
return 0;
-clear_flow:
- mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+ mtk_foe_entry_clear(eth->ppe, entry);
free:
kfree(entry);
+ if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);
return err;
}
@@ -366,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(&eth->ppe, entry->hash);
+ mtk_foe_entry_clear(eth->ppe, entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+ mtk_wed_flow_remove(entry->wed_index);
kfree(entry);
return 0;
@@ -378,7 +477,6 @@ static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
- int timestamp;
u32 idle;
entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -386,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
- if (timestamp < 0)
- return -ETIMEDOUT;
-
- idle = mtk_eth_timestamp(eth) - timestamp;
+ idle = mtk_foe_entry_idle_time(eth->ppe, entry);
f->stats.lastused = jiffies - idle * HZ;
return 0;
@@ -442,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -483,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- if (type == TC_SETUP_FT)
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
return mtk_eth_setup_tc_block(dev, type_data);
-
- return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
}
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
return 0;
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
new file mode 100644
index 000000000000..8f0cd3196aac
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/debugfs.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed.h"
+#include "mtk_ppe.h"
+
+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_BUF_PER_PAGE	(PAGE_SIZE / MTK_WED_BUF_SIZE)
+
+#define MTK_WED_TX_RING_SIZE 2048
+#define MTK_WED_WDMA_RING_SIZE 1024
+
+static struct mtk_wed_hw *hw_list[2];
+static DEFINE_MUTEX(hw_lock);
+
+static void
+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
+}
+
+static void
+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, 0, mask);
+}
+
+static void
+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, mask, 0);
+}
+
+static void
+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
+}
+
+static void
+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, 0, mask);
+}
+
+static u32
+mtk_wed_read_reset(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_RESET);
+}
+
+static void
+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 status;
+
+ wed_w32(dev, MTK_WED_RESET, mask);
+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
+ !(status & mask), 0, 1000))
+ WARN_ON_ONCE(1);
+}
+
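+/* Claim the WED hardware instance matching the WLAN device's PCIe domain */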
+static struct mtk_wed_hw *
+mtk_wed_assign(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw;
+
+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+ if (!hw || hw->wed_dev)
+ return NULL;
+
+ hw->wed_dev = dev;
+ return hw;
+}
+
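+/* Allocate the WED TX buffer pool: a coherent descriptor ring plus DMA-mapped
+ * pages carved into MTK_WED_BUF_SIZE chunks, each seeded with a TX descriptor
+ * template by the WLAN driver's init_buf callback.
+ */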
+static int
+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+ int i, page_idx;
+
+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->buf_ring.size = ring_size;
+ dev->buf_ring.pages = page_list;
+
+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->buf_ring.desc = desc;
+ dev->buf_ring.desc_phys = desc_phys;
+
+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+ int s;
+
+ page = __dev_alloc_pages(GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx++] = page;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf = page_to_virt(page);
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+ u32 txd_size;
+ u32 ctrl;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ desc++;
+
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
+ void **page_list = dev->buf_ring.pages;
+ int page_idx;
+ int i;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
+ void *page = page_list[page_idx++];
+ dma_addr_t buf_addr;
+
+ if (!page)
+ break;
+
+ buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
+ desc, dev->buf_ring.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
+}
+
+static void
+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
+{
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
+ ring->desc, ring->desc_phys);
+}
+
+static void
+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
+}
+
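+/* Enable or disable the extended (error) interrupt sources; the
+ * token-without-payload error is only reported while flows are offloaded.
+ */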
+static void
+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+{
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
+}
+
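+/* Quiesce the device: clear the PCIe interrupt mirror, mask all interrupt
+ * sources and stop the WED, WPDMA and WDMA DMA engines.
+ */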
+static void
+mtk_wed_stop(struct mtk_wed_device *dev)
+{
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+ wed_clr(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+}
+
+static void
+mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
+ struct mtk_wed_hw *hw = dev->hw;
+
+ mutex_lock(&hw_lock);
+
+ mtk_wed_stop(dev);
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ mtk_wed_free_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+ if (of_dma_is_coherent(wlan_node))
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+
+ if (!hw_list[!hw->index]->wed_dev &&
+ hw->eth->dma_dev != hw->eth->dev)
+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
+
+ memset(dev, 0, sizeof(*dev));
+ module_put(THIS_MODULE);
+
+ hw->wed_dev = NULL;
+ mutex_unlock(&hw_lock);
+}
+
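+/* Early init: reset the WED block, tune WDMA descriptor recycling and
+ * program the WDMA register offsets plus the PCIe/WPDMA base addresses.
+ */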
+static void
+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+{
+ u32 mask, set;
+ u32 offset;
+
+ mtk_wed_stop(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
+
+ offset = dev->hw->index ? 0x04000400 : 0;
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+}
+
+static void
+mtk_wed_hw_init(struct mtk_wed_device *dev)
+{
+ if (dev->init_done)
+ return;
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
+
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start + dev->wlan.nbuf - 1));
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+}
+
+static void
+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ desc[i].buf0 = 0;
+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc[i].buf1 = 0;
+ desc[i].info = 0;
+ }
+}
+
+static bool
+mtk_wed_check_busy(struct mtk_wed_device *dev)
+{
+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+ return true;
+
+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
+ return true;
+
+ if (wed_r32(dev, MTK_WED_CTRL) &
+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
+ return true;
+
+ return false;
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev)
+{
+ int sleep = 15000;
+ int timeout = 100 * sleep;
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev);
+}
+
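+/* Reset all TX descriptors and DMA state; if any engine is still busy,
+ * fall back to resetting the whole functional blocks instead of just the
+ * ring indices.
+ */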
+static void
+mtk_wed_reset_dma(struct mtk_wed_device *dev)
+{
+ bool busy = false;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
+
+ if (!desc)
+ continue;
+
+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
+ }
+
+ if (mtk_wed_poll_busy(dev))
+ busy = mtk_wed_check_busy(dev);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+ wed_w32(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_TX |
+ MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+ }
+
+ for (i = 0; i < 100; i++) {
+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+ break;
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+ MTK_WED_WPDMA_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+ }
+}
+
+static int
+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev,
+ size * sizeof(*ring->desc),
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->size = size;
+ mtk_wed_ring_reset(ring->desc, size);
+
+ return 0;
+}
+
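+/* Allocate a WDMA RX ring and program its base and size into both the WDMA
+ * registers and the WED-side mirror registers.
+ */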
+static int
+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+{
+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+
+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+
+ return 0;
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ u32 wdma_mask;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ if (!dev->tx_wdma[i].desc)
+ mtk_wed_wdma_ring_setup(dev, i, 16);
+
+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+
+ mtk_wed_hw_init(dev);
+
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+
+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ mtk_wed_set_ext_int(dev, true);
+ val = dev->wlan.wpdma_phys |
+ MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
+
+ if (dev->hw->index)
+ val |= BIT(1);
+ val |= BIT(0);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+
+ dev->running = true;
+}
+
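+/* Called by the WLAN driver under the RCU read lock: claim a free WED
+ * instance, allocate the TX buffer pool and perform early hardware init.
+ */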
+static int
+mtk_wed_attach(struct mtk_wed_device *dev)
+ __releases(RCU)
+{
+ struct mtk_wed_hw *hw;
+ int ret = 0;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "mtk_wed_attach without holding the RCU read lock");
+
+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
+ !try_module_get(THIS_MODULE))
+ ret = -ENODEV;
+
+ rcu_read_unlock();
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&hw_lock);
+
+ hw = mtk_wed_assign(dev);
+ if (!hw) {
+ module_put(THIS_MODULE);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
+
+ dev->hw = hw;
+ dev->dev = hw->dev;
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+ mtk_eth_set_dma_device(hw->eth, hw->dev);
+
+ ret = mtk_wed_buffer_alloc(dev);
+ if (ret) {
+ mtk_wed_detach(dev);
+ goto out;
+ }
+
+ mtk_wed_hw_init_early(dev);
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+static int
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
+
+	/*
+	 * Tx ring redirection:
+	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
+	 * driver-allocated DMA ring gets configured into the WED
+	 * MTK_WED_RING_TX(n) registers.
+	 *
+	 * The WED driver posts its own DMA ring as WLAN PDMA TX and configures
+	 * it into the MTK_WED_WPDMA_RING_TX(n) registers. That ring gets
+	 * filled with packets picked up from the WED TX ring and from WDMA RX.
+	 */
+
+	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
+
+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_TX_RING_SIZE);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ return 0;
+}
+
+static int
+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+ int i;
+
+	/*
+	 * For txfree event handling, the same DMA ring is shared between WED
+	 * and WLAN. The WLAN driver accesses the ring index registers through
+	 * WED.
+	 */
+ ring->reg_base = MTK_WED_RING_RX(1);
+ ring->wpdma = regs;
+
+ for (i = 0; i < 12; i += 4) {
+ u32 val = readl(regs + i);
+
+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
+ }
+
+ return 0;
+}
+
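+/* Read and acknowledge the interrupt status: error sources are logged
+ * (rate-limited), the main status is masked with the caller's mask, acked
+ * and returned.
+ */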
+static u32
+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 val;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ if (!dev->hw->num_flows)
+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ if (val && net_ratelimit())
+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
+
+ val = wed_r32(dev, MTK_WED_INT_STATUS);
+ val &= mask;
+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
+
+ return val;
+}
+
+static void
+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+{
+ if (!dev->running)
+ return;
+
+ mtk_wed_set_ext_int(dev, !!mask);
+ wed_w32(dev, MTK_WED_INT_MASK, mask);
+}
+
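+/* Reference-count offloaded flows; the first flow enables the WLAN driver's
+ * offload path, the last removal (mtk_wed_flow_remove) disables it again.
+ */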
+int mtk_wed_flow_add(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+ int ret;
+
+ if (!hw || !hw->wed_dev)
+ return -ENODEV;
+
+ if (hw->num_flows) {
+ hw->num_flows++;
+ return 0;
+ }
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
+ if (!ret)
+ hw->num_flows++;
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+void mtk_wed_flow_remove(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+
+ if (!hw)
+ return;
+
+ if (--hw->num_flows)
+ return;
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+}
+
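+/* Called by the ethernet driver for each WED device-tree node: publish the
+ * ops table and set up the per-instance hardware state.
+ */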
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, int index)
+{
+ static const struct mtk_wed_ops wed_ops = {
+ .attach = mtk_wed_attach,
+ .tx_ring_setup = mtk_wed_tx_ring_setup,
+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
+ .start = mtk_wed_start,
+ .stop = mtk_wed_stop,
+ .reset_dma = mtk_wed_reset_dma,
+ .reg_read = wed_r32,
+ .reg_write = wed_w32,
+ .irq_get = mtk_wed_irq_get,
+ .irq_set_mask = mtk_wed_irq_set_mask,
+ .detach = mtk_wed_detach,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+ struct mtk_wed_hw *hw;
+ struct regmap *regs;
+ int irq;
+
+ if (!np)
+ return;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return;
+
+ get_device(&pdev->dev);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return;
+
+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(regs))
+ return;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
+
+ mutex_lock(&hw_lock);
+
+ if (WARN_ON(hw_list[index]))
+ goto unlock;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ goto unlock;
+ hw->node = np;
+ hw->regs = regs;
+ hw->eth = eth;
+ hw->dev = &pdev->dev;
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,hifsys");
+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+ kfree(hw);
+ goto unlock;
+ }
+
+ if (!index) {
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
+ mtk_wed_hw_add_debugfs(hw);
+
+ hw_list[index] = hw;
+
+unlock:
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_exit(void)
+{
+ int i;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
+
+ synchronize_rcu();
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw;
+
+ hw = hw_list[i];
+ if (!hw)
+ continue;
+
+ hw_list[i] = NULL;
+ debugfs_remove(hw->debugfs_dir);
+ put_device(hw->dev);
+ kfree(hw);
+ }
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..981ec613f4b0
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_PRIV_H
+#define __MTK_WED_PRIV_H
+
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <linux/debugfs.h>
+#include <linux/regmap.h>
+#include <linux/netdevice.h>
+
+struct mtk_eth;
+
+struct mtk_wed_hw {
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+ struct regmap *hifsys;
+ struct device *dev;
+ void __iomem *wdma;
+ struct regmap *mirror;
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ u32 debugfs_reg;
+ u32 num_flows;
+ char dirname[5];
+ int irq;
+ int index;
+};
+
+struct mtk_wdma_info {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
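+/* Register access helpers: WED registers go through a syscon regmap, WDMA
+ * through plain MMIO, and WPDMA rings through the WLAN driver's mapped ring
+ * registers, which may not be set up yet, hence the NULL checks.
+ */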
+static inline void
+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ regmap_write(dev->hw->regs, reg, val);
+}
+
+static inline u32
+wed_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(dev->hw->regs, reg, &val);
+
+ return val;
+}
+
+static inline void
+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->hw->wdma + reg);
+}
+
+static inline u32
+wdma_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->hw->wdma + reg);
+}
+
+static inline u32
+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ if (!dev->txfree_ring.wpdma)
+ return 0;
+
+ return readl(dev->txfree_ring.wpdma + reg);
+}
+
+static inline void
+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ if (!dev->txfree_ring.wpdma)
+ return;
+
+ writel(val, dev->txfree_ring.wpdma + reg);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, int index);
+void mtk_wed_exit(void);
+int mtk_wed_flow_add(int index);
+void mtk_wed_flow_remove(int index);
+#else
+static inline void
+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, int index)
+{
+}
+static inline void
+mtk_wed_exit(void)
+{
+}
+static inline int mtk_wed_flow_add(int index)
+{
+ return -EINVAL;
+}
+static inline void mtk_wed_flow_remove(int index)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
+#else
+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
new file mode 100644
index 000000000000..a81d3fd1a439
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/seq_file.h>
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+
+struct reg_dump {
+ const char *name;
+ u16 offset;
+ u8 type;
+ u8 base;
+};
+
+enum {
+ DUMP_TYPE_STRING,
+ DUMP_TYPE_WED,
+ DUMP_TYPE_WDMA,
+ DUMP_TYPE_WPDMA_TX,
+ DUMP_TYPE_WPDMA_TXFREE,
+};
+
+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
+
+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+
+static void
+print_reg_val(struct seq_file *s, const char *name, u32 val)
+{
+ seq_printf(s, "%-32s %08x\n", name, val);
+}
+
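+/* Walk the dump table, reading each entry from the register block selected
+ * by its type; string entries start a new section in the output.
+ */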
+static void
+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+ const struct reg_dump *regs, int n_regs)
+{
+ const struct reg_dump *cur;
+ u32 val;
+
+ for (cur = regs; cur < &regs[n_regs]; cur++) {
+ switch (cur->type) {
+ case DUMP_TYPE_STRING:
+ seq_printf(s, "%s======== %s:\n",
+ cur > regs ? "\n" : "",
+ cur->name);
+ continue;
+ case DUMP_TYPE_WED:
+ val = wed_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WDMA:
+ val = wdma_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TX:
+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TXFREE:
+ val = wpdma_txfree_r32(dev, cur->offset);
+ break;
+ }
+ print_reg_val(s, cur->name, val);
+ }
+}
+
+static int
+wed_txinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED TX"),
+ DUMP_WED(WED_TX_MIB(0)),
+ DUMP_WED_RING(WED_RING_TX(0)),
+
+ DUMP_WED(WED_TX_MIB(1)),
+ DUMP_WED_RING(WED_RING_TX(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
+
+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WPDMA_TX_RING(0),
+ DUMP_WPDMA_TX_RING(1),
+
+ DUMP_STR("WED WDMA RX"),
+ DUMP_WED(WED_WDMA_RX_MIB(0)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
+ DUMP_WED(WED_WDMA_RX_THRES(0)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
+
+ DUMP_WED(WED_WDMA_RX_MIB(1)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
+ DUMP_WED(WED_WDMA_RX_THRES(1)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
+
+ DUMP_STR("WDMA RX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev)
+ return 0;
+
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+
+static int
+mtk_wed_reg_set(void *data, u64 val)
+{
+ struct mtk_wed_hw *hw = data;
+
+ regmap_write(hw->regs, hw->debugfs_reg, val);
+
+ return 0;
+}
+
+static int
+mtk_wed_reg_get(void *data, u64 *val)
+{
+ struct mtk_wed_hw *hw = data;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
+ "0x%08llx\n");
+
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+ struct dentry *dir;
+
+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
+ dir = debugfs_create_dir(hw->dirname, NULL);
+	if (IS_ERR(dir))
+		return;
+
+ hw->debugfs_dir = dir;
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
new file mode 100644
index 000000000000..a5d9d8a5bce2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+
+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
new file mode 100644
index 000000000000..0a0465ea58b4
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_REGS_H
+#define __MTK_WED_REGS_H
+
+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
+struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
+#define MTK_WED_RESET 0x008
+#define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+#define MTK_WED_RESET_WED BIT(31)
+
+#define MTK_WED_CTRL 0x00c
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+
+#define MTK_WED_EXT_INT_STATUS 0x020
+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
+
+#define MTK_WED_EXT_INT_MASK 0x028
+
+#define MTK_WED_STATUS 0x060
+#define MTK_WED_STATUS_TX GENMASK(15, 8)
+
+#define MTK_WED_TX_BM_CTRL 0x080
+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_BM_BASE 0x084
+
+#define MTK_WED_TX_BM_TKID 0x088
+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+#define MTK_WED_TX_BM_BUF_LEN 0x08c
+
+#define MTK_WED_TX_BM_INTF 0x09c
+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
+
+#define MTK_WED_TX_BM_DYN_THR 0x0a0
+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
+
+#define MTK_WED_INT_STATUS 0x200
+#define MTK_WED_INT_MASK 0x204
+
+#define MTK_WED_GLO_CFG 0x208
+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MTK_WED_RESET_IDX 0x20c
+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+
+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
+
+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
+
+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+
+#define MTK_WED_WPDMA_GLO_CFG 0x508
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MTK_WED_WPDMA_RESET_IDX 0x50c
+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_INT_CTRL 0x520
+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+
+#define MTK_WED_WPDMA_INT_MASK 0x524
+
+#define MTK_WED_PCIE_CFG_BASE 0x560
+
+#define MTK_WED_PCIE_INT_TRIGGER 0x570
+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+#define MTK_WED_WPDMA_CFG_BASE 0x580
+
+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+
+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
+#define MTK_WED_WDMA_GLO_CFG 0xa04
+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
+
+#define MTK_WED_WDMA_RESET_IDX 0xa08
+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_CTRL 0xa2c
+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WDMA_OFFSET0 0xaa4
+#define MTK_WED_WDMA_OFFSET1 0xaa8
+
+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
+
+#define MTK_WED_RING_OFS_BASE 0x00
+#define MTK_WED_RING_OFS_COUNT 0x04
+#define MTK_WED_RING_OFS_CPU_IDX 0x08
+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
+
+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
+
+#define MTK_WDMA_GLO_CFG 0x204
+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
+
+#define MTK_WDMA_RESET_IDX 0x208
+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WDMA_INT_MASK 0x228
+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
+#define MTK_WDMA_INT_GRP1 0x250
+#define MTK_WDMA_INT_GRP2 0x254
+
+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+
+/* DMA channel mapping */
+#define HIFSYS_DMA_AG_MAP 0x008
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 4ba1a78c6515..bfc0cd5ec423 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -16,13 +16,9 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
-config MLX5_ACCEL
- bool
-
config MLX5_FPGA
bool "Mellanox Technologies Innova support"
depends on MLX5_CORE
- select MLX5_ACCEL
help
Build support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -143,71 +139,21 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
-config MLX5_FPGA_IPSEC
- bool "Mellanox Technologies IPsec Innova support"
- depends on MLX5_CORE
- depends on MLX5_FPGA
- help
- Build IPsec support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-
-config MLX5_IPSEC
- bool "Mellanox Technologies IPsec Connect-X support"
- depends on MLX5_CORE_EN
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- select MLX5_ACCEL
- help
- Build IPsec support for the Connect-X family of network cards by Mellanox
- Technologies.
- Note: If you select this option, the mlx5_core driver will include
- IPsec support for the Connect-X family.
-
config MLX5_EN_IPSEC
- bool "IPSec XFRM cryptography-offload acceleration"
+ bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
help
Build support for IPsec cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
-
-config MLX5_FPGA_TLS
- bool "Mellanox Technologies TLS Innova support"
- depends on TLS_DEVICE
- depends on TLS=y || MLX5_CORE=m
- depends on MLX5_CORE_EN
- depends on MLX5_FPGA
- select MLX5_EN_TLS
- help
- Build TLS support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-config MLX5_TLS
+config MLX5_EN_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- select MLX5_ACCEL
- select MLX5_EN_TLS
- help
- Build TLS support for the Connect-X family of network cards by Mellanox
- Technologies.
-
-config MLX5_EN_TLS
- bool
help
Build support for TLS cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 4bc666714a35..81620c25c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
#
# Netdev extra
@@ -88,17 +88,13 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
-mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
-mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
-
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o en_accel/ipsec_fs.o
+ en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
+ en_accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
deleted file mode 100644
index 82b185121edb..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __MLX5E_ACCEL_H__
-#define __MLX5E_ACCEL_H__
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
-{
- __be16 *ethtype;
-
- if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
- return false;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
- return false;
- return true;
-}
-
-static inline void remove_metadata_hdr(struct sk_buff *skb)
-{
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
-
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
-}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
deleted file mode 100644
index 09f5ce97af46..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/ipsec.h"
-#include "mlx5_core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec_offload.h"
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
- int err = 0;
-
- ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
- mlx5_ipsec_offload_ops(mdev) :
- mlx5_fpga_ipsec_ops(mdev);
-
- if (!ipsec_ops || !ipsec_ops->init) {
- mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
- return;
- }
-
- err = ipsec_ops->init(mdev);
- if (err) {
- mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
- return;
- }
-
- mdev->ipsec_ops = ipsec_ops;
-}
-
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->cleanup)
- return;
-
- ipsec_ops->cleanup(mdev);
-}
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->device_caps)
- return 0;
-
- return ipsec_ops->device_caps(mdev);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_count)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_count(mdev);
-}
-
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_read)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_read(mdev, counters, count);
-}
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- __be32 saddr[4] = {}, daddr[4] = {};
-
- if (!ipsec_ops || !ipsec_ops->create_hw_context)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (!xfrm->attrs.is_ipv6) {
- saddr[3] = xfrm->attrs.saddr.a4;
- daddr[3] = xfrm->attrs.daddr.a4;
- } else {
- memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
- memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
- }
-
- return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
- xfrm->attrs.is_ipv6, sa_handle);
-}
-
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->free_hw_context)
- return;
-
- ipsec_ops->free_hw_context(context);
-}
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- struct mlx5_accel_esp_xfrm *xfrm;
-
- if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
- return ERR_PTR(-EOPNOTSUPP);
-
- xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
- if (IS_ERR(xfrm))
- return xfrm;
-
- xfrm->mdev = mdev;
- return xfrm;
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
-
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
- return;
-
- ipsec_ops->esp_destroy_xfrm(xfrm);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
-
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
- return -EOPNOTSUPP;
-
- return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
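The file deleted above was a thin dispatch layer: every exported helper NULL-checked a const ops table and one of its slots before calling through it. A minimal standalone sketch of that pattern follows; all names here are illustrative, not the mlx5 API.

#include <stdio.h>

struct dev;

struct ipsec_ops {
	unsigned int (*caps)(struct dev *d);
	int (*init)(struct dev *d);
};

struct dev {
	const struct ipsec_ops *ops;	/* NULL when offload is unsupported */
};

static unsigned int hw_caps(struct dev *d) { (void)d; return 0x3; }
static int hw_init(struct dev *d) { (void)d; return 0; }

static const struct ipsec_ops hw_ops = { .caps = hw_caps, .init = hw_init };

/* Each wrapper guards both the table and the slot, like
 * mlx5_accel_ipsec_device_caps() did before the indirection was dropped. */
static unsigned int dev_ipsec_caps(struct dev *d)
{
	if (!d->ops || !d->ops->caps)
		return 0;
	return d->ops->caps(d);
}

int main(void)
{
	struct dev d = { .ops = &hw_ops };

	printf("caps=%#x\n", dev_ipsec_caps(&d));
	return 0;
}

With only one backend left (the FPGA path is gone), the table buys nothing, which is why the rest of this patch replaces the wrappers with direct calls.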
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
deleted file mode 100644
index fbb9c5415d53..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_IPSEC_H__
-#define __MLX5_ACCEL_IPSEC_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/accel.h>
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count);
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle);
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_ipsec_ops {
- u32 (*device_caps)(struct mlx5_core_dev *mdev);
- unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
- int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
- void* (*create_hw_context)(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle);
- void (*free_hw_context)(void *context);
- int (*init)(struct mlx5_core_dev *mdev);
- void (*cleanup)(struct mlx5_core_dev *mdev);
- struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
- int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
- void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
-};
-
-#else
-
-#define MLX5_IPSEC_DEV(mdev) false
-
-static inline void *
-mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- return NULL;
-}
-
-static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
-
-static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
-
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
deleted file mode 100644
index 970c66d19c1d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_OFFLOAD_H__
-#define __MLX5_IPSEC_OFFLOAD_H__
-
-#include <linux/mlx5/driver.h>
-#include "accel/ipsec.h"
-
-#ifdef CONFIG_MLX5_IPSEC
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!MLX5_CAP_GEN(mdev, ipsec_offload))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return false;
-
- return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
- MLX5_CAP_ETH(mdev, insert_trailer);
-}
-
-#else
-static inline const struct mlx5_accel_ipsec_ops *
-mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- return false;
-}
-
-#endif /* CONFIG_MLX5_IPSEC */
-#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
deleted file mode 100644
index 6c2b86a26863..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/tls.h"
-#include "mlx5_core.h"
-#include "lib/mlx5.h"
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-#include "fpga/tls.h"
-
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid,
- direction_sx);
-}
-
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx)
-{
- mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
-}
-
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
-}
-
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_is_tls_device(mdev) ||
- mlx5_accel_is_ktls_device(mdev);
-}
-
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_device_caps(mdev);
-}
-
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_init(mdev);
-}
-
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- mlx5_fpga_tls_cleanup(mdev);
-}
-#endif
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
-{
- u32 sz_bytes;
- void *key;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- struct tls12_crypto_info_aes_gcm_128 *info =
- (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- case TLS_CIPHER_AES_GCM_256: {
- struct tls12_crypto_info_aes_gcm_256 *info =
- (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- default:
- return -EINVAL;
- }
-
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
-}
-
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
-{
- mlx5_destroy_encryption_key(mdev, key_id);
-}
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
deleted file mode 100644
index fd874f0c380a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_TLS_H__
-#define __MLX5_ACCEL_TLS_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/tls.h>
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
-
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_tx);
-}
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_rx);
-}
-
-static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_accel_is_ktls_tx(mdev) &&
- !mlx5_accel_is_ktls_rx(mdev))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
-}
-
-static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info)
-{
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (crypto_info->version == TLS_1_2_VERSION)
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
- break;
- }
-
- return false;
-}
-#else
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline int
-mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id) { return -ENOTSUPP; }
-static inline void
-mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
-
-static inline bool
-mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
-static inline bool
-mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info) { return false; }
-#endif
-
-enum {
- MLX5_ACCEL_TLS_TX = BIT(0),
- MLX5_ACCEL_TLS_RX = BIT(1),
- MLX5_ACCEL_TLS_V12 = BIT(2),
- MLX5_ACCEL_TLS_V13 = BIT(3),
- MLX5_ACCEL_TLS_LRO = BIT(4),
- MLX5_ACCEL_TLS_IPV6 = BIT(5),
- MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
- MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
-};
-
-struct mlx5_ifc_tls_flow_bits {
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 reserved_at_2[0x1e];
-};
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx);
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
-
-#else
-
-static inline int
-mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx) { return -ENOTSUPP; }
-static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx) { }
-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn) { return 0; }
-static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_accel_is_ktls_device(mdev);
-}
-static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
-static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
-#endif
-
-#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8653ac0fd865..50818081bdc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -354,7 +354,6 @@ enum {
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
- MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 08fd1370a8b0..1e8700957280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -5,8 +5,7 @@
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec_offload.h"
static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -207,7 +206,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
u16 stop_room;
- stop_room = mlx5e_tls_get_stop_room(mdev, params);
+ stop_room = mlx5e_ktls_get_stop_room(mdev, params);
stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
/* A MPWQE can take up to the maximum-sized WQE + all the normal
@@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
return false;
- if (mlx5_fpga_is_ipsec_device(mdev))
- return false;
-
if (params->xdp_prog) {
/* XSK params are not considered here. If striding RQ is in use,
* and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
@@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;
- if (mlx5_fpga_is_ipsec_device(mdev))
- byte_count += MLX5E_METADATA_ETHER_LEN;
-
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -696,8 +689,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- !!MLX5_IPSEC_DEV(mdev);
+ allow_swp =
+ mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -804,7 +797,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -833,7 +826,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
- param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
+ param->is_tls = mlx5e_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 3ec0c17db010..4902ef74fedf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -23,7 +23,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
c = priv->channels.c[ix];
if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
/* To avoid WQE overrun, don't post a NOP if async_icosq is not
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 62cde3e87c2e..04c0a5e1c89a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -37,8 +37,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls.h"
+#include "en_accel/ktls_txrx.h"
#include "en.h"
#include "en/txrx.h"
@@ -124,8 +124,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
#ifdef CONFIG_MLX5_EN_TLS
/* May send SKBs and WQEs. */
- if (mlx5e_tls_skb_offloaded(skb))
- if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ if (mlx5e_ktls_skb_offloaded(skb))
+ if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
+ &state->tls)))
return false;
#endif
@@ -174,7 +175,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
+ mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4c4ee524176c..3ae6067c7e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -102,7 +102,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 299e3f0fcb5c..c280a18ff002 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -226,8 +226,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_ESN)) {
+ !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -275,8 +274,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.family == AF_INET6 &&
- !(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_IPV6)) {
+ !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) {
netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
return -EINVAL;
}
@@ -286,9 +284,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return 0;
-
return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
sa_entry->ipsec_obj_id,
&sa_entry->ipsec_rule);
@@ -297,9 +292,6 @@ static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
- if (!mlx5_is_ipsec_device(priv->mdev))
- return;
-
mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
&sa_entry->ipsec_rule);
}
@@ -333,9 +325,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
- sa_entry->xfrm =
- mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
+ sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sa_entry;
@@ -414,7 +404,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec = NULL;
- if (!MLX5_IPSEC_DEV(priv->mdev)) {
+ if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
return 0;
}
@@ -425,10 +415,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
- ida_init(&ipsec->halloc);
ipsec->en_priv = priv;
- ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
@@ -452,7 +439,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
mlx5e_accel_ipsec_fs_cleanup(priv);
destroy_workqueue(ipsec->wq);
- ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
}
@@ -531,7 +517,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
return;
@@ -550,15 +536,13 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
- if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
}
- if (mlx5_is_ipsec_device(mdev))
- netdev->gso_partial_features |= NETIF_F_GSO_ESP;
-
+ netdev->gso_partial_features |= NETIF_F_GSO_ESP;
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 6164c7f59efb..a0e9dade09e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -40,7 +40,7 @@
#include <net/xfrm.h>
#include <linux/idr.h>
-#include "accel/ipsec.h"
+#include "ipsec_offload.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
@@ -55,24 +55,6 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
atomic64_t ipsec_tx_drop_trailer;
- atomic64_t ipsec_tx_drop_metadata;
-};
-
-struct mlx5e_ipsec_stats {
- u64 ipsec_dec_in_packets;
- u64 ipsec_dec_out_packets;
- u64 ipsec_dec_bypass_packets;
- u64 ipsec_enc_in_packets;
- u64 ipsec_enc_out_packets;
- u64 ipsec_enc_bypass_packets;
- u64 ipsec_dec_drop_packets;
- u64 ipsec_dec_auth_fail_packets;
- u64 ipsec_enc_drop_packets;
- u64 ipsec_add_sa_success;
- u64 ipsec_add_sa_fail;
- u64 ipsec_del_sa_success;
- u64 ipsec_del_sa_fail;
- u64 ipsec_cmd_drop;
};
struct mlx5e_accel_fs_esp;
@@ -81,11 +63,8 @@ struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
- bool no_trailer;
- spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
- struct ida halloc;
+ spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
- struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
struct mlx5e_ipsec_tx *tx_fs;
@@ -116,7 +95,6 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_rule ipsec_rule;
};
-void mlx5e_ipsec_build_inverse_table(void);
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
@@ -125,11 +103,6 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
#else
-
-static inline void mlx5e_ipsec_build_inverse_table(void)
-{
-}
-
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 17da23dff0ed..66b529e36ea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
-#include "accel/ipsec_offload.h"
+#include "ipsec_offload.h"
#include "ipsec_fs.h"
#include "fs_core.h"
@@ -700,9 +700,6 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
- return -EOPNOTSUPP;
-
err = fs_init_tx(priv);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
index 3389b3bb3ef8..b70953979709 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
@@ -6,10 +6,9 @@
#include "en.h"
#include "ipsec.h"
-#include "accel/ipsec_offload.h"
+#include "ipsec_offload.h"
#include "en/fs.h"
-#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
@@ -19,8 +18,4 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule);
-#else
-static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
-#endif
#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index d6667d38e1de..37c9880719cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -1,14 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "ipsec_offload.h"
#include "lib/mlx5.h"
#include "en_accel/ipsec_fs.h"
-#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
- MLX5_ACCEL_IPSEC_CAP_LSO)
-
struct mlx5_ipsec_sa_ctx {
struct rhash_head hash;
u32 enc_key_id;
@@ -25,25 +22,37 @@ struct mlx5_ipsec_esp_xfrm {
struct mlx5_accel_esp_xfrm accel_xfrm;
};
-static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
- u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
+ u32 caps;
+
+ if (!MLX5_CAP_GEN(mdev, ipsec_offload))
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return 0;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return 0;
- if (!mlx5_is_ipsec_device(mdev))
+ if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) ||
+ !MLX5_CAP_ETH(mdev, insert_trailer))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
+ caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 |
+ MLX5_ACCEL_IPSEC_CAP_LSO;
+
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
- if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
+ if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
- caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
/* We can accommodate up to 2^24 different IPsec objects
* because we use up to 24 bit in flow table metadata
@@ -52,6 +61,7 @@ static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
return caps;
}
+EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static int
mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
@@ -94,8 +104,7 @@ mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
static struct mlx5_accel_esp_xfrm *
mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err = 0;
@@ -274,11 +283,6 @@ static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
mutex_unlock(&mxfrm->lock);
}
-static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_obj_attrs *attrs,
u32 ipsec_id)
@@ -366,20 +370,51 @@ change_sw_xfrm_attrs:
return err;
}
-static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
- .device_caps = mlx5_ipsec_offload_device_caps,
- .create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
- .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
- .init = mlx5_ipsec_offload_init,
- .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
- .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
- .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
-};
+void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *xfrm,
+ u32 *sa_handle)
+{
+ __be32 saddr[4] = {}, daddr[4] = {};
+
+ if (!xfrm->attrs.is_ipv6) {
+ saddr[3] = xfrm->attrs.saddr.a4;
+ daddr[3] = xfrm->attrs.daddr.a4;
+ } else {
+ memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
+ memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
+ }
+
+ return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr,
+ xfrm->attrs.spi,
+ xfrm->attrs.is_ipv6, sa_handle);
+}
+
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
+{
+ mlx5_ipsec_offload_delete_sa_ctx(context);
+}
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
+struct mlx5_accel_esp_xfrm *
+mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
- if (!mlx5_ipsec_offload_device_caps(mdev))
- return NULL;
+ struct mlx5_accel_esp_xfrm *xfrm;
- return &ipsec_offload_ops;
+ xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs);
+ if (IS_ERR(xfrm))
+ return xfrm;
+
+ xfrm->mdev = mdev;
+ return xfrm;
+}
+
+void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+{
+ mlx5_ipsec_offload_esp_destroy_xfrm(xfrm);
+}
+
+int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h
new file mode 100644
index 000000000000..7dac104e6ef1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_IPSEC_OFFLOAD_H__
+#define __MLX5_IPSEC_OFFLOAD_H__
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/accel.h>
+
+void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm *xfrm,
+ u32 *sa_handle);
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
+#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
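The rewritten mlx5_ipsec_device_caps() above gates capabilities in stages: every hard prerequisite short-circuits to 0, and only then do optional features OR extra bits into the mask. A hedged sketch of that shape, with illustrative capability names standing in for the MLX5_CAP_* macros:

#include <stdio.h>

#define CAP_DEVICE (1u << 0)
#define CAP_ESP    (1u << 1)
#define CAP_ESN    (1u << 2)

struct hw {
	int ipsec_offload, log_max_dek, crypto, esp_gcm, esn;
};

static unsigned int device_caps(const struct hw *hw)
{
	unsigned int caps;

	/* hard prerequisites: any miss means no offload at all */
	if (!hw->ipsec_offload || !hw->log_max_dek || !hw->crypto)
		return 0;

	caps = CAP_DEVICE;
	if (hw->esp_gcm)
		caps |= CAP_ESP;	/* AES-GCM-128 ESP supported */
	if (hw->esn)
		caps |= CAP_ESN;	/* extended sequence numbers */
	return caps;
}

int main(void)
{
	struct hw hw = { 1, 1, 1, 1, 0 };

	printf("caps=%#x\n", device_caps(&hw));
	return 0;
}

A zero return doubles as the "not an IPsec offload device" predicate, which lets the patch delete mlx5_is_ipsec_device() and its callers' separate checks.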
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index b56fea142c24..9b65c765cbd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -34,78 +34,16 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
-#include "accel/ipsec_offload.h"
+#include "ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
-#include "accel/accel.h"
#include "en.h"
enum {
- MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
- MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
- MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
-};
-
-struct mlx5e_ipsec_rx_metadata {
- unsigned char nexthdr;
- __be32 sa_handle;
-} __packed;
-
-enum {
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};
-struct mlx5e_ipsec_tx_metadata {
- __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */
- __be16 seq; /* LSBs of the first TCP seq, only for LSO */
- u8 esp_next_proto; /* Next protocol of ESP */
-} __packed;
-
-struct mlx5e_ipsec_metadata {
- unsigned char syndrome;
- union {
- unsigned char raw[5];
- /* from FPGA to host, on successful decrypt */
- struct mlx5e_ipsec_rx_metadata rx;
- /* from host to FPGA */
- struct mlx5e_ipsec_tx_metadata tx;
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-#define MAX_LSO_MSS 2048
-
-/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
-static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
-
-static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
-{
- return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
-}
-
-static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *eth;
-
- if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
- return ERR_PTR(-ENOMEM);
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
- skb->mac_header -= sizeof(*mdata);
- mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(*mdata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
-
- memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
- return mdata;
-}
-
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
unsigned int alen = crypto_aead_authsize(x->data);
@@ -244,40 +182,6 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
skb_store_bits(skb, iv_offset, &seqno, 8);
}
-static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata,
- struct xfrm_offload *xo)
-{
- struct ip_esp_hdr *esph;
- struct tcphdr *tcph;
-
- if (skb_is_gso(skb)) {
- /* Add LSO metadata indication */
- esph = ip_esp_hdr(skb);
- tcph = inner_tcp_hdr(skb);
- netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
- skb->network_header,
- skb->transport_header,
- skb->inner_network_header,
- skb->inner_transport_header);
- netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
- skb->len, skb_shinfo(skb)->gso_size,
- ntohs(tcph->source), ntohs(tcph->dest),
- ntohl(tcph->seq), ntohl(esph->seq_no));
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
- mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
- mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
- } else {
- mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
- }
- mdata->content.tx.esp_next_proto = xo->proto;
-
- netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
- mdata->syndrome, mdata->content.tx.esp_next_proto,
- ntohs(mdata->content.tx.mss_inv),
- ntohs(mdata->content.tx.seq));
-}
-
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
struct mlx5e_accel_tx_ipsec_state *ipsec_st,
struct mlx5_wqe_inline_seg *inlseg)
@@ -298,16 +202,14 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
ipsec_st->x = x;
ipsec_st->xo = xo;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- aead = x->data;
- alen = crypto_aead_authsize(aead);
- blksize = ALIGN(crypto_aead_blocksize(aead), 4);
- clen = ALIGN(skb->len + 2, blksize);
- plen = max_t(u32, clen - skb->len, 4);
- tailen = plen + alen;
- ipsec_st->plen = plen;
- ipsec_st->tailen = tailen;
- }
+ aead = x->data;
+ alen = crypto_aead_authsize(aead);
+ blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ clen = ALIGN(skb->len + 2, blksize);
+ plen = max_t(u32, clen - skb->len, 4);
+ tailen = plen + alen;
+ ipsec_st->plen = plen;
+ ipsec_st->tailen = tailen;
return 0;
}
@@ -340,19 +242,17 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
((struct iphdr *)skb_network_header(skb))->protocol :
((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
- if (mlx5_is_ipsec_device(priv->mdev)) {
- eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
- eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
- encap = x->encap;
- if (!encap) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
- } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
- eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
- cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
- }
+ eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+ encap = x->encap;
+ if (!encap) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+ } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
}
}
@@ -363,7 +263,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_sa_entry *sa_entry;
- struct mlx5e_ipsec_metadata *mdata;
struct xfrm_state *x;
struct sec_path *sp;
@@ -392,19 +291,8 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
- if (MLX5_CAP_GEN(priv->mdev, fpga)) {
- mdata = mlx5e_ipsec_add_metadata(skb);
- if (IS_ERR(mdata)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
- goto drop;
- }
- }
-
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
- if (MLX5_CAP_GEN(priv->mdev, fpga))
- mlx5e_ipsec_set_metadata(skb, mdata, xo);
-
mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
return true;
@@ -414,79 +302,6 @@ drop:
return false;
}
-static inline struct xfrm_state *
-mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
- struct mlx5e_ipsec_metadata *mdata)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct xfrm_offload *xo;
- struct xfrm_state *xs;
- struct sec_path *sp;
- u32 sa_handle;
-
- sp = secpath_set(skb);
- if (unlikely(!sp)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
- return NULL;
- }
-
- sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
- xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
- if (unlikely(!xs)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
- return NULL;
- }
-
- sp = skb_sec_path(skb);
- sp->xvec[sp->len++] = xs;
- sp->olen++;
-
- xo = xfrm_offload(skb);
- xo->flags = CRYPTO_DONE;
- switch (mdata->syndrome) {
- case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- if (likely(priv->ipsec->no_trailer)) {
- xo->flags |= XFRM_ESP_NO_TRAILER;
- xo->proto = mdata->content.rx.nexthdr;
- }
- break;
- case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
- xo->status = CRYPTO_INVALID_PROTOCOL;
- break;
- default:
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
- return NULL;
- }
- return xs;
-}
-
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt)
-{
- struct mlx5e_ipsec_metadata *mdata;
- struct xfrm_state *xs;
-
- if (!is_metadata_hdr_valid(skb))
- return skb;
-
- /* Use the metadata */
- mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
- xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
- if (unlikely(!xs)) {
- kfree_skb(skb);
- return NULL;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-
- return skb;
-}
-
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
@@ -528,8 +343,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
- if (WARN_ON_ONCE(priv->ipsec->no_trailer))
- xo->flags |= XFRM_ESP_NO_TRAILER;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
@@ -541,21 +354,3 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
-
-void mlx5e_ipsec_build_inverse_table(void)
-{
- u16 mss_inv;
- u32 mss;
-
- /* Calculate 1/x inverse table for use in GSO data path.
- * Using this table, we provide the IPSec accelerator with the value of
- * 1/gso_size so that it can infer the position of each segment inside
- * the GSO, and increment the ESP sequence number, and generate the IV.
- * The HW needs this value in Q0.16 fixed-point number format
- */
- mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
- for (mss = 2; mss < MAX_LSO_MSS; mss++) {
- mss_inv = div_u64(1ULL << 32, mss) >> 16;
- mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
- }
-}
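The deleted mlx5e_ipsec_build_inverse_table() above carries a self-contained technique worth illustrating: a precomputed Q0.16 fixed-point 1/x table so the (now removed) FPGA GSO path could multiply by 1/gso_size instead of dividing per segment. A standalone sketch, assuming the same table size:

#include <stdio.h>
#include <stdint.h>

#define MAX_LSO_MSS 2048

static uint16_t inverse_table[MAX_LSO_MSS];

static void build_inverse_table(void)
{
	uint32_t mss;

	inverse_table[1] = 0xFFFF;	/* 1/1 saturates Q0.16 */
	for (mss = 2; mss < MAX_LSO_MSS; mss++)
		inverse_table[mss] = (uint16_t)(((1ULL << 32) / mss) >> 16);
}

int main(void)
{
	build_inverse_table();
	/* for a 1448-byte MSS: 65536/1448 ~= 45 */
	printf("inv(1448) = %u\n", inverse_table[1448]);
	return 0;
}

Each entry approximates 65536/mss, so hardware can recover a segment index from a byte offset with one multiply and a shift.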
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 428881e0adcb..0ae4e12ce528 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -53,9 +53,6 @@ struct mlx5e_accel_tx_ipsec_state {
#ifdef CONFIG_MLX5_EN_IPSEC
-struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb, u32 *cqe_bcnt);
-
void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 5cb936541b9e..3aace1c2a763 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -35,27 +35,9 @@
#include <net/sock.h>
#include "en.h"
-#include "accel/ipsec.h"
+#include "ipsec_offload.h"
#include "fpga/sdk.h"
#include "en_accel/ipsec.h"
-#include "fpga/ipsec.h"
-
-static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) },
-};
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
@@ -65,13 +47,11 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) },
};
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -103,45 +83,4 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
-{
- return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
-{
- int ret = 0;
-
- if (priv->ipsec)
- ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
- NUM_IPSEC_HW_COUNTERS);
- if (ret)
- memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
-{
- unsigned int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
-
- return idx;
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
-{
- int i;
-
- if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
- mlx5e_ipsec_hw_stats_desc,
- i);
- return idx;
-}
-
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
-MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index d93aadbf10da..814f2a56f633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -2,11 +2,49 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
-#include "en_accel/tls.h"
+#include "lib/mlx5.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id)
+{
+ u32 sz_bytes;
+ void *key;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5_create_encryption_key(mdev, key, sz_bytes,
+ MLX5_ACCEL_OBJ_TLS_KEY,
+ p_key_id);
+}
+
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_destroy_encryption_key(mdev, key_id);
+}
+
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
struct tls_crypto_info *crypto_info,
@@ -59,15 +97,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev))
return;
- if (mlx5e_accel_is_ktls_tx(mdev)) {
+ if (mlx5e_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
- if (mlx5e_accel_is_ktls_rx(mdev))
+ if (mlx5e_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
@@ -92,7 +130,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
@@ -112,7 +150,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
- if (!mlx5e_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
@@ -120,3 +158,24 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
}
+
+int mlx5e_ktls_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls;
+
+ if (!mlx5e_is_ktls_device(priv->mdev))
+ return 0;
+
+ tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+ if (!tls)
+ return -ENOMEM;
+
+ priv->tls = tls;
+ return 0;
+}
+
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
+{
+ kfree(priv->tls);
+ priv->tls = NULL;
+}
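mlx5_ktls_create_key(), moved into ktls.c above, dispatches on the cipher type only to extract the key buffer and its size; a single upload path then handles every cipher. A minimal sketch of that dispatch, with hypothetical struct and function names in place of the kernel's tls12_crypto_info_* types:

#include <stdio.h>
#include <string.h>

struct info_gcm_128 { unsigned char key[16]; };
struct info_gcm_256 { unsigned char key[32]; };

enum cipher { AES_GCM_128, AES_GCM_256 };

static int create_key(enum cipher type, void *crypto_info)
{
	unsigned int sz;
	void *key;

	switch (type) {
	case AES_GCM_128: {
		struct info_gcm_128 *info = crypto_info;

		key = info->key;
		sz = sizeof(info->key);
		break;
	}
	case AES_GCM_256: {
		struct info_gcm_256 *info = crypto_info;

		key = info->key;
		sz = sizeof(info->key);
		break;
	}
	default:
		return -1;	/* -EINVAL in the kernel */
	}

	/* stand-in for mlx5_create_encryption_key() */
	printf("uploading %u-byte key starting 0x%02x\n", sz,
	       *(unsigned char *)key);
	return 0;
}

int main(void)
{
	struct info_gcm_256 info;

	memset(info.key, 0xab, sizeof(info.key));
	return create_key(AES_GCM_256, &info);
}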
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 5833deb2354c..d016624fbc9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -4,9 +4,42 @@
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
+#include <linux/tls.h>
+#include <net/tls.h>
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id);
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
+{
+ if (is_kdump_kernel())
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+}
+
+static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info)
+{
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ break;
+ }
+
+ return false;
+}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
@@ -16,26 +49,36 @@ struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_tx(mdev);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_rx(mdev);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
}
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(mdev);
-}
+struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_ctx;
+ atomic64_t tx_tls_del;
+ atomic64_t rx_tls_ctx;
+ atomic64_t rx_tls_del;
+};
-#else
+struct mlx5e_tls {
+ struct mlx5e_tls_sw_stats sw_stats;
+ struct workqueue_struct *rx_wq;
+};
+int mlx5e_ktls_init(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
@@ -64,10 +107,23 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
-static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
-static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ return 0;
+}
+static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ return 0;
+}
#endif
#endif /* __MLX5E_KTLS_H__ */
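For orientation, a minimal sketch — not part of this patch — of how capability predicates like mlx5e_is_ktls_tx()/mlx5e_is_ktls_rx() above are typically consumed when advertising netdev features; the real consumer is mlx5e_ktls_build_netdev() in ktls.c, which may differ in detail:

    /* Hypothetical consumer of the capability predicates above. */
    static void example_ktls_advertise_features(struct mlx5e_priv *priv)
    {
            struct net_device *netdev = priv->netdev;

            if (mlx5e_is_ktls_tx(priv->mdev)) {
                    netdev->hw_features |= NETIF_F_HW_TLS_TX;
                    netdev->features |= NETIF_F_HW_TLS_TX;
            }

            if (mlx5e_is_ktls_rx(priv->mdev))
                    netdev->hw_features |= NETIF_F_HW_TLS_RX;
    }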
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 96064a2033f7..0bb0633b7542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -3,7 +3,7 @@
#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 56e7b2aee85f..2ab46c4247ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -36,14 +36,7 @@
#include "en.h"
#include "fpga/sdk.h"
-#include "en_accel/tls.h"
-
-static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
- { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
-};
+#include "en_accel/ktls.h"
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
@@ -55,51 +48,43 @@ static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
-{
- if (!priv->tls)
- return NULL;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return mlx5e_ktls_sw_stats_desc;
- return mlx5e_tls_sw_stats_desc;
-}
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
- if (mlx5e_accel_is_ktls_device(priv->mdev))
- return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
- return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
+
+ return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- stats_desc[i].format);
+ mlx5e_ktls_sw_stats_desc[i].format);
return n;
}
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
- const struct counter_desc *stats_desc;
unsigned int i, n, idx = 0;
- stats_desc = get_tls_atomic_stats(priv);
- n = mlx5e_tls_get_count(priv);
+ if (!priv->tls)
+ return 0;
+
+ n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- data[idx++] =
- MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- stats_desc, i);
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_ktls_sw_stats_desc,
+ i);
return n;
}
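The stats path above is an instance of the driver's counter_desc pattern: a static table pairs a printable name with a byte offset into struct mlx5e_tls_sw_stats, and MLX5E_READ_CTR_ATOMIC64() fetches each value with atomic64_read(). A self-contained userspace sketch of the same idea, with simplified types that are not the driver's actual definitions:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct tls_sw_stats {
            atomic_ullong tx_tls_ctx;
            atomic_ullong tx_tls_del;
    };

    struct counter_desc {
            const char *format;     /* stat name, as shown by ethtool -S */
            size_t offset;          /* byte offset into the stats struct */
    };

    static const struct counter_desc desc[] = {
            { "tx_tls_ctx", offsetof(struct tls_sw_stats, tx_tls_ctx) },
            { "tx_tls_del", offsetof(struct tls_sw_stats, tx_tls_del) },
    };

    /* Mirrors MLX5E_READ_CTR_ATOMIC64(): offset into the struct, then load. */
    static unsigned long long read_ctr(struct tls_sw_stats *s, size_t i)
    {
            return atomic_load((atomic_ullong *)((char *)s + desc[i].offset));
    }

    int main(void)
    {
            struct tls_sw_stats s = { 0 };

            atomic_fetch_add(&s.tx_tls_ctx, 3);
            for (size_t i = 0; i < sizeof(desc) / sizeof(desc[0]); i++)
                    printf("%s: %llu\n", desc[i].format, read_ctr(&s, i));
            return 0;
    }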
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index aaf11c66bf4c..4b6f0d1ea59a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -27,7 +27,7 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
{
u16 num_dumps, stop_room = 0;
- if (!mlx5e_accel_is_ktls_tx(mdev))
+ if (!mlx5e_is_ktls_tx(mdev))
return 0;
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
@@ -448,14 +448,26 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
+ struct tls_context *tls_ctx;
+ int datalen;
u32 seq;
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ return true;
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ goto err_out;
+
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 08c9d5134479..2dd78dd4ad65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -16,8 +16,8 @@ struct mlx5e_accel_tx_tls_state {
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, int datalen,
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
@@ -48,6 +48,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
{
return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
}
+
+static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
+static inline void
+mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
+}
#else
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
@@ -69,6 +81,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
return false;
}
+static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 *cqe_bcnt)
+{
+}
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_TXRX_H__ */
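mlx5e_ktls_handle_tx_wqe() above encodes the TLS TIS number into the WQE control segment: the field is big-endian and the TIS number occupies its upper 24 bits, which is why the value is shifted left by 8 before cpu_to_be32(). A tiny standalone illustration (hypothetical helper, not from this patch; htonl() stands in for cpu_to_be32() on a little-endian host):

    #include <stdint.h>
    #include <arpa/inet.h>

    static inline uint32_t pack_tis_tir_num(uint32_t tisn)
    {
            /* TIS/TIR number lives in bits 8..31 of the big-endian word;
             * the low byte stays zero. */
            return htonl(tisn << 8);
    }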
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
index e5c180f2403b..0dc715c4c10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
@@ -6,7 +6,6 @@
#include <net/tls.h>
#include "en.h"
-#include "accel/tls.h"
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
deleted file mode 100644
index b8fc863aa68d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/netdevice.h>
-#include <net/ipv6.h>
-#include "en_accel/tls.h"
-#include "accel/tls.h"
-
-static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 0);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
-{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- MLX5_SET(tls_flow, flow, ipv6, 1);
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-}
-#endif
-
-static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
-
- memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
- MLX5_FLD_SZ_BYTES(tls_flow, src_port));
- memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
- MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
-}
-
-static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
-{
- switch (sk->sk_family) {
- case AF_INET:
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
-#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
- if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
- mlx5e_tls_set_ipv4_flow(flow, sk);
- break;
- }
- if (!(caps & MLX5_ACCEL_TLS_IPV6))
- goto error_out;
-
- mlx5e_tls_set_ipv6_flow(flow, sk);
- break;
-#endif
- default:
- goto error_out;
- }
-
- mlx5e_tls_set_flow_tcp_ports(flow, sk);
- return 0;
-error_out:
- return -EINVAL;
-}
-
-static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 caps = mlx5_accel_tls_device_caps(mdev);
- int ret = -ENOMEM;
- void *flow;
- u32 swid;
-
- flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
- if (!flow)
- return ret;
-
- ret = mlx5e_tls_set_flow(flow, sk, caps);
- if (ret)
- goto free_flow;
-
- ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
- if (ret < 0)
- goto free_flow;
-
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context_tx *tx_ctx =
- mlx5e_get_tls_tx_context(tls_ctx);
-
- tx_ctx->swid = htonl(swid);
- tx_ctx->expected_seq = start_offload_tcp_sn;
- } else {
- struct mlx5e_tls_offload_context_rx *rx_ctx =
- mlx5e_get_tls_rx_context(tls_ctx);
-
- rx_ctx->handle = htonl(swid);
- }
-
- return 0;
-free_flow:
- kfree(flow);
- return ret;
-}
-
-static void mlx5e_tls_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- unsigned int handle;
-
- handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
- mlx5e_get_tls_tx_context(tls_ctx)->swid :
- mlx5e_get_tls_rx_context(tls_ctx)->handle);
-
- mlx5_accel_tls_del_flow(priv->mdev, handle,
- direction == TLS_OFFLOAD_CTX_DIR_TX);
-}
-
-static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
- u32 seq, u8 *rcd_sn_data,
- enum tls_offload_ctx_dir direction)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_rx *rx_ctx;
- __be64 rcd_sn = *(__be64 *)rcd_sn_data;
-
- if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
- return -EINVAL;
- rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
-
- netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
- be64_to_cpu(rcd_sn));
- mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
-
- return 0;
-}
-
-static const struct tlsdev_ops mlx5e_tls_ops = {
- .tls_dev_add = mlx5e_tls_add,
- .tls_dev_del = mlx5e_tls_del,
- .tls_dev_resync = mlx5e_tls_resync,
-};
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- struct net_device *netdev = priv->netdev;
- u32 caps;
-
- if (mlx5e_accel_is_ktls_device(priv->mdev)) {
- mlx5e_ktls_build_netdev(priv);
- return;
- }
-
- /* FPGA */
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return;
-
- caps = mlx5_accel_tls_device_caps(priv->mdev);
- if (caps & MLX5_ACCEL_TLS_TX) {
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
- }
-
- if (caps & MLX5_ACCEL_TLS_RX) {
- netdev->features |= NETIF_F_HW_TLS_RX;
- netdev->hw_features |= NETIF_F_HW_TLS_RX;
- }
-
- if (!(caps & MLX5_ACCEL_TLS_LRO)) {
- netdev->features &= ~NETIF_F_LRO;
- netdev->hw_features &= ~NETIF_F_LRO;
- }
-
- netdev->tlsdev_ops = &mlx5e_tls_ops;
-}
-
-int mlx5e_tls_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls;
-
- if (!mlx5e_accel_is_tls_device(priv->mdev))
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- priv->tls = tls;
- return 0;
-}
-
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_tls *tls = priv->tls;
-
- if (!tls)
- return;
-
- kfree(tls);
- priv->tls = NULL;
-}
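One detail worth noting in the removed mlx5e_tls_set_flow() above: an AF_INET6 socket whose peer is a v4-mapped address (::ffff:a.b.c.d) must be steered as an IPv4 flow. That rule in isolation, as a hypothetical helper over kernel types (not part of this patch):

    /* True when the flow must be programmed with IPv4 match fields. */
    static bool flow_is_effectively_ipv4(const struct sock *sk)
    {
            if (sk->sk_family == AF_INET)
                    return true;
            return sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
                   ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED;
    }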
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
deleted file mode 100644
index 62ecf14bf86a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#ifndef __MLX5E_TLS_H__
-#define __MLX5E_TLS_H__
-
-#include "accel/tls.h"
-#include "en_accel/ktls.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-#include <net/tls.h>
-#include "en.h"
-
-struct mlx5e_tls_sw_stats {
- atomic64_t tx_tls_ctx;
- atomic64_t tx_tls_del;
- atomic64_t tx_tls_drop_metadata;
- atomic64_t tx_tls_drop_resync_alloc;
- atomic64_t tx_tls_drop_no_sync_data;
- atomic64_t tx_tls_drop_bypass_required;
- atomic64_t rx_tls_ctx;
- atomic64_t rx_tls_del;
- atomic64_t rx_tls_drop_resync_request;
- atomic64_t rx_tls_resync_request;
- atomic64_t rx_tls_resync_reply;
- atomic64_t rx_tls_auth_fail;
-};
-
-struct mlx5e_tls {
- struct mlx5e_tls_sw_stats sw_stats;
- struct workqueue_struct *rx_wq;
-};
-
-struct mlx5e_tls_offload_context_tx {
- struct tls_offload_context_tx base;
- u32 expected_seq;
- __be32 swid;
-};
-
-static inline struct mlx5e_tls_offload_context_tx *
-mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
- return container_of(tls_offload_ctx_tx(tls_ctx),
- struct mlx5e_tls_offload_context_tx,
- base);
-}
-
-struct mlx5e_tls_offload_context_rx {
- struct tls_offload_context_rx base;
- __be32 handle;
-};
-
-static inline struct mlx5e_tls_offload_context_rx *
-mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
-{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
- return container_of(tls_offload_ctx_rx(tls_ctx),
- struct mlx5e_tls_offload_context_rx,
- base);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv)
-{
- return priv->tls;
-}
-
-void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
-int mlx5e_tls_init(struct mlx5e_priv *priv);
-void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
-
-int mlx5e_tls_get_count(struct mlx5e_priv *priv);
-int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
-
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() &&
- mlx5_accel_is_tls_device(mdev);
-}
-
-#else
-
-static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
-{
- if (!is_kdump_kernel() &&
- mlx5_accel_is_ktls_device(priv->mdev))
- mlx5e_ktls_build_netdev(priv);
-}
-
-static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; }
-static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
-static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
-static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
-static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
-static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif
-
-#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
deleted file mode 100644
index a05580cea481..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include "en_accel/tls.h"
-#include "en_accel/tls_rxtx.h"
-#include "accel/accel.h"
-
-#include <net/inet6_hashtables.h>
-#include <linux/ipv6.h>
-
-#define SYNDROM_DECRYPTED 0x30
-#define SYNDROM_RESYNC_REQUEST 0x31
-#define SYNDROM_AUTH_FAILED 0x32
-
-#define SYNDROME_OFFLOAD_REQUIRED 32
-#define SYNDROME_SYNC 33
-
-struct sync_info {
- u64 rcd_sn;
- s32 sync_len;
- int nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
-};
-
-struct recv_metadata_content {
- u8 syndrome;
- u8 reserved;
- __be32 sync_seq;
-} __packed;
-
-struct send_metadata_content {
- /* One byte of syndrome followed by 3 bytes of swid */
- __be32 syndrome_swid;
- __be16 first_seq;
-} __packed;
-
-struct mlx5e_tls_metadata {
- union {
- /* from fpga to host */
- struct recv_metadata_content recv;
- /* from host to fpga */
- struct send_metadata_content send;
- unsigned char raw[6];
- } __packed content;
- /* packet type ID field */
- __be16 ethertype;
-} __packed;
-
-static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
-{
- struct mlx5e_tls_metadata *pet;
- struct ethhdr *eth;
-
- if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
- return -ENOMEM;
-
- eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
- skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
- pet = (struct mlx5e_tls_metadata *)(eth + 1);
-
- memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
- 2 * ETH_ALEN);
-
- eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->content.send.syndrome_swid =
- htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
-
- return 0;
-}
-
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
- u32 tcp_seq, struct sync_info *info)
-{
- int remaining, i = 0, ret = -EINVAL;
- struct tls_record_info *record;
- unsigned long flags;
- s32 sync_size;
-
- spin_lock_irqsave(&context->base.lock, flags);
- record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
-
- if (unlikely(!record))
- goto out;
-
- sync_size = tcp_seq - tls_record_start_seq(record);
- info->sync_len = sync_size;
- if (unlikely(sync_size < 0)) {
- if (tls_record_is_start_marker(record))
- goto done;
-
- goto out;
- }
-
- remaining = sync_size;
- while (remaining > 0) {
- info->frags[i] = record->frags[i];
- __skb_frag_ref(&info->frags[i]);
- remaining -= skb_frag_size(&info->frags[i]);
-
- if (remaining < 0)
- skb_frag_size_add(&info->frags[i], remaining);
-
- i++;
- }
- info->nr_frags = i;
-done:
- ret = 0;
-out:
- spin_unlock_irqrestore(&context->base.lock, flags);
- return ret;
-}
-
-static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
- struct sk_buff *nskb, u32 tcp_seq,
- int headln, __be64 rcd_sn)
-{
- struct mlx5e_tls_metadata *pet;
- u8 syndrome = SYNDROME_SYNC;
- struct iphdr *iph;
- struct tcphdr *th;
- int data_len, mss;
-
- nskb->dev = skb->dev;
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb, skb_network_offset(skb));
- skb_set_transport_header(nskb, skb_transport_offset(skb));
- memcpy(nskb->data, skb->data, headln);
- memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
-
- iph = ip_hdr(nskb);
- iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
- th = tcp_hdr(nskb);
- data_len = nskb->len - headln;
- tcp_seq -= data_len;
- th->seq = htonl(tcp_seq);
-
- mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
- skb_shinfo(nskb)->gso_size = 0;
- if (data_len > mss) {
- skb_shinfo(nskb)->gso_size = mss;
- skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
- }
- skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
-
- pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
- memcpy(pet, &syndrome, sizeof(syndrome));
- pet->content.send.first_seq = htons(tcp_seq);
-
- /* MLX5 devices don't care about the checksum partial start, offset
- * and pseudo header
- */
- nskb->ip_summed = CHECKSUM_PARTIAL;
-
- nskb->queue_mapping = skb->queue_mapping;
-}
-
-static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tls *tls)
-{
- u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
- struct sync_info info;
- struct sk_buff *nskb;
- int linear_len = 0;
- int headln;
- int i;
-
- sq->stats->tls_ooo++;
-
- if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
- * It should be safe to drop the packet in this case
- */
- atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
- goto err_out;
- }
-
- if (unlikely(info.sync_len < 0)) {
- u32 payload;
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- payload = skb->len - headln;
- if (likely(payload <= -info.sync_len))
- /* SKB payload doesn't require offload
- */
- return true;
-
- atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
- goto err_out;
- }
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
- goto err_out;
- }
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- linear_len += headln + sizeof(info.rcd_sn);
- nskb = alloc_skb(linear_len, GFP_ATOMIC);
- if (unlikely(!nskb)) {
- atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
- goto err_out;
- }
-
- context->expected_seq = tcp_seq + skb->len - headln;
- skb_put(nskb, linear_len);
- for (i = 0; i < info.nr_frags; i++)
- skb_shinfo(nskb)->frags[i] = info.frags[i];
-
- skb_shinfo(nskb)->nr_frags = info.nr_frags;
- nskb->data_len = info.sync_len;
- nskb->len += info.sync_len;
- sq->stats->tls_resync_bytes += nskb->len;
- mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
- cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit_simple(sq, nskb, true);
-
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context_tx *context;
- struct tls_context *tls_ctx;
- u32 expected_seq;
- int datalen;
- u32 skb_seq;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- return true;
-
- mlx5e_tx_mpwqe_ensure_complete(sq);
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
- if (mlx5e_accel_is_ktls_tx(sq->mdev))
- return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
-
- /* FPGA */
- skb_seq = ntohl(tcp_hdr(skb)->seq);
- context = mlx5e_get_tls_tx_context(tls_ctx);
- expected_seq = context->expected_seq;
-
- if (unlikely(expected_seq != skb_seq))
- return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
-
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
- atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
- dev_kfree_skb_any(skb);
- return false;
- }
-
- context->expected_seq = skb_seq + datalen;
- return true;
-
-err_out:
- dev_kfree_skb_any(skb);
- return false;
-}
-
-static int tls_update_resync_sn(struct net_device *netdev,
- struct sk_buff *skb,
- struct mlx5e_tls_metadata *mdata)
-{
- struct sock *sk = NULL;
- struct iphdr *iph;
- struct tcphdr *th;
- __be32 seq;
-
- if (mdata->ethertype != htons(ETH_P_IP))
- return -EINVAL;
-
- iph = (struct iphdr *)(mdata + 1);
-
- th = ((void *)iph) + iph->ihl * 4;
-
- if (iph->version == 4) {
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
-
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
- &ipv6h->saddr, th->source,
- &ipv6h->daddr, ntohs(th->dest),
- netdev->ifindex, 0);
-#endif
- }
- if (!sk || sk->sk_state == TCP_TIME_WAIT) {
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
- goto out;
- }
-
- skb->sk = sk;
- skb->destructor = sock_edemux;
-
- memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
- tls_offload_rx_resync_request(sk, seq);
-out:
- return 0;
-}
-
-/* FPGA tls rx handler */
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt)
-{
- struct mlx5e_tls_metadata *mdata;
- struct mlx5e_priv *priv;
-
- /* Use the metadata */
- mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
- switch (mdata->content.recv.syndrome) {
- case SYNDROM_DECRYPTED:
- skb->decrypted = 1;
- break;
- case SYNDROM_RESYNC_REQUEST:
- tls_update_resync_sn(rq->netdev, skb, mdata);
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
- break;
- case SYNDROM_AUTH_FAILED:
- /* Authentication failure will be observed and verified by kTLS */
- priv = netdev_priv(rq->netdev);
- atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
- break;
- default:
- /* Leave the metadata header in place for other handlers */
- return;
- }
-
- remove_metadata_hdr(skb);
- *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
-}
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- if (!mlx5e_accel_is_tls_device(mdev))
- return 0;
-
- if (mlx5e_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(mdev, params);
-
- /* FPGA */
- /* Resync SKB. */
- return mlx5e_stop_room_for_max_wqe(mdev);
-}
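Both the removed FPGA TX handler above and its kTLS replacement in ktls_tx.c start from the same payload-length computation: everything in the skb past the TCP header is TLS record data, and a zero-length result (a pure ACK) is passed through untouched. The arithmetic in isolation (simplified signature; the kernel uses skb_transport_offset() and tcp_hdrlen()):

    #include <stdint.h>

    /* TLS payload bytes carried by one TCP segment. */
    static inline uint32_t tls_payload_len(uint32_t skb_len,
                                           uint32_t transport_off,
                                           uint32_t tcp_hdr_len)
    {
            return skb_len - (transport_off + tcp_hdr_len);
    }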
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
deleted file mode 100644
index 0ca0a023fb8d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5E_TLS_RXTX_H__
-#define __MLX5E_TLS_RXTX_H__
-
-#include "accel/accel.h"
-#include "en_accel/ktls_txrx.h"
-
-#ifdef CONFIG_MLX5_EN_TLS
-
-#include <linux/skbuff.h>
-#include "en.h"
-#include "en/txrx.h"
-
-u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-
-bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
-
-static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb)
-{
- return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
-static inline void
-mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
- struct mlx5e_accel_tx_tls_state *state)
-{
- cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
-}
-
-void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
- u32 *cqe_bcnt);
-
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
-{
- if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */
- return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
-
- if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb)))
- return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
-}
-
-#else
-
-static inline bool
-mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }
-static inline void
-mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
-{
- return 0;
-}
-
-#endif /* CONFIG_MLX5_EN_TLS */
-
-#endif /* __MLX5E_TLS_RXTX_H__ */
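The deleted header above multiplexed two RX completion styles: kTLS, signalled by a bit in the CQE, and FPGA TLS, signalled by an in-band metadata header carrying a vendor ethertype (the is_metadata_hdr_valid() check above). With the FPGA path gone, only the CQE check survives in en_rx.c below. A rough standalone sketch of what the in-band recognition amounted to; the ethertype value is an assumption for illustration, not taken from this patch:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define EX_ETH_HLEN        14      /* Ethernet header length */
    #define EX_METADATA_ETYPE  0x8CE4  /* assumed MLX5E_METADATA_ETHER_TYPE */

    /* The FPGA replaced the packet's ethertype with a private one and
     * inserted its metadata right after the Ethernet header. */
    static bool metadata_hdr_present(const uint8_t *pkt, size_t len)
    {
            uint16_t proto;

            if (len < EX_ETH_HLEN)
                    return false;
            memcpy(&proto, pkt + 12, sizeof(proto));  /* h_proto offset */
            return proto == htons(EX_METADATA_ETYPE);
    }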
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2f1dedc721d1..12b72a0bcb1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -47,9 +47,8 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/en_accel.h"
-#include "en_accel/tls.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ktls.h"
+#include "en_accel/ipsec_offload.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
@@ -68,7 +67,6 @@
#include "en/ptp.h"
#include "qos.h"
#include "en/trap.h"
-#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -1036,9 +1034,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
- __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
-
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
@@ -1334,7 +1329,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
- if (MLX5_IPSEC_DEV(c->priv->mdev))
+ if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
@@ -4471,12 +4466,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
- netdev_warn(netdev,
- "XDP is not available on Innova cards with IPsec support\n");
- return -EINVAL;
- }
-
new_params = priv->channels.params;
new_params.xdp_prog = prog;
@@ -4934,7 +4923,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_ipsec_build_netdev(priv);
- mlx5e_tls_build_netdev(priv);
+ mlx5e_ktls_build_netdev(priv);
}
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4996,7 +4985,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
- err = mlx5e_tls_init(priv);
+ err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5007,7 +4996,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
- mlx5e_tls_cleanup(priv);
+ mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv);
}
@@ -5704,7 +5693,6 @@ int mlx5e_init(void)
{
int ret;
- mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 6b7e7ea6ded2..47f7b4c034cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1112,7 +1112,6 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(ptp),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 56bb58704bf9..a5f6fd16b665 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -48,10 +48,9 @@
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
+#include "en_accel/ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
-#include "en_accel/tls_rxtx.h"
+#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@@ -1416,7 +1415,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;
- mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(get_cqe_tls_offload(cqe)))
+ mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
@@ -2383,46 +2383,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
};
#endif /* CONFIG_MLX5_CORE_IPOIB */
-#ifdef CONFIG_MLX5_EN_IPSEC
-
-static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
- struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- struct mlx5e_wqe_frag_info *wi;
- struct sk_buff *skb;
- u32 cqe_bcnt;
- u16 ci;
-
- ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
- wi = get_frag(rq, ci);
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
-
- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- rq->stats->wqe_err++;
- goto wq_free_wqe;
- }
-
- skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
- mlx5e_skb_from_cqe_linear,
- mlx5e_skb_from_cqe_nonlinear,
- rq, cqe, wi, cqe_bcnt);
- if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
- goto wq_free_wqe;
-
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
- if (unlikely(!skb))
- goto wq_free_wqe;
-
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- napi_gro_receive(rq->cq.napi, skb);
-
-wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi, true);
- mlx5_wq_cyc_pop(wq);
-}
-
-#endif /* CONFIG_MLX5_EN_IPSEC */
-
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
{
struct net_device *netdev = rq->netdev;
@@ -2439,10 +2399,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
- return -EINVAL;
- }
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
if (!rq->handle_rx_cqe) {
@@ -2466,14 +2422,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-#ifdef CONFIG_MLX5_EN_IPSEC
- if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
- priv->ipsec)
- rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
- else
-#endif
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of RQ is not set\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index bdc870f9c2f3..57fa0489eeb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,7 +32,7 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/tls.h"
+#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"
@@ -1900,17 +1900,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
- return mlx5e_tls_get_count(priv);
+ return mlx5e_ktls_get_count(priv);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
- return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+ return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
- return idx + mlx5e_tls_get_stats(priv, data + idx);
+ return idx + mlx5e_ktls_get_stats(priv, data + idx);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@@ -2443,7 +2443,6 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
- &MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index a7a025d15c14..e48b15b55b6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -482,7 +482,6 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
-extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 2a984e82ae16..750c32050165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -57,9 +57,6 @@ struct mlx5_fpga_device {
u32 mkey;
struct mlx5_uars_page *uar;
} conn_res;
-
- struct mlx5_fpga_ipsec *ipsec;
- struct mlx5_fpga_tls *tls;
};
#define mlx5_fpga_dbg(__adev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
deleted file mode 100644
index 8ec148010d62..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/rhashtable.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/fs_helpers.h>
-#include <linux/mlx5/fs.h>
-#include <linux/rbtree.h>
-
-#include "mlx5_core.h"
-#include "fs_cmd.h"
-#include "fpga/ipsec.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-
-enum mlx5_fpga_ipsec_cmd_status {
- MLX5_FPGA_IPSEC_CMD_PENDING,
- MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
- MLX5_FPGA_IPSEC_CMD_COMPLETE,
-};
-
-struct mlx5_fpga_ipsec_cmd_context {
- struct mlx5_fpga_dma_buf buf;
- enum mlx5_fpga_ipsec_cmd_status status;
- struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
- int status_code;
- struct completion complete;
- struct mlx5_fpga_device *dev;
- struct list_head list; /* Item in pending_cmds */
- u8 command[];
-};
-
-struct mlx5_fpga_esp_xfrm;
-
-struct mlx5_fpga_ipsec_sa_ctx {
- struct rhash_head hash;
- struct mlx5_ifc_fpga_ipsec_sa hw_sa;
- u32 sa_handle;
- struct mlx5_core_dev *dev;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-};
-
-struct mlx5_fpga_esp_xfrm {
- unsigned int num_rules;
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mutex lock; /* xfrm lock */
- struct mlx5_accel_esp_xfrm accel_xfrm;
-};
-
-struct mlx5_fpga_ipsec_rule {
- struct rb_node node;
- struct fs_fte *fte;
- struct mlx5_fpga_ipsec_sa_ctx *ctx;
-};
-
-static const struct rhashtable_params rhash_sa = {
- /* Exclude the "cmd" field from the key, as its
- * value is not constant during the lifetime
- * of the key object.
- */
- .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
- sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
- .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
- .automatic_shrinking = true,
- .min_size = 1,
-};
-
-struct mlx5_fpga_ipsec {
- struct mlx5_fpga_device *fdev;
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
- struct mlx5_fpga_conn *conn;
-
- struct notifier_block fs_notifier_ingress_bypass;
- struct notifier_block fs_notifier_egress;
-
- /* Map hardware SA --> SA context
- * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
- * We use this hash to avoid duplicate SAs in the FPGA,
- * which aren't allowed.
- */
- struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
- struct mutex sa_hash_lock;
-
- /* Tree holding all rules for this fpga device
- * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
- */
- struct rb_root rules_rb;
- struct mutex rules_rb_lock; /* rules lock */
-
- struct ida halloc;
-};
-
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
- return false;
-
- return true;
-}
-
-static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
-
- if (status) {
- context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
- buf);
- mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
- status);
- context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
- complete(&context->complete);
- }
-}
-
-static inline
-int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
-{
- switch (syndrome) {
- case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
- return 0;
- case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
- return -EEXIST;
- case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
- return -EINVAL;
- case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
- return -EIO;
- }
- return -EIO;
-}
-
-static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
- struct mlx5_fpga_ipsec_cmd_context *context;
- enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
- struct mlx5_fpga_device *fdev = cb_arg;
- unsigned long flags;
-
- if (buf->sg[0].size < sizeof(*resp)) {
- mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
- buf->sg[0].size, sizeof(*resp));
- return;
- }
-
- mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
- ntohl(resp->syndrome));
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
- struct mlx5_fpga_ipsec_cmd_context,
- list);
- if (context)
- list_del(&context->list);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (!context) {
- mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
- return;
- }
- mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
-
- syndrome = ntohl(resp->syndrome);
- context->status_code = syndrome_to_errno(syndrome);
- context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
- memcpy(&context->resp, resp, sizeof(*resp));
-
- if (context->status_code)
- mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
- syndrome);
-
- complete(&context->complete);
-}
-
-static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
- const void *cmd, int cmd_size)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned long flags;
- int res;
-
- if (!fdev || !fdev->ipsec)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (cmd_size & 3)
- return ERR_PTR(-EINVAL);
-
- context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
- if (!context)
- return ERR_PTR(-ENOMEM);
-
- context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
- context->dev = fdev;
- context->buf.complete = mlx5_fpga_ipsec_send_complete;
- init_completion(&context->complete);
- memcpy(&context->command, cmd, cmd_size);
- context->buf.sg[0].size = cmd_size;
- context->buf.sg[0].data = &context->command;
-
- spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
- if (!res)
- list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
- spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
-
- if (res) {
- mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
- kfree(context);
- return ERR_PTR(res);
- }
-
- /* Context should be freed by the caller after completion. */
- return context;
-}
-
-static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
-{
- struct mlx5_fpga_ipsec_cmd_context *context = ctx;
- unsigned long timeout =
- msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
- int res;
-
- res = wait_for_completion_timeout(&context->complete, timeout);
- if (!res) {
- mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
- return -ETIMEDOUT;
- }
-
- if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
- res = context->status_code;
- else
- res = -EIO;
-
- return res;
-}
-
-static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
-{
- if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
- return true;
- return false;
-}
-
-static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
- int opcode)
-{
- struct mlx5_core_dev *dev = fdev->mdev;
- struct mlx5_ifc_fpga_ipsec_sa *sa;
- struct mlx5_fpga_ipsec_cmd_context *cmd_context;
- size_t sa_cmd_size;
- int err;
-
- hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
- if (is_v2_sadb_supported(fdev->ipsec))
- sa_cmd_size = sizeof(*hw_sa);
- else
- sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
-
- cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
- mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
- if (IS_ERR(cmd_context))
- return PTR_ERR(cmd_context);
-
- err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
- if (err)
- goto out;
-
- sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
- if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
- mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
- ntohl(sa->ipsec_sa_v1.sw_sa_handle),
- ntohl(cmd_context->resp.sw_sa_handle));
- err = -EIO;
- }
-
-out:
- kfree(cmd_context);
- return err;
-}
-
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- u32 ret = 0;
-
- if (mlx5_fpga_is_ipsec_device(mdev)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
- ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
- } else {
- return ret;
- }
-
- if (!fdev->ipsec)
- return ret;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
- ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
- ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
- ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
- ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
-
- if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
- ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
- ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
-
- return ret;
-}
-
-static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- number_of_ipsec_counters);
-}
-
-static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int counters_count)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- unsigned int i;
- __be32 *data;
- u32 count;
- u64 addr;
- int ret;
-
- if (!fdev || !fdev->ipsec)
- return 0;
-
- addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_low) +
- ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
- ipsec_counters_addr_high) << 32);
-
- count = mlx5_fpga_ipsec_counters_count(mdev);
-
- data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
- MLX5_FPGA_ACCESS_TYPE_DONTCARE);
- if (ret < 0) {
- mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
- ret);
- goto out;
- }
- ret = 0;
-
- if (count > counters_count)
- count = counters_count;
-
- /* Each counter is low word, then high. But each word is big-endian */
- for (i = 0; i < count; i++)
- counters[i] = (u64)ntohl(data[i * 2]) |
- ((u64)ntohl(data[i * 2 + 1]) << 32);
-
-out:
- kfree(data);
- return ret;
-}
-
-static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
-{
- struct mlx5_fpga_ipsec_cmd_context *context;
- struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
- int err;
-
- cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
- cmd.flags = htonl(flags);
- context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context))
- return PTR_ERR(context);
-
- err = mlx5_fpga_ipsec_cmd_wait(context);
- if (err)
- goto out;
-
- if ((context->resp.flags & cmd.flags) != cmd.flags) {
- mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
- cmd.flags,
- context->resp.flags);
- err = -EIO;
- }
-
-out:
- kfree(context);
- return err;
-}
-
-static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
-{
- u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
- u32 flags = 0;
-
- if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
- flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
-
- return mlx5_fpga_ipsec_set_caps(mdev, flags);
-}
-
-static void
-mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
-
- /* key */
- memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
- aes_gcm->key_len / 8);
- /* Store the 128-bit key twice, per the HW layout */
- if (aes_gcm->key_len == 128)
- memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
- aes_gcm->aes_key, aes_gcm->key_len / 8);
-
- /* salt and seq_iv */
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
- sizeof(aes_gcm->seq_iv));
- memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
- sizeof(aes_gcm->salt));
-
- /* esn */
- if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags |=
- (xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = htonl(xfrm_attrs->esn);
- } else {
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
- hw_sa->ipsec_sa_v1.flags &=
- ~(xfrm_attrs->flags &
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
- hw_sa->esn = 0;
- }
-
- /* rx handle */
- hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
-
- /* enc mode */
- switch (aes_gcm->key_len) {
- case 128:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
- break;
- case 256:
- hw_sa->ipsec_sa_v1.enc_mode =
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
- break;
- }
-
- /* flags */
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
- MLX5_FPGA_IPSEC_SA_SPI_EN |
- MLX5_FPGA_IPSEC_SA_IP_ESP;
-
- if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
- else
- hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
-}
-
-static void
-mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6,
- struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
-{
- mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
-
- /* IPs */
- memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
- memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));
-
- /* SPI */
- hw_sa->ipsec_sa_v1.spi = spi;
-
- /* flags */
- if (is_ipv6)
- hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
-}
-
-static bool is_full_mask(const void *p, size_t len)
-{
- WARN_ON(len % 4);
-
- return !memchr_inv(p, 0xff, len);
-}
-
-static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
- const u32 *match_c,
- const u32 *match_v)
-{
- const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- misc_parameters);
- const void *headers_c = MLX5_ADDR_OF(fte_match_param,
- match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param,
- match_v,
- outer_headers);
-
- if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
- const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4);
- const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-
- if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)) ||
- !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
- ipv4)))
- return false;
- } else {
- const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- src_ipv4_src_ipv6.ipv6_layout.ipv6);
- const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- headers_c,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
-
- if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)) ||
- !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6)))
- return false;
- }
-
- if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
- outer_esp_spi),
- MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v)
-{
- u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
- bool ipv6_flow;
-
- ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
-
- if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
- mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
- mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
- mlx5_fs_is_vxlan_flow(match_c) ||
- !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
- ipv6_flow))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
- mlx5_fs_is_outer_ipsec_flow(match_c))
- return false;
-
- if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
- ipv6_flow)
- return false;
-
- if (!validate_fpga_full_mask(dev, match_c, match_v))
- return false;
-
- return true;
-}
-
-static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
- u8 match_criteria_enable,
- const u32 *match_c,
- const u32 *match_v,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_context *flow_context)
-{
- const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
- bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
- MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
- int ret;
-
- ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
- match_v);
-	if (!ret)
-		return false;
-
- if (is_dmac || is_smac ||
- (match_criteria_enable &
- ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
- (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
- return false;
-
- return true;
-}
-
-static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle)
-{
- struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(accel_xfrm, typeof(*fpga_xfrm),
- accel_xfrm);
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode, err;
- void *context;
-
- /* alloc SA */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- /* build candidate SA */
- mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
- saddr, daddr, spi, is_ipv6,
- &sa_ctx->hw_sa);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
-		/* all rules must use the same IPs and SPI */
- if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
- sizeof(sa_ctx->hw_sa))) {
- context = ERR_PTR(-EINVAL);
- goto exists;
- }
-
- ++fpga_xfrm->num_rules;
- context = fpga_xfrm->sa_ctx;
- goto exists;
- }
-
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
- err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
- if (err < 0) {
- context = ERR_PTR(err);
- goto exists;
- }
-
- sa_ctx->sa_handle = err;
- if (sa_handle)
- *sa_handle = sa_ctx->sa_handle;
- }
-	/* This is an unbound fpga_xfrm; try to add it to the hash */
- mutex_lock(&fipsec->sa_hash_lock);
-
- err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa);
- if (err) {
-		/* Can't bind a different accel_xfrm to an already
-		 * existing sa_ctx, because we can't support multiple
-		 * keymats for the same IPs and SPI
-		 */
- context = ERR_PTR(-EEXIST);
- goto unlock_hash;
- }
-
-	/* Bind accel_xfrm to sa_ctx */
- opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- context = ERR_PTR(err);
- goto delete_hash;
- }
-
- mutex_unlock(&fipsec->sa_hash_lock);
-
- ++fpga_xfrm->num_rules;
- fpga_xfrm->sa_ctx = sa_ctx;
- sa_ctx->fpga_xfrm = fpga_xfrm;
-
- mutex_unlock(&fpga_xfrm->lock);
-
- return sa_ctx;
-
-delete_hash:
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
-unlock_hash:
- mutex_unlock(&fipsec->sa_hash_lock);
- if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-exists:
- mutex_unlock(&fpga_xfrm->lock);
- kfree(sa_ctx);
- return context;
-}
-
-static void *
-mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- bool is_egress)
-{
- struct mlx5_accel_esp_xfrm *accel_xfrm;
- __be32 saddr[4], daddr[4], spi;
- struct mlx5_flow_group *fg;
- bool is_ipv6 = false;
-
- fs_get_obj(fg, fte->node.parent);
- /* validate */
- if (is_egress &&
- !mlx5_is_fpga_egress_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val,
- &fte->action,
- &fte->flow_context))
- return ERR_PTR(-EINVAL);
- else if (!mlx5_is_fpga_ipsec_rule(mdev,
- fg->mask.match_criteria_enable,
- fg->mask.match_criteria,
- fte->val))
- return ERR_PTR(-EINVAL);
-
- /* get xfrm context */
- accel_xfrm =
- (struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
-
- /* IPs */
- if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
- fte->val)) {
- memcpy(&saddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- src_ipv4_src_ipv6.ipv4_layout.ipv4),
- sizeof(saddr[3]));
- memcpy(&daddr[3],
- MLX5_ADDR_OF(fte_match_set_lyr_2_4,
- fte->val,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- sizeof(daddr[3]));
- } else {
- memcpy(saddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(saddr));
- memcpy(daddr,
- MLX5_ADDR_OF(fte_match_param,
- fte->val,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(daddr));
- is_ipv6 = true;
- }
-
- /* SPI */
- spi = MLX5_GET_BE(typeof(spi),
- fte_match_param, fte->val,
- misc_parameters.outer_esp_spi);
-
- /* create */
- return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
- saddr, daddr,
- spi, is_ipv6, NULL);
-}
-
-static void
-mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
-{
- struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- int opcode = is_v2_sadb_supported(fdev->ipsec) ?
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
- int err;
-
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
- sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err) {
- WARN_ON(err);
- return;
- }
-
- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
- MLX5_ACCEL_ESP_ACTION_DECRYPT)
- ida_free(&fipsec->halloc, sa_ctx->sa_handle);
-
- mutex_lock(&fipsec->sa_hash_lock);
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
- rhash_sa));
- mutex_unlock(&fipsec->sa_hash_lock);
-}
-
-static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
-
- mutex_lock(&fpga_xfrm->lock);
- if (!--fpga_xfrm->num_rules) {
- mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
- kfree(fpga_xfrm->sa_ctx);
- fpga_xfrm->sa_ctx = NULL;
- }
- mutex_unlock(&fpga_xfrm->lock);
-}
-
-static inline struct mlx5_fpga_ipsec_rule *
-_rule_search(struct rb_root *root, struct fs_fte *fte)
-{
- struct rb_node *node = root->rb_node;
-
- while (node) {
- struct mlx5_fpga_ipsec_rule *rule =
- container_of(node, struct mlx5_fpga_ipsec_rule,
- node);
-
- if (rule->fte < fte)
- node = node->rb_left;
- else if (rule->fte > fte)
- node = node->rb_right;
- else
- return rule;
- }
- return NULL;
-}
-
-static struct mlx5_fpga_ipsec_rule *
-rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
-{
- struct mlx5_fpga_ipsec_rule *rule;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rule = _rule_search(&ipsec_dev->rules_rb, fte);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return rule;
-}
-
-static inline int _rule_insert(struct rb_root *root,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_node **new = &root->rb_node, *parent = NULL;
-
- /* Figure out where to put new node */
- while (*new) {
- struct mlx5_fpga_ipsec_rule *this =
- container_of(*new, struct mlx5_fpga_ipsec_rule,
- node);
-
- parent = *new;
- if (rule->fte < this->fte)
- new = &((*new)->rb_left);
- else if (rule->fte > this->fte)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
-
- /* Add new node and rebalance tree. */
- rb_link_node(&rule->node, parent, new);
- rb_insert_color(&rule->node, root);
-
- return 0;
-}
-
-static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- int ret;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- ret = _rule_insert(&ipsec_dev->rules_rb, rule);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-
- return ret;
-}
-
-static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- struct rb_root *root = &ipsec_dev->rules_rb;
-
- mutex_lock(&ipsec_dev->rules_rb_lock);
- rb_erase(&rule->node, root);
- mutex_unlock(&ipsec_dev->rules_rb_lock);
-}
-
-static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
- struct mlx5_fpga_ipsec_rule *rule)
-{
- _rule_delete(ipsec_dev, rule);
- kfree(rule);
-}
-
-struct mailbox_mod {
- uintptr_t saved_esp_id;
- u32 saved_action;
- u32 saved_outer_esp_spi_value;
-};
-
-static void restore_spec_mailbox(struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
- mbox_mod->saved_outer_esp_spi_value);
- fte->action.action |= mbox_mod->saved_action;
- fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
-}
-
-static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
- struct fs_fte *fte,
- struct mailbox_mod *mbox_mod)
-{
- char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
- fte->val,
- misc_parameters);
-
- mbox_mod->saved_esp_id = fte->action.esp_id;
- mbox_mod->saved_action = fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- mbox_mod->saved_outer_esp_spi_value =
- MLX5_GET(fte_match_set_misc, misc_params_v,
- outer_esp_spi);
-
- fte->action.esp_id = 0;
- fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
- if (!MLX5_CAP_FLOWTABLE(mdev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
-}
-
-static enum fs_flow_table_type egress_to_fs_ft(bool egress)
-{
- return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
-}
-
-static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg,
- bool is_egress)
-{
- int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft, u32 *in,
- struct mlx5_flow_group *fg) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
- char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria.misc_parameters);
- struct mlx5_core_dev *dev = ns->dev;
- u32 saved_outer_esp_spi_mask;
- u8 match_criteria_enable;
- int ret;
-
- if (MLX5_CAP_FLOWTABLE(dev,
- flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- return create_flow_group(ns, ft, in, fg);
-
- match_criteria_enable =
- MLX5_GET(create_flow_group_in, in, match_criteria_enable);
- saved_outer_esp_spi_mask =
- MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
- if (!match_criteria_enable || !saved_outer_esp_spi_mask)
- return create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
-
- if (!(*misc_params_c) &&
- !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
- MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
-
- ret = create_flow_group(ns, ft, in, fg);
-
- MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
- MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*create_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return create_fte(ns, ft, fg, fte);
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
- return -ENOMEM;
-
- rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
- if (IS_ERR(rule->ctx)) {
- int err = PTR_ERR(rule->ctx);
-
- kfree(rule);
- return err;
- }
-
- rule->fte = fte;
- WARN_ON(rule_insert(fipsec, rule));
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = create_fte(ns, ft, fg, fte);
- restore_spec_mailbox(fte, &mbox_mod);
- if (ret) {
- _rule_delete(fipsec, rule);
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- kfree(rule);
- }
-
- return ret;
-}
-
-static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*update_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
- struct mlx5_core_dev *dev = ns->dev;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return update_fte(ns, ft, fg, modify_mask, fte);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = update_fte(ns, ft, fg, modify_mask, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte,
- bool is_egress)
-{
- int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte) =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
- struct mlx5_core_dev *dev = ns->dev;
- struct mlx5_fpga_device *fdev = dev->fpga;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_ipsec_rule *rule;
- bool is_esp = fte->action.esp_id;
- struct mailbox_mod mbox_mod;
- int ret;
-
- if (!is_esp ||
- !(fte->action.action &
- (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return delete_fte(ns, ft, fte);
-
- rule = rule_search(fipsec, fte);
- if (!rule)
- return -ENOENT;
-
- mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
- rule_delete(fipsec, rule);
-
- modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = delete_fte(ns, ft, fte);
- restore_spec_mailbox(fte, &mbox_mod);
-
- return ret;
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- u32 *in,
- struct mlx5_flow_group *fg)
-{
- return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- int modify_mask,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
- false);
-}
-
-static int
-mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_table *ft,
- struct fs_fte *fte)
-{
- return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
-}
-
-static struct mlx5_flow_cmds fpga_ipsec_ingress;
-static struct mlx5_flow_cmds fpga_ipsec_egress;
-
-const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- switch (type) {
- case FS_FT_NIC_RX:
- return &fpga_ipsec_ingress;
- case FS_FT_NIC_TX:
- return &fpga_ipsec_egress;
- default:
- WARN_ON(true);
- return NULL;
- }
-}
-
-static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn *conn;
- int err;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return 0;
-
- fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
- if (!fdev->ipsec)
- return -ENOMEM;
-
- fdev->ipsec->fdev = fdev;
-
- err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
- fdev->ipsec->caps);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
- err);
- goto error;
- }
-
- INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
- spin_lock_init(&fdev->ipsec->pending_cmds_lock);
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_ipsec_recv;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
- err);
- goto error;
- }
- fdev->ipsec->conn = conn;
-
- err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
- if (err)
- goto err_destroy_conn;
- mutex_init(&fdev->ipsec->sa_hash_lock);
-
- fdev->ipsec->rules_rb = RB_ROOT;
- mutex_init(&fdev->ipsec->rules_rb_lock);
-
- err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
- if (err) {
- mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
- err);
- goto err_destroy_hash;
- }
-
- ida_init(&fdev->ipsec->halloc);
-
- return 0;
-
-err_destroy_hash:
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
-err_destroy_conn:
- mlx5_fpga_sbu_conn_destroy(conn);
-
-error:
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
- return err;
-}
-
-static void destroy_rules_rb(struct rb_root *root)
-{
- struct mlx5_fpga_ipsec_rule *r, *tmp;
-
- rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
- rb_erase(&r->node, root);
- mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
- kfree(r);
- }
-}
-
-static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return;
-
- ida_destroy(&fdev->ipsec->halloc);
- destroy_rules_rb(&fdev->ipsec->rules_rb);
- rhashtable_destroy(&fdev->ipsec->sa_hash);
-
- mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
- kfree(fdev->ipsec);
- fdev->ipsec = NULL;
-}
-
-void mlx5_fpga_ipsec_build_fs_cmds(void)
-{
- /* ingress */
- fpga_ipsec_ingress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
- fpga_ipsec_ingress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
- fpga_ipsec_ingress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
- fpga_ipsec_ingress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_ingress;
- fpga_ipsec_ingress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
- fpga_ipsec_ingress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_ingress;
- fpga_ipsec_ingress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_ingress;
- fpga_ipsec_ingress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_ingress;
- fpga_ipsec_ingress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
-
- /* egress */
- fpga_ipsec_egress.create_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
- fpga_ipsec_egress.destroy_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
- fpga_ipsec_egress.modify_flow_table =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
- fpga_ipsec_egress.create_flow_group =
- mlx5_fpga_ipsec_fs_create_flow_group_egress;
- fpga_ipsec_egress.destroy_flow_group =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
- fpga_ipsec_egress.create_fte =
- mlx5_fpga_ipsec_fs_create_fte_egress;
- fpga_ipsec_egress.update_fte =
- mlx5_fpga_ipsec_fs_update_fte_egress;
- fpga_ipsec_egress.delete_fte =
- mlx5_fpga_ipsec_fs_delete_fte_egress;
- fpga_ipsec_egress.update_root_ft =
- mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
-}
-
-static int
-mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->tfc_pad) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.icv_len != 128) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
- v2_command))) {
-		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered when the device does not support the v2 command\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
-
- if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
- mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
- mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
- if (!fpga_xfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&fpga_xfrm->lock);
- memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
- sizeof(fpga_xfrm->accel_xfrm.attrs));
-
- return &fpga_xfrm->accel_xfrm;
-}
-
-static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_fpga_esp_xfrm *fpga_xfrm =
- container_of(xfrm, struct mlx5_fpga_esp_xfrm,
- accel_xfrm);
-	/* assuming no sa_ctx is connected to this xfrm_ctx */
- kfree(fpga_xfrm);
-}
-
-static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
-		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_OVERLAP;
- struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
- struct mlx5_fpga_esp_xfrm *fpga_xfrm;
- struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
-		mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
- return -EOPNOTSUPP;
- }
-
- if (is_v2_sadb_supported(fipsec)) {
- mlx5_core_warn(mdev, "Modify esp is not supported\n");
- return -EOPNOTSUPP;
- }
-
- fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
-
- mutex_lock(&fpga_xfrm->lock);
-
- if (!fpga_xfrm->sa_ctx)
-		/* Unbound xfrm; change only SW attrs */
- goto change_sw_xfrm_attrs;
-
- /* copy original hw sa */
- memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
- mutex_lock(&fipsec->sa_hash_lock);
- /* remove original hw sa from hash */
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa));
-	/* update hw_sa with new xfrm attrs */
- mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
- &fpga_xfrm->sa_ctx->hw_sa);
- /* try to insert new hw_sa to hash */
- err = rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash, rhash_sa);
- if (err)
- goto rollback_sa;
-
- /* modify device with new hw_sa */
- err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
- fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
- if (err)
- WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
-rollback_sa:
- if (err) {
- /* return original hw_sa to hash */
- memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
- sizeof(org_hw_sa));
- WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
- &fpga_xfrm->sa_ctx->hash,
- rhash_sa));
- }
- mutex_unlock(&fipsec->sa_hash_lock);
-
-change_sw_xfrm_attrs:
- if (!err)
- memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
- mutex_unlock(&fpga_xfrm->lock);
- return err;
-}
-
-static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
- .device_caps = mlx5_fpga_ipsec_device_caps,
- .counters_count = mlx5_fpga_ipsec_counters_count,
- .counters_read = mlx5_fpga_ipsec_counters_read,
- .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
- .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
- .init = mlx5_fpga_ipsec_init,
- .cleanup = mlx5_fpga_ipsec_cleanup,
- .esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
- .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
- .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
-};
-
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_fpga_is_ipsec_device(mdev))
- return NULL;
-
- return &fpga_ipsec_ops;
-}
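
Note on the counters_read() path removed above: the FPGA returns each 64-bit counter as two big-endian 32-bit words, low word first. A minimal sketch of that reassembly, mirroring what the deleted mlx5_fpga_ipsec_counters_read() did (the demo_ name is illustrative, not a kernel API):

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Reassemble 64-bit counters from pairs of big-endian 32-bit
	 * words, low word first.
	 */
	static void demo_unpack_counters(const __be32 *data, u64 *counters,
					 unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++)
			counters[i] = (u64)be32_to_cpu(data[i * 2]) |
				      ((u64)be32_to_cpu(data[i * 2 + 1]) << 32);
	}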
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
deleted file mode 100644
index 8931b5584477..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_IPSEC_H__
-#define __MLX5_FPGA_IPSEC_H__
-
-#include "accel/ipsec.h"
-#include "fs_cmd.h"
-
-#ifdef CONFIG_MLX5_FPGA_IPSEC
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
-u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
-const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
-void mlx5_fpga_ipsec_build_fs_cmds(void);
-bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
-#else
-static inline
-const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
-{ return NULL; }
-static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- return mlx5_fs_cmd_get_default(type);
-}
-
-static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
-static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
-
-#endif /* CONFIG_MLX5_FPGA_IPSEC */
-#endif /* __MLX5_FPGA_IPSEC_H__ */
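
The header just removed follows the usual Kconfig stub pattern: real prototypes when CONFIG_MLX5_FPGA_IPSEC is set, no-op static inlines otherwise, so callers never need #ifdefs. A generic sketch of the pattern (names are illustrative):

	#ifdef CONFIG_DEMO_FEATURE
	int demo_feature_init(struct demo_dev *dev);
	#else
	static inline int demo_feature_init(struct demo_dev *dev)
	{
		return 0;	/* feature compiled out; pretend success */
	}
	#endif /* CONFIG_DEMO_FEATURE */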
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
deleted file mode 100644
index 29b7339ebfa3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ /dev/null
@@ -1,622 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-#include "fpga/tls.h"
-#include "fpga/cmd.h"
-#include "fpga/sdk.h"
-#include "fpga/core.h"
-#include "accel/tls.h"
-
-struct mlx5_fpga_tls_command_context;
-
-typedef void (*mlx5_fpga_tls_command_complete)
- (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *ctx,
- struct mlx5_fpga_dma_buf *resp);
-
-struct mlx5_fpga_tls_command_context {
- struct list_head list;
- /* There is no guarantee on the order between the TX completion
- * and the command response.
- * The TX completion is going to touch cmd->buf even in
- * the case of successful transmission.
- * So instead of requiring separate allocations for cmd
- * and cmd->buf we've decided to use a reference counter
- */
- refcount_t ref;
- struct mlx5_fpga_dma_buf buf;
- mlx5_fpga_tls_command_complete complete;
-};
-
-static void
-mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
-{
- if (refcount_dec_and_test(&ctx->ref))
- kfree(ctx);
-}
-
-static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_fpga_conn *conn = fdev->tls->conn;
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- ctx = list_first_entry(&tls->pending_cmds,
- struct mlx5_fpga_tls_command_context, list);
- list_del(&ctx->list);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
- ctx->complete(conn, fdev, ctx, resp);
-}
-
-static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf,
- u8 status)
-{
- struct mlx5_fpga_tls_command_context *ctx =
- container_of(buf, struct mlx5_fpga_tls_command_context, buf);
-
- mlx5_fpga_tls_put_command_ctx(ctx);
-
- if (unlikely(status))
- mlx5_fpga_tls_cmd_complete(fdev, NULL);
-}
-
-static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- mlx5_fpga_tls_command_complete complete)
-{
- struct mlx5_fpga_tls *tls = fdev->tls;
- unsigned long flags;
- int ret;
-
- refcount_set(&cmd->ref, 2);
- cmd->complete = complete;
- cmd->buf.complete = mlx5_fpga_cmd_send_complete;
-
- spin_lock_irqsave(&tls->pending_cmds_lock, flags);
- /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
-	 * to make sure commands are inserted into the tls->pending_cmds list
- * and the command QP in the same order.
- */
- ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
- if (likely(!ret))
- list_add_tail(&cmd->list, &tls->pending_cmds);
- else
- complete(tls->conn, fdev, cmd, NULL);
- spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
-}
-
-/* Start of context identifiers range (inclusive) */
-#define SWID_START 0
-/* End of context identifiers range (exclusive) */
-#define SWID_END BIT(24)
-
-static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
- void *ptr)
-{
- unsigned long flags;
- int ret;
-
-	/* TLS metadata format is 1 byte for the syndrome followed
-	 * by 3 bytes of swid (software ID), so the swid must not
-	 * exceed 3 bytes. See tls_rxtx.c:insert_pet() for details.
-	 */
- BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(idr_spinlock, flags);
- ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irqrestore(idr_spinlock, flags);
- idr_preload_end();
-
- return ret;
-}
-
-static void *mlx5_fpga_tls_release_swid(struct idr *idr,
- spinlock_t *idr_spinlock, u32 swid)
-{
- unsigned long flags;
- void *ptr;
-
- spin_lock_irqsave(idr_spinlock, flags);
- ptr = idr_remove(idr, swid);
- spin_unlock_irqrestore(idr_spinlock, flags);
- return ptr;
-}
-
-static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_dma_buf *buf, u8 status)
-{
- kfree(buf);
-}
-
-static void
-mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- if (resp) {
- u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
-
- if (syndrome)
- mlx5_fpga_err(fdev,
-				      "Teardown stream failed with syndrome = %d\n",
- syndrome);
- }
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
-{
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
- MLX5_BYTE_OFF(tls_flow, ipv6));
-
- MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
- MLX5_SET(tls_cmd, cmd, direction_sx,
- MLX5_GET(tls_flow, flow, direction_sx));
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- struct mlx5_fpga_dma_buf *buf;
- int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
- void *flow;
- void *cmd;
- int ret;
-
- buf = kzalloc(size, GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
-
- cmd = (buf + 1);
-
- rcu_read_lock();
- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- if (unlikely(!flow)) {
- rcu_read_unlock();
- WARN_ONCE(1, "Received NULL pointer for handle\n");
- kfree(buf);
- return -EINVAL;
- }
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- rcu_read_unlock();
-
- MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
- MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
- MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- buf->complete = mlx_tls_kfree_complete;
-
- ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
- if (ret < 0)
- kfree(buf);
-
- return ret;
-}
-
-static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
- void *flow, u32 swid, gfp_t flags)
-{
- struct mlx5_fpga_tls_command_context *ctx;
- struct mlx5_fpga_dma_buf *buf;
- void *cmd;
-
- ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
- if (!ctx)
- return;
-
- buf = &ctx->buf;
- cmd = (ctx + 1);
- MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
- MLX5_SET(tls_cmd, cmd, swid, swid);
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
- kfree(flow);
-
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
- mlx5_fpga_tls_teardown_completion);
-}
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- void *flow;
-
- if (direction_sx)
- flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock,
- swid);
- else
- flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock,
- swid);
-
- if (!flow) {
- mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
- swid);
- return;
- }
-
- synchronize_rcu(); /* before kfree(flow) */
- mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
-}
-
-enum mlx5_fpga_setup_stream_status {
- MLX5_FPGA_CMD_PENDING,
- MLX5_FPGA_CMD_SEND_FAILED,
- MLX5_FPGA_CMD_RESPONSE_RECEIVED,
- MLX5_FPGA_CMD_ABANDONED,
-};
-
-struct mlx5_setup_stream_context {
- struct mlx5_fpga_tls_command_context cmd;
- atomic_t status;
- u32 syndrome;
- struct completion comp;
-};
-
-static void
-mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
- struct mlx5_fpga_device *fdev,
- struct mlx5_fpga_tls_command_context *cmd,
- struct mlx5_fpga_dma_buf *resp)
-{
- struct mlx5_setup_stream_context *ctx =
- container_of(cmd, struct mlx5_setup_stream_context, cmd);
- int status = MLX5_FPGA_CMD_SEND_FAILED;
- void *tls_cmd = ctx + 1;
-
-	/* If we failed to send the command, resp == NULL */
- if (resp) {
- ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
- status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
- }
-
- status = atomic_xchg_release(&ctx->status, status);
- if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
- complete(&ctx->comp);
- return;
- }
-
- mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
- ctx->syndrome);
-
- if (!ctx->syndrome) {
- /* The process was killed while waiting for the context to be
- * added, and the add completed successfully.
-		 * We need to destroy the HW context, and we can't reuse
- * the command context because we might not have received
- * the tx completion yet.
- */
- mlx5_fpga_tls_del_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC,
- MLX5_GET(tls_cmd, tls_cmd,
- direction_sx));
- }
-
- mlx5_fpga_tls_put_command_ctx(cmd);
-}
-
-static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
- struct mlx5_setup_stream_context *ctx)
-{
- struct mlx5_fpga_dma_buf *buf;
- void *cmd = ctx + 1;
- int status, ret = 0;
-
- buf = &ctx->cmd.buf;
- buf->sg[0].data = cmd;
- buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);
-
- init_completion(&ctx->comp);
- atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
- ctx->syndrome = -1;
-
- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
- mlx5_fpga_tls_setup_completion);
- wait_for_completion_killable(&ctx->comp);
-
- status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
- if (unlikely(status == MLX5_FPGA_CMD_PENDING))
- /* ctx is going to be released in mlx5_fpga_tls_setup_completion */
- return -EINTR;
-
- if (unlikely(ctx->syndrome))
- ret = -ENOMEM;
-
- mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
- return ret;
-}
-
-static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
- struct mlx5_fpga_dma_buf *buf)
-{
- struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;
-
- mlx5_fpga_tls_cmd_complete(fdev, buf);
-}
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
-{
- if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
- return false;
-
- if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
- return false;
-
- if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
- return false;
-
- return true;
-}
-
-static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
- u32 *p_caps)
-{
- int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
- u32 caps = 0;
- void *buf;
-
- buf = kzalloc(cap_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
- if (err)
- goto out;
-
- if (MLX5_GET(tls_extended_cap, buf, tx))
- caps |= MLX5_ACCEL_TLS_TX;
- if (MLX5_GET(tls_extended_cap, buf, rx))
- caps |= MLX5_ACCEL_TLS_RX;
- if (MLX5_GET(tls_extended_cap, buf, tls_v12))
- caps |= MLX5_ACCEL_TLS_V12;
- if (MLX5_GET(tls_extended_cap, buf, tls_v13))
- caps |= MLX5_ACCEL_TLS_V13;
- if (MLX5_GET(tls_extended_cap, buf, lro))
- caps |= MLX5_ACCEL_TLS_LRO;
- if (MLX5_GET(tls_extended_cap, buf, ipv6))
- caps |= MLX5_ACCEL_TLS_IPV6;
-
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
- caps |= MLX5_ACCEL_TLS_AES_GCM128;
- if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
- caps |= MLX5_ACCEL_TLS_AES_GCM256;
-
- *p_caps = caps;
- err = 0;
-out:
- kfree(buf);
- return err;
-}
-
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
- struct mlx5_fpga_conn_attr init_attr = {0};
- struct mlx5_fpga_conn *conn;
- struct mlx5_fpga_tls *tls;
- int err = 0;
-
- if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
- return 0;
-
- tls = kzalloc(sizeof(*tls), GFP_KERNEL);
- if (!tls)
- return -ENOMEM;
-
- err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
- if (err)
- goto error;
-
- if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
- err = -ENOTSUPP;
- goto error;
- }
-
- init_attr.rx_size = SBU_QP_QUEUE_SIZE;
- init_attr.tx_size = SBU_QP_QUEUE_SIZE;
- init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
- init_attr.cb_arg = fdev;
- conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
- if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
- mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
- err);
- goto error;
- }
-
- tls->conn = conn;
- spin_lock_init(&tls->pending_cmds_lock);
- INIT_LIST_HEAD(&tls->pending_cmds);
-
- idr_init(&tls->tx_idr);
- idr_init(&tls->rx_idr);
- spin_lock_init(&tls->tx_idr_spinlock);
- spin_lock_init(&tls->rx_idr_spinlock);
- fdev->tls = tls;
- return 0;
-
-error:
- kfree(tls);
- return err;
-}
-
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- struct mlx5_fpga_device *fdev = mdev->fpga;
-
- if (!fdev || !fdev->tls)
- return;
-
- mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
- kfree(fdev->tls);
- fdev->tls = NULL;
-}
-
-static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
- struct tls_crypto_info *info,
- __be64 *rcd_sn)
-{
- struct tls12_crypto_info_aes_gcm_128 *crypto_info =
- (struct tls12_crypto_info_aes_gcm_128 *)info;
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
- TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
-
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
- crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- /* in AES-GCM 128 we need to write the key twice */
- memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
- TLS_CIPHER_AES_GCM_128_KEY_SIZE,
- crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
-}
-
-static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
- struct tls_crypto_info *crypto_info)
-{
- __be64 rcd_sn;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
- return -EINVAL;
- mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 swid, u32 tcp_sn)
-{
- u32 caps = mlx5_fpga_tls_device_caps(mdev);
- struct mlx5_setup_stream_context *ctx;
- int ret = -ENOMEM;
- size_t cmd_size;
- void *cmd;
-
- cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
- ctx = kzalloc(cmd_size, GFP_KERNEL);
- if (!ctx)
- goto out;
-
- cmd = ctx + 1;
- ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
- if (ret)
- goto free_ctx;
-
- mlx5_fpga_tls_flow_to_cmd(flow, cmd);
-
- MLX5_SET(tls_cmd, cmd, swid, swid);
- MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);
-
- return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
-
-free_ctx:
- kfree(ctx);
-out:
- return ret;
-}
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- struct mlx5_fpga_tls *tls = mdev->fpga->tls;
- int ret = -ENOMEM;
- u32 swid;
-
- if (direction_sx)
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, flow);
- else
- ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, flow);
-
- if (ret < 0)
- return ret;
-
- swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
-
- ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
- if (ret && ret != -EINTR)
- goto free_swid;
-
- *p_swid = swid;
- return 0;
-free_swid:
- if (direction_sx)
- mlx5_fpga_tls_release_swid(&tls->tx_idr,
- &tls->tx_idr_spinlock, swid);
- else
- mlx5_fpga_tls_release_swid(&tls->rx_idr,
- &tls->rx_idr_spinlock, swid);
-
- return ret;
-}
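
The swid allocator removed above keeps IDs below BIT(24) because the TLS metadata packs a 1-byte syndrome in front of a 3-byte swid. A sketch of that allocation pattern, mirroring the deleted mlx5_fpga_tls_alloc_swid() (demo_ names are illustrative):

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	#define DEMO_SWID_START	0
	#define DEMO_SWID_END	BIT(24)	/* swid must fit in 3 bytes */

	static int demo_alloc_swid(struct idr *idr, spinlock_t *lock, void *ptr)
	{
		unsigned long flags;
		int ret;

		BUILD_BUG_ON((DEMO_SWID_END - 1) & 0xFF000000);

		idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
		spin_lock_irqsave(lock, flags);
		ret = idr_alloc(idr, ptr, DEMO_SWID_START, DEMO_SWID_END,
				GFP_ATOMIC);
		spin_unlock_irqrestore(lock, flags);
		idr_preload_end();

		return ret;	/* new swid on success, -ENOSPC/-ENOMEM on failure */
	}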
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
deleted file mode 100644
index 5714cf391d1b..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_FPGA_TLS_H__
-#define __MLX5_FPGA_TLS_H__
-
-#include <linux/mlx5/driver.h>
-
-#include <net/tls.h>
-#include "fpga/core.h"
-
-struct mlx5_fpga_tls {
- struct list_head pending_cmds;
- spinlock_t pending_cmds_lock; /* Protects pending_cmds */
- u32 caps;
- struct mlx5_fpga_conn *conn;
-
- struct idr tx_idr;
- struct idr rx_idr;
- spinlock_t tx_idr_spinlock; /* protects the IDR */
- spinlock_t rx_idr_spinlock; /* protects the IDR */
-};
-
-int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-
-void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags, bool direction_sx);
-
-bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
-int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
-
-static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mdev->fpga->tls->caps;
-}
-
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-
-#endif /* __MLX5_FPGA_TLS_H__ */
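
One more note on the deleted TLS command path: responses carry no cookie, so they are matched to requests purely by FIFO order. mlx5_fpga_sbu_conn_sendmsg() is called under pending_cmds_lock so that list order equals QP submission order, and the completion handler pops the list head. A stripped-down sketch of that invariant (demo_ names and hw_send() are illustrative stand-ins, not kernel API):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_cmd {
		struct list_head list;
	};

	static LIST_HEAD(demo_pending);
	static DEFINE_SPINLOCK(demo_lock);

	extern void hw_send(struct demo_cmd *cmd);	/* stand-in for the QP sendmsg */

	static void demo_send(struct demo_cmd *cmd)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		/* Queue and submit under one lock so that the pending list
		 * order matches the hardware submission order.
		 */
		list_add_tail(&cmd->list, &demo_pending);
		hw_send(cmd);
		spin_unlock_irqrestore(&demo_lock, flags);
	}

	static struct demo_cmd *demo_complete(void)
	{
		struct demo_cmd *cmd;
		unsigned long flags;

		/* Responses arrive in submission order, so the head of the
		 * pending list is always the command being completed.
		 */
		spin_lock_irqsave(&demo_lock, flags);
		cmd = list_first_entry(&demo_pending, struct demo_cmd, list);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&demo_lock, flags);

		return cmd;
	}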
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a0ac17c3f12f..33e9f86cf7d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -878,9 +878,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
-#ifdef CONFIG_MLX5_IPSEC
case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
-#endif
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 816d991f7621..297e6a468a3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,8 +40,6 @@
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
-#include "accel/ipsec.h"
-#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -188,24 +186,18 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
-#ifdef CONFIG_MLX5_IPSEC
.ar_size = 2,
-#else
- .ar_size = 1,
-#endif
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
-#ifdef CONFIG_MLX5_IPSEC
ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
KERNEL_TX_IPSEC_NUM_LEVELS))),
-#endif
}
};
@@ -2519,10 +2511,6 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
- (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
- cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
-
/* Create the root namespace */
root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
@@ -3172,8 +3160,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 614687e0e3d9..cfb8bedba512 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,7 +35,6 @@
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"
-#include "accel/tls.h"
enum {
MCQS_IDENTIFIER_BOOT_IMG = 0x1,
@@ -249,7 +248,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
+ if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2589e39eb9c7..d504c8cb8f96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,9 +62,7 @@
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec.h"
-#include "accel/tls.h"
+#include "en_accel/ipsec_offload.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
@@ -1183,14 +1181,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
- mlx5_accel_ipsec_init(dev);
-
- err = mlx5_accel_tls_init(dev);
- if (err) {
- mlx5_core_err(dev, "TLS device start failed %d\n", err);
- goto err_tls_start;
- }
-
err = mlx5_init_fs(dev);
if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n");
@@ -1238,9 +1228,6 @@ err_vhca:
err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
- mlx5_accel_tls_cleanup(dev);
-err_tls_start:
- mlx5_accel_ipsec_cleanup(dev);
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
@@ -1266,8 +1253,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
- mlx5_accel_ipsec_cleanup(dev);
- mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
@@ -1947,7 +1932,6 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
- mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
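The main.c hunks delete two load stages, so the goto-based unwind loses its err_tls_start label and mlx5_init_fs() now directly follows FPGA start. A condensed sketch of the resulting ladder, showing only the two neighbouring stages:

/* Sketch: each stage's failure skips its own cleanup and runs the
 * cleanups of every stage that succeeded before it, in reverse order.
 */
static int example_load(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_fpga_device_start(dev);
	if (err)
		return err;

	err = mlx5_init_fs(dev);	/* was preceded by accel TLS init */
	if (err)
		goto err_fs;

	return 0;

err_fs:
	mlx5_fpga_device_stop(dev);
	return err;
}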
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 196adeb33495..1a465fd5d8b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
- core_acl_flex_actions.o core_env.o
+ core_acl_flex_actions.o core_env.o \
+ core_linecards.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b13e0f8d232a..fc52832241b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -48,6 +48,7 @@ struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
u16 local_port;
+ struct mlxsw_linecard *linecard;
};
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
@@ -82,6 +83,7 @@ struct mlxsw_core {
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
struct mlxsw_thermal *thermal;
+ struct mlxsw_linecards *linecards;
struct mlxsw_core_port *ports;
unsigned int max_ports;
atomic_t active_ports_count;
@@ -94,6 +96,17 @@ struct mlxsw_core {
/* driver_priv has to be always the last item */
};
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->linecards;
+}
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ mlxsw_core->linecards = linecards;
+}
+
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
static u64 mlxsw_ports_occ_get(void *priv)
@@ -2145,6 +2158,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_fw_rev_validate;
+ err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_linecards_init;
+
err = mlxsw_core_health_init(mlxsw_core);
if (err)
goto err_health_init;
@@ -2158,7 +2175,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
+ err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env);
if (err)
goto err_env_init;
@@ -2183,6 +2200,8 @@ err_thermal_init:
err_hwmon_init:
mlxsw_core_health_fini(mlxsw_core);
err_health_init:
+ mlxsw_linecards_fini(mlxsw_core);
+err_linecards_init:
err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
@@ -2255,6 +2274,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
mlxsw_core_health_fini(mlxsw_core);
+ mlxsw_linecards_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
mlxsw_emad_fini(mlxsw_core);
@@ -2956,7 +2976,7 @@ EXPORT_SYMBOL(mlxsw_core_res_get);
static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -2979,6 +2999,15 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
attrs.switch_id.id_len = switch_id_len;
mlxsw_core_port->local_port = local_port;
devlink_port_attrs_set(devlink_port, &attrs);
+ if (slot_index) {
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(mlxsw_core->linecards,
+ slot_index);
+ mlxsw_core_port->linecard = linecard;
+ devlink_port_linecard_set(devlink_port,
+ linecard->devlink_linecard);
+ }
err = devl_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -2996,7 +3025,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
}
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split,
+ u8 slot_index, u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
@@ -3005,7 +3034,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
int err;
err = __mlxsw_core_port_init(mlxsw_core, local_port,
- DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes,
switch_id, switch_id_len);
@@ -3036,7 +3065,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
DEVLINK_PORT_FLAVOUR_CPU,
- 0, false, 0, false, 0,
+ 0, 0, false, 0, false, 0,
switch_id, switch_id_len);
if (err)
return err;
@@ -3112,6 +3141,16 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+
+ return mlxsw_core_port->linecard;
+}
+
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
{
const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
@@ -3124,6 +3163,15 @@ bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
}
EXPORT_SYMBOL(mlxsw_core_port_is_xm);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
+ return;
+ mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
+}
+
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->env;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 16ee5e90973d..d008282d7f2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -35,6 +35,11 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core);
+
+void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecard);
+
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev);
@@ -231,7 +236,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
- u32 port_number, bool split, u32 split_port_subnumber,
+ u8 slot_index, u32 port_number, bool split,
+ u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
unsigned char switch_id_len);
@@ -252,7 +258,14 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
+struct mlxsw_linecard *
+mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
+ u16 local_port);
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
+void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -326,6 +339,10 @@ struct mlxsw_driver {
unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack);
+ void (*ports_remove_selected)(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv,
+ u16 local_port),
+ void *priv);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -543,4 +560,65 @@ static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
return (struct mlxsw_skb_cb *) skb->cb;
}
+struct mlxsw_linecards;
+
+enum mlxsw_linecard_status_event_type {
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
+};
+
+struct mlxsw_linecard {
+ u8 slot_index;
+ struct mlxsw_linecards *linecards;
+ struct devlink_linecard *devlink_linecard;
+ struct mutex lock; /* Locks accesses to the linecard structure */
+ char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN];
+ char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */
+ enum mlxsw_linecard_status_event_type status_event_type_to;
+ struct delayed_work status_event_to_dw;
+ u8 provisioned:1,
+ ready:1,
+ active:1;
+ u16 hw_revision;
+ u16 ini_version;
+ struct list_head device_list;
+};
+
+struct mlxsw_linecard_types_info;
+
+struct mlxsw_linecards {
+ struct mlxsw_core *mlxsw_core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 count;
+ struct mlxsw_linecard_types_info *types_info;
+ struct list_head event_ops_list;
+ struct mutex event_ops_list_lock; /* Locks accesses to event ops list */
+ struct mlxsw_linecard linecards[];
+};
+
+static inline struct mlxsw_linecard *
+mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
+{
+ return &linecards->linecards[slot_index - 1];
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info);
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
+
+typedef void mlxsw_linecards_event_op_t(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, void *priv);
+
+struct mlxsw_linecards_event_ops {
+ mlxsw_linecards_event_op_t *got_active;
+ mlxsw_linecards_event_op_t *got_inactive;
+};
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv);
+
#endif
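core.h now exposes the line-card objects and an event-ops subscription API. A minimal usage sketch, with the handler bodies and priv illustrative; note that mlxsw_linecard_get() is 1-based, so slot 0 (the main board) is never passed to it:

/* Sketch: subscribe to line-card activation state changes. */
static void example_got_active(struct mlxsw_core *mlxsw_core,
			       u8 slot_index, void *priv)
{
	struct mlxsw_linecard *linecard =
		mlxsw_linecard_get(mlxsw_core_linecards(mlxsw_core),
				   slot_index);
	/* react to the now-active card, e.g. instantiate its ports */
}

static void example_got_inactive(struct mlxsw_core *mlxsw_core,
				 u8 slot_index, void *priv)
{
	/* undo whatever got_active set up for this slot */
}

static struct mlxsw_linecards_event_ops example_ops = {
	.got_active = example_got_active,
	.got_inactive = example_got_inactive,
};

err = mlxsw_linecards_event_ops_register(mlxsw_core, &example_ops, priv);
/* ... */
mlxsw_linecards_event_ops_unregister(mlxsw_core, &example_ops, priv);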
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 29a74b8bd5b5..34bec9cd572c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -21,19 +21,60 @@ struct mlxsw_env_module_info {
enum mlxsw_reg_pmtm_module_type type;
};
-struct mlxsw_env {
- struct mlxsw_core *core;
+struct mlxsw_env_line_card {
u8 module_count;
- struct mutex module_info_lock; /* Protects 'module_info'. */
+ bool active;
struct mlxsw_env_module_info module_info[];
};
-static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+struct mlxsw_env {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 max_module_count; /* Maximum number of modules per-slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mutex line_cards_lock; /* Protects line cards. */
+ struct mlxsw_env_line_card *line_cards[];
+};
+
+static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ return mlxsw_env->line_cards[slot_index]->active;
+}
+
+static bool mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
+ u8 slot_index)
+{
+ bool active;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ active = __mlxsw_env_linecard_is_active(mlxsw_env, slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return active;
+}
+
+static struct
+mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ return &mlxsw_env->line_cards[slot_index]->module_info[module];
+}
+
+static int __mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ struct mlxsw_env_module_info *module_info;
int err;
- switch (mlxsw_env->module_info[module].type) {
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ module_info = mlxsw_env_module_info_get(core, slot_index, module);
+ switch (module_info->type) {
case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR:
err = -EINVAL;
break;
@@ -44,32 +85,34 @@ static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
return err;
}
-static int mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+static int mlxsw_env_validate_module_type(struct mlxsw_core *core,
+ u8 slot_index, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
int err;
- mutex_lock(&mlxsw_env->module_info_lock);
- err = __mlxsw_env_validate_module_type(core, module);
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = __mlxsw_env_validate_module_type(core, slot_index, module);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
static int
-mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
- bool *cmis)
+mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id,
+ bool *qsfp, bool *cmis)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 ident;
int err;
- err = mlxsw_env_validate_module_type(core, id);
+ err = mlxsw_env_validate_module_type(core, slot_index, id);
if (err)
return err;
- mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0,
+ MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -99,8 +142,8 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
}
static int
-mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
- u16 offset, u16 size, void *data,
+mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, u16 offset, u16 size, void *data,
bool qsfp, unsigned int *p_read_size)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
@@ -145,7 +188,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
}
}
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size,
+ i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
if (err)
@@ -162,8 +206,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
return 0;
}
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp)
+int
+mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index,
+ int module, int off, int *temp)
{
unsigned int module_temp, module_crit, module_emerg;
union {
@@ -177,8 +222,9 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int page;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
@@ -207,7 +253,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
*/
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(core, slot_index, module, &qsfp,
+ &cmis);
if (err)
return err;
@@ -219,12 +266,12 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
else
page = MLXSW_REG_MCIA_TH_PAGE_NUM;
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page,
MLXSW_REG_MCIA_TH_PAGE_OFF + off,
MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
} else {
- mlxsw_reg_mcia_pack(mcia_pl, module, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0,
MLXSW_REG_MCIA_PAGE0_LO,
off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_HIGH);
@@ -242,24 +289,31 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
}
int mlxsw_env_get_module_info(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
u8 module_rev_id, module_id, diag_mon;
unsigned int read_size;
int err;
- err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev,
"EEPROM is not equipped on port module type");
return err;
}
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
- module_info, false, &read_size);
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, module, 0,
+ offset, module_info, false,
+ &read_size);
if (err)
return err;
@@ -288,9 +342,10 @@ int mlxsw_env_get_module_info(struct net_device *netdev,
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
/* Verify if transceiver provides diagnostic monitoring page */
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
- SFP_DIAGMON, 1, &diag_mon,
- false, &read_size);
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, SFP_DIAGMON, 1,
+ &diag_mon, false,
+ &read_size);
if (err)
return err;
@@ -329,9 +384,11 @@ int mlxsw_env_get_module_info(struct net_device *netdev,
EXPORT_SYMBOL(mlxsw_env_get_module_info);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int offset = ee->offset;
unsigned int read_size;
bool qsfp, cmis;
@@ -341,14 +398,21 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
if (!ee->len)
return -EINVAL;
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n");
+ return -EIO;
+ }
+
memset(data, 0, ee->len);
/* Validate module identifier value. */
- err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis);
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module,
+ &qsfp, &cmis);
if (err)
return err;
while (i < ee->len) {
- err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
+ err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index,
+ module, offset,
ee->len - i, data + i,
qsfp, &read_size);
if (err) {
@@ -394,15 +458,23 @@ static int mlxsw_env_mcia_status_process(const char *mcia_pl,
}
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
u32 bytes_read = 0;
u16 device_addr;
int err;
- err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot read EEPROM of module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
return err;
@@ -419,7 +491,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
size = min_t(u8, page->length - bytes_read,
MLXSW_REG_MCIA_EEPROM_SIZE);
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page,
device_addr + bytes_read, size,
page->i2c_address);
mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
@@ -443,20 +515,23 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
-static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
}
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
u32 req = *flags;
int err;
@@ -464,28 +539,34 @@ int mlxsw_env_reset_module(struct net_device *netdev,
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
return 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ netdev_err(netdev, "Cannot reset module on an inactive line card\n");
+ return -EIO;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev, "Reset module is not supported on port module type\n");
goto out;
}
- if (mlxsw_env->module_info[module].num_ports_up) {
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->num_ports_up) {
netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
err = -EINVAL;
goto out;
}
- if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+ if (module_info->num_ports_mapped > 1 &&
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
err = -EINVAL;
goto out;
}
- err = mlxsw_env_module_reset(mlxsw_core, module);
+ err = mlxsw_env_module_reset(mlxsw_core, slot_index, module);
if (err) {
netdev_err(netdev, "Failed to reset module\n");
goto out;
@@ -494,32 +575,39 @@ int mlxsw_env_reset_module(struct net_device *netdev,
*flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_reset_module);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
char mcion_pl[MLXSW_REG_MCION_LEN];
u32 status_bits;
- int err;
+ int err = 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type");
goto out;
}
- params->policy = mlxsw_env->module_info[module].power_mode_policy;
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ params->policy = module_info->power_mode_policy;
+
+ /* Avoid accessing an inactive line card, as it will result in an error. */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out;
- mlxsw_reg_mcion_pack(mcion_pl, module);
+ mlxsw_reg_mcion_pack(mcion_pl, slot_index, module);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
@@ -536,18 +624,18 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool enable)
+ u8 slot_index, u8 module, bool enable)
{
enum mlxsw_reg_pmaos_admin_status admin_status;
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module);
admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
@@ -557,12 +645,13 @@ static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
}
static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power)
+ u8 slot_index, u8 module,
+ bool low_power)
{
u16 eeprom_override_mask, eeprom_override;
char pmmp_pl[MLXSW_REG_PMMP_LEN];
- mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module);
mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
/* Mask all the bits except low power mode. */
eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
@@ -575,24 +664,34 @@ static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
}
static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
- u8 module, bool low_power,
+ u8 slot_index, u8 module,
+ bool low_power,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int err;
- err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ /* Avoid accessing an inactive line card, as it will result in an error.
+ * Cached configuration will be applied by mlxsw_env_got_active() when
+ * line card becomes active.
+ */
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ return 0;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
return err;
}
- err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ err = mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ low_power);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
goto err_module_low_power_set;
}
- err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
goto err_module_enable_set;
@@ -601,67 +700,84 @@ static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
return 0;
err_module_enable_set:
- mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+ mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module,
+ !low_power);
err_module_low_power_set:
- mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true);
return err;
}
-int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
- enum ethtool_module_power_mode_policy policy,
- struct netlink_ext_ack *extack)
+static int
+mlxsw_env_set_module_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
{
- struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
bool low_power;
int err = 0;
- if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
- policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&mlxsw_env->module_info_lock);
-
- err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Power mode set is not supported on port module type");
goto out;
}
- if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy == policy)
goto out;
/* If any ports are up, we are already in high power mode. */
- if (mlxsw_env->module_info[module].num_ports_up)
+ if (module_info->num_ports_up)
goto out_set_policy;
low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
- extack);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ low_power, extack);
if (err)
goto out;
out_set_policy:
- mlxsw_env->module_info[module].power_mode_policy = policy;
+ module_info->power_mode_policy = policy;
out:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, slot_index,
+ module, policy, extack);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
return err;
}
EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
- u8 module,
+ u8 slot_index, u8 module,
bool *p_has_temp_sensor)
{
char mtbr_pl[MLXSW_REG_MTBR_LEN];
u16 temp;
int err;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ mlxsw_reg_mtbr_pack(mtbr_pl, slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
if (err)
return err;
@@ -681,13 +797,15 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
return 0;
}
-static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
- u16 sensor_index, bool enable)
+static int
+mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u16 sensor_index, bool enable)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
enum mlxsw_reg_mtmp_tee tee;
int err, threshold_hi;
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -695,6 +813,7 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
if (enable) {
err = mlxsw_env_module_temp_thresholds_get(mlxsw_core,
+ slot_index,
sensor_index -
MLXSW_REG_MTMP_MODULE_INDEX_MIN,
SFP_TEMP_HIGH_WARN,
@@ -721,14 +840,16 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
}
-static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err, sensor_index;
bool has_temp_sensor;
- for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
- err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
- &has_temp_sensor);
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index,
+ i, &has_temp_sensor);
if (err)
return err;
@@ -736,7 +857,8 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
continue;
sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true);
+ err = mlxsw_env_temp_event_set(mlxsw_core, slot_index,
+ sensor_index, true);
if (err)
return err;
}
@@ -753,6 +875,7 @@ struct mlxsw_env_module_temp_warn_event {
static void mlxsw_env_mtwe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
int i, sensor_warning;
bool is_overheat;
@@ -761,7 +884,7 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- for (i = 0; i < mlxsw_env->module_count; i++) {
+ for (i = 0; i < mlxsw_env->max_module_count; i++) {
/* 64-127 of sensor_index are mapped to the port modules
* sequentially (module 0 is mapped to sensor_index 64,
* module 1 to sensor_index 65 and so on)
@@ -769,9 +892,10 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
sensor_warning =
mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
- mutex_lock(&mlxsw_env->module_info_lock);
- is_overheat =
- mlxsw_env->module_info[i].is_overheat;
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ /* MTWE only supports main board. */
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core, 0, i);
+ is_overheat = module_info->is_overheat;
if ((is_overheat && sensor_warning) ||
(!is_overheat && !sensor_warning)) {
@@ -779,21 +903,21 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work)
* warning OR current state in "no warning" and MTWE
* does not report warning.
*/
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
continue;
} else if (is_overheat && !sensor_warning) {
/* MTWE reports "no warning", turn is_overheat off.
*/
- mlxsw_env->module_info[i].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
} else {
/* Current state is "no warning" and MTWE reports
* "warning", increase the counter and turn is_overheat
* on.
*/
- mlxsw_env->module_info[i].is_overheat = true;
- mlxsw_env->module_info[i].module_overheat_counter++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ module_info->is_overheat = true;
+ module_info->module_overheat_counter++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
}
@@ -837,6 +961,7 @@ static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
struct mlxsw_env_module_plug_unplug_event {
struct mlxsw_env *mlxsw_env;
+ u8 slot_index;
u8 module;
struct work_struct work;
};
@@ -844,6 +969,7 @@ struct mlxsw_env_module_plug_unplug_event {
static void mlxsw_env_pmpe_event_work(struct work_struct *work)
{
struct mlxsw_env_module_plug_unplug_event *event;
+ struct mlxsw_env_module_info *module_info;
struct mlxsw_env *mlxsw_env;
bool has_temp_sensor;
u16 sensor_index;
@@ -853,11 +979,16 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[event->module].is_overheat = false;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_env->core,
+ event->slot_index,
+ event->module);
+ module_info->is_overheat = false;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
- err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core,
+ event->slot_index,
+ event->module,
&has_temp_sensor);
/* Do not disable events on modules without sensors or faulty sensors
* because FW returns errors.
@@ -869,7 +1000,8 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
goto out;
sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
- mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true);
+ mlxsw_env_temp_event_set(mlxsw_env->core, event->slot_index,
+ sensor_index, true);
out:
kfree(event);
@@ -879,12 +1011,14 @@ static void
mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
void *priv)
{
+ u8 slot_index = mlxsw_reg_pmpe_slot_index_get(pmpe_pl);
struct mlxsw_env_module_plug_unplug_event *event;
enum mlxsw_reg_pmpe_module_status module_status;
u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl);
struct mlxsw_env *mlxsw_env = priv;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ if (WARN_ON_ONCE(module >= mlxsw_env->max_module_count ||
+ slot_index >= mlxsw_env->num_of_slots))
return;
module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl);
@@ -896,6 +1030,7 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
return;
event->mlxsw_env = mlxsw_env;
+ event->slot_index = slot_index;
event->module = module;
INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
mlxsw_core_schedule_work(&event->work);
@@ -923,14 +1058,16 @@ mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
}
static int
-mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 slot_index)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i, err;
- for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, i);
+ mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i);
mlxsw_reg_pmaos_e_set(pmaos_pl,
MLXSW_REG_PMAOS_E_GENERATE_EVENT);
mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
@@ -942,146 +1079,330 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
}
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter)
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- *p_counter = mlxsw_env->module_info[module].module_overheat_counter;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ *p_counter = module_info->module_overheat_counter;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return 0;
}
EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped++;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_map);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
- mlxsw_env->module_info[module].num_ports_mapped--;
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
int err = 0;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_inc;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_inc;
/* Transition to high power mode following first port using the module
* being put administratively up.
*/
- err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
- NULL);
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module,
+ false, NULL);
if (err)
goto out_unlock;
out_inc:
- mlxsw_env->module_info[module].num_ports_up++;
+ module_info->num_ports_up++;
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
return err;
}
EXPORT_SYMBOL(mlxsw_env_module_port_up);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ struct mlxsw_env_module_info *module_info;
- mutex_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->line_cards_lock);
- mlxsw_env->module_info[module].num_ports_up--;
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module);
+ module_info->num_ports_up--;
- if (mlxsw_env->module_info[module].power_mode_policy !=
+ if (module_info->power_mode_policy !=
ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
goto out_unlock;
- if (mlxsw_env->module_info[module].num_ports_up != 0)
+ if (module_info->num_ports_up != 0)
goto out_unlock;
/* Transition to low power mode following last port using the module
* being put administratively down.
*/
- __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+ __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, true,
+ NULL);
out_unlock:
- mutex_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
}
EXPORT_SYMBOL(mlxsw_env_module_port_down);
+static int mlxsw_env_line_cards_alloc(struct mlxsw_env *env)
+{
+ struct mlxsw_env_module_info *module_info;
+ int i, j;
+
+ for (i = 0; i < env->num_of_slots; i++) {
+ env->line_cards[i] = kzalloc(struct_size(env->line_cards[i],
+ module_info,
+ env->max_module_count),
+ GFP_KERNEL);
+ if (!env->line_cards[i])
+ goto kzalloc_err;
+
+ /* Firmware defaults to high power mode policy where modules
+ * are transitioned to high power mode following plug-in.
+ */
+ for (j = 0; j < env->max_module_count; j++) {
+ module_info = &env->line_cards[i]->module_info[j];
+ module_info->power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+ }
+ }
+
+ return 0;
+
+kzalloc_err:
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+ return -ENOMEM;
+}
+
+static void mlxsw_env_line_cards_free(struct mlxsw_env *env)
+{
+ int i = env->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(env->line_cards[i]);
+}
+
+static int
+mlxsw_env_module_event_enable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+ int err;
+
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_env->core,
+ slot_index);
+ if (err)
+ return err;
+
+ err = mlxsw_env_module_temp_event_enable(mlxsw_env->core, slot_index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+mlxsw_env_module_event_disable(struct mlxsw_env *mlxsw_env, u8 slot_index)
+{
+}
+
static int
-mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core)
+mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int i;
- for (i = 0; i < mlxsw_env->module_count; i++) {
+ for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) {
+ struct mlxsw_env_module_info *module_info;
char pmtm_pl[MLXSW_REG_PMTM_LEN];
int err;
- mlxsw_reg_pmtm_pack(pmtm_pl, 0, i);
+ mlxsw_reg_pmtm_pack(pmtm_pl, slot_index, i);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
if (err)
return err;
- mlxsw_env->module_info[i].type =
- mlxsw_reg_pmtm_module_type_get(pmtm_pl);
+ module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index,
+ i);
+ module_info->type = mlxsw_reg_pmtm_module_type_get(pmtm_pl);
}
return 0;
}
-int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
+static void
+mlxsw_env_linecard_modules_power_mode_apply(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_env *env,
+ u8 slot_index)
{
+ int i;
+
+ for (i = 0; i < env->line_cards[slot_index]->module_count; i++) {
+ enum ethtool_module_power_mode_policy policy;
+ struct mlxsw_env_module_info *module_info;
+ struct netlink_ext_ack extack;
+ int err;
+
+ module_info = &env->line_cards[slot_index]->module_info[i];
+ policy = module_info->power_mode_policy;
+ err = mlxsw_env_set_module_power_mode_apply(mlxsw_core,
+ slot_index, i,
+ policy, &extack);
+ if (err)
+ dev_err(env->bus_info->dev, "%s\n", extack._msg);
+ }
+}
+
+static void
+mlxsw_env_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int err;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, slot_index);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ goto out_unlock;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &mlxsw_env->line_cards[slot_index]->module_count,
+ NULL);
+
+ err = mlxsw_env_module_event_enable(mlxsw_env, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to enable port module events for line card in slot %d\n",
+ slot_index);
+ goto err_mlxsw_env_module_event_enable;
+ }
+ err = mlxsw_env_module_type_set(mlxsw_env->core, slot_index);
+ if (err) {
+ dev_err(mlxsw_env->bus_info->dev, "Failed to set modules' type for line card in slot %d\n",
+ slot_index);
+ goto err_type_set;
+ }
+
+ mlxsw_env->line_cards[slot_index]->active = true;
+ /* Apply power mode policy. */
+ mlxsw_env_linecard_modules_power_mode_apply(mlxsw_core, mlxsw_env,
+ slot_index);
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+
+ return;
+
+err_type_set:
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+err_mlxsw_env_module_event_enable:
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static void
+mlxsw_env_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+
+ mutex_lock(&mlxsw_env->line_cards_lock);
+ if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index))
+ goto out_unlock;
+ mlxsw_env->line_cards[slot_index]->active = false;
+ mlxsw_env_module_event_disable(mlxsw_env, slot_index);
+ mlxsw_env->line_cards[slot_index]->module_count = 0;
+out_unlock:
+ mutex_unlock(&mlxsw_env->line_cards_lock);
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
+ .got_active = mlxsw_env_got_active,
+ .got_inactive = mlxsw_env_got_inactive,
+};
+
+int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env)
+{
+ u8 module_count, num_of_slots, max_module_count;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
- u8 module_count;
- int i, err;
+ int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count,
+ &num_of_slots);
+ /* If the system is modular, get the maximum number of modules per-slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ max_module_count = num_of_slots ?
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) :
+ module_count;
- env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL);
+ env = kzalloc(struct_size(env, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!env)
return -ENOMEM;
- /* Firmware defaults to high power mode policy where modules are
- * transitioned to high power mode following plug-in.
- */
- for (i = 0; i < module_count; i++)
- env->module_info[i].power_mode_policy =
- ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
-
- mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
- env->module_count = module_count;
+ env->bus_info = bus_info;
+ env->num_of_slots = num_of_slots + 1;
+ env->max_module_count = max_module_count;
+ err = mlxsw_env_line_cards_alloc(env);
+ if (err)
+ goto err_mlxsw_env_line_cards_alloc;
+
+ mutex_init(&env->line_cards_lock);
*p_env = env;
+ err = mlxsw_linecards_event_ops_register(env->core,
+ &mlxsw_env_event_ops, env);
+ if (err)
+ goto err_linecards_event_ops_register;
+
err = mlxsw_env_temp_warn_event_register(mlxsw_core);
if (err)
goto err_temp_warn_event_register;
@@ -1090,38 +1411,54 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (err)
goto err_module_plug_event_register;
- err = mlxsw_env_module_oper_state_event_enable(mlxsw_core);
- if (err)
- goto err_oper_state_event_enable;
-
- err = mlxsw_env_module_temp_event_enable(mlxsw_core);
+ /* Set 'module_count' only for main board. Actual count for line card
+ * is to be set after line card is activated.
+ */
+ env->line_cards[0]->module_count = num_of_slots ? 0 : module_count;
+ /* Enable events only for main board. Line card events are to be
+ * configured only after line card is activated. Before that, access to
+ * modules on line cards is not allowed.
+ */
+ err = mlxsw_env_module_event_enable(env, 0);
if (err)
- goto err_temp_event_enable;
+ goto err_mlxsw_env_module_event_enable;
- err = mlxsw_env_module_type_set(mlxsw_core);
+ err = mlxsw_env_module_type_set(mlxsw_core, 0);
if (err)
goto err_type_set;
+ env->line_cards[0]->active = true;
+
return 0;
err_type_set:
-err_temp_event_enable:
-err_oper_state_event_enable:
+ mlxsw_env_module_event_disable(env, 0);
+err_mlxsw_env_module_event_enable:
mlxsw_env_module_plug_event_unregister(env);
err_module_plug_event_register:
mlxsw_env_temp_warn_event_unregister(env);
err_temp_warn_event_register:
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+err_linecards_event_ops_register:
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
+err_mlxsw_env_line_cards_alloc:
kfree(env);
return err;
}
void mlxsw_env_fini(struct mlxsw_env *env)
{
+ env->line_cards[0]->active = false;
+ mlxsw_env_module_event_disable(env, 0);
mlxsw_env_module_plug_event_unregister(env);
/* Make sure there is no more event work scheduled. */
mlxsw_core_flush_owq();
mlxsw_env_temp_warn_event_unregister(env);
- mutex_destroy(&env->module_info_lock);
+ mlxsw_linecards_event_ops_unregister(env->core,
+ &mlxsw_env_event_ops, env);
+ mutex_destroy(&env->line_cards_lock);
+ mlxsw_env_line_cards_free(env);
kfree(env);
}
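The rewritten core_env.c replaces the single module_info array with a two-level layout: one mlxsw_env_line_card pointer per slot (slot 0 being the main board), each ending in a flexible array of per-module records sized with struct_size(). A stripped-down sketch of that shape and its allocation, with the struct names shortened for illustration:

/* Sketch: per-slot flexible arrays, as in mlxsw_env_line_cards_alloc(). */
struct example_slot {
	u8 module_count;
	bool active;
	struct mlxsw_env_module_info module_info[];	/* one per module */
};

struct example_env {
	u8 num_of_slots;		/* includes the main board */
	struct example_slot *slots[];	/* one entry per slot */
};

/* allocation of one slot, max_module_count records deep: */
env->slots[i] = kzalloc(struct_size(env->slots[i], module_info,
				    max_module_count), GFP_KERNEL);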
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index ec6564e5d2ee..a197e3ae069c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -9,49 +9,60 @@
struct ethtool_modinfo;
struct ethtool_eeprom;
-int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
- int off, int *temp);
+int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core,
+ u8 slot_index, int module, int off,
+ int *temp);
int mlxsw_env_get_module_info(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_modinfo *modinfo);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_modinfo *modinfo);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, int module,
- struct ethtool_eeprom *ee, u8 *data);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ int module, struct ethtool_eeprom *ee,
+ u8 *data);
int
-mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
int mlxsw_env_reset_module(struct net_device *netdev,
- struct mlxsw_core *mlxsw_core, u8 module,
- u32 *flags);
+ struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u32 *flags);
int
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
struct ethtool_module_power_mode_params *params,
struct netlink_ext_ack *extack);
int
-mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module,
enum ethtool_module_power_mode_policy policy,
struct netlink_ext_ack *extack);
int
-mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
- u64 *p_counter);
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module, u64 *p_counter);
-void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ u8 module);
-int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
+int mlxsw_env_init(struct mlxsw_core *core,
+ const struct mlxsw_bus_info *bus_info,
+ struct mlxsw_env **p_env);
void mlxsw_env_fini(struct mlxsw_env *env);
#endif
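Every core_env.h entry point now takes a slot_index, so callers must thread the port's slot through. A sketch of an ethtool callback doing so, where the port structure and its cached slot/module fields are illustrative (the spectrum driver keeps an equivalent per-port mapping):

/* Sketch: forward the port's slot and module to the new API. */
static int example_get_module_info(struct net_device *dev,
				   struct ethtool_modinfo *modinfo)
{
	struct example_port *port = netdev_priv(dev);	/* hypothetical */

	return mlxsw_env_get_module_info(dev, port->mlxsw_core,
					 port->slot_index, port->module,
					 modinfo);
}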
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 8b170ad92302..70735068cf29 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -19,6 +19,7 @@
#define MLXSW_HWMON_ATTR_PER_SENSOR 3
#define MLXSW_HWMON_ATTR_PER_MODULE 7
#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+#define MLXSW_HWMON_DEV_NAME_LEN_MAX 16
#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
@@ -27,7 +28,7 @@
struct mlxsw_hwmon_attr {
struct device_attribute dev_attr;
- struct mlxsw_hwmon *hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev;
unsigned int type_index;
char name[32];
};
@@ -40,9 +41,9 @@ static int mlxsw_hwmon_get_attr_index(int index, int count)
return index;
}
-struct mlxsw_hwmon {
- struct mlxsw_core *core;
- const struct mlxsw_bus_info *bus_info;
+struct mlxsw_hwmon_dev {
+ char name[MLXSW_HWMON_DEV_NAME_LEN_MAX];
+ struct mlxsw_hwmon *hwmon;
struct device *hwmon_dev;
struct attribute_group group;
const struct attribute_group *groups[2];
@@ -51,6 +52,14 @@ struct mlxsw_hwmon {
unsigned int attrs_count;
u8 sensor_count;
u8 module_sensor_max;
+ u8 slot_index;
+ bool active;
+};
+
+struct mlxsw_hwmon {
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ struct mlxsw_hwmon_dev line_cards[];
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -59,14 +68,16 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp, index;
int err;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -82,14 +93,16 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp_max, index;
int err;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
@@ -105,8 +118,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
- char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned long val;
int index;
int err;
@@ -118,8 +132,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
return -EINVAL;
index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_max);
+ mlxsw_hwmon_dev->module_sensor_max);
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl, mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -140,7 +155,8 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsm_pl[MLXSW_REG_MFSM_LEN];
int err;
@@ -159,7 +175,8 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char fore_pl[MLXSW_REG_FORE_LEN];
bool fault;
int err;
@@ -180,7 +197,8 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
int err;
@@ -200,7 +218,8 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
unsigned long val;
int err;
@@ -226,14 +245,16 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
- false, false);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false,
+ false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(dev, "Failed to query module temperature\n");
@@ -263,15 +284,16 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
u8 module, fault;
u16 temp;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index,
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
if (err) {
dev_err(dev, "Failed to query module temperature sensor\n");
@@ -305,13 +327,16 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_WARN, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_WARN,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -339,13 +364,16 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
u8 module;
int err;
- module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_ALARM, p_temp);
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core,
+ mlxsw_hwmon_dev->slot_index,
+ module, SFP_TEMP_HIGH_ALARM,
+ p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
@@ -387,9 +415,9 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev;
int index = mlxsw_hwmon_attr->type_index -
- mlxsw_hwmon->module_sensor_max + 1;
+ mlxsw_hwmon_dev->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
}
@@ -458,14 +486,15 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
};
-static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
+static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev,
enum mlxsw_hwmon_attr_type attr_type,
- unsigned int type_index, unsigned int num) {
+ unsigned int type_index, unsigned int num)
+{
struct mlxsw_hwmon_attr *mlxsw_hwmon_attr;
unsigned int attr_index;
- attr_index = mlxsw_hwmon->attrs_count;
- mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index];
+ attr_index = mlxsw_hwmon_dev->attrs_count;
+ mlxsw_hwmon_attr = &mlxsw_hwmon_dev->hwmon_attrs[attr_index];
switch (attr_type) {
case MLXSW_HWMON_ATTR_TYPE_TEMP:
@@ -565,16 +594,17 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
}
mlxsw_hwmon_attr->type_index = type_index;
- mlxsw_hwmon_attr->hwmon = mlxsw_hwmon;
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev = mlxsw_hwmon_dev;
mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name;
sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr);
- mlxsw_hwmon->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
- mlxsw_hwmon->attrs_count++;
+ mlxsw_hwmon_dev->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr;
+ mlxsw_hwmon_dev->attrs_count++;
}
-static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
int i;
int err;
@@ -584,10 +614,12 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n");
return err;
}
- mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
- for (i = 0; i < mlxsw_hwmon->sensor_count; i++) {
+ mlxsw_hwmon_dev->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
+ for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) {
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ mlxsw_reg_mtmp_slot_index_set(mtmp_pl,
+ mlxsw_hwmon_dev->slot_index);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp),
mtmp_pl);
@@ -602,18 +634,19 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
i);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i);
}
return 0;
}
-static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0};
enum mlxsw_reg_mfcr_pwm_frequency freq;
unsigned int type_index;
@@ -631,10 +664,10 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) {
if (tacho_active & BIT(type_index)) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
type_index, num);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_FAN_FAULT,
type_index, num++);
}
@@ -642,54 +675,55 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
num = 0;
for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) {
if (pwm_active & BIT(type_index))
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_PWM,
type_index, num++);
}
return 0;
}
-static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
u8 module_sensor_max;
int i, err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &module_sensor_max);
+ &module_sensor_max, NULL);
/* Add extra attributes for module temperature. Sensor indexes start
* at sensor_count, since all indexes below sensor_count are already
* utilized by the sensors connected through the mtmp register by
* mlxsw_hwmon_temp_init().
*/
- mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count +
- module_sensor_max;
- for (i = mlxsw_hwmon->sensor_count;
- i < mlxsw_hwmon->module_sensor_max; i++) {
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_dev->module_sensor_max = mlxsw_hwmon_dev->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon_dev->sensor_count;
+ i < mlxsw_hwmon_dev->module_sensor_max; i++) {
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
i, i);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
i, i);
}
@@ -697,8 +731,9 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
-static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev)
{
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon;
enum mlxsw_reg_mgpir_device_type device_type;
int index, max_index, sensor_index;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
@@ -706,22 +741,24 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
u8 gbox_num;
int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL,
+ NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- index = mlxsw_hwmon->module_sensor_max;
- max_index = mlxsw_hwmon->module_sensor_max + gbox_num;
+ index = mlxsw_hwmon_dev->module_sensor_max;
+ max_index = mlxsw_hwmon_dev->module_sensor_max + gbox_num;
while (index < max_index) {
- sensor_index = index % mlxsw_hwmon->module_sensor_max +
+ sensor_index = index % mlxsw_hwmon_dev->module_sensor_max +
MLXSW_REG_MTMP_GBOX_INDEX_MIN;
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
+ mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index,
+ sensor_index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -729,15 +766,15 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
sensor_index);
return err;
}
- mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP,
- index, index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
+ MLXSW_HWMON_ATTR_TYPE_TEMP, index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index,
index);
- mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ mlxsw_hwmon_attr_add(mlxsw_hwmon_dev,
MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
index, index);
index++;
@@ -746,51 +783,144 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
return 0;
}
+static void
+mlxsw_hwmon_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+ struct device *dev;
+ int err;
+
+ dev = hwmon->bus_info->dev;
+ linecard = &hwmon->line_cards[slot_index];
+ if (linecard->active)
+ return;
+ /* For the main board, module sensor indexes start from 1; sensor
+ * index 0 is used for the ASIC. Use the same numbering for line
+ * cards.
+ */
+ linecard->sensor_count = 1;
+ linecard->slot_index = slot_index;
+ linecard->hwmon = hwmon;
+ err = mlxsw_hwmon_module_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_hwmon_gearbox_init(linecard);
+ if (err) {
+ dev_err(dev, "Failed to configure hwmon objects for line card gearboxes in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->groups[0] = &linecard->group;
+ linecard->group.attrs = linecard->attrs;
+ sprintf(linecard->name, "%s#%02u", "linecard", slot_index);
+ linecard->hwmon_dev =
+ hwmon_device_register_with_groups(dev, linecard->name,
+ linecard, linecard->groups);
+ if (IS_ERR(linecard->hwmon_dev)) {
+ dev_err(dev, "Failed to register hwmon objects for line card in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ linecard->active = true;
+}
+
+static void
+mlxsw_hwmon_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_hwmon *hwmon = priv;
+ struct mlxsw_hwmon_dev *linecard;
+
+ linecard = &hwmon->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ hwmon_device_unregister(linecard->hwmon_dev);
+ /* Reset attributes counter */
+ linecard->attrs_count = 0;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_hwmon_event_ops = {
+ .got_active = mlxsw_hwmon_got_active,
+ .got_inactive = mlxsw_hwmon_got_inactive,
+};
+
int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct mlxsw_hwmon **p_hwmon)
{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_hwmon *mlxsw_hwmon;
struct device *hwmon_dev;
+ u8 num_of_slots;
int err;
- mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
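+ /* Allocate one mlxsw_hwmon_dev per line card slot, plus entry 0,
+ * which represents the main board.
+ */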
+ mlxsw_hwmon = kzalloc(struct_size(mlxsw_hwmon, line_cards,
+ num_of_slots + 1), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
+
mlxsw_hwmon->core = mlxsw_core;
mlxsw_hwmon->bus_info = mlxsw_bus_info;
+ mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon;
+ mlxsw_hwmon->line_cards[0].slot_index = 0;
- err = mlxsw_hwmon_temp_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_init;
- err = mlxsw_hwmon_fans_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_fans_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_fans_init;
- err = mlxsw_hwmon_module_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_module_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_module_init;
- err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon);
+ err = mlxsw_hwmon_gearbox_init(&mlxsw_hwmon->line_cards[0]);
if (err)
goto err_temp_gearbox_init;
- mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
- mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
+ mlxsw_hwmon->line_cards[0].groups[0] = &mlxsw_hwmon->line_cards[0].group;
+ mlxsw_hwmon->line_cards[0].group.attrs = mlxsw_hwmon->line_cards[0].attrs;
hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw", mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ "mlxsw",
+ &mlxsw_hwmon->line_cards[0],
+ mlxsw_hwmon->line_cards[0].groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
}
- mlxsw_hwmon->hwmon_dev = hwmon_dev;
+ err = mlxsw_linecards_event_ops_register(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops,
+ mlxsw_hwmon);
+ if (err)
+ goto err_linecards_event_ops_register;
+
+ mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev;
+ mlxsw_hwmon->line_cards[0].active = true;
*p_hwmon = mlxsw_hwmon;
return 0;
+err_linecards_event_ops_register:
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
err_hwmon_register:
err_temp_gearbox_init:
err_temp_module_init:
@@ -802,6 +932,9 @@ err_temp_init:
void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
{
- hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ mlxsw_hwmon->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(mlxsw_hwmon->core,
+ &mlxsw_hwmon_event_ops, mlxsw_hwmon);
+ hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev);
kfree(mlxsw_hwmon);
}
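
core_hwmon.c above is a consumer of the line-card notification API that the new core_linecards.c below provides. A minimal sketch of that consumer pattern, assuming only mlxsw_linecards_event_ops_register()/_unregister() from this patch (the foo_* names are placeholders):

static void foo_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
			   void *priv)
{
	/* A line card in this slot became active; create per-slot
	 * objects here. Also called at registration time for cards
	 * that are already active.
	 */
}

static void foo_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
			     void *priv)
{
	/* Tear down whatever foo_got_active() created for this slot. */
}

static struct mlxsw_linecards_event_ops foo_event_ops = {
	.got_active	= foo_got_active,
	.got_inactive	= foo_got_inactive,
};

/* priv is passed back to both callbacks: */
err = mlxsw_linecards_event_ops_register(mlxsw_core, &foo_event_ops, priv);
/* ... */
mlxsw_linecards_event_ops_unregister(mlxsw_core, &foo_event_ops, priv);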
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
new file mode 100644
index 000000000000..2abd31a62776
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -0,0 +1,1373 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+
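+/* A single record of a line-card INI bundle. The flat data[] view is
+ * what gets streamed to the device; the format view exposes the header
+ * fields used for type matching and version comparison.
+ */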
+struct mlxsw_linecard_ini_file {
+ __le16 size;
+ union {
+ u8 data[0];
+ struct {
+ __be16 hw_revision;
+ __be16 ini_version;
+ u8 __dontcare[3];
+ u8 type;
+ u8 name[20];
+ } format;
+ };
+};
+
+struct mlxsw_linecard_types_info {
+ struct mlxsw_linecard_ini_file **ini_files;
+ unsigned int count;
+ size_t data_size;
+ char *data;
+};
+
+#define MLXSW_LINECARD_STATUS_EVENT_TO (10 * MSEC_PER_SEC)
+
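+/* Provisioning and unprovisioning are asynchronous: after the operation
+ * is issued, a matching status event is expected within
+ * MLXSW_LINECARD_STATUS_EVENT_TO, otherwise the delayed work below
+ * declares the operation failed.
+ */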
+static void
+mlxsw_linecard_status_event_to_schedule(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ linecard->status_event_type_to = status_event_type;
+ mlxsw_core_schedule_dw(&linecard->status_event_to_dw,
+ msecs_to_jiffies(MLXSW_LINECARD_STATUS_EVENT_TO));
+}
+
+static void
+mlxsw_linecard_status_event_done(struct mlxsw_linecard *linecard,
+ enum mlxsw_linecard_status_event_type status_event_type)
+{
+ if (linecard->status_event_type_to == status_event_type)
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+}
+
+static const char *
+mlxsw_linecard_types_lookup(struct mlxsw_linecards *linecards, u8 card_type)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ int i;
+
+ types_info = linecards->types_info;
+ if (!types_info)
+ return NULL;
+ for (i = 0; i < types_info->count; i++) {
+ ini_file = linecards->types_info->ini_files[i];
+ if (ini_file->format.type == card_type)
+ return ini_file->format.name;
+ }
+ return NULL;
+}
+
+static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_name_pack(mddq_pl, linecard->slot_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return ERR_PTR(err);
+ mlxsw_reg_mddq_slot_name_unpack(mddq_pl, linecard->name);
+ return linecard->name;
+}
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+};
+
+struct mlxsw_linecard_device {
+ struct list_head list;
+ u8 index;
+ struct mlxsw_linecard *linecard;
+ struct devlink_linecard_device *devlink_device;
+ struct mlxsw_linecard_device_info info;
+};
+
+static struct mlxsw_linecard_device *
+mlxsw_linecard_device_lookup(struct mlxsw_linecard *linecard, u8 index)
+{
+ struct mlxsw_linecard_device *device;
+
+ list_for_each_entry(device, &linecard->device_list, list)
+ if (device->index == index)
+ return device;
+ return NULL;
+}
+
+static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ u8 device_index, bool flash_owner)
+{
+ struct mlxsw_linecard_device *device;
+ int err;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+ device->index = device_index;
+ device->linecard = linecard;
+
+ device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard,
+ device_index, device);
+ if (IS_ERR(device->devlink_device)) {
+ err = PTR_ERR(device->devlink_device);
+ goto err_devlink_linecard_device_attach;
+ }
+
+ list_add_tail(&device->list, &linecard->device_list);
+ return 0;
+
+err_devlink_linecard_device_attach:
+ kfree(device);
+ return err;
+}
+
+static void mlxsw_linecard_device_detach(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct mlxsw_linecard_device *device)
+{
+ list_del(&device->list);
+ devlink_linecard_device_destroy(linecard->devlink_linecard,
+ device->devlink_device);
+ kfree(device);
+}
+
+static void mlxsw_linecard_devices_detach(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &linecard->device_list, list)
+ mlxsw_linecard_device_detach(mlxsw_core, linecard, device);
+}
+
+static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ u8 msg_seq = 0;
+ int err;
+
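+ /* MDDQ device_info is iterated via msg_seq: each query passes the
+ * sequence number returned by the previous one, starting from 0; a
+ * returned msg_seq of 0 indicates the last device was reached.
+ */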
+ do {
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index, NULL,
+ NULL, NULL);
+ if (!data_valid)
+ break;
+ err = mlxsw_linecard_device_attach(mlxsw_core, linecard,
+ device_index, flash_owner);
+ if (err)
+ goto rollback;
+ } while (msg_seq);
+
+ return 0;
+
+rollback:
+ mlxsw_linecard_devices_detach(linecard);
+ return err;
+}
+
+static void mlxsw_linecard_device_update(struct mlxsw_linecard *linecard,
+ u8 device_index,
+ struct mlxsw_linecard_device_info *info)
+{
+ struct mlxsw_linecard_device *device;
+
+ device = mlxsw_linecard_device_lookup(linecard, device_index);
+ if (!device)
+ return;
+ device->info = *info;
+}
+
+static int mlxsw_linecard_devices_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, NULL,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ mlxsw_linecard_device_update(linecard, device_index, &info);
+ } while (msg_seq);
+
+ return 0;
+}
+
+static int
+mlxsw_linecard_device_info_get(struct devlink_linecard_device *devlink_linecard_device,
+ void *priv, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_device *device = priv;
+ struct mlxsw_linecard_device_info *info;
+ struct mlxsw_linecard *linecard;
+ char buf[32];
+
+ linecard = device->linecard;
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ mutex_unlock(&linecard->lock);
+ return 0;
+ }
+
+ info = &device->info;
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ mutex_unlock(&linecard->lock);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
+{
+ linecard->provisioned = false;
+ linecard->ready = false;
+ linecard->active = false;
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_provision_fail(linecard->devlink_linecard);
+}
+
+struct mlxsw_linecards_event_ops_item {
+ struct list_head list;
+ const struct mlxsw_linecards_event_ops *event_ops;
+ void *priv;
+};
+
+static void
+mlxsw_linecard_event_op_call(struct mlxsw_linecard *linecard,
+ mlxsw_linecards_event_op_t *op, void *priv)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+
+ if (!op)
+ return;
+ op(mlxsw_core, linecard->slot_index, priv);
+}
+
+static void
+mlxsw_linecard_active_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecard_inactive_ops_call(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ struct mlxsw_linecards_event_ops_item *item;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry(item, &linecards->event_ops_list, list)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecards->event_ops_list_lock);
+}
+
+static void
+mlxsw_linecards_event_ops_register_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_active,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+static void
+mlxsw_linecards_event_ops_unregister_call(struct mlxsw_linecards *linecards,
+ const struct mlxsw_linecards_event_ops_item *item)
+{
+ struct mlxsw_linecard *linecard;
+ int i;
+
+ for (i = 0; i < linecards->count; i++) {
+ linecard = mlxsw_linecard_get(linecards, i + 1);
+ mutex_lock(&linecard->lock);
+ if (linecard->active)
+ mlxsw_linecard_event_op_call(linecard,
+ item->event_ops->got_inactive,
+ item->priv);
+ mutex_unlock(&linecard->lock);
+ }
+}
+
+int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item;
+
+ if (!linecards)
+ return 0;
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->event_ops = ops;
+ item->priv = priv;
+
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_add_tail(&item->list, &linecards->event_ops_list);
+ mutex_unlock(&linecards->event_ops_list_lock);
+ mlxsw_linecards_event_ops_register_call(linecards, item);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_register);
+
+void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards_event_ops *ops,
+ void *priv)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ struct mlxsw_linecards_event_ops_item *item, *tmp;
+ bool found = false;
+
+ if (!linecards)
+ return;
+ mutex_lock(&linecards->event_ops_list_lock);
+ list_for_each_entry_safe(item, tmp, &linecards->event_ops_list, list) {
+ if (item->event_ops == ops && item->priv == priv) {
+ list_del(&item->list);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&linecards->event_ops_list_lock);
+
+ if (!found)
+ return;
+ mlxsw_linecards_event_ops_unregister_call(linecards, item);
+ kfree(item);
+}
+EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
+
+static int
+mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
+ u16 hw_revision, u16 ini_version)
+{
+ struct mlxsw_linecards *linecards = linecard->linecards;
+ const char *type;
+ int err;
+
+ type = mlxsw_linecard_types_lookup(linecards, card_type);
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ if (!type) {
+ /* It is possible for a line card to be provisioned before
+ * driver initialization. Due to a missing INI bundle file
+ * or an outdated one, the queried card's type might not
+ * be recognized by the driver. In this case, try to query
+ * the card's name from the device.
+ */
+ type = mlxsw_linecard_type_name(linecard);
+ if (IS_ERR(type)) {
+ mlxsw_linecard_provision_fail(linecard);
+ return PTR_ERR(type);
+ }
+ }
+ err = mlxsw_linecard_devices_attach(linecard);
+ if (err) {
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+ linecard->provisioned = true;
+ linecard->hw_revision = hw_revision;
+ linecard->ini_version = ini_version;
+ devlink_linecard_provision_set(linecard->devlink_linecard, type);
+ return 0;
+}
+
+static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_status_event_done(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ linecard->provisioned = false;
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_provision_clear(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = true;
+ return 0;
+}
+
+static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddc_pl[MLXSW_REG_MDDC_LEN];
+ int err;
+
+ mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ if (err)
+ return err;
+ linecard->ready = false;
+ return 0;
+}
+
+static int mlxsw_linecard_active_set(struct mlxsw_linecard *linecard)
+{
+ int err;
+
+ err = mlxsw_linecard_devices_update(linecard);
+ if (err)
+ return err;
+
+ mlxsw_linecard_active_ops_call(linecard);
+ linecard->active = true;
+ devlink_linecard_activate(linecard->devlink_linecard);
+ return 0;
+}
+
+static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard)
+{
+ mlxsw_linecard_inactive_ops_call(linecard);
+ linecard->active = false;
+ devlink_linecard_deactivate(linecard->devlink_linecard);
+}
+
+static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard,
+ const char *mddq_pl)
+{
+ enum mlxsw_reg_mddq_slot_info_ready ready;
+ bool provisioned, sr_valid, active;
+ u16 ini_version, hw_revision;
+ u8 slot_index, card_type;
+ int err = 0;
+
+ mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot_index, &provisioned,
+ &sr_valid, &ready, &active,
+ &hw_revision, &ini_version,
+ &card_type);
+
+ if (linecard) {
+ if (WARN_ON(slot_index != linecard->slot_index))
+ return -EINVAL;
+ } else {
+ if (WARN_ON(slot_index > linecards->count))
+ return -EINVAL;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ }
+
+ mutex_lock(&linecard->lock);
+
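+ /* Transitions are applied in dependency order: provisioned, then
+ * ready, then active on the way up, and cleared in the reverse
+ * order on the way down.
+ */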
+ if (provisioned && linecard->provisioned != provisioned) {
+ err = mlxsw_linecard_provision_set(linecard, card_type,
+ hw_revision, ini_version);
+ if (err)
+ goto out;
+ }
+
+ if (ready == MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && !linecard->ready) {
+ err = mlxsw_linecard_ready_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (active && linecard->active != active) {
+ err = mlxsw_linecard_active_set(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!active && linecard->active != active)
+ mlxsw_linecard_active_clear(linecard);
+
+ if (ready != MLXSW_REG_MDDQ_SLOT_INFO_READY_READY &&
+ linecard->ready) {
+ err = mlxsw_linecard_ready_clear(linecard);
+ if (err)
+ goto out;
+ }
+
+ if (!provisioned && linecard->provisioned != provisioned)
+ mlxsw_linecard_provision_clear(linecard);
+
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard *linecard)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ int err;
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, false);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+
+ return mlxsw_linecard_status_process(linecards, linecard, mddq_pl);
+}
+
+static const char * const mlxsw_linecard_status_event_type_name[] = {
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision",
+ [MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision",
+};
+
+static void mlxsw_linecard_status_event_to_work(struct work_struct *work)
+{
+ struct mlxsw_linecard *linecard =
+ container_of(work, struct mlxsw_linecard,
+ status_event_to_dw.work);
+
+ mutex_lock(&linecard->lock);
+ dev_err(linecard->linecards->bus_info->dev, "linecard %u: Timeout reached waiting on %s status event",
+ linecard->slot_index,
+ mlxsw_linecard_status_event_type_name[linecard->status_event_type_to]);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int __mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard)
+{
+ dev_info(linecard->linecards->bus_info->dev, "linecard %u: Clearing FSM state error",
+ linecard->slot_index);
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS, false);
+ return mlxsw_reg_write(linecard->linecards->mlxsw_core,
+ MLXSW_REG(mbct), linecard->mbct_pl);
+}
+
+static int mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_fsm_state fsm_state)
+{
+ if (fsm_state != MLXSW_REG_MBCT_FSM_STATE_ERROR)
+ return 0;
+ return __mlxsw_linecard_fix_fsm_state(linecard);
+}
+
+static int
+mlxsw_linecard_query_ini_status(struct mlxsw_linecard *linecard,
+ enum mlxsw_reg_mbct_status *status,
+ enum mlxsw_reg_mbct_fsm_state *fsm_state,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS, false);
+ err = mlxsw_reg_query(linecard->linecards->mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query linecard INI status");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, status, fsm_state);
+ return err;
+}
+
+static int
+mlxsw_linecard_ini_transfer(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ const struct mlxsw_linecard_ini_file *ini_file,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ size_t size_left;
+ const u8 *data;
+ int err;
+
+ size_left = le16_to_cpu(ini_file->size);
+ data = ini_file->data;
+ while (size_left) {
+ size_t data_size = MLXSW_REG_MBCT_DATA_LEN;
+ bool is_last = false;
+
+ if (size_left <= MLXSW_REG_MBCT_DATA_LEN) {
+ data_size = size_left;
+ is_last = true;
+ }
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, false);
+ mlxsw_reg_mbct_dt_pack(linecard->mbct_pl, data_size,
+ is_last, data);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI data transfer");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL,
+ &status, &fsm_state);
+ if ((!is_last && status != MLXSW_REG_MBCT_STATUS_PART_DATA) ||
+ (is_last && status != MLXSW_REG_MBCT_STATUS_LAST_DATA)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer linecard INI data");
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+ }
+ size_left -= data_size;
+ data += data_size;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_linecard_ini_erase(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct),
+ linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI erase");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ switch (status) {
+ case MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE:
+ break;
+ default:
+ /* Should not happen */
+ fallthrough;
+ case MLXSW_REG_MBCT_STATUS_ERASE_FAILED:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI");
+ goto fix_fsm_err_out;
+ case MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE:
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI while being used");
+ goto fix_fsm_err_out;
+ }
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+static void mlxsw_linecard_bct_process(struct mlxsw_core *mlxsw_core,
+ const char *mbct_pl)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ struct mlxsw_linecard *linecard;
+ u8 slot_index;
+
+ mlxsw_reg_mbct_unpack(mbct_pl, &slot_index, &status, &fsm_state);
+ if (WARN_ON(slot_index > linecards->count))
+ return;
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mutex_lock(&linecard->lock);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ dev_err(linecards->bus_info->dev, "linecard %u: Failed to activate INI",
+ linecard->slot_index);
+ goto fix_fsm_out;
+ }
+ mutex_unlock(&linecard->lock);
+ return;
+
+fix_fsm_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ mlxsw_linecard_provision_fail(linecard);
+ mutex_unlock(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_ini_activate(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ int err;
+
+ mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index,
+ MLXSW_REG_MBCT_OP_ACTIVATE, true);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), linecard->mbct_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI activation");
+ return err;
+ }
+ mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state);
+ if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate linecard INI");
+ goto fix_fsm_err_out;
+ }
+
+ return 0;
+
+fix_fsm_err_out:
+ mlxsw_linecard_fix_fsm_state(linecard, fsm_state);
+ return -EINVAL;
+}
+
+#define MLXSW_LINECARD_INI_WAIT_RETRIES 10
+#define MLXSW_LINECARD_INI_WAIT_MS 500
+
+static int
+mlxsw_linecard_ini_in_use_wait(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_reg_mbct_fsm_state fsm_state;
+ enum mlxsw_reg_mbct_status status;
+ unsigned int ini_wait_retries = 0;
+ int err;
+
+query_ini_status:
+ err = mlxsw_linecard_query_ini_status(linecard, &status,
+ &fsm_state, extack);
+ if (err)
+ return err;
+
+ switch (fsm_state) {
+ case MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE:
+ if (ini_wait_retries++ > MLXSW_LINECARD_INI_WAIT_RETRIES) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to wait for linecard INI to be unused");
+ return -EINVAL;
+ }
+ mdelay(MLXSW_LINECARD_INI_WAIT_MS);
+ goto query_ini_status;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool mlxsw_linecard_port_selector(void *priv, u16 local_port)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+ return linecard == mlxsw_core_port_linecard_get(mlxsw_core, local_port);
+}
+
+static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ err = mlxsw_linecard_ini_transfer(mlxsw_core, linecard,
+ ini_file, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION);
+ err = mlxsw_linecard_ini_activate(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard,
+ void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ struct mlxsw_core *mlxsw_core;
+ int err;
+
+ mutex_lock(&linecard->lock);
+
+ mlxsw_core = linecard->linecards->mlxsw_core;
+
+ mlxsw_core_ports_remove_selected(mlxsw_core,
+ mlxsw_linecard_port_selector,
+ linecard);
+
+ err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ mlxsw_linecard_status_event_to_schedule(linecard,
+ MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack);
+ if (err)
+ goto err_out;
+
+ goto out;
+
+err_out:
+ mlxsw_linecard_provision_fail(linecard);
+out:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static bool mlxsw_linecard_same_provision(struct devlink_linecard *devlink_linecard,
+ void *priv, const char *type,
+ const void *type_priv)
+{
+ const struct mlxsw_linecard_ini_file *ini_file = type_priv;
+ struct mlxsw_linecard *linecard = priv;
+ bool ret;
+
+ mutex_lock(&linecard->lock);
+ ret = linecard->hw_revision == be16_to_cpu(ini_file->format.hw_revision) &&
+ linecard->ini_version == be16_to_cpu(ini_file->format.ini_version);
+ mutex_unlock(&linecard->lock);
+ return ret;
+}
+
+static unsigned int
+mlxsw_linecard_types_count(struct devlink_linecard *devlink_linecard,
+ void *priv)
+{
+ struct mlxsw_linecard *linecard = priv;
+
+ return linecard->linecards->types_info ?
+ linecard->linecards->types_info->count : 0;
+}
+
+static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard,
+ void *priv, unsigned int index,
+ const char **type, const void **type_priv)
+{
+ struct mlxsw_linecard_types_info *types_info;
+ struct mlxsw_linecard_ini_file *ini_file;
+ struct mlxsw_linecard *linecard = priv;
+
+ types_info = linecard->linecards->types_info;
+ if (WARN_ON_ONCE(!types_info))
+ return;
+ ini_file = types_info->ini_files[index];
+ *type = ini_file->format.name;
+ *type_priv = ini_file;
+}
+
+static int
+mlxsw_linecard_info_get(struct devlink_linecard *devlink_linecard, void *priv,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard *linecard = priv;
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->provisioned) {
+ err = 0;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static const struct devlink_linecard_ops mlxsw_linecard_ops = {
+ .provision = mlxsw_linecard_provision,
+ .unprovision = mlxsw_linecard_unprovision,
+ .same_provision = mlxsw_linecard_same_provision,
+ .types_count = mlxsw_linecard_types_count,
+ .types_get = mlxsw_linecard_types_get,
+ .info_get = mlxsw_linecard_info_get,
+ .device_info_get = mlxsw_linecard_device_info_get,
+};
+
+struct mlxsw_linecard_status_event {
+ struct mlxsw_core *mlxsw_core;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_status_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_linecards *linecards;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_status_event, work);
+ mlxsw_core = event->mlxsw_core;
+ linecards = mlxsw_core_linecards(mlxsw_core);
+ mlxsw_linecard_status_process(linecards, NULL, event->mddq_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_status_listener_func(const struct mlxsw_reg_info *reg,
+ char *mddq_pl, void *priv)
+{
+ struct mlxsw_linecard_status_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mddq_pl, mddq_pl, sizeof(event->mddq_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_status_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+struct mlxsw_linecard_bct_event {
+ struct mlxsw_core *mlxsw_core;
+ char mbct_pl[MLXSW_REG_MBCT_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_linecard_bct_event_work(struct work_struct *work)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_linecard_bct_event, work);
+ mlxsw_core = event->mlxsw_core;
+ mlxsw_linecard_bct_process(mlxsw_core, event->mbct_pl);
+ kfree(event);
+}
+
+static void
+mlxsw_linecard_bct_listener_func(const struct mlxsw_reg_info *reg,
+ char *mbct_pl, void *priv)
+{
+ struct mlxsw_linecard_bct_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mbct_pl, mbct_pl, sizeof(event->mbct_pl));
+ INIT_WORK(&event->work, mlxsw_linecard_bct_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_linecard_listener[] = {
+ MLXSW_CORE_EVENTL(mlxsw_linecard_status_listener_func, DSDSC),
+ MLXSW_CORE_EVENTL(mlxsw_linecard_bct_listener_func, BCTOE),
+};
+
+static int mlxsw_linecard_event_delivery_set(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecard *linecard,
+ bool enable)
+{
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+
+ mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+}
+
+static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct devlink_linecard *devlink_linecard;
+ struct mlxsw_linecard *linecard;
+ int err;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ linecard->slot_index = slot_index;
+ linecard->linecards = linecards;
+ mutex_init(&linecard->lock);
+ INIT_LIST_HEAD(&linecard->device_list);
+
+ devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
+ slot_index, &mlxsw_linecard_ops,
+ linecard);
+ if (IS_ERR(devlink_linecard)) {
+ err = PTR_ERR(devlink_linecard);
+ goto err_devlink_linecard_create;
+ }
+ linecard->devlink_linecard = devlink_linecard;
+ INIT_DELAYED_WORK(&linecard->status_event_to_dw,
+ &mlxsw_linecard_status_event_to_work);
+
+ err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
+ if (err)
+ goto err_event_delivery_set;
+
+ err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ if (err)
+ goto err_status_get_and_process;
+
+ return 0;
+
+err_status_get_and_process:
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+err_event_delivery_set:
+ devlink_linecard_destroy(linecard->devlink_linecard);
+err_devlink_linecard_create:
+ mutex_destroy(&linecard->lock);
+ return err;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ /* Make sure all scheduled events are processed */
+ mlxsw_core_flush_owq();
+ if (linecard->active)
+ mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_devices_detach(linecard);
+ devlink_linecard_destroy(linecard->devlink_linecard);
+ mutex_destroy(&linecard->lock);
+}
+
+/* LINECARDS INI BUNDLE FILE
+ * +----------------------------------+
+ * | MAGIC ("NVLCINI+") |
+ * +----------------------------------+ +--------------------+
+ * | INI 0 +---> | __le16 size |
+ * +----------------------------------+ | __be16 hw_revision |
+ * | INI 1 | | __be16 ini_version |
+ * +----------------------------------+ | u8 __dontcare[3] |
+ * | ... | | u8 type |
+ * +----------------------------------+ | u8 name[20] |
+ * | INI N | | ... |
+ * +----------------------------------+ +--------------------+
+ */
+
+#define MLXSW_LINECARDS_INI_BUNDLE_MAGIC "NVLCINI+"
+
+static int
+mlxsw_linecard_types_file_validate(struct mlxsw_linecards *linecards,
+ struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ struct mlxsw_linecard_ini_file *ini_file;
+ size_t size = types_info->data_size;
+ const u8 *data = types_info->data;
+ unsigned int count = 0;
+ u16 ini_file_size;
+
+ if (size < magic_size) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file size, smaller than magic size\n");
+ return -EINVAL;
+ }
+ if (memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size)) {
+ dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file magic pattern\n");
+ return -EINVAL;
+ }
+
+ data += magic_size;
+ size -= magic_size;
+
+ while (size > 0) {
+ if (size < sizeof(*ini_file)) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI which is smaller than bare minimum\n");
+ return -EINVAL;
+ }
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ if (ini_file_size + sizeof(__le16) > size) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file appears to be truncated\n");
+ return -EINVAL;
+ }
+ if (ini_file_size % 4) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI with invalid size\n");
+ return -EINVAL;
+ }
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+ if (!count) {
+ dev_warn(linecards->bus_info->dev, "Linecards INIs file does not contain any INI\n");
+ return -EINVAL;
+ }
+ types_info->count = count;
+ return 0;
+}
+
+static void
+mlxsw_linecard_types_file_parse(struct mlxsw_linecard_types_info *types_info)
+{
+ size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC);
+ size_t size = types_info->data_size - magic_size;
+ const u8 *data = types_info->data + magic_size;
+ struct mlxsw_linecard_ini_file *ini_file;
+ unsigned int count = 0;
+ u16 ini_file_size;
+ int i;
+
+ while (size) {
+ ini_file = (struct mlxsw_linecard_ini_file *) data;
+ ini_file_size = le16_to_cpu(ini_file->size);
+ for (i = 0; i < ini_file_size / 4; i++) {
+ u32 *val = &((u32 *) ini_file->data)[i];
+
+ *val = swab32(*val);
+ }
+ types_info->ini_files[count] = ini_file;
+ data += ini_file_size + sizeof(__le16);
+ size -= ini_file_size + sizeof(__le16);
+ count++;
+ }
+}
+
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT \
+ "mellanox/lc_ini_bundle_%u_%u.bin"
+#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN \
+ (sizeof(MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT) + 4)
+
+static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards)
+{
+ const struct mlxsw_fw_rev *rev = &linecards->bus_info->fw_rev;
+ char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN];
+ struct mlxsw_linecard_types_info *types_info;
+ const struct firmware *firmware;
+ int err;
+
+ err = snprintf(filename, sizeof(filename),
+ MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT,
+ rev->minor, rev->subminor);
+ WARN_ON(err >= sizeof(filename));
+
+ err = request_firmware_direct(&firmware, filename,
+ linecards->bus_info->dev);
+ if (err) {
+ dev_warn(linecards->bus_info->dev, "Could not request linecards INI file \"%s\", provisioning will not be possible\n",
+ filename);
+ return 0;
+ }
+
+ types_info = kzalloc(sizeof(*types_info), GFP_KERNEL);
+ if (!types_info) {
+ release_firmware(firmware);
+ return -ENOMEM;
+ }
+ linecards->types_info = types_info;
+
+ types_info->data_size = firmware->size;
+ types_info->data = vmalloc(types_info->data_size);
+ if (!types_info->data) {
+ err = -ENOMEM;
+ release_firmware(firmware);
+ goto err_data_alloc;
+ }
+ memcpy(types_info->data, firmware->data, types_info->data_size);
+ release_firmware(firmware);
+
+ err = mlxsw_linecard_types_file_validate(linecards, types_info);
+ if (err) {
+ err = 0;
+ goto err_type_file_file_validate;
+ }
+
+ types_info->ini_files = kmalloc_array(types_info->count,
+ sizeof(struct mlxsw_linecard_ini_file *),
+ GFP_KERNEL);
+ if (!types_info->ini_files) {
+ err = -ENOMEM;
+ goto err_ini_files_alloc;
+ }
+
+ mlxsw_linecard_types_file_parse(types_info);
+
+ return 0;
+
+err_ini_files_alloc:
+err_type_file_file_validate:
+ vfree(types_info->data);
+err_data_alloc:
+ kfree(types_info);
+ return err;
+}
+
+static void mlxsw_linecard_types_fini(struct mlxsw_linecards *linecards)
+{
+ struct mlxsw_linecard_types_info *types_info = linecards->types_info;
+
+ if (!types_info)
+ return;
+ kfree(types_info->ini_files);
+ vfree(types_info->data);
+ kfree(types_info);
+}
+
+int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *bus_info)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_linecards *linecards;
+ u8 slot_count;
+ int err;
+ int i;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ NULL, &slot_count);
+ if (!slot_count)
+ return 0;
+
+ linecards = vzalloc(struct_size(linecards, linecards, slot_count));
+ if (!linecards)
+ return -ENOMEM;
+ linecards->count = slot_count;
+ linecards->mlxsw_core = mlxsw_core;
+ linecards->bus_info = bus_info;
+ INIT_LIST_HEAD(&linecards->event_ops_list);
+ mutex_init(&linecards->event_ops_list_lock);
+
+ err = mlxsw_linecard_types_init(mlxsw_core, linecards);
+ if (err)
+ goto err_types_init;
+
+ err = mlxsw_core_traps_register(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ if (err)
+ goto err_traps_register;
+
+ mlxsw_core_linecards_set(mlxsw_core, linecards);
+
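+ /* Line card slot indexes are 1-based; slot index 0 denotes the main
+ * board, which is not a line card.
+ */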
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_init(mlxsw_core, linecards, i + 1);
+ if (err)
+ goto err_linecard_init;
+ }
+
+ return 0;
+
+err_linecard_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+err_traps_register:
+ mlxsw_linecard_types_fini(linecards);
+err_types_init:
+ vfree(linecards);
+ return err;
+}
+
+void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ if (!linecards)
+ return;
+ for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
+ ARRAY_SIZE(mlxsw_linecard_listener),
+ mlxsw_core);
+ mlxsw_linecard_types_fini(linecards);
+ mutex_destroy(&linecards->event_ops_list_lock);
+ WARN_ON(!list_empty(&linecards->event_ops_list));
+ vfree(linecards);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 05f54bd982c0..3548fe1df7c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -21,7 +21,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
-#define MLXSW_THERMAL_ZONE_MAX_NAME 16
#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MIN_STATE 2
@@ -82,6 +81,16 @@ struct mlxsw_thermal_module {
struct thermal_zone_device *tzdev;
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
int module; /* Module or gearbox number */
+ u8 slot_index;
+};
+
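+/* A thermal area groups the thermal zones of one slot: index 0 is the
+ * main board, higher indexes are line cards.
+ */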
+struct mlxsw_thermal_area {
+ struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
+ struct mlxsw_thermal_module *tz_gearbox_arr;
+ u8 tz_gearbox_num;
+ u8 slot_index;
+ bool active;
};
struct mlxsw_thermal {
@@ -92,12 +101,9 @@ struct mlxsw_thermal {
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
- struct mlxsw_thermal_module *tz_module_arr;
- u8 tz_module_num;
- struct mlxsw_thermal_module *tz_gearbox_arr;
- u8 tz_gearbox_num;
unsigned int tz_highest_score;
struct thermal_zone_device *tz_highest_dev;
+ struct mlxsw_thermal_area line_cards[];
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -123,8 +129,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
/* Allow mlxsw thermal zone binding to an external cooling device */
for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) {
- if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i],
- strlen(cdev->type)))
+ if (!strcmp(cdev->type, mlxsw_thermal_external_allowed_cdev[i]))
return 0;
}
@@ -150,13 +155,15 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
* EEPROM if we got valid thresholds from MTMP.
*/
if (!emerg_temp || !crit_temp) {
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_WARN,
&crit_temp);
if (err)
return err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index,
+ tz->module,
SFP_TEMP_HIGH_ALARM,
&emerg_temp);
if (err)
@@ -271,7 +278,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
int temp;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, 0, 0, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -423,15 +430,16 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
static void
mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core,
- u16 sensor_index, int *p_temp,
- int *p_crit_temp,
+ u8 slot_index, u16 sensor_index,
+ int *p_temp, int *p_crit_temp,
int *p_emerg_temp)
{
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int err;
/* Read module temperature and thresholds. */
- mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index,
+ false, false);
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
/* Set temperature and thresholds to zero to avoid passing
@@ -462,6 +470,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
/* Read module temperature and thresholds. */
mlxsw_thermal_module_temp_and_thresholds_get(thermal->core,
+ tz->slot_index,
sensor_index, &temp,
&crit_temp, &emerg_temp);
*p_temp = temp;
@@ -576,7 +585,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
int err;
index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module;
- mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+ mlxsw_reg_mtmp_pack(mtmp_pl, tz->slot_index, index, false, false);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
@@ -672,11 +681,15 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int err;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
- module_tz->module + 1);
+ if (module_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-module%d",
+ module_tz->slot_index, module_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ module_tz->module + 1);
module_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
@@ -704,25 +717,28 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
static int
mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal, u8 module)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
int dummy_temp, crit_temp, emerg_temp;
u16 sensor_index;
sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module;
- module_tz = &thermal->tz_module_arr[module];
+ module_tz = &area->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
return 0;
module_tz->module = module;
+ module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
memcpy(module_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
/* Initialize all trip point. */
mlxsw_thermal_module_trips_reset(module_tz);
/* Read module temperature and thresholds. */
- mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp,
+ mlxsw_thermal_module_temp_and_thresholds_get(core, area->slot_index,
+ sensor_index, &dummy_temp,
&crit_temp, &emerg_temp);
/* Update trip point according to the module data. */
return mlxsw_thermal_module_trips_update(dev, core, module_tz,
@@ -740,34 +756,39 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
static int
mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
- &thermal->tz_module_num);
+ &area->tz_module_num, NULL);
+
+ /* For a modular system, the module count can be zero. */
+ if (!area->tz_module_num)
+ return 0;
- thermal->tz_module_arr = kcalloc(thermal->tz_module_num,
- sizeof(*thermal->tz_module_arr),
- GFP_KERNEL);
- if (!thermal->tz_module_arr)
+ area->tz_module_arr = kcalloc(area->tz_module_num,
+ sizeof(*area->tz_module_arr),
+ GFP_KERNEL);
+ if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_module_num; i++) {
- err = mlxsw_thermal_module_init(dev, core, thermal, i);
+ for (i = 0; i < area->tz_module_num; i++) {
+ err = mlxsw_thermal_module_init(dev, core, thermal, area, i);
if (err)
goto err_thermal_module_init;
}
- for (i = 0; i < thermal->tz_module_num; i++) {
- module_tz = &thermal->tz_module_arr[i];
+ for (i = 0; i < area->tz_module_num; i++) {
+ module_tz = &area->tz_module_arr[i];
if (!module_tz->parent)
continue;
err = mlxsw_thermal_module_tz_init(module_tz);
@@ -779,30 +800,35 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
err_thermal_module_tz_init:
err_thermal_module_init:
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
return err;
}
static void
-mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- for (i = thermal->tz_module_num - 1; i >= 0; i--)
- mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
- kfree(thermal->tz_module_arr);
+ for (i = area->tz_module_num - 1; i >= 0; i--)
+ mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
+ kfree(area->tz_module_arr);
}
static int
mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
{
- char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+ char tz_name[THERMAL_NAME_LENGTH];
int ret;
- snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
- gearbox_tz->module + 1);
+ if (gearbox_tz->slot_index)
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-gearbox%d",
+ gearbox_tz->slot_index, gearbox_tz->module + 1);
+ else
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ gearbox_tz->module + 1);
gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
@@ -828,7 +854,8 @@ mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz)
static int
mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal)
+ struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
enum mlxsw_reg_mgpir_device_type device_type;
struct mlxsw_thermal_module *gearbox_tz;
@@ -837,30 +864,31 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
int i;
int err;
- mlxsw_reg_mgpir_pack(mgpir_pl);
+ mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
return err;
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
- NULL);
+ NULL, NULL);
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
!gbox_num)
return 0;
- thermal->tz_gearbox_num = gbox_num;
- thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
- sizeof(*thermal->tz_gearbox_arr),
- GFP_KERNEL);
- if (!thermal->tz_gearbox_arr)
+ area->tz_gearbox_num = gbox_num;
+ area->tz_gearbox_arr = kcalloc(area->tz_gearbox_num,
+ sizeof(*area->tz_gearbox_arr),
+ GFP_KERNEL);
+ if (!area->tz_gearbox_arr)
return -ENOMEM;
- for (i = 0; i < thermal->tz_gearbox_num; i++) {
- gearbox_tz = &thermal->tz_gearbox_arr[i];
+ for (i = 0; i < area->tz_gearbox_num; i++) {
+ gearbox_tz = &area->tz_gearbox_arr[i];
memcpy(gearbox_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
+ gearbox_tz->slot_index = area->slot_index;
err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
if (err)
goto err_thermal_gearbox_tz_init;
@@ -870,21 +898,80 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
err_thermal_gearbox_tz_init:
for (i--; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
return err;
}
static void
-mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal)
+mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal,
+ struct mlxsw_thermal_area *area)
{
int i;
- for (i = thermal->tz_gearbox_num - 1; i >= 0; i--)
- mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
- kfree(thermal->tz_gearbox_arr);
+ for (i = area->tz_gearbox_num - 1; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]);
+ kfree(area->tz_gearbox_arr);
+}
+
+static void
+mlxsw_thermal_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+ int err;
+
+ linecard = &thermal->line_cards[slot_index];
+
+ if (linecard->active)
+ return;
+
+ linecard->slot_index = slot_index;
+ err = mlxsw_thermal_modules_init(thermal->bus_info->dev, thermal->core,
+ thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card modules in slot %d\n",
+ slot_index);
+ return;
+ }
+
+ err = mlxsw_thermal_gearboxes_init(thermal->bus_info->dev,
+ thermal->core, thermal, linecard);
+ if (err) {
+ dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card gearboxes in slot %d\n",
+ slot_index);
+ goto err_thermal_linecard_gearboxes_init;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_thermal_linecard_gearboxes_init:
+ mlxsw_thermal_modules_fini(thermal, linecard);
+}
+
+static void
+mlxsw_thermal_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index,
+ void *priv)
+{
+ struct mlxsw_thermal *thermal = priv;
+ struct mlxsw_thermal_area *linecard;
+
+ linecard = &thermal->line_cards[slot_index];
+ if (!linecard->active)
+ return;
+ linecard->active = false;
+ mlxsw_thermal_gearboxes_fini(thermal, linecard);
+ mlxsw_thermal_modules_fini(thermal, linecard);
}
+static struct mlxsw_linecards_event_ops mlxsw_thermal_event_ops = {
+ .got_active = mlxsw_thermal_got_active,
+ .got_inactive = mlxsw_thermal_got_inactive,
+};
+
int mlxsw_thermal_init(struct mlxsw_core *core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_thermal **p_thermal)
@@ -892,19 +979,29 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 };
enum mlxsw_reg_mfcr_pwm_frequency freq;
struct device *dev = bus_info->dev;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_thermal *thermal;
+ u8 pwm_active, num_of_slots;
u16 tacho_active;
- u8 pwm_active;
int err, i;
- thermal = devm_kzalloc(dev, sizeof(*thermal),
- GFP_KERNEL);
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL,
+ &num_of_slots);
+
+ thermal = kzalloc(struct_size(thermal, line_cards, num_of_slots + 1),
+ GFP_KERNEL);
if (!thermal)
return -ENOMEM;
thermal->core = core;
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+ thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
if (err) {
@@ -970,25 +1067,38 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
goto err_thermal_zone_device_register;
}
- err = mlxsw_thermal_modules_init(dev, core, thermal);
+ err = mlxsw_thermal_modules_init(dev, core, thermal,
+ &thermal->line_cards[0]);
if (err)
goto err_thermal_modules_init;
- err = mlxsw_thermal_gearboxes_init(dev, core, thermal);
+ err = mlxsw_thermal_gearboxes_init(dev, core, thermal,
+ &thermal->line_cards[0]);
if (err)
goto err_thermal_gearboxes_init;
+ err = mlxsw_linecards_event_ops_register(core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ if (err)
+ goto err_linecards_event_ops_register;
+
err = thermal_zone_device_enable(thermal->tzdev);
if (err)
goto err_thermal_zone_device_enable;
+ thermal->line_cards[0].active = true;
*p_thermal = thermal;
return 0;
err_thermal_zone_device_enable:
- mlxsw_thermal_gearboxes_fini(thermal);
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+err_linecards_event_ops_register:
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
err_thermal_gearboxes_init:
- mlxsw_thermal_modules_fini(thermal);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
err_thermal_modules_init:
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
@@ -1001,7 +1111,7 @@ err_thermal_cooling_device_register:
thermal_cooling_device_unregister(thermal->cdevs[i]);
err_reg_write:
err_reg_query:
- devm_kfree(dev, thermal);
+ kfree(thermal);
return err;
}
@@ -1009,8 +1119,12 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
{
int i;
- mlxsw_thermal_gearboxes_fini(thermal);
- mlxsw_thermal_modules_fini(thermal);
+ thermal->line_cards[0].active = false;
+ mlxsw_linecards_event_ops_unregister(thermal->core,
+ &mlxsw_thermal_event_ops,
+ thermal);
+ mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
+ mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
@@ -1023,5 +1137,5 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
}
}
- devm_kfree(thermal->bus_info->dev, thermal);
+ kfree(thermal);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 3bc012dafd08..d9660d4cce96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -59,7 +59,8 @@ static int mlxsw_m_port_open(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+ return mlxsw_env_module_port_up(mlxsw_m->core, 0,
+ mlxsw_m_port->module);
}
static int mlxsw_m_port_stop(struct net_device *dev)
@@ -67,7 +68,7 @@ static int mlxsw_m_port_stop(struct net_device *dev)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
+ mlxsw_env_module_port_down(mlxsw_m->core, 0, mlxsw_m_port->module);
return 0;
}
@@ -110,7 +111,7 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(netdev, core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_info(netdev, core, 0, mlxsw_m_port->module,
modinfo);
}
@@ -121,8 +122,8 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module,
- ee, data);
+ return mlxsw_env_get_module_eeprom(netdev, core, 0,
+ mlxsw_m_port->module, ee, data);
}
static int
@@ -133,7 +134,8 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_eeprom_by_page(core, 0,
+ mlxsw_m_port->module,
page, extack);
}
@@ -142,7 +144,7 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+ return mlxsw_env_reset_module(netdev, core, 0, mlxsw_m_port->module,
flags);
}
@@ -154,7 +156,7 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_get_module_power_mode(core, 0, mlxsw_m_port->module,
params, extack);
}
@@ -166,7 +168,7 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ return mlxsw_env_set_module_power_mode(core, 0, mlxsw_m_port->module,
params->policy, extack);
}
@@ -221,7 +223,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port, 0,
module + 1, false, 0, false,
0, mlxsw_m->base_mac,
sizeof(mlxsw_m->base_mac));
@@ -311,7 +313,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
- mlxsw_env_module_port_map(mlxsw_m->core, module);
+ mlxsw_env_module_port_map(mlxsw_m->core, 0, module);
mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
return 0;
@@ -320,12 +322,13 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
{
mlxsw_m->module_to_port[module] = -1;
- mlxsw_env_module_port_unmap(mlxsw_m->core, module);
+ mlxsw_env_module_port_unmap(mlxsw_m->core, 0, module);
}
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
u8 last_module = max_ports;
int i;
int err;
@@ -354,6 +357,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
}
/* Create port objects for each valid entry */
+ devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0 &&
!mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
@@ -364,6 +368,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
goto err_module_to_port_create;
}
}
+ devl_unlock(devlink);
return 0;
@@ -373,6 +378,7 @@ err_module_to_port_create:
mlxsw_m_port_remove(mlxsw_m,
mlxsw_m->module_to_port[i]);
}
+ devl_unlock(devlink);
i = max_ports;
err_module_to_port_map:
for (i--; i > 0; i--)
@@ -385,8 +391,10 @@ err_module_to_port_alloc:
static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_m->core);
int i;
+ devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
if (mlxsw_m->module_to_port[i] > 0) {
mlxsw_m_port_remove(mlxsw_m,
@@ -394,6 +402,7 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
mlxsw_m_port_module_unmap(mlxsw_m, i);
}
}
+ devl_unlock(devlink);
kfree(mlxsw_m->module_to_port);
kfree(mlxsw_m->ports);
@@ -422,7 +431,6 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_m->core = mlxsw_core;
@@ -438,9 +446,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
- devl_lock(devlink);
err = mlxsw_m_ports_create(mlxsw_m);
- devl_unlock(devlink);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
return err;
@@ -452,11 +458,8 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devl_lock(devlink);
mlxsw_m_ports_remove(mlxsw_m);
- devl_unlock(devlink);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 67b1a2f8397f..04c4d7a4bd83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4325,6 +4325,15 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
*/
MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
+/* reg_pmlp_slot_index
+ * Slot index.
+ * Slot_index = 0 represents the onboard (motherboard).
+ * In case of a non-modular system, only slot_index = 0 is available.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, slot_index, 0x04, 8, 4, 0x04, 0x00, false);
+
/* reg_pmlp_tx_lane
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
@@ -5769,9 +5778,10 @@ enum mlxsw_reg_pmaos_e {
*/
MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmaos, payload);
+ mlxsw_reg_pmaos_slot_index_set(payload, slot_index);
mlxsw_reg_pmaos_module_set(payload, module);
}
@@ -5874,6 +5884,69 @@ static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
}
+/* PMECR - Ports Mapping Event Configuration Register
+ * --------------------------------------------------
+ * The PMECR register is used to enable/disable event triggering
+ * in case of local port mapping change.
+ */
+#define MLXSW_REG_PMECR_ID 0x501B
+#define MLXSW_REG_PMECR_LEN 0x20
+
+MLXSW_REG_DEFINE(pmecr, MLXSW_REG_PMECR_ID, MLXSW_REG_PMECR_LEN);
+
+/* reg_pmecr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, pmecr, 0x00, 16, 0x00, 12);
+
+/* reg_pmecr_ee
+ * Event update enable. If this bit is set, event generation will be updated
+ * based on the e field. Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, ee, 0x04, 30, 1);
+
+/* reg_pmecr_eswi
+ * Software ignore enable bit. If this bit is set, the value of swi is used.
+ * If this bit is clear, the value of swi is ignored.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmecr, eswi, 0x04, 24, 1);
+
+/* reg_pmecr_swi
+ * Software ignore. If this bit is set, the device shouldn't generate events
+ * in case of PMLP SET operation but only upon self local port mapping change
+ * (if applicable according to e configuration). This is supplementary
+ * configuration on top of e value.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, swi, 0x04, 8, 1);
+
+enum mlxsw_reg_pmecr_e {
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_EVENT,
+ MLXSW_REG_PMECR_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmecr_e
+ * Event generation on local port mapping change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmecr, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmecr_pack(char *payload, u16 local_port,
+ enum mlxsw_reg_pmecr_e e)
+{
+ MLXSW_REG_ZERO(pmecr, payload);
+ mlxsw_reg_pmecr_local_port_set(payload, local_port);
+ mlxsw_reg_pmecr_e_set(payload, e);
+ mlxsw_reg_pmecr_ee_set(payload, true);
+ mlxsw_reg_pmecr_swi_set(payload, true);
+ mlxsw_reg_pmecr_eswi_set(payload, true);
+}
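+
+/* Usage sketch: enable mapping change events on a local port:
+ *
+ *	char pmecr_pl[MLXSW_REG_PMECR_LEN];
+ *
+ *	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
+ *			     MLXSW_REG_PMECR_E_GENERATE_EVENT);
+ *	mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmecr), pmecr_pl);
+ */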
+
/* PMPE - Port Module Plug/Unplug Event Register
* ---------------------------------------------
* This register reports any operational status change of a module.
@@ -5984,6 +6057,12 @@ MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
*/
MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
+/* reg_pmmp_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmmp, slot_index, 0x00, 24, 4);
+
/* reg_pmmp_sticky
* When set, will keep eeprom_override values after plug-out event.
* Access: OP
@@ -6011,9 +6090,10 @@ enum {
*/
MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
-static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_slot_index_set(payload, slot_index);
mlxsw_reg_pmmp_module_set(payload, module);
}
@@ -9721,6 +9801,12 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
+/* reg_mtmp_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtmp, slot_index, 0x00, 16, 4);
+
#define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64
#define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256
/* reg_mtmp_sensor_index
@@ -9810,11 +9896,12 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
*/
MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
-static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
- bool max_temp_enable,
+static inline void mlxsw_reg_mtmp_pack(char *payload, u8 slot_index,
+ u16 sensor_index, bool max_temp_enable,
bool max_temp_reset)
{
MLXSW_REG_ZERO(mtmp, payload);
+ mlxsw_reg_mtmp_slot_index_set(payload, slot_index);
mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
@@ -9880,6 +9967,12 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
+/* reg_mtbr_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtbr, slot_index, 0x00, 16, 4);
+
/* reg_mtbr_base_sensor_index
* Base sensors index to access (0 - ASIC sensor, 1-63 - ambient sensors,
* 64-127 are mapped to the SFP+/QSFP modules sequentially).
@@ -9912,10 +10005,11 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index,
- u8 num_rec)
+static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index,
+ u16 base_sensor_index, u8 num_rec)
{
MLXSW_REG_ZERO(mtbr, payload);
+ mlxsw_reg_mtbr_slot_index_set(payload, slot_index);
mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
}
@@ -9964,6 +10058,12 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
*/
MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
+/* reg_mcia_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, slot, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCIA_STATUS_GOOD = 0,
/* No response from module's EEPROM. */
@@ -10063,11 +10163,13 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
-static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
- u8 page_number, u16 device_addr,
- u8 size, u8 i2c_device_addr)
+static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
+ u8 lock, u8 page_number,
+ u16 device_addr, u8 size,
+ u8 i2c_device_addr)
{
MLXSW_REG_ZERO(mcia, payload);
+ mlxsw_reg_mcia_slot_set(payload, slot_index);
mlxsw_reg_mcia_module_set(payload, module);
mlxsw_reg_mcia_l_set(payload, lock);
mlxsw_reg_mcia_page_number_set(payload, page_number);
@@ -10499,6 +10601,12 @@ MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
*/
MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+/* reg_mcion_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, slot_index, 0x00, 12, 4);
+
enum {
MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
@@ -10510,9 +10618,10 @@ enum {
*/
MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
-static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 slot_index, u8 module)
{
MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_slot_index_set(payload, slot_index);
mlxsw_reg_mcion_module_set(payload, module);
}
@@ -11326,6 +11435,12 @@ enum mlxsw_reg_mgpir_device_type {
MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
};
+/* mgpir_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mgpir, slot_index, 0x00, 28, 4);
+
/* mgpir_device_type
* Access: RO
*/
@@ -11343,21 +11458,35 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+/* mgpir_max_modules_per_slot
+ * Maximum number of modules that can be connected per slot.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, max_modules_per_slot, 0x04, 16, 8);
+
+/* mgpir_num_of_slots
+ * Number of slots in the system.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_slots, 0x04, 8, 8);
+
/* mgpir_num_of_modules
* Number of modules.
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
-static inline void mlxsw_reg_mgpir_pack(char *payload)
+static inline void mlxsw_reg_mgpir_pack(char *payload, u8 slot_index)
{
MLXSW_REG_ZERO(mgpir, payload);
+ mlxsw_reg_mgpir_slot_index_set(payload, slot_index);
}
static inline void
mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
enum mlxsw_reg_mgpir_device_type *device_type,
- u8 *devices_per_flash, u8 *num_of_modules)
+ u8 *devices_per_flash, u8 *num_of_modules,
+ u8 *num_of_slots)
{
if (num_of_devices)
*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -11368,6 +11497,393 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
mlxsw_reg_mgpir_devices_per_flash_get(payload);
if (num_of_modules)
*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
+ if (num_of_slots)
+ *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload);
+}
+
+/* MBCT - Management Binary Code Transfer Register
+ * -----------------------------------------------
+ * This register allows the host to transfer binary codes to the
+ * management FW in chunks of up to 1KB.
+ */
+#define MLXSW_REG_MBCT_ID 0x9120
+#define MLXSW_REG_MBCT_LEN 0x420
+
+MLXSW_REG_DEFINE(mbct, MLXSW_REG_MBCT_ID, MLXSW_REG_MBCT_LEN);
+
+/* reg_mbct_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mbct, slot_index, 0x00, 0, 4);
+
+/* reg_mbct_data_size
+ * Actual data field size in bytes for the current data transfer.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, data_size, 0x04, 0, 11);
+
+enum mlxsw_reg_mbct_op {
+ MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE = 1,
+ MLXSW_REG_MBCT_OP_DATA_TRANSFER, /* Download */
+ MLXSW_REG_MBCT_OP_ACTIVATE,
+ MLXSW_REG_MBCT_OP_CLEAR_ERRORS = 6,
+ MLXSW_REG_MBCT_OP_QUERY_STATUS,
+};
+
+/* reg_mbct_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, op, 0x08, 28, 4);
+
+/* reg_mbct_last
+ * Indicates that the current data field is the last chunk of the INI.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, last, 0x08, 26, 1);
+
+/* reg_mbct_oee
+ * Opcode Event Enable. When set, a BCTOE event will be sent once the opcode
+ * was executed and the fsm_state has changed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mbct, oee, 0x08, 25, 1);
+
+enum mlxsw_reg_mbct_status {
+ /* Partial data transfer completed successfully and ready for next
+ * data transfer.
+ */
+ MLXSW_REG_MBCT_STATUS_PART_DATA = 2,
+ MLXSW_REG_MBCT_STATUS_LAST_DATA,
+ MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE,
+ /* Error - trying to erase INI while it is being used. */
+ MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE,
+ /* Last data transfer completed, applying magic pattern. */
+ MLXSW_REG_MBCT_STATUS_ERASE_FAILED = 7,
+ MLXSW_REG_MBCT_STATUS_INI_ERROR,
+ MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED,
+ MLXSW_REG_MBCT_STATUS_ILLEGAL_OPERATION = 11,
+};
+
+/* reg_mbct_status
+ * Status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, status, 0x0C, 24, 5);
+
+enum mlxsw_reg_mbct_fsm_state {
+ MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE = 5,
+ MLXSW_REG_MBCT_FSM_STATE_ERROR,
+};
+
+/* reg_mbct_fsm_state
+ * FSM state.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mbct, fsm_state, 0x0C, 16, 4);
+
+#define MLXSW_REG_MBCT_DATA_LEN 1024
+
+/* reg_mbct_data
+ * Up to 1KB of data.
+ * Access: WO
+ */
+MLXSW_ITEM_BUF(reg, mbct, data, 0x20, MLXSW_REG_MBCT_DATA_LEN);
+
+static inline void mlxsw_reg_mbct_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mbct_op op, bool oee)
+{
+ MLXSW_REG_ZERO(mbct, payload);
+ mlxsw_reg_mbct_slot_index_set(payload, slot_index);
+ mlxsw_reg_mbct_op_set(payload, op);
+ mlxsw_reg_mbct_oee_set(payload, oee);
+}
+
+static inline void mlxsw_reg_mbct_dt_pack(char *payload,
+ u16 data_size, bool last,
+ const char *data)
+{
+ if (WARN_ON(data_size > MLXSW_REG_MBCT_DATA_LEN))
+ return;
+ mlxsw_reg_mbct_data_size_set(payload, data_size);
+ mlxsw_reg_mbct_last_set(payload, last);
+ mlxsw_reg_mbct_data_memcpy_to(payload, data);
+}
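+
+/* A typical transfer flow, as suggested by the opcodes above (sketch):
+ * erase the previous INI image, issue data transfers in chunks of up to
+ * MLXSW_REG_MBCT_DATA_LEN bytes with 'last' set on the final chunk, then
+ * activate; with 'oee' set, completion of each step is signalled by a
+ * BCTOE event.
+ */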
+
+static inline void
+mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
+ enum mlxsw_reg_mbct_status *p_status,
+ enum mlxsw_reg_mbct_fsm_state *p_fsm_state)
+{
+ if (p_slot_index)
+ *p_slot_index = mlxsw_reg_mbct_slot_index_get(payload);
+ *p_status = mlxsw_reg_mbct_status_get(payload);
+ if (p_fsm_state)
+ *p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
+}
+
+/* MDDQ - Management DownStream Device Query Register
+ * --------------------------------------------------
+ * This register allows querying the DownStream device properties. The
+ * desired information is chosen by the query_type field and is delivered
+ * in 32B data blocks.
+ */
+#define MLXSW_REG_MDDQ_ID 0x9161
+#define MLXSW_REG_MDDQ_LEN 0x30
+
+MLXSW_REG_DEFINE(mddq, MLXSW_REG_MDDQ_ID, MLXSW_REG_MDDQ_LEN);
+
+/* reg_mddq_sie
+ * Slot info event enable.
+ * When set to '1', each change in the slot_info.provisioned / sr_valid /
+ * active / ready will generate a DSDSC event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
+
+enum mlxsw_reg_mddq_query_type {
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
+};
+
+/* reg_mddq_query_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
+
+/* reg_mddq_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+
+/* reg_mddq_response_msg_seq
+ * Response message sequential number. For a given request, the response
+ * carries the following sequential number; the last message in the
+ * sequence carries a sequential number of 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequential number.
+ * The first message number should be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contains the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
+/* reg_mddq_slot_info_provisioned
+ * If set, the INI file is applied and the card is provisioned.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_provisioned, 0x10, 31, 1);
+
+/* reg_mddq_slot_info_sr_valid
+ * If set, Shift Register is valid (after being provisioned) and data
+ * can be sent from the switch ASIC to the line-card CPLD over Shift-Register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_sr_valid, 0x10, 30, 1);
+
+enum mlxsw_reg_mddq_slot_info_ready {
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_NOT_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_READY,
+ MLXSW_REG_MDDQ_SLOT_INFO_READY_ERROR,
+};
+
+/* reg_mddq_slot_info_lc_ready
+ * If set, the LC is powered on, matches the INI version and a new FW
+ * version can be burnt (if necessary).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_lc_ready, 0x10, 28, 2);
+
+/* reg_mddq_slot_info_active
+ * If set, the FW has completed the MDDC.device_enable command.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_active, 0x10, 27, 1);
+
+/* reg_mddq_slot_info_hw_revision
+ * Major user-configured version number of the current INI file.
+ * Valid only when active or ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_hw_revision, 0x14, 16, 16);
+
+/* reg_mddq_slot_info_ini_file_version
+ * User-configured version number of the current INI file.
+ * Valid only when active or lc_ready are '1'.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_ini_file_version, 0x14, 0, 16);
+
+/* reg_mddq_slot_info_card_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, slot_info_card_type, 0x18, 0, 8);
+
+static inline void
+__mlxsw_reg_mddq_pack(char *payload, u8 slot_index,
+ enum mlxsw_reg_mddq_query_type query_type)
+{
+ MLXSW_REG_ZERO(mddq, payload);
+ mlxsw_reg_mddq_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddq_query_type_set(payload, query_type);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_pack(char *payload, u8 slot_index, bool sie)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO);
+ mlxsw_reg_mddq_sie_set(payload, sie);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
+ bool *p_provisioned, bool *p_sr_valid,
+ enum mlxsw_reg_mddq_slot_info_ready *p_lc_ready,
+ bool *p_active, u16 *p_hw_revision,
+ u16 *p_ini_file_version,
+ u8 *p_card_type)
+{
+ *p_slot_index = mlxsw_reg_mddq_slot_index_get(payload);
+ *p_provisioned = mlxsw_reg_mddq_slot_info_provisioned_get(payload);
+ *p_sr_valid = mlxsw_reg_mddq_slot_info_sr_valid_get(payload);
+ *p_lc_ready = mlxsw_reg_mddq_slot_info_lc_ready_get(payload);
+ *p_active = mlxsw_reg_mddq_slot_info_active_get(payload);
+ *p_hw_revision = mlxsw_reg_mddq_slot_info_hw_revision_get(payload);
+ *p_ini_file_version = mlxsw_reg_mddq_slot_info_ini_file_version_get(payload);
+ *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
+}
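+
+/* Usage sketch, paired with the pack helper above:
+ *
+ *	char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ *
+ *	mlxsw_reg_mddq_slot_info_pack(mddq_pl, slot_index, false);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ *	if (!err)
+ *		mlxsw_reg_mddq_slot_info_unpack(mddq_pl, ...);
+ */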
+
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device is numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ if (p_flash_owner)
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ if (p_fw_major)
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ if (p_fw_minor)
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ if (p_fw_sub_minor)
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
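+
+/* Devices on a slot are enumerated by repeated queries (sketch): start
+ * with request_msg_seq 0 and feed each response_msg_seq back as the next
+ * request_msg_seq; the sequence ends when the response carries 0.
+ */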
+
+#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
+
+/* reg_mddq_slot_ascii_name
+ * Slot's ASCII name.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mddq, slot_ascii_name, 0x10,
+ MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN);
+
+static inline void
+mlxsw_reg_mddq_slot_name_pack(char *payload, u8 slot_index)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME);
+}
+
+static inline void
+mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name)
+{
+ mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name);
+}
+
+/* MDDC - Management DownStream Device Control Register
+ * ----------------------------------------------------
+ * This register allows controlling downstream devices and line cards.
+ */
+#define MLXSW_REG_MDDC_ID 0x9163
+#define MLXSW_REG_MDDC_LEN 0x30
+
+MLXSW_REG_DEFINE(mddc, MLXSW_REG_MDDC_ID, MLXSW_REG_MDDC_LEN);
+
+/* reg_mddc_slot_index
+ * Slot index. 0 is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddc, slot_index, 0x00, 0, 4);
+
+/* reg_mddc_rst
+ * Reset request.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddc, rst, 0x04, 29, 1);
+
+/* reg_mddc_device_enable
+ * When set, FW is the manager and allowed to program the downstream device.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mddc, device_enable, 0x04, 28, 1);
+
+static inline void mlxsw_reg_mddc_pack(char *payload, u8 slot_index, bool rst,
+ bool device_enable)
+{
+ MLXSW_REG_ZERO(mddc, payload);
+ mlxsw_reg_mddc_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddc_rst_set(payload, rst);
+ mlxsw_reg_mddc_device_enable_set(payload, device_enable);
}
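+
+/* Usage sketch: enable the downstream device on a line card slot:
+ *
+ *	char mddc_pl[MLXSW_REG_MDDC_LEN];
+ *
+ *	mlxsw_reg_mddc_pack(mddc_pl, slot_index, false, true);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
+ */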
/* MFDE - Monitoring FW Debug Register
@@ -12619,6 +13135,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pmaos),
MLXSW_REG(pplr),
MLXSW_REG(pmtdb),
+ MLXSW_REG(pmecr),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
MLXSW_REG(pmmp),
@@ -12688,6 +13205,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtptpt),
MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
+ MLXSW_REG(mbct),
+ MLXSW_REG(mddq),
+ MLXSW_REG(mddc),
MLXSW_REG(mfde),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 8eb05090ffec..ac6348e2ff1f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -89,6 +89,11 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
"." __stringify(MLXSW_SP_FWREV_MINOR) \
"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
+ "mellanox/lc_ini_bundle_" \
+ __stringify(MLXSW_SP_FWREV_MINOR) "_" \
+ __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
+
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
@@ -481,23 +486,22 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
}
static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- struct mlxsw_sp_port_mapping *port_mapping)
+mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, char *pmlp_pl,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
bool separate_rxtx;
+ u8 first_lane;
+ u8 slot_index;
u8 module;
u8 width;
- int err;
int i;
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
width = mlxsw_reg_pmlp_width_get(pmlp_pl);
separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+ first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
if (width && !is_power_of_2(width)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
@@ -511,6 +515,11 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
+ if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
+ local_port);
+ return -EINVAL;
+ }
if (separate_rxtx &&
mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
@@ -518,7 +527,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
local_port);
return -EINVAL;
}
- if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
local_port);
return -EINVAL;
@@ -526,6 +535,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
port_mapping->module = module;
+ port_mapping->slot_index = slot_index;
port_mapping->width = width;
port_mapping->module_width = width;
port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
@@ -533,17 +543,35 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ pmlp_pl, port_mapping);
+}
+
+static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i, err;
- mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
+ port_mapping->slot_index);
mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
@@ -554,19 +582,20 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return 0;
err_pmlp_write:
- mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- u8 module)
+ u8 slot_index, u8 module)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
- mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}
static int mlxsw_sp_port_open(struct net_device *dev)
@@ -576,6 +605,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
int err;
err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
if (err)
return err;
@@ -587,6 +617,7 @@ static int mlxsw_sp_port_open(struct net_device *dev)
err_port_admin_status_set:
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return err;
}
@@ -599,6 +630,7 @@ static int mlxsw_sp_port_stop(struct net_device *dev)
netif_stop_queue(dev);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module);
return 0;
}
@@ -1445,12 +1477,13 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
u64 overheat_counter;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
- &overheat_counter);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
+ module, &overheat_counter);
if (err)
return err;
@@ -1525,7 +1558,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
}
splittable = lanes > 1 && !split;
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
port_number, split, split_port_subnumber,
splittable, lanes, mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
@@ -1775,13 +1808,16 @@ err_port_label_info_get:
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
+ port_mapping->slot_index,
+ port_mapping->module);
return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
@@ -1804,7 +1840,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1858,21 +1894,148 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
return mlxsw_sp->ports[local_port] != NULL;
}
+static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
+ u16 local_port, bool enable)
+{
+ char pmecr_pl[MLXSW_REG_PMECR_LEN];
+
+ mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
+ enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
+ MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
+}
+
+struct mlxsw_sp_port_mapping_event {
+ struct list_head list;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+};
+
+static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ LIST_HEAD(event_queue);
+ u16 local_port;
+ int err;
+
+ events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
+ mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
+ devlink = priv_to_devlink(mlxsw_sp->core);
+
+ spin_lock_bh(&events->queue_lock);
+ list_splice_init(&events->queue, &event_queue);
+ spin_unlock_bh(&events->queue_lock);
+
+ list_for_each_entry_safe(event, next_event, &event_queue, list) {
+ local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
+ err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
+ event->pmlp_pl, &port_mapping);
+ if (err)
+ goto out;
+
+ if (WARN_ON_ONCE(!port_mapping.width))
+ goto out;
+
+ devl_lock(devlink);
+
+ if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, &port_mapping);
+ else
+ WARN_ON_ONCE(1);
+
+ devl_unlock(devlink);
+
+ mlxsw_sp->port_mapping[local_port] = port_mapping;
+
+out:
+ kfree(event);
+ }
+}
+
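+/* May run in atomic context (trap processing), hence the event is only
+ * queued here, under a spinlock, and the actual handling, which may take
+ * the devlink lock, is deferred to the work item above.
+ */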
+static void
+mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
+ char *pmlp_pl, void *priv)
+{
+ struct mlxsw_sp_port_mapping_events *events;
+ struct mlxsw_sp_port_mapping_event *event;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u16 local_port;
+
+ local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ return;
+
+ events = &mlxsw_sp->port_mapping_events;
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
+ spin_lock(&events->queue_lock);
+ list_add_tail(&event->list, &events->queue);
+ spin_unlock(&events->queue_lock);
+ mlxsw_core_schedule_work(&events->work);
+}
+
+static void
+__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_mapping_event *event, *next_event;
+ struct mlxsw_sp_port_mapping_events *events;
+
+ events = &mlxsw_sp->port_mapping_events;
+
+ /* Caller needs to make sure that no new event is going to appear. */
+ cancel_work_sync(&events->work);
+ list_for_each_entry_safe(event, next_event, &events->queue, list) {
+ list_del(&event->list);
+ kfree(event);
+ }
+}
+
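The listener/work pair above is a standard deferred-processing shape: the trap handler runs in atomic context, so it only copies the register payload and appends it to a spinlock-protected queue, and the work item later splices the whole queue out under the lock and walks it lock-free. A minimal userspace sketch of the same splice-under-lock technique, with hypothetical names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int payload;
};

static struct event *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side: allocate, fill, append under the lock, nothing else. */
static void event_enqueue(int payload)
{
	struct event *ev = malloc(sizeof(*ev));

	if (!ev)
		return;
	ev->payload = payload;
	pthread_mutex_lock(&queue_lock);
	ev->next = queue_head;		/* LIFO for brevity */
	queue_head = ev;
	pthread_mutex_unlock(&queue_lock);
}

/* Consumer side: splice the whole list out, then process off-lock. */
static void events_work(void)
{
	struct event *list, *next;

	pthread_mutex_lock(&queue_lock);
	list = queue_head;
	queue_head = NULL;
	pthread_mutex_unlock(&queue_lock);

	for (; list; list = next) {
		next = list->next;
		printf("event %d\n", list->payload);
		free(list);
	}
}

int main(void)
{
	event_enqueue(1);
	event_enqueue(2);
	events_work();
	return 0;
}

Splicing keeps the critical section O(1) no matter how many events are pending, which is why the driver processes the spliced list only after dropping the lock.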
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+ for (i = 1; i < max_ports; i++)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
+
+ devl_lock(devlink);
+ for (i = 1; i < max_ports; i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
+ devl_unlock(devlink);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
}
+static void
+mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
+ int i;
+
+ for (i = 1; i < max_ports; i++)
+ if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+}
+
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping_events *events;
struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
int i;
@@ -1883,26 +2046,46 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
+ events = &mlxsw_sp->port_mapping_events;
+ INIT_LIST_HEAD(&events->queue);
+ spin_lock_init(&events->queue_lock);
+ INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
+ if (err)
+ goto err_event_enable;
+ }
+
+ devl_lock(devlink);
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
for (i = 1; i < max_ports; i++) {
- port_mapping = mlxsw_sp->port_mapping[i];
- if (!port_mapping)
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ if (!port_mapping->width)
continue;
err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
if (err)
goto err_port_create;
}
+ devl_unlock(devlink);
return 0;
err_port_create:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
+ i = max_ports;
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
+ devl_unlock(devlink);
+err_event_enable:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
+ /* Make sure all scheduled events are processed */
+ __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
return err;
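Note the i = max_ports; reload before err_event_enable above: once every event-enable iteration has succeeded, the rollback loop at that label must cover the whole range, so the index is reset before falling through the ladder. A small self-contained sketch of this unwind pattern, with hypothetical names and a forced mid-loop failure:

#include <stdio.h>

#define N 4

static int ev_set(int i, int on)
{
	printf("event %d -> %d\n", i, on);
	return 0;
}

static int port_create(int i)
{
	return i == 2 ? -1 : 0;	/* force a mid-loop failure */
}

static void port_remove(int i)
{
	printf("remove port %d\n", i);
}

static int ports_create(void)
{
	int err, i;

	for (i = 0; i < N; i++) {
		err = ev_set(i, 1);
		if (err)
			goto err_event_enable;
	}

	for (i = 0; i < N; i++) {
		err = port_create(i);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		port_remove(i);
	/* Every event enable succeeded, so reload the index to make the
	 * rollback loop below walk the full range.
	 */
	i = N;
err_event_enable:
	for (i--; i >= 0; i--)
		ev_set(i, 0);
	return err;
}

int main(void)
{
	return ports_create() ? 1 : 0;
}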
@@ -1911,12 +2094,12 @@ err_cpu_port_create:
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct mlxsw_sp_port_mapping port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
int i;
int err;
mlxsw_sp->port_mapping = kcalloc(max_ports,
- sizeof(struct mlxsw_sp_port_mapping *),
+ sizeof(struct mlxsw_sp_port_mapping),
GFP_KERNEL);
if (!mlxsw_sp->port_mapping)
return -ENOMEM;
@@ -1925,36 +2108,20 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
continue;
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+ port_mapping = &mlxsw_sp->port_mapping[i];
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
if (err)
goto err_port_module_info_get;
- if (!port_mapping.width)
- continue;
-
- mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
- sizeof(port_mapping),
- GFP_KERNEL);
- if (!mlxsw_sp->port_mapping[i]) {
- err = -ENOMEM;
- goto err_port_module_info_dup;
- }
}
return 0;
err_port_module_info_get:
-err_port_module_info_dup:
- for (i--; i >= 1; i--)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
return err;
}
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
- int i;
-
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
- kfree(mlxsw_sp->port_mapping[i]);
kfree(mlxsw_sp->port_mapping);
}
@@ -2004,8 +2171,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < count; i++) {
u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- port_mapping = mlxsw_sp->port_mapping[local_port];
- if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
+ port_mapping = &mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
continue;
mlxsw_sp_port_create(mlxsw_sp, local_port,
false, port_mapping);
@@ -2045,7 +2212,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
return -EINVAL;
}
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2080,6 +2248,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
err_port_split_create:
mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
+
return err;
}
@@ -2109,7 +2278,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
count = mlxsw_sp_port->mapping.module_width /
mlxsw_sp_port->mapping.width;
- mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
+ mlxsw_sp_port->mapping.module,
mlxsw_sp_port->mapping.module_width / count,
count);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
@@ -2300,6 +2470,11 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = {
MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
+static const struct mlxsw_listener mlxsw_sp2_listener[] = {
+ /* Events */
+ MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
+};
+
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -2818,7 +2993,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
@@ -2979,9 +3153,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_sample_trigger_init;
}
- devl_lock(devlink);
err = mlxsw_sp_ports_create(mlxsw_sp);
- devl_unlock(devlink);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
goto err_ports_create;
@@ -3094,6 +3266,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3124,6 +3298,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3154,6 +3330,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->listeners = mlxsw_sp2_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3162,12 +3340,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devl_lock(devlink);
mlxsw_sp_ports_remove(mlxsw_sp);
- devl_unlock(devlink);
-
rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
@@ -3645,6 +3819,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3682,6 +3857,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -3717,6 +3893,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.fini = mlxsw_sp_fini,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
+ .ports_remove_selected = mlxsw_sp_ports_remove_selected,
.sb_pool_get = mlxsw_sp_sb_pool_get,
.sb_pool_set = mlxsw_sp_sb_pool_set,
.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
@@ -5024,3 +5201,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 20588e699588..2ad29ae1c640 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -145,11 +145,18 @@ struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
+ u8 slot_index;
u8 width; /* Number of lanes used by the port */
u8 module_width; /* Number of lanes in the module (static) */
u8 lane;
};
+struct mlxsw_sp_port_mapping_events {
+ struct list_head queue;
+ spinlock_t queue_lock; /* protects queue */
+ struct work_struct work;
+};
+
struct mlxsw_sp_parsing {
refcount_t parsing_depth_ref;
u16 parsing_depth;
@@ -164,7 +171,8 @@ struct mlxsw_sp {
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
- struct mlxsw_sp_port_mapping **port_mapping;
+ struct mlxsw_sp_port_mapping *port_mapping;
+ struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 8b5d7f83b9b0..915dffb85a1c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -568,14 +568,14 @@ struct mlxsw_sp_port_stats {
static u64
mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping;
struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
u64 stats;
int err;
- err = mlxsw_env_module_overheat_counter_get(mlxsw_core,
- port_mapping.module,
- &stats);
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_core, slot_index,
+ module, &stats);
if (err)
return mlxsw_sp_port->module_overheat_initial_val;
@@ -1036,6 +1036,7 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
return mlxsw_env_get_module_info(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.slot_index,
mlxsw_sp_port->mapping.module,
modinfo);
}
@@ -1045,10 +1046,11 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
- mlxsw_sp_port->mapping.module, ee,
- data);
+ return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, slot_index,
+ module, ee, data);
}
static int
@@ -1058,10 +1060,11 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page,
- extack);
+ return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
}
static int
@@ -1202,9 +1205,11 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, slot_index,
+ module, flags);
}
static int
@@ -1214,10 +1219,11 @@ mlxsw_sp_get_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
- extack);
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params, extack);
}
static int
@@ -1227,10 +1233,11 @@ mlxsw_sp_set_module_power_mode(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
u8 module = mlxsw_sp_port->mapping.module;
- return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
- params->policy, extack);
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, slot_index,
+ module, params->policy, extack);
}
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 79deb19e3a19..dc820d9f2696 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -21,6 +21,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
@@ -507,7 +508,7 @@ struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
struct fib_info *fi;
u32 tb_id;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
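For context on the tos to dscp switch in this hunk: net/inet_dscp.h provides dscp_t as a type-safe holder for the six DSCP bits of the IPv4 dsfield, keeping the two ECN bits out of FIB keys. A standalone sketch of the masking involved; the typedef is a plain mock of the kernel's __bitwise type, and the helper names are modeled on (but are not) the kernel's inet_dsfield_to_dscp()/inet_dscp_to_dsfield():

#include <assert.h>
#include <stdint.h>

typedef uint8_t dscp_t;		/* plain mock of the kernel's __bitwise type */

#define INET_DSCP_MASK 0xfc	/* upper six bits of the IPv4 dsfield */

static dscp_t dsfield_to_dscp(uint8_t dsfield)
{
	return (dscp_t)(dsfield & INET_DSCP_MASK);
}

static uint8_t dscp_to_dsfield(dscp_t dscp)
{
	return (uint8_t)dscp;
}

int main(void)
{
	/* dsfield 0x2f carries DSCP bits 0x2c plus ECN bits 0x3 */
	dscp_t dscp = dsfield_to_dscp(0x2f);

	assert(dscp_to_dsfield(dscp) == 0x2c);	/* ECN bits stripped */
	return 0;
}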
@@ -5559,7 +5560,7 @@ mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- return !fib4_entry->tos;
+ return !fib4_entry->dscp;
}
static bool
@@ -5620,7 +5621,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -5645,7 +5646,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = should_offload;
fri.trap = !should_offload;
@@ -5668,7 +5669,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_entry->tos;
+ fri.dscp = fib4_entry->dscp;
fri.type = fib4_entry->type;
fri.offload = false;
fri.trap = false;
@@ -6250,7 +6251,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
- fib4_entry->tos = fen_info->tos;
+ fib4_entry->dscp = fen_info->dscp;
fib_entry->fib_node = fib_node;
@@ -6304,7 +6305,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib4_entry, common);
if (fib4_entry->tb_id == fen_info->tb_id &&
- fib4_entry->tos == fen_info->tos &&
+ fib4_entry->dscp == fen_info->dscp &&
fib4_entry->type == fen_info->type &&
fib4_entry->fi == fen_info->fi)
return fib4_entry;
@@ -7010,7 +7011,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
@@ -7019,14 +7020,12 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
if (err)
- goto err_nexthop6_group_update;
+ goto err_rt6_unwind;
return 0;
-err_nexthop6_group_update:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
@@ -7154,7 +7153,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
if (IS_ERR(mlxsw_sp_rt6)) {
err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ goto err_rt6_unwind;
}
list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
fib6_entry->nrt6++;
@@ -7162,7 +7161,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
- goto err_nexthop6_group_get;
+ goto err_rt6_unwind;
err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
fib_node->fib);
@@ -7181,10 +7180,8 @@ err_fib6_entry_type_set:
mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
-err_nexthop6_group_get:
- i = nrt6;
-err_rt6_create:
- for (i--; i >= 0; i--) {
+err_rt6_unwind:
+ for (; i > 0; i--) {
fib6_entry->nrt6--;
mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 9e070ab3ed76..d888498aed33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -133,6 +133,12 @@ enum mlxsw_event_trap_id {
MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
/* PTP Egress FIFO has a new entry */
MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E,
+ /* Downstream Device Status Change */
+ MLXSW_TRAP_ID_DSDSC = 0x321,
+ /* Binary Code Transfer Operation Executed Event */
+ MLXSW_TRAP_ID_BCTOE = 0x322,
+ /* Port mapping change */
+ MLXSW_TRAP_ID_PMLPE = 0x32E,
};
#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index a9ffc719aa0e..fd2e0ebb2427 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
- lan966x_ptp.o
+ lan966x_ptp.o lan966x_fdma.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
new file mode 100644
index 000000000000..9e2a7323eaf0
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_fdma_channel_active(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, FDMA_CH_ACTIVE);
+}
+
+static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
+ struct lan966x_db *db)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ page = dev_alloc_pages(rx->page_order);
+ if (unlikely(!page))
+ return NULL;
+
+ dma_addr = dma_map_page(lan966x->dev, page, 0,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
+ goto free_page;
+
+ db->dataptr = dma_addr;
+
+ return page;
+
+free_page:
+ __free_pages(page, rx->page_order);
+ return NULL;
+}
+
+static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ int i, j;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ dma_unmap_single(lan966x->dev,
+ (dma_addr_t)db->dataptr,
+ PAGE_SIZE << rx->page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rx->page[i][j], rx->page_order);
+ }
+ }
+}
+
+static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
+ struct lan966x_rx_dcb *dcb,
+ u64 nextptr)
+{
+ struct lan966x_db *db;
+ int i;
+
+ for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
+ db = &dcb->db[i];
+ db->status = FDMA_DCB_STATUS_INTR;
+ }
+
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);
+
+ rx->last_entry->nextptr = nextptr;
+ rx->last_entry = dcb;
+}
+
+static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ struct lan966x_rx_dcb *dcb;
+ struct lan966x_db *db;
+ struct page *page;
+ int i, j;
+ int size;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+
+ rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
+ if (!rx->dcbs)
+ return -ENOMEM;
+
+ rx->last_entry = rx->dcbs;
+ rx->db_index = 0;
+ rx->dcb_index = 0;
+
+ /* Now for each dcb allocate the dbs */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &rx->dcbs[i];
+ dcb->info = 0;
+
+ /* For each db allocate a page and map it to the DB dataptr. */
+ for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (!page)
+ return -ENOMEM;
+
+ db->status = 0;
+ rx->page[i][j] = page;
+ }
+
+ lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
+ }
+
+ return 0;
+}
+
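A note on the address arithmetic in the allocation above: the DCBs live in one dma_alloc_coherent() region, so the bus address of entry i is the base DMA handle plus i descriptor sizes, and that value is what the previous tail's nextptr receives when a new entry is linked in. A minimal sketch of the same ring-linking arithmetic, with illustrative names and a made-up bus address:

#include <stdint.h>
#include <stdio.h>

struct dcb {
	uint64_t nextptr;
	uint64_t info;
	uint64_t dataptr;
	uint64_t status;
};

#define DCB_MAX			512
#define DCB_INVALID_DATA	0x1

/* Link entry i behind the current tail: the previous entry's nextptr
 * receives the bus address of entry i, computed from the base DMA
 * handle, and the new tail is left unterminated.
 */
static void ring_link(struct dcb *dcbs, uint64_t dma_base, int i)
{
	uint64_t nextptr = dma_base + (uint64_t)i * sizeof(struct dcb);

	dcbs[i].nextptr = DCB_INVALID_DATA;
	if (i > 0)
		dcbs[i - 1].nextptr = nextptr;
}

int main(void)
{
	static struct dcb dcbs[DCB_MAX];
	uint64_t dma_base = 0x80000000ULL;	/* made-up bus address */
	int i;

	for (i = 0; i < DCB_MAX; i++)
		ring_link(dcbs, dma_base, i);
	printf("dcb[0].nextptr = %#llx\n",
	       (unsigned long long)dcbs[0].nextptr);
	return 0;
}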
+static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 size;
+
+	/* Now it is possible to clean up the dcbs */
+	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
+}
+
+static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written first and only then can the channel be activated
+ */
+ lan_wr(lower_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP(rx->channel_id));
+ lan_wr(upper_32_bits((u64)rx->dma), lan966x,
+ FDMA_DCB_LLP1(rx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(rx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
+ FDMA_PORT_CTRL_XTR_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(rx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(rx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+}
+
+static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
+ struct lan966x_tx_dcb *dcb)
+{
+ dcb->nextptr = FDMA_DCB_INVALID_DATA;
+ dcb->info = 0;
+}
+
+static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ struct lan966x_tx_dcb *dcb;
+ struct lan966x_db *db;
+ int size;
+ int i, j;
+
+ tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
+ GFP_KERNEL);
+ if (!tx->dcbs_buf)
+ return -ENOMEM;
+
+ /* calculate how many pages are needed to allocate the dcbs */
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
+ if (!tx->dcbs)
+ goto out;
+
+ /* Now for each dcb allocate the db */
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb = &tx->dcbs[i];
+
+ for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
+ db = &dcb->db[j];
+ db->dataptr = 0;
+ db->status = 0;
+ }
+
+ lan966x_fdma_tx_add_dcb(tx, dcb);
+ }
+
+ return 0;
+
+out:
+ kfree(tx->dcbs_buf);
+ return -ENOMEM;
+}
+
+static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ int size;
+
+ kfree(tx->dcbs_buf);
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
+}
+
+static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 mask;
+
+	/* When activating a channel, the address of the first DCB must be
+	 * written first and only then can the channel be activated
+ */
+ lan_wr(lower_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP(tx->channel_id));
+ lan_wr(upper_32_bits((u64)tx->dma), lan966x,
+ FDMA_DCB_LLP1(tx->channel_id));
+
+ lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
+ FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
+ FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
+ FDMA_CH_CFG_CH_MEM_SET(1),
+ lan966x, FDMA_CH_CFG(tx->channel_id));
+
+ /* Start fdma */
+ lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
+ FDMA_PORT_CTRL_INJ_STOP,
+ lan966x, FDMA_PORT_CTRL(0));
+
+ /* Enable interrupts */
+ mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
+ mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
+ mask |= BIT(tx->channel_id);
+ lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
+ FDMA_INTR_DB_ENA_INTR_DB_ENA,
+ lan966x, FDMA_INTR_DB_ENA);
+
+ /* Activate the channel */
+ lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
+ FDMA_CH_ACTIVATE_CH_ACTIVATE,
+ lan966x, FDMA_CH_ACTIVATE);
+}
+
+static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+ u32 val;
+
+ /* Disable the channel */
+ lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
+ FDMA_CH_DISABLE_CH_DISABLE,
+ lan966x, FDMA_CH_DISABLE);
+
+ readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
+ val, !(val & BIT(tx->channel_id)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
+ FDMA_CH_DB_DISCARD_DB_DISCARD,
+ lan966x, FDMA_CH_DB_DISCARD);
+
+ tx->activated = false;
+}
+
+static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
+{
+ struct lan966x *lan966x = tx->lan966x;
+
+ /* Write the registers to reload the channel */
+ lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
+ FDMA_CH_RELOAD_CH_RELOAD,
+ lan966x, FDMA_CH_RELOAD);
+}
+
+static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ if (netif_queue_stopped(port->dev))
+ netif_wake_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ netif_stop_queue(port->dev);
+ }
+}
+
+static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
+{
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ struct lan966x_db *db;
+ unsigned long flags;
+ bool clear = false;
+ int i;
+
+ spin_lock_irqsave(&lan966x->tx_lock, flags);
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+
+ if (!dcb_buf->used)
+ continue;
+
+ db = &tx->dcbs[i].db[0];
+ if (!(db->status & FDMA_DCB_STATUS_DONE))
+ continue;
+
+ dcb_buf->dev->stats.tx_packets++;
+ dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+
+ dcb_buf->used = false;
+ dma_unmap_single(lan966x->dev,
+ dcb_buf->dma_addr,
+ dcb_buf->skb->len,
+ DMA_TO_DEVICE);
+ if (!dcb_buf->ptp)
+ dev_kfree_skb_any(dcb_buf->skb);
+
+ clear = true;
+ }
+
+ if (clear)
+ lan966x_fdma_wakeup_netdev(lan966x);
+
+ spin_unlock_irqrestore(&lan966x->tx_lock, flags);
+}
+
+static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
+{
+ struct lan966x_db *db;
+
+ /* Check if there is any data */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
+ return false;
+
+ return true;
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
+{
+ struct lan966x *lan966x = rx->lan966x;
+ u64 src_port, timestamp;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+
+ /* Get the received frame and unmap it */
+ db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+ page = rx->page[rx->dcb_index][rx->db_index];
+ skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
+ if (unlikely(!skb))
+ goto unmap_page;
+
+ dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+ skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
+
+ lan966x_ifh_get_src_port(skb->data, &src_port);
+ lan966x_ifh_get_timestamp(skb->data, &timestamp);
+
+ WARN_ON(src_port >= lan966x->num_phys_ports);
+
+ skb->dev = lan966x->ports[src_port]->dev;
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (lan966x->bridge_mask & BIT(src_port)) {
+ skb->offload_fwd_mark = 1;
+
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+unmap_page:
+ dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
+ FDMA_DCB_STATUS_BLOCKL(db->status),
+ DMA_FROM_DEVICE);
+ __free_pages(page, rx->page_order);
+
+ return NULL;
+}
+
+static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
+ struct lan966x_rx *rx = &lan966x->rx;
+ int dcb_reload = rx->dcb_index;
+ struct lan966x_rx_dcb *old_dcb;
+ struct lan966x_db *db;
+ struct sk_buff *skb;
+ struct page *page;
+ int counter = 0;
+ u64 nextptr;
+
+ lan966x_fdma_tx_clear_buf(lan966x, weight);
+
+	/* Get all received skbs */
+ while (counter < weight) {
+ if (!lan966x_fdma_rx_more_frames(rx))
+ break;
+
+ skb = lan966x_fdma_rx_get_frame(rx);
+
+ rx->page[rx->dcb_index][rx->db_index] = NULL;
+ rx->dcb_index++;
+ rx->dcb_index &= FDMA_DCB_MAX - 1;
+
+ if (!skb)
+ break;
+
+ napi_gro_receive(&lan966x->napi, skb);
+ counter++;
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != rx->dcb_index) {
+ db = &rx->dcbs[dcb_reload].db[rx->db_index];
+ page = lan966x_fdma_rx_alloc_page(rx, db);
+ if (unlikely(!page))
+ break;
+ rx->page[dcb_reload][rx->db_index] = page;
+
+ old_dcb = &rx->dcbs[dcb_reload];
+ dcb_reload++;
+ dcb_reload &= FDMA_DCB_MAX - 1;
+
+ nextptr = rx->dma + ((unsigned long)old_dcb -
+ (unsigned long)rx->dcbs);
+ lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
+ lan966x_fdma_rx_reload(rx);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
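The poll loop advances the ring with dcb_index &= FDMA_DCB_MAX - 1, the usual power-of-two wrap: masking with size minus one is equivalent to a modulo only when the size is a power of two (512 here). A tiny sketch of the invariant:

#include <assert.h>

#define RING_SIZE 512	/* must be a power of two for the mask trick */

/* idx + 1 modulo RING_SIZE, without a division */
static int ring_next(int idx)
{
	return (idx + 1) & (RING_SIZE - 1);
}

int main(void)
{
	/* The mask behaves like a modulo only for power-of-two sizes */
	assert((RING_SIZE & (RING_SIZE - 1)) == 0);
	assert(ring_next(0) == 1);
	assert(ring_next(RING_SIZE - 1) == 0);	/* wrap-around */
	return 0;
}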
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ u32 db, err, err_type;
+
+ db = lan_rd(lan966x, FDMA_INTR_DB);
+ err = lan_rd(lan966x, FDMA_INTR_ERR);
+
+ if (db) {
+ lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
+ lan_wr(db, lan966x, FDMA_INTR_DB);
+
+ napi_schedule(&lan966x->napi);
+ }
+
+ if (err) {
+ err_type = lan_rd(lan966x, FDMA_ERRORS);
+
+ WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);
+
+ lan_wr(err, lan966x, FDMA_INTR_ERR);
+ lan_wr(err_type, lan966x, FDMA_ERRORS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
+{
+ struct lan966x_tx_dcb_buf *dcb_buf;
+ int i;
+
+ for (i = 0; i < FDMA_DCB_MAX; ++i) {
+ dcb_buf = &tx->dcbs_buf[i];
+ if (!dcb_buf->used && i != tx->last_in_use)
+ return i;
+ }
+
+ return -1;
+}
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_tx_dcb_buf *next_dcb_buf;
+ struct lan966x_tx_dcb *next_dcb, *dcb;
+ struct lan966x_tx *tx = &lan966x->tx;
+ struct lan966x_db *next_db;
+ int needed_headroom;
+ int needed_tailroom;
+ dma_addr_t dma_addr;
+ int next_to_use;
+ int err;
+
+ /* Get next index */
+ next_to_use = lan966x_fdma_get_next_dcb(tx);
+ if (next_to_use < 0) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* skb processing */
+ needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+ skb_push(skb, IFH_LEN * sizeof(u32));
+ memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
+ skb_put(skb, 4);
+
+ dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lan966x->dev, dma_addr)) {
+ dev->stats.tx_dropped++;
+ err = NETDEV_TX_OK;
+ goto release;
+ }
+
+ /* Setup next dcb */
+ next_dcb = &tx->dcbs[next_to_use];
+ next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+ next_db = &next_dcb->db[0];
+ next_db->dataptr = dma_addr;
+ next_db->status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_INTR |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len);
+
+ /* Fill up the buffer */
+ next_dcb_buf = &tx->dcbs_buf[next_to_use];
+ next_dcb_buf->skb = skb;
+ next_dcb_buf->dma_addr = dma_addr;
+ next_dcb_buf->used = true;
+ next_dcb_buf->ptp = false;
+ next_dcb_buf->dev = dev;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ next_dcb_buf->ptp = true;
+
+ if (likely(lan966x->tx.activated)) {
+		/* Connect the current dcb to the next dcb */
+ dcb = &tx->dcbs[tx->last_in_use];
+ dcb->nextptr = tx->dma + (next_to_use *
+ sizeof(struct lan966x_tx_dcb));
+
+ lan966x_fdma_tx_reload(tx);
+ } else {
+		/* Because this is the first use, just activate the channel */
+ lan966x->tx.activated = true;
+ lan966x_fdma_tx_activate(tx);
+ }
+
+	/* Move to the next dcb because this one is now the last in use */
+ tx->last_in_use = next_to_use;
+
+ return NETDEV_TX_OK;
+
+release:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
+{
+ int max_mtu = 0;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; ++i) {
+ int mtu;
+
+ if (!lan966x->ports[i])
+ continue;
+
+ mtu = lan966x->ports[i]->dev->mtu;
+ if (mtu > max_mtu)
+ max_mtu = mtu;
+ }
+
+ return max_mtu;
+}
+
+static int lan966x_qsys_sw_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
+}
+
+static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
+{
+ void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
+ dma_addr_t rx_dma, tx_dma;
+ u32 size;
+ int err;
+
+	/* Store these so they can be freed later */
+ rx_dma = lan966x->rx.dma;
+ tx_dma = lan966x->tx.dma;
+ rx_dcbs = lan966x->rx.dcbs;
+ tx_dcbs = lan966x->tx.dcbs;
+ tx_dcbs_buf = lan966x->tx.dcbs_buf;
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+ lan966x_fdma_stop_netdev(lan966x);
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ goto restore;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
+
+ lan966x_fdma_tx_disable(&lan966x->tx);
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err)
+ goto restore_tx;
+
+ size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
+ size = ALIGN(size, PAGE_SIZE);
+ dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);
+
+ kfree(tx_dcbs_buf);
+
+ lan966x_fdma_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+restore:
+ lan966x->rx.dma = rx_dma;
+ lan966x->tx.dma = tx_dma;
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+restore_tx:
+ lan966x->rx.dcbs = rx_dcbs;
+ lan966x->tx.dcbs = tx_dcbs;
+ lan966x->tx.dcbs_buf = tx_dcbs_buf;
+
+ return err;
+}
+
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+ int max_mtu;
+ int err;
+ u32 val;
+
+ max_mtu = lan966x_fdma_get_max_mtu(lan966x);
+ max_mtu += IFH_LEN * sizeof(u32);
+
+ if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
+ lan966x->rx.page_order)
+ return 0;
+
+ /* Disable the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Flush the CPU queues */
+ readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
+ val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ /* Add a sleep in case there are frames between the queues and the CPU
+ * port
+ */
+ usleep_range(1000, 2000);
+
+ err = lan966x_fdma_reload(lan966x, max_mtu);
+
+ /* Enable back the CPU port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ return err;
+}
+
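On the MTU path above, the receive page order is derived as round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1, i.e. the page count minus one. Since an order-n allocation spans 2^n pages and 2^(p-1) >= p for every p >= 1, the resulting buffer always covers the MTU (with overshoot once more than two pages are needed). A standalone sketch of that arithmetic, assuming a 4 KiB page and an illustrative jumbo MTU:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size */

/* The driver's formula: pages needed for the buffer, minus one */
static unsigned long mtu_to_order(unsigned long mtu)
{
	return (mtu + PAGE_SIZE - 1) / PAGE_SIZE - 1;
}

int main(void)
{
	unsigned long mtu = 9000;	/* illustrative jumbo MTU */
	unsigned long order = mtu_to_order(mtu);

	/* An order-n allocation spans 2^n pages, so coverage holds as
	 * long as 2^order >= pages_needed; "page count minus one"
	 * always satisfies that.
	 */
	assert((PAGE_SIZE << order) >= mtu);
	printf("mtu %lu -> order %lu (%lu bytes)\n",
	       mtu, order, PAGE_SIZE << order);
	return 0;
}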
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev)
+ return;
+
+ lan966x->fdma_ndev = dev;
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&lan966x->napi);
+}
+
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
+{
+ if (lan966x->fdma_ndev == dev) {
+ netif_napi_del(&lan966x->napi);
+ lan966x->fdma_ndev = NULL;
+ }
+}
+
+int lan966x_fdma_init(struct lan966x *lan966x)
+{
+ int err;
+
+ if (!lan966x->fdma)
+ return 0;
+
+ lan966x->rx.lan966x = lan966x;
+ lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+ lan966x->tx.lan966x = lan966x;
+ lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
+ lan966x->tx.last_in_use = -1;
+
+ err = lan966x_fdma_rx_alloc(&lan966x->rx);
+ if (err)
+ return err;
+
+ err = lan966x_fdma_tx_alloc(&lan966x->tx);
+ if (err) {
+ lan966x_fdma_rx_free(&lan966x->rx);
+ return err;
+ }
+
+ lan966x_fdma_rx_start(&lan966x->rx);
+
+ return 0;
+}
+
+void lan966x_fdma_deinit(struct lan966x *lan966x)
+{
+ if (!lan966x->fdma)
+ return;
+
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_tx_disable(&lan966x->tx);
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+
+ lan966x_fdma_rx_free_pages(&lan966x->rx);
+ lan966x_fdma_rx_free(&lan966x->rx);
+ lan966x_fdma_tx_free(&lan966x->tx);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 95830e3e2b1f..6579c7062aa5 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -24,9 +24,6 @@
#define XTR_NOT_READY 0x07000080U
#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
-#define READL_SLEEP_US 10
-#define READL_TIMEOUT_US 100000000
-
#define IO_RANGES 2
static const struct of_device_id lan966x_match[] = {
@@ -43,6 +40,7 @@ struct lan966x_main_io_resource {
static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
{ TARGET_ORG, 0, 1 }, /* 0xe2000000 */
{ TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
{ TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
@@ -343,7 +341,10 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock(&lan966x->tx_lock);
- err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ if (port->lan966x->fdma)
+ err = lan966x_fdma_xmit(skb, ifh, dev);
+ else
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
spin_unlock(&lan966x->tx_lock);
return err;
@@ -353,12 +354,24 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
+ int old_mtu = dev->mtu;
+ int err;
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
dev->mtu = new_mtu;
- return 0;
+ if (!lan966x->fdma)
+ return 0;
+
+ err = lan966x_fdma_change_mtu(lan966x);
+ if (err) {
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = old_mtu;
+ }
+
+ return err;
}
static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
@@ -432,8 +445,7 @@ bool lan966x_netdevice_check(const struct net_device *dev)
return dev->netdev_ops == &lan966x_port_netdev_ops;
}
-static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
- struct sk_buff *skb)
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
{
u32 val;
@@ -520,7 +532,7 @@ static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
}
}
-static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
@@ -532,7 +544,7 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len)
IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
-static void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
{
packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
@@ -652,6 +664,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
if (port->dev)
unregister_netdev(port->dev);
+ if (lan966x->fdma && lan966x->fdma_ndev == port->dev)
+ lan966x_fdma_netdev_deinit(lan966x, port->dev);
+
if (port->phylink) {
rtnl_lock();
lan966x_port_stop(port->dev);
@@ -672,8 +687,14 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
lan966x->ana_irq = -ENXIO;
}
+ if (lan966x->fdma)
+ devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x);
+
if (lan966x->ptp_irq)
devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
+
+ if (lan966x->ptp_ext_irq)
+ devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x);
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
@@ -802,12 +823,12 @@ static void lan966x_init(struct lan966x *lan966x)
/* Do byte-swap and expect status after last data word
 * Extraction: Mode: manual extraction | Byte_swap
*/
- lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_XTR_GRP_CFG(0));
/* Injection: Mode: manual injection | Byte_swap */
- lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) |
QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_INJ_GRP_CFG(0));
@@ -1029,6 +1050,31 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x->ptp = 1;
}
+ lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma");
+ if (lan966x->fdma_irq > 0) {
+ err = devm_request_irq(&pdev->dev, lan966x->fdma_irq,
+ lan966x_fdma_irq_handler, 0,
+ "fdma irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq");
+
+ lan966x->fdma = true;
+ }
+
+ if (lan966x->ptp) {
+ lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext");
+ if (lan966x->ptp_ext_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev,
+ lan966x->ptp_ext_irq, NULL,
+ lan966x_ptp_ext_irq_handler,
+ IRQF_ONESHOT,
+ "ptp-ext irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "Unable to use ptp-ext irq");
+ }
+ }
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1067,8 +1113,15 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
goto cleanup_fdb;
+ err = lan966x_fdma_init(lan966x);
+ if (err)
+ goto cleanup_ptp;
+
return 0;
+cleanup_ptp:
+ lan966x_ptp_deinit(lan966x);
+
cleanup_fdb:
lan966x_fdb_deinit(lan966x);
@@ -1088,6 +1141,7 @@ static int lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
+ lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
cancel_delayed_work_sync(&lan966x->stats_work);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index ae282da1da74..3b86ddddc756 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -17,6 +17,9 @@
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
#define LAN966X_BUFFER_CELL_SZ 64
#define LAN966X_BUFFER_MEMORY (160 * 1024)
#define LAN966X_BUFFER_MIN_SZ 60
@@ -53,11 +56,28 @@
#define LAN966X_PHC_COUNT 3
#define LAN966X_PHC_PORT 0
+#define LAN966X_PHC_PINS_NUM 7
#define IFH_REW_OP_NOOP 0x0
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define FDMA_RX_DCB_MAX_DBS 1
+#define FDMA_TX_DCB_MAX_DBS 1
+#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0))
+
+#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0))
+#define FDMA_DCB_STATUS_SOF BIT(16)
+#define FDMA_DCB_STATUS_EOF BIT(17)
+#define FDMA_DCB_STATUS_INTR BIT(18)
+#define FDMA_DCB_STATUS_DONE BIT(19)
+#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define FDMA_DCB_INVALID_DATA 0x1
+
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
+#define FDMA_DCB_MAX 512
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -73,6 +93,83 @@ enum macaccess_entry_type {
struct lan966x_port;
+struct lan966x_db {
+ u64 dataptr;
+ u64 status;
+};
+
+struct lan966x_rx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_RX_DCB_MAX_DBS];
+};
+
+struct lan966x_tx_dcb {
+ u64 nextptr;
+ u64 info;
+ struct lan966x_db db[FDMA_TX_DCB_MAX_DBS];
+};
+
+struct lan966x_rx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the array of hardware dcbs. */
+ struct lan966x_rx_dcb *dcbs;
+
+ /* Pointer to the last address in the dcbs. */
+ struct lan966x_rx_dcb *last_entry;
+
+ /* For each DB, there is a page */
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+
+	/* Represents the db_index; it can have a value between 0 and
+	 * FDMA_RX_DCB_MAX_DBS. Once it reaches FDMA_RX_DCB_MAX_DBS, the
+	 * DCB can be reused.
+	 */
+ int db_index;
+
+ /* Represents the index in the dcbs. It has a value between 0 and
+ * FDMA_DCB_MAX
+ */
+ int dcb_index;
+
+ /* Represents the dma address to the dcbs array */
+ dma_addr_t dma;
+
+ /* Represents the page order that is used to allocate the pages for the
+	 * RX buffers. This value is calculated based on the max MTU of the
+	 * devices.
+ */
+ u8 page_order;
+
+ u8 channel_id;
+};
+
+struct lan966x_tx_dcb_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
+};
+
+struct lan966x_tx {
+ struct lan966x *lan966x;
+
+ /* Pointer to the dcb list */
+ struct lan966x_tx_dcb *dcbs;
+ u16 last_in_use;
+
+ /* Represents the DMA address to the first entry of the dcb entries. */
+ dma_addr_t dma;
+
+ /* Array of dcbs that are given to the HW */
+ struct lan966x_tx_dcb_buf *dcbs_buf;
+
+ u8 channel_id;
+
+ bool activated;
+};
+
struct lan966x_stat_layout {
u32 offset;
char name[ETH_GSTRING_LEN];
@@ -81,6 +178,7 @@ struct lan966x_stat_layout {
struct lan966x_phc {
struct ptp_clock *clock;
struct ptp_clock_info info;
+ struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM];
struct hwtstamp_config hwtstamp_config;
struct lan966x *lan966x;
u8 index;
@@ -134,6 +232,8 @@ struct lan966x {
int xtr_irq;
int ana_irq;
int ptp_irq;
+ int fdma_irq;
+ int ptp_ext_irq;
/* workqueue for fdb */
struct workqueue_struct *fdb_work;
@@ -150,6 +250,13 @@ struct lan966x {
spinlock_t ptp_ts_id_lock; /* lock for ts_id */
struct mutex ptp_lock; /* lock for ptp interface state */
u16 ptp_skbs;
+
+ /* fdma */
+ bool fdma;
+ struct net_device *fdma_ndev;
+ struct lan966x_rx rx;
+ struct lan966x_tx tx;
+ struct napi_struct napi;
};
struct lan966x_port_config {
@@ -195,6 +302,11 @@ bool lan966x_netdevice_check(const struct net_device *dev);
void lan966x_register_notifier_blocks(void);
void lan966x_unregister_notifier_blocks(void);
+bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
+
+void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
+void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
+
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
int lan966x_stats_init(struct lan966x *lan966x);
@@ -283,6 +395,15 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
struct sk_buff *skb);
irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+
+int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
+int lan966x_fdma_change_mtu(struct lan966x *lan966x);
+void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
+void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
+int lan966x_fdma_init(struct lan966x *lan966x);
+void lan966x_fdma_deinit(struct lan966x *lan966x);
+irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 237555845a52..f141644e4372 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -393,6 +393,9 @@ void lan966x_port_init(struct lan966x_port *port)
lan966x_port_config_down(port);
+ if (lan966x->fdma)
+ lan966x_fdma_netdev_init(lan966x, port->dev);
+
if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
return;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 0a1041da4384..3a621c5165bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -16,7 +16,7 @@
*/
#define LAN966X_1PPB_FORMAT 3480517749LL
-#define TOD_ACC_PIN 0x5
+#define TOD_ACC_PIN 0x7
enum {
PTP_PIN_ACTION_IDLE = 0,
@@ -321,6 +321,63 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
return IRQ_HANDLED;
}
+irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ struct lan966x_phc *phc;
+ unsigned long flags;
+ u64 time = 0;
+ time64_t s;
+ int pin, i;
+ s64 ns;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR)))
+ return IRQ_NONE;
+
+ /* Go through all domains and see which pin generated the interrupt */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ struct ptp_clock_event ptp_event = {0};
+
+ phc = &lan966x->phc[i];
+ pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0);
+ if (pin == -1)
+ continue;
+
+ if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin)))
+ continue;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+		/* Re-arm to get the next interrupt; writing a 1 clears
+		 * the bit.
+		 */
+ lan_wr(BIT(pin), lan966x, PTP_PIN_INTR);
+
+ /* Get current time */
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(pin));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+ time = ktime_set(s, ns);
+
+ ptp_event.index = pin;
+ ptp_event.timestamp = time;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(phc->clock, &ptp_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
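The (ns & 0xFFFFFFF0) == 0x3FFFFFF0 block in the handler above deals with how the TOD registers latch values near a second rollover: readings of the form 0x3FFFFFFx appear to encode the last 16 ns of the previous second, hence the decrement of s and the ns += 999999984 rebase. A self-contained sketch of that decode, under that reading of the hardware behaviour:

#include <assert.h>
#include <stdint.h>

/* Decode a latched (sec, ns) pair: nanosecond readings of the form
 * 0x3FFFFFFx are taken to encode the last 16 ns of the previous second.
 */
static void tod_fixup(int64_t *s, int64_t *ns)
{
	if ((*ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
		(*s)--;
		*ns &= 0xf;
		*ns += 999999984;	/* 1e9 - 16 */
	}
}

int main(void)
{
	int64_t s = 100, ns = 0x3FFFFFF2;

	tod_fixup(&s, &ns);
	assert(s == 99 && ns == 999999986);
	return 0;
}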
static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
@@ -493,6 +550,207 @@ static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
+static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct ptp_clock_info *info;
+ int i;
+
+ /* Currently support only 1 channel */
+ if (chan != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ case PTP_PF_EXTTS:
+ break;
+ default:
+ return -1;
+ }
+
+	/* The PTP pins are shared by all the PHCs, so it is required to
+	 * check whether the pin is already connected to another PHC. A pin
+	 * is considered connected to a PHC if it already has a function
+	 * assigned on that PHC.
+	 */
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ info = &lan966x->phc[i].info;
+
+		/* Ignore the check against ourselves */
+ if (ptp == info)
+ continue;
+
+ if (info->pin_config[pin].func == PTP_PF_PEROUT ||
+ info->pin_config[pin].func == PTP_PF_EXTTS)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ struct timespec64 ts_phase, ts_period;
+ unsigned long flags;
+ s64 wf_high, wf_low;
+ bool pps = false;
+ int pin;
+
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ if (!on) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ if (rq->perout.period.sec == 1 &&
+ rq->perout.period.nsec == 0)
+ pps = true;
+
+ if (rq->perout.flags & PTP_PEROUT_PHASE) {
+ ts_phase.tv_sec = rq->perout.phase.sec;
+ ts_phase.tv_nsec = rq->perout.phase.nsec;
+ } else {
+ ts_phase.tv_sec = rq->perout.start.sec;
+ ts_phase.tv_nsec = rq->perout.start.nsec;
+ }
+
+ if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+ dev_warn(lan966x->dev,
+ "Absolute time not supported!\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+
+ wf_high = timespec64_to_ns(&ts_on);
+ } else {
+ wf_high = 5000;
+ }
+
+ if (pps) {
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(3),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ return 0;
+ }
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ wf_low = timespec64_to_ns(&ts_period);
+ wf_low -= wf_high;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low),
+ lan966x, PTP_WF_LOW_PERIOD(pin));
+ lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high),
+ lan966x, PTP_WF_HIGH_PERIOD(pin));
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(pin));
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
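The duty-cycle handling above reduces to two register values: the high phase wf_high is the requested on-time (or a default 5 us pulse), and the low phase wf_low is the remainder of the period. A sketch of the arithmetic, assuming the kernel timespec64 helpers (perout_waveform is an illustrative name, not driver code):

	static void perout_waveform(const struct timespec64 *period,
				    const struct timespec64 *on,
				    s64 *wf_high, s64 *wf_low)
	{
		/* on == NULL models a request without PTP_PEROUT_DUTY_CYCLE */
		*wf_high = on ? timespec64_to_ns(on) : 5000;
		*wf_low = timespec64_to_ns(period) - *wf_high;
	}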
+
+static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ int pin;
+ u32 val;
+
+ if (lan966x->ptp_ext_irq <= 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
+ if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_SYNC_SET(on ? 3 : 0) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SELECT_SET(pin),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_SYNC |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SELECT,
+ lan966x, PTP_PIN_CFG(pin));
+
+ val = lan_rd(lan966x, PTP_PIN_INTR_ENA);
+ if (on)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+ lan_wr(val, lan966x, PTP_PIN_INTR_ENA);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan966x_ptp_perout(ptp, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan966x_ptp_extts(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct ptp_clock_info lan966x_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "lan966x ptp",
@@ -501,6 +759,11 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.settime64 = lan966x_ptp_settime64,
.adjtime = lan966x_ptp_adjtime,
.adjfine = lan966x_ptp_adjfine,
+ .verify = lan966x_ptp_verify,
+ .enable = lan966x_ptp_enable,
+ .n_per_out = LAN966X_PHC_PINS_NUM,
+ .n_ext_ts = LAN966X_PHC_PINS_NUM,
+ .n_pins = LAN966X_PHC_PINS_NUM,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
@@ -508,8 +771,19 @@ static int lan966x_ptp_phc_init(struct lan966x *lan966x,
struct ptp_clock_info *clock_info)
{
struct lan966x_phc *phc = &lan966x->phc[index];
+ struct ptp_pin_desc *p;
+ int i;
+
+ for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) {
+ p = &phc->pins[i];
+
+ snprintf(p->name, sizeof(p->name), "pin%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
phc->info = *clock_info;
+ phc->info.pin_config = &phc->pins[0];
phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
if (IS_ERR(phc->clock))
return PTR_ERR(phc->clock);
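With .verify/.enable and the pin descriptors wired up, the new pins become reachable through the standard PTP character device. A hedged userspace sketch (device path and pin index are illustrative; the pin must first be assigned PTP_PF_EXTTS, e.g. via the PTP_PIN_SETFUNC ioctl or the testptp tool):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_extts_request req = {
			.index = 0,
			.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
		};
		struct ptp_extts_event ev;
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0 || ioctl(fd, PTP_EXTTS_REQUEST, &req))
			return 1;

		/* each external event is delivered as one extts record */
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
			printf("extts[%u] %lld.%09u\n", ev.index,
			       (long long)ev.t.sec, ev.t.nsec);
		return 0;
	}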
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 0c0b3e173d53..8265ad89f0bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -17,6 +17,7 @@ enum lan966x_target {
TARGET_CHIP_TOP = 5,
TARGET_CPU = 6,
TARGET_DEV = 13,
+ TARGET_FDMA = 21,
TARGET_GCB = 27,
TARGET_ORG = 36,
TARGET_PTP = 41,
@@ -578,6 +579,129 @@ enum lan966x_target {
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+ FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+ FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+ FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+ FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+ FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+ FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/* FDMA:FDMA:FDMA_CH_DB_DISCARD */
+#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4)
+
+#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\
+ FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\
+ FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x)
+
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/* FDMA:FDMA:FDMA_CH_ACTIVE */
+#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT BIT(3)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+ FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+ FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+ FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+ FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+ FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+ FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0)
+#define PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_INTR_PTP, x)
+
+/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0)
+#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\
+ FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x)
+#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\
+ FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x)
+
/* PTP:PTP_CFG:PTP_DOM_CFG */
#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
@@ -611,6 +735,12 @@ enum lan966x_target {
#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21)
+#define PTP_PIN_CFG_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x)
+#define PTP_PIN_CFG_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x)
+
#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
#define PTP_PIN_CFG_PIN_DOM_SET(x)\
FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
@@ -638,6 +768,22 @@ enum lan966x_target {
#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
+/* PTP:PTP_PINS:WF_HIGH_PERIOD */
+#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 24, 0, 1, 4)
+
+#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0)
+#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0))
+
+/* PTP:PTP_PINS:WF_LOW_PERIOD */
+#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 8, 64, 28, 0, 1, 4)
+
+#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0))
+#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0)
+#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0))
+
/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index c8701ac955a8..1e74bdb215ec 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -672,12 +672,10 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix / 2;
u32 type;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
@@ -813,11 +811,9 @@ static void es0_entry_set(struct ocelot *ocelot, int ix,
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
- struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = ix;
- memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 21d2645885ce..fe5e77330f5f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -579,7 +579,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
int status;
unsigned i;
- if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
+ if (request_firmware(&fw, mgp->fw_name, dev) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
mgp->fw_name);
status = -EINVAL;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 82a22711ce45..50bca486a244 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -989,8 +989,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
No extra delay is needed with 33MHz PCI, but future 66MHz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) readl(ee_addr)
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 84d66d138c3d..78368e71ce83 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -289,7 +289,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
req_sz = sizeof(struct nfp_crypto_req_add_v6);
ipv6 = true;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 3fdaaf8ed2ba..4627715a5e32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -95,15 +95,17 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
+ u16 update = NFP_NET_VF_CFG_MB_UPD_VLAN;
+ bool is_proto_sup = true;
unsigned int vf_offset;
- u16 vlan_tci;
+ u32 vlan_tag;
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
if (err)
return err;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (!eth_type_vlan(vlan_proto))
return -EOPNOTSUPP;
if (vlan > 4095 || qos > 7) {
@@ -112,14 +114,32 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
return -EINVAL;
}
+ /* Check whether the firmware supports configuring the VLAN protocol */
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto");
+ if (err)
+ is_proto_sup = false;
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ if (!is_proto_sup)
+ return -EOPNOTSUPP;
+ update |= NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO;
+ }
+
/* Write VLAN tag to VF entry in VF config symbol */
- vlan_tci = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
+ vlan_tag = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) |
FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos);
+
+ /* A vlan_tag of 0 means the configuration should be cleared; in
+ * that case setting the TPID is meaningless when configuring the
+ * firmware.
+ */
+ if (vlan_tag && is_proto_sup)
+ vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto));
+
vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ;
- writew(vlan_tci, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
- return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_VLAN,
- "vlan");
+ return nfp_net_sriov_update(app, vf, update, "vlan");
}
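The widened field now packs the VID, QOS and (when the firmware supports it) the TPID into one 32-bit word. A sketch of the resulting layout using the masks from nfp_net_sriov.h (nfp_vf_vlan_word is an illustrative helper, not driver code):

	#include <linux/bitfield.h>

	static u32 nfp_vf_vlan_word(u16 vid, u8 qos, u16 tpid)
	{
		u32 w = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vid) |	/* bits 11:0 */
			FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos);	/* bits 15:13 */

		if (w)	/* a zero tag clears the config; TPID is then meaningless */
			w |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, tpid); /* bits 31:16 */
		return w;
	}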
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
@@ -209,7 +229,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
unsigned int vf_offset;
- u16 vlan_tci;
+ u32 vlan_tag;
u32 mac_hi;
u16 mac_lo;
u8 flags;
@@ -225,7 +245,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO);
flags = readb(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_CTRL);
- vlan_tci = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
+ vlan_tag = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN);
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vf;
@@ -233,9 +253,10 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
put_unaligned_be32(mac_hi, &ivi->mac[0]);
put_unaligned_be16(mac_lo, &ivi->mac[4]);
- ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tci);
- ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci);
-
+ ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
+ ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
+ if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"))
+ ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index 786be58a907e..7b72cc083476 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -19,6 +19,7 @@
#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
@@ -26,6 +27,7 @@
#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3)
#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
+#define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -43,6 +45,7 @@
#define NFP_NET_VF_CFG_LS_MODE_ENABLE 1
#define NFP_NET_VF_CFG_LS_MODE_DISABLE 2
#define NFP_NET_VF_CFG_VLAN 0x8
+#define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000
#define NFP_NET_VF_CFG_VLAN_QOS 0xe000
#define NFP_NET_VF_CFG_VLAN_VID 0x0fff
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 3d379e937184..ddb34bfb9bef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -13,22 +13,36 @@
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/sizes.h>
+#include <linux/stringify.h>
#ifndef NFP_SUBSYS
#define NFP_SUBSYS "nfp"
#endif
-#define nfp_err(cpp, fmt, args...) \
+#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x
+
+#define __nfp_err(cpp, fmt, args...) \
dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_warn(cpp, fmt, args...) \
+#define __nfp_warn(cpp, fmt, args...) \
dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_info(cpp, fmt, args...) \
+#define __nfp_info(cpp, fmt, args...) \
dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
-#define nfp_dbg(cpp, fmt, args...) \
+#define __nfp_dbg(cpp, fmt, args...) \
dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define __nfp_printk(level, cpp, fmt, args...) \
+ dev_printk(level, nfp_cpp_device(cpp)->parent, \
+ NFP_SUBSYS ": " fmt, ## args)
+
+#define nfp_err(cpp, fmt, args...) \
+ __nfp_err(cpp, string_format(fmt), ## args)
+#define nfp_warn(cpp, fmt, args...) \
+ __nfp_warn(cpp, string_format(fmt), ## args)
+#define nfp_info(cpp, fmt, args...) \
+ __nfp_info(cpp, string_format(fmt), ## args)
+#define nfp_dbg(cpp, fmt, args...) \
+ __nfp_dbg(cpp, string_format(fmt), ## args)
#define nfp_printk(level, cpp, fmt, args...) \
- dev_printk(level, nfp_cpp_device(cpp)->parent, \
- NFP_SUBSYS ": " fmt, ## args)
+ __nfp_printk(level, cpp, string_format(fmt), ## args)
#define PCI_64BIT_BAR_COUNT 3
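string_format() relies on the two-level expand-then-stringize idiom from <linux/stringify.h>, plus compile-time concatenation of adjacent string literals, to prefix every message with its file and line. A standalone illustration (the expansion shown assumes a hypothetical file name and line number):

	#define __stringify_1(x...)	#x
	#define __stringify(x...)	__stringify_1(x)
	#define string_format(x)	__FILE__ ":" __stringify(__LINE__) ": " x

	/* nfp_err(cpp, "bad vf %d\n", vf);
	 * ends up passing the format string:
	 *   "nfp_main.c" ":" "42" ": " "bad vf %d\n"
	 * which the compiler joins into "nfp_main.c:42: bad vf %d\n".
	 */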
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 0d9c2fe0245d..3d2098f21bb7 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -30,8 +30,7 @@ qed-$(CONFIG_QED_OOO) += qed_ooo.o
qed-$(CONFIG_QED_NVMETCP) += \
qed_nvmetcp.o \
- qed_nvmetcp_fw_funcs.o \
- qed_nvmetcp_ip_services.o
+ qed_nvmetcp_fw_funcs.o
qed-$(CONFIG_QED_RDMA) += \
qed_iwarp.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
deleted file mode 100644
index 96a2077fd315..000000000000
--- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-/*
- * Copyright 2021 Marvell. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/param.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/errno.h>
-
-#include <net/tcp.h>
-
-#include <linux/qed/qed_nvmetcp_ip_services_if.h>
-
-#define QED_IP_RESOL_TIMEOUT 4
-
-int qed_route_ipv4(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- __be32 *loc_ip, *rem_ip;
- struct rtable *rt;
- int rc = -ENXIO;
- int retry;
-
- loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr;
- rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr;
- *ndev = NULL;
- rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/);
- if (IS_ERR(rt)) {
- pr_err("lookup route failed\n");
- rc = PTR_ERR(rt);
- goto return_err;
- }
-
- neigh = dst_neigh_lookup(&rt->dst, rem_ip);
- if (!neigh) {
- rc = -ENOMEM;
- ip_rt_put(rt);
- goto return_err;
- }
-
- *ndev = rt->dst.dev;
- ip_rt_put(rt);
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- /* copy resolved MAC address */
- neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
- if (!(*loc_ip)) {
- *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE);
- local_addr->ss_family = AF_INET;
- }
-
-return_err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv4);
-
-int qed_route_ipv6(struct sockaddr_storage *local_addr,
- struct sockaddr_storage *remote_addr,
- struct sockaddr *hardware_address,
- struct net_device **ndev)
-{
- struct neighbour *neigh = NULL;
- struct dst_entry *dst;
- struct flowi6 fl6;
- int rc = -ENXIO;
- int retry;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr;
- fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst || dst->error) {
- if (dst) {
- dst_release(dst);
- pr_err("lookup route failed %d\n", dst->error);
- }
-
- goto out;
- }
-
- neigh = dst_neigh_lookup(dst, &fl6.daddr);
- if (neigh) {
- *ndev = ip6_dst_idev(dst)->dev;
-
- /* If not resolved, kick-off state machine towards resolution */
- if (!(neigh->nud_state & NUD_VALID))
- neigh_event_send(neigh, NULL);
-
- /* query neighbor until resolved or timeout */
- retry = QED_IP_RESOL_TIMEOUT;
- while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
- msleep(1000);
- retry--;
- }
-
- if (neigh->nud_state & NUD_VALID) {
- neigh_ha_snapshot((u8 *)hardware_address->sa_data,
- neigh, *ndev);
- hardware_address->sa_family = (*ndev)->type;
- rc = 0;
- }
-
- neigh_release(neigh);
-
- if (ipv6_addr_any(&fl6.saddr)) {
- if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev,
- &fl6.daddr, 0, &fl6.saddr)) {
- pr_err("Unable to find source IP address\n");
- goto out;
- }
-
- local_addr->ss_family = AF_INET6;
- ((struct sockaddr_in6 *)local_addr)->sin6_addr =
- fl6.saddr;
- }
- }
-
- dst_release(dst);
-
-out:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_route_ipv6);
-
-void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id)
-{
- if (is_vlan_dev(*ndev)) {
- *vlan_id = vlan_dev_vlan_id(*ndev);
- *ndev = vlan_dev_real_dev(*ndev);
- }
-}
-EXPORT_SYMBOL(qed_vlan_get_ndev);
-
-struct pci_dev *qed_validate_ndev(struct net_device *ndev)
-{
- struct pci_dev *pdev = NULL;
- struct net_device *upper;
-
- for_each_pci_dev(pdev) {
- if (pdev && pdev->driver &&
- !strcmp(pdev->driver->name, "qede")) {
- upper = pci_get_drvdata(pdev);
- if (upper->ifindex == ndev->ifindex)
- return pdev;
- }
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(qed_validate_ndev);
-
-__be16 qed_get_in_port(struct sockaddr_storage *sa)
-{
- return sa->ss_family == AF_INET
- ? ((struct sockaddr_in *)sa)->sin_port
- : ((struct sockaddr_in6 *)sa)->sin6_port;
-}
-EXPORT_SYMBOL(qed_get_in_port);
-
-int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
- struct socket **sock, u16 *port)
-{
- struct sockaddr_storage sa;
- int rc = 0;
-
- rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
- sock);
- if (rc) {
- pr_warn("failed to create socket: %d\n", rc);
- goto err;
- }
-
- (*sock)->sk->sk_allocation = GFP_KERNEL;
- sk_set_memalloc((*sock)->sk);
-
- rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr,
- sizeof(local_ip_addr));
-
- if (rc) {
- pr_warn("failed to bind socket: %d\n", rc);
- goto err_sock;
- }
-
- rc = kernel_getsockname(*sock, (struct sockaddr *)&sa);
- if (rc < 0) {
- pr_warn("getsockname() failed: %d\n", rc);
- goto err_sock;
- }
-
- *port = ntohs(qed_get_in_port(&sa));
-
- return 0;
-
-err_sock:
- sock_release(*sock);
- sock = NULL;
-err:
-
- return rc;
-}
-EXPORT_SYMBOL(qed_fetch_tcp_port);
-
-void qed_return_tcp_port(struct socket *sock)
-{
- if (sock && sock->sk) {
- tcp_set_state(sock->sk, TCP_CLOSE);
- sock_release(sock);
- }
-}
-EXPORT_SYMBOL(qed_return_tcp_port);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 63f0d2d0e87b..b202184eddd4 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -255,10 +255,6 @@ static inline void write_word_mode0(short ioaddr, unsigned short value)
#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
-/* Delay between EEPROM clock transitions. */
-#define eeprom_delay(ticks) \
-do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0)
-
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50d535981a35..c9ee5011803f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2256,7 +2256,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
* guaranteed to satisfy the second as we only attempt TSO if
* inner_network_header <= 208.
*/
- ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
(tcp->doff << 2u) > ip_tot_len);
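Since ip_tot_len is a 16-bit quantity, the old and new expressions store the same value; the rewrite just makes the unsigned wraparound explicit instead of relying on negation of a large constant. A compile-time check of that equivalence (EFX_TSO2_MAX_HDRLEN = 208 is assumed here for illustration):

	#include <assert.h>
	#include <stdint.h>

	#define EFX_TSO2_MAX_HDRLEN 208	/* assumed value */

	static_assert((uint16_t)-EFX_TSO2_MAX_HDRLEN ==
		      (uint16_t)(0x10000 - EFX_TSO2_MAX_HDRLEN),
		      "both spellings wrap to the same 16-bit value");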
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index daf0c00c1242..c05a83da9e44 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -28,7 +28,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct
ef100_enqueue_skb, __efx_enqueue_skb,
tx_queue, skb);
}
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 377df8b7f015..eec80b024195 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -51,28 +51,7 @@ MODULE_PARM_DESC(irq_adapt_high_thresh,
*/
static int napi_weight = 64;
-/***************
- * Housekeeping
- ***************/
-
-int efx_channel_dummy_op_int(struct efx_channel *channel)
-{
- return 0;
-}
-
-void efx_channel_dummy_op_void(struct efx_channel *channel)
-{
-}
-
-static const struct efx_channel_type efx_default_channel_type = {
- .pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
- .get_name = efx_get_channel_name,
- .copy = efx_copy_channel,
- .want_txqs = efx_default_channel_want_txqs,
- .keep_eventq = false,
- .want_pio = true,
-};
+static const struct efx_channel_type efx_default_channel_type;
/*************
* INTERRUPTS
@@ -619,6 +598,7 @@ void efx_fini_channels(struct efx_nic *efx)
/* Allocate and initialise a channel structure, copying parameters
* (but not resources) from an old channel structure.
*/
+static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
struct efx_rx_queue *rx_queue;
@@ -696,7 +676,8 @@ fail:
return rc;
}
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+static void efx_get_channel_name(struct efx_channel *channel, char *buf,
+ size_t len)
{
struct efx_nic *efx = channel->efx;
const char *type;
@@ -1004,7 +985,7 @@ int efx_set_channels(struct efx_nic *efx)
return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}
-bool efx_default_channel_want_txqs(struct efx_channel *channel)
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
return channel->channel - channel->efx->tx_channel_offset <
channel->efx->n_tx_channels;
@@ -1362,3 +1343,26 @@ void efx_fini_napi(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
+
+/***************
+ * Housekeeping
+ ***************/
+
+static int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
+ .keep_eventq = false,
+ .want_pio = true,
+};
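The bare "static const ... ;" earlier in this patch plus this completing initializer rely on C's tentative-definition rules: the structure may be referenced by the functions above it and completed at the bottom of the file, which is what lets all the ops it points to become static. A minimal standalone sketch of the idiom (names are illustrative):

	struct ops { int (*probe)(void); };

	static const struct ops my_ops;		/* tentative definition */

	static const struct ops *get_default_ops(void)
	{
		return &my_ops;		/* address usable before the initializer */
	}

	static int my_probe(void)
	{
		return 0;
	}

	static const struct ops my_ops = {	/* completing definition */
		.probe = my_probe,
	};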
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index d77ec1f77fb1..64abb99a56b8 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -32,16 +32,13 @@ void efx_fini_eventq(struct efx_channel *channel);
void efx_remove_eventq(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
int efx_init_channels(struct efx_nic *efx);
int efx_probe_channels(struct efx_nic *efx);
int efx_set_channels(struct efx_nic *efx);
-bool efx_default_channel_want_txqs(struct efx_channel *channel);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx);
-struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
@@ -50,7 +47,6 @@ void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
void efx_fini_napi(struct efx_nic *efx);
-int efx_channel_dummy_op_int(struct efx_channel *channel);
void efx_channel_dummy_op_void(struct efx_channel *channel);
#endif
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index af37c990217e..f6577e74d6e6 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -51,8 +51,8 @@ static unsigned int efx_monitor_interval = 1 * HZ;
/* Default stats update time */
#define STATS_PERIOD_MS_DEFAULT 1000
-const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
-const char *const efx_reset_type_names[] = {
+static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+static const char *const efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 0c6cc2191369..6bbdb5d2eebf 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -718,12 +718,14 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
struct ef4_rx_queue *rx_queue)
{
unsigned int bufs_in_recycle_ring, page_ring_size;
+ struct iommu_domain __maybe_unused *domain;
/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
- if (iommu_present(&pci_bus_type))
+ domain = iommu_get_domain_for_dev(&efx->pci_dev->dev);
+ if (domain && domain->type != IOMMU_DOMAIN_IDENTITY)
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
else
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 148dcd48b58d..9599123bc28d 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -16,6 +16,7 @@
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
+#include "tx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index d3fcbf930dba..ff617b1b38d3 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -73,8 +73,8 @@
* \------------------------------ Resync (always set)
*
* The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writting
- * back into shared memory, or by writting out an event.
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c75dc75e2857..318db906a154 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -612,11 +612,6 @@ extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
-extern const char *const efx_reset_type_names[];
-extern const unsigned int efx_reset_type_max;
-#define RESET_TYPE(type) \
- STRING_TABLE_LOOKUP(type, efx_reset_type)
-
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 6983799e1c05..138bca611341 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -527,7 +527,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
/* PTP "event" packet */
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
- unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
+ unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
/* There may be existing transmits on the channel that are
* waiting for this packet to trigger the doorbell write.
* We need to send the packets at this point.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 84651207a1de..bd52fb7cf486 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -197,9 +197,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp")) {
- /* Binding doc describes the propety:
+ /* Binding doc describes the property:
is required by i.MX8MP.
- is optinoal for i.MX8DXL.
+ is optional for i.MX8DXL.
*/
dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
if (IS_ERR(dwmac->intf_regmap))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4a4b3651ab3e..7c834c02e084 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3643,11 +3643,9 @@ static int stmmac_open(struct net_device *dev)
u32 chan;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
@@ -5886,11 +5884,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
ret = eth_mac_addr(ndev, addr);
if (ret)
@@ -6220,11 +6216,9 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6565,7 +6559,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
return -ENETDOWN;
if (!stmmac_xdp_is_enabled(priv))
- return -ENXIO;
+ return -EINVAL;
if (queue >= priv->plat->rx_queues_to_use ||
queue >= priv->plat->tx_queues_to_use)
@@ -6576,7 +6570,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
/* EQoS does not have per-DMA channel SW interrupt,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a5d150c5f3d8..9bc625fccca0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -88,11 +88,9 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
u32 tmp, addr, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -156,11 +154,9 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
u32 addr, tmp, value = MII_XGMAC_BUSY;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -229,11 +225,9 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
int data = 0;
u32 v;
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
+ data = pm_runtime_resume_and_get(priv->device);
+ if (data < 0)
return data;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
@@ -297,11 +291,9 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
u32 value = MII_BUSY;
u32 v;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
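All of the conversions in this series (stmmac, cpsw, am65-cpsw) replace the same open-coded pattern with pm_runtime_resume_and_get(), which, per include/linux/pm_runtime.h, bundles the error-path put_noidle and, unlike pm_runtime_get_sync(), never returns the positive "already active" value:

	static inline int pm_runtime_resume_and_get(struct device *dev)
	{
		int ret;

		ret = __pm_runtime_resume(dev, RPM_GET_PUT);
		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		return 0;
	}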
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index affcf92cd3aa..fb30bc5d56cb 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -94,6 +94,7 @@ config TI_K3_AM65_CPSW_NUSS
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select NET_DEVLINK
select TI_DAVINCI_MDIO
+ select PHYLINK
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 72acdf802258..abc1e4276cf0 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -380,11 +380,9 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
int ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
dev_err(common->dev, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(common->dev);
- }
return ret;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index d2747e9db286..b7ebd741f284 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -173,11 +173,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
if (!vid)
@@ -203,11 +201,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
if (!netif_running(ndev) || !vid)
return 0;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(common->ale, vid,
@@ -557,11 +553,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret, i;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
@@ -1214,11 +1208,9 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
if (ret < 0)
return ret;
- ret = pm_runtime_get_sync(common->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(common->dev);
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
return ret;
- }
cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
HOST_PORT_NUM, 0, 0);
@@ -2692,9 +2684,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->bus_freq = clk_get_rate(clk);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2789,11 +2780,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
common = dev_get_drvdata(dev);
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpsw_unregister_devlink(common);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index ebcc6386cc34..aa32dd905e2b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -8,10 +8,12 @@
#include <linux/pm_runtime.h>
#include <linux/time.h>
+#include <net/pkt_cls.h>
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
+#include "cpsw_ale.h"
#define AM65_CPSW_REG_CTL 0x004
#define AM65_CPSW_PN_REG_CTL 0x004
@@ -588,12 +590,190 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
return am65_cpsw_set_taprio(ndev, type_data);
}
+static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct am65_cpsw_qos *qos = &port->qos;
+ struct flow_match_eth_addrs match;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_bc_ratelimit.cookie = cls->cookie;
+ qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_mc_ratelimit.cookie = cls->cookie;
+ qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
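The broadcast branch matches ff:ff:ff:ff:ff:ff exactly, while the multicast branch requires key and mask both equal to 01:00:00:00:00:00, i.e. "I/G bit set, everything else don't-care", which is exactly how Ethernet marks any multicast destination. A small illustration of that test (matches_all_multicast is a hypothetical helper, not driver code):

	#include <linux/etherdevice.h>

	static bool matches_all_multicast(const u8 *key, const u8 *mask)
	{
		/* only the I/G (multicast) bit of the first octet is set */
		static const u8 ig_bit[ETH_ALEN] = { 0x01 };

		return ether_addr_equal(key, ig_bit) &&
		       ether_addr_equal(mask, ig_bit);
	}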
+
+static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
+{
+ struct am65_cpsw_qos *qos = &port->qos;
+
+ if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
+ qos->ale_bc_ratelimit.cookie = 0;
+ qos->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
+ }
+
+ if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
+ qos->ale_mc_ratelimit.cookie = 0;
+ qos->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return am65_cpsw_qos_configure_clsflower(port, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return am65_cpsw_qos_delete_clsflower(port, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct am65_cpsw_port *port = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(am65_cpsw_qos_block_cb_list);
+
+static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
+ am65_cpsw_qos_setup_tc_block_cb,
+ port, port, true);
+}
+
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return am65_cpsw_qos_setup_tc_block(ndev, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index e8f1b6b59e93..fb223b43b196 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -14,11 +14,19 @@ struct am65_cpsw_est {
struct tc_taprio_qopt_offload taprio;
};
+struct am65_cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct am65_cpsw_qos {
struct am65_cpsw_est *est_admin;
struct am65_cpsw_est *est_oper;
ktime_t link_down_time;
int link_speed;
+
+ struct am65_cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct am65_cpsw_ale_ratelimit ale_mc_ratelimit;
};
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 03575c017500..e6ad2e53f1cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -335,7 +335,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -756,11 +756,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
int ret;
u32 reg;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
netif_carrier_off(ndev);
@@ -968,11 +966,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
vid = cpsw->slaves[priv->emac_port].port_vlan;
@@ -1052,11 +1048,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
@@ -1090,11 +1084,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (cpsw->data.dual_emac) {
int i;
@@ -1567,11 +1559,9 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
goto clean_runtime_disable_ret;
- }
ret = cpsw_probe_dt(&cpsw->data, pdev);
if (ret)
@@ -1734,11 +1724,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int i, ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 1ef0aaef5c61..231370e9a801 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -50,6 +50,8 @@
/* ALE_AGING_TIMER */
#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+#define ALE_RATE_LIMIT_MIN_PPS 1000
+
/**
* struct ale_entry_fld - The ALE tbl entry field description
* @start_bit: field start bit
@@ -1136,6 +1138,50 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
return tmp & BITMASK(info->bits);
}
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE MC port:%d ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
static void cpsw_ale_timer(struct timer_list *t)
{
struct cpsw_ale *ale = from_timer(ale, t, timer);
@@ -1199,6 +1245,26 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ unsigned long ale_prescale;
+
+ /* Configure the broadcast and multicast rate limit:
+ *   number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT
+ * ALE_PRESCALE is 19 bits wide with a minimum value of 0x10;
+ * port.BCAST/MCAST_LIMIT is 8 bits wide.
+ *
+ * To support multi-port configurations, ALE_PRESCALE is set to a 1ms
+ * interval, which lets port.BCAST/MCAST_LIMIT be configured per port:
+ *   min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1
+ *   max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF
+ */
+ ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS;
+ writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE);
+
+ /* Enable MC/BC rate limiting globally; the actual rate limit is
+ * enabled per port via port.BCAST/MCAST_LIMIT.
+ */
+ cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1);
+
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
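Worked example of the comment above, with an assumed 125 MHz ALE functional clock: ALE_PRESCALE = 125000000 / 1000 = 125000 ticks per window, i.e. one window every 1 ms, so a port limit value N admits N packets per window = N * 1000 pps. Requests are therefore rounded down to 1000 pps granularity, as cpsw_ale_rx_ratelimit_bc/mc do:

	/* Illustrative only; mirrors the pps -> register-value rounding:
	 * 2500 pps requested -> limit value 2 -> 2000 pps actually applied.
	 */
	static unsigned int ale_port_limit(unsigned int ratelimit_pps)
	{
		return ratelimit_pps / 1000;	/* ALE_RATE_LIMIT_MIN_PPS */
	}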
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 13fe47687fde..aba4572cfa3b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -120,6 +120,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index bd4b1528cf99..0f31cb4168bb 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -273,7 +273,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
- len += CPSW_HEADROOM;
+ len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
@@ -449,11 +449,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
@@ -498,6 +496,8 @@ static void cpsw_restore(struct cpsw_priv *priv)
/* restore CBS offload */
cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ cpsw_qos_clsflower_resume(priv);
}
static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
@@ -829,11 +829,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
dev_info(priv->dev, "starting ndev. mode: %s\n",
cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
@@ -985,11 +983,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
vid = cpsw->slaves[slave_no].port_vlan;
flags = ALE_VLAN | ALE_SECURE;
@@ -1024,11 +1020,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == cpsw->data.default_vlan)
return 0;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
/* reset the return code as pm_runtime_get_sync() can return
* non zero values as well.
@@ -1407,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
cpsw->slaves[i].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1918,9 +1912,8 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2045,11 +2038,9 @@ static int cpsw_remove(struct platform_device *pdev)
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
return ret;
- }
cpsw_unregister_notifiers(cpsw);
cpsw_unregister_devlink(cpsw);
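The pm_runtime conversions repeated through this file (and in cpsw_priv.c, davinci_emac.c and davinci_mdio.c below) all rely on the same property: pm_runtime_resume_and_get() folds the usage-count drop on failure into the helper and normalizes the positive success returns of pm_runtime_get_sync() to 0. Paraphrasing its definition in include/linux/pm_runtime.h:

/* Paraphrased from include/linux/pm_runtime.h: on failure the usage
 * count is dropped inside the helper, so callers no longer need the
 * pm_runtime_put_noidle() branch removed above; on success it returns
 * 0 rather than pm_runtime_get_sync()'s occasional positive value.
 */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}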
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 8f6817f346ba..887285c57db8 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -502,6 +502,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.ale_ageout = ale_ageout;
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
ale_params.dev_id = "cpsw";
+ ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;
cpsw->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(cpsw->ale)) {
@@ -754,11 +755,9 @@ int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
return -EINVAL;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
pm_runtime_put(cpsw->dev);
@@ -970,11 +969,9 @@ static int cpsw_set_cbs(struct net_device *ndev,
return -1;
}
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
bw = qopt->enable ? qopt->idleslope : 0;
ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
@@ -1008,11 +1005,9 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
if (mqprio->mode != TC_MQPRIO_MODE_DCB)
return -EINVAL;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(cpsw->dev);
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
return ret;
- }
if (num_tc) {
for (i = 0; i < 8; i++) {
@@ -1048,6 +1043,8 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);
+
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
@@ -1058,6 +1055,9 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
case TC_SETUP_QDISC_MQPRIO:
return cpsw_set_mqprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return cpsw_qos_setup_tc_block(ndev, type_data);
+
default:
return -EOPNOTSUPP;
}
@@ -1381,3 +1381,202 @@ drop:
page_pool_recycle_direct(cpsw->page_pool[ch], page);
return ret;
}
+
+static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+ u32 port_id;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_bc_ratelimit.cookie = cls->cookie;
+ priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_mc_ratelimit.cookie = cls->cookie;
+ priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
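A user-space sketch of driving this offload, assuming an iproute2 recent enough for packet-rate policing and an illustrative interface name; the exceed action must be drop and the conform action pipe/ok to pass the cpsw_qos_clsflower_policer_validate() checks that follow:

	tc qdisc add dev eth0 clsact
	# broadcast storm prevention: 10000 pps on ingress
	tc filter add dev eth0 ingress flower skip_sw dst_mac ff:ff:ff:ff:ff:ff \
		action police pkts_rate 10000 pkts_burst 32 conform-exceed drop/pipe
	# multicast: match only the group bit of the destination MAC
	tc filter add dev eth0 ingress flower skip_sw \
		dst_mac 01:00:00:00:00:00/01:00:00:00:00:00 \
		action police pkts_rate 20000 pkts_burst 32 conform-exceed drop/pipe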
+static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return cpsw_qos_clsflower_add_policer(priv, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
+ priv->ale_bc_ratelimit.cookie = 0;
+ priv->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
+ }
+
+ if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
+ priv->ale_mc_ratelimit.cookie = 0;
+ priv->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return cpsw_qos_configure_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return cpsw_qos_delete_clsflower(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct cpsw_priv *priv = cb_priv;
+ int ret;
+
+ if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ pm_runtime_put(priv->dev);
+ return ret;
+}
+
+static LIST_HEAD(cpsw_qos_block_cb_list);
+
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
+ cpsw_qos_setup_tc_block_cb,
+ priv, priv, true);
+}
+
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (priv->ale_bc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
+ priv->ale_bc_ratelimit.rate_packet_ps);
+
+ if (priv->ale_mc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
+ priv->ale_mc_ratelimit.rate_packet_ps);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 74555970730c..fc591f5ebe18 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -364,6 +364,11 @@ struct cpsw_common {
u8 base_mac[ETH_ALEN];
};
+struct cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
struct cpsw_priv {
struct net_device *ndev;
struct device *dev;
@@ -384,6 +389,8 @@ struct cpsw_priv {
struct cpsw_common *cpsw;
int offload_fwd_mark;
u32 tx_packet_min;
+ struct cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct cpsw_ale_ratelimit ale_mc_ratelimit;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
@@ -411,7 +418,6 @@ struct __aligned(sizeof(long)) cpsw_meta_xdp {
/* The buf includes headroom compatible with both skb and xdpf */
#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
-#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
static inline int cpsw_is_xdpf_handle(void *handle)
{
@@ -462,6 +468,7 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4b6aed78d392..9d1e98db308b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1422,9 +1422,8 @@ static int emac_dev_open(struct net_device *ndev)
struct phy_device *phydev = NULL;
struct device *phy = NULL;
- ret = pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_resume_and_get(&priv->pdev->dev);
if (ret < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, ret);
return ret;
@@ -1661,9 +1660,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
u32 stats_clear_mask;
int err;
- err = pm_runtime_get_sync(&priv->pdev->dev);
+ err = pm_runtime_resume_and_get(&priv->pdev->dev);
if (err < 0) {
- pm_runtime_put_noidle(&priv->pdev->dev);
dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, err);
return &ndev->stats;
@@ -1954,9 +1952,8 @@ static int davinci_emac_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
pm_runtime_enable(&pdev->dev);
- rc = pm_runtime_get_sync(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
if (rc < 0) {
- pm_runtime_put_noidle(&pdev->dev);
dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, rc);
goto err_napi_del;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index fce2626e34fa..ea3772618043 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -134,11 +134,9 @@ static int davinci_mdio_reset(struct mii_bus *bus)
u32 phy_mask, ver;
int ret;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -232,11 +230,9 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
(phy_id << 16));
@@ -276,11 +272,9 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- ret = pm_runtime_get_sync(data->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(data->dev);
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
return ret;
- }
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
(phy_id << 16) | (phy_data & USERACCESS_DATA));
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 869e362e09c1..3f6b9dfca095 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1515,7 +1515,7 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
return PTR_ERR(lp->sdma_regs);
}
- if (of_get_property(dma_np, "little-endian", NULL)) {
+ if (of_property_read_bool(dma_np, "little-endian")) {
lp->dma_in = temac_dma_in32_le;
lp->dma_out = temac_dma_out32_le;
} else {
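of_property_read_bool() is the idiomatic presence test for a boolean DT property and avoids open-coding of_get_property() with a NULL length pointer; a minimal sketch using the node and property from the hunk above (helper name is illustrative):

/* Minimal sketch: boolean DT property test equivalent to the
 * conversion above (helper name is illustrative).
 */
#include <linux/of.h>

static bool temac_dma_is_little_endian(struct device_node *dma_np)
{
	return of_property_read_bool(dma_np, "little-endian");
}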
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 57a24f62e353..7a86ae82fcc1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
+/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@xilinx.com>.
*
- * 2007 - 2013 (c) Xilinx, Inc.
+ * Copyright (c) 2007 - 2013 Xilinx, Inc.
*/
#include <linux/module.h>
@@ -91,13 +90,7 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
-#define ALIGNMENT 4
-
-/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -124,7 +117,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +125,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +136,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -207,7 +198,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
const u16 *from_u16_ptr;
u32 align_buffer;
@@ -265,7 +256,7 @@ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -330,7 +321,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -346,8 +336,9 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -423,7 +414,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -433,23 +423,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -599,11 +591,10 @@ static void xemaclite_rx_handler(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
- unsigned int align;
u32 len;
len = ETH_FRAME_LEN + ETH_FCS_LEN;
- skb = netdev_alloc_skb(dev, len + ALIGNMENT);
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
if (!skb) {
/* Couldn't get memory. */
dev->stats.rx_dropped++;
@@ -611,16 +602,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /* A new skb should have the data halfword aligned, but this code is
- * here just in case that isn't true. Calculate how many
- * bytes we should reserve to get the data to start on a word
- * boundary
- */
- align = BUFFER_ALIGN(skb->data);
- if (align)
- skb_reserve(skb, align);
-
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
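NET_IP_ALIGN (0 or 2 depending on the architecture) makes the driver-local ALIGNMENT/BUFFER_ALIGN machinery removed above unnecessary; a minimal sketch of the canonical RX allocation pattern (helper name is illustrative):

/* Minimal sketch of the canonical RX allocation: reserving NET_IP_ALIGN
 * bytes offsets the 14-byte Ethernet header so the IP header that
 * follows it lands on a 4-byte boundary (helper name is illustrative).
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_alloc(struct net_device *dev, u32 len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}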
@@ -671,8 +653,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -682,8 +663,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the second buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -840,6 +820,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
of_address_to_resource(npp, 0, &res);
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index 1f382777aa5a..9abbdb71e629 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -271,7 +271,7 @@ static int ptp_ixp_probe(struct platform_device *pdev)
ixp_clock.master_irq = platform_get_irq(pdev, 0);
ixp_clock.slave_irq = platform_get_irq(pdev, 1);
if (IS_ERR(ixp_clock.regs) ||
- !ixp_clock.master_irq || !ixp_clock.slave_irq)
+ ixp_clock.master_irq < 0 || ixp_clock.slave_irq < 0)
return -ENXIO;
ixp_clock.caps = ptp_ixp_caps;
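platform_get_irq() reports failure with a negative errno (including -EPROBE_DEFER), so the old `!irq` test missed every real error; a minimal sketch of the corrected pattern (probe name is illustrative):

/* Minimal sketch of the corrected check: propagate the negative errno
 * from platform_get_irq() instead of testing for 0 (function name is
 * illustrative).
 */
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* -ENXIO, -EPROBE_DEFER, ... */

	/* ... devm_request_irq(&pdev->dev, irq, ...) ... */
	return 0;
}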
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7db6c135ac6c..2495a5719e1c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -533,14 +533,16 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
}
}
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
type = gh->proto_type;
+ if (likely(type == htons(ETH_P_TEB)))
+ return call_gro_receive(eth_gro_receive, head, skb);
ptype = gro_find_receive_by_type(type);
if (!ptype)
goto out;
- skb_gro_pull(skb, gh_len);
- skb_gro_postpull_rcsum(skb, gh, gh_len);
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
@@ -563,6 +565,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
gh_len = geneve_hlen(gh);
type = gh->proto_type;
+ /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */
+ if (likely(type == htons(ETH_P_TEB)))
+ return eth_gro_complete(skb, nhoff + gh_len);
+
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 441da03c23ee..a9c44f08199d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -45,40 +45,6 @@ config BPQETHER
useful if some other computer on your local network has a direct
amateur radio connection.
-config DMASCC
- tristate "High-speed (DMA) SCC driver for AX.25"
- depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
- depends on VIRT_TO_BUS
- help
- This is a driver for high-speed SCC boards, i.e. those supporting
- DMA on one port. You usually use those boards to connect your
- computer to an amateur radio modem (such as the WA4DSY 56kbps
- modem), in order to send and receive AX.25 packet radio network
- traffic.
-
- Currently, this driver supports Ottawa PI/PI2, Paccomm/Gracilis
- PackeTwin, and S5SCC/DMA boards. They are detected automatically.
- If you have one of these cards, say Y here and read the AX25-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- This driver can operate multiple boards simultaneously. If you
- compile it as a module (by saying M instead of Y), it will be called
- dmascc. If you don't pass any parameter to the driver, all
- possible I/O addresses are probed. This could irritate other devices
- that are currently not in use. You may specify the list of addresses
- to be probed by "dmascc.io=addr1,addr2,..." (when compiled into the
- kernel image) or "io=addr1,addr2,..." (when loaded as a module). The
- network interfaces will be called dmascc0 and dmascc1 for the board
- detected first, dmascc2 and dmascc3 for the second one, and so on.
-
- Before you configure each interface with ifconfig, you MUST set
- certain parameters, such as channel access timing, clock mode, and
- DMA channel. This is accomplished with a small utility program,
- dmascc_cfg, available at
- <http://www.linux-ax25.org/wiki/Ax25-tools>. Please be sure to
- get at least version 1.27 of dmascc_cfg, as older versions will not
- work with the current driver.
-
config SCC
tristate "Z8530 SCC driver"
depends on ISA && AX25 && ISA_DMA_API
diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile
index 7a1518d763e3..25fc400369ba 100644
--- a/drivers/net/hamradio/Makefile
+++ b/drivers/net/hamradio/Makefile
@@ -11,7 +11,6 @@
# Christoph Hellwig <hch@infradead.org>
#
-obj-$(CONFIG_DMASCC) += dmascc.o
obj-$(CONFIG_SCC) += scc.o
obj-$(CONFIG_MKISS) += mkiss.o
obj-$(CONFIG_6PACK) += 6pack.o
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
deleted file mode 100644
index a2a12208e3ad..000000000000
--- a/drivers/net/hamradio/dmascc.c
+++ /dev/null
@@ -1,1450 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for high-speed SCC boards (those with DMA support)
- * Copyright (C) 1997-2000 Klaus Kudielka
- *
- * S5SCC/DMA support by Janko Koleznik S52HI
- */
-
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/rtnetlink.h>
-#include <linux/sockios.h>
-#include <linux/workqueue.h>
-#include <linux/atomic.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <linux/jiffies.h>
-#include <net/ax25.h>
-#include "z8530.h"
-
-
-/* Number of buffers per channel */
-
-#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
-#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
-#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
-
-
-/* Cards supported */
-
-#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
- 0, 8, 1843200, 3686400 }
-#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
- 0, 8, 3686400, 7372800 }
-#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
- 0, 4, 6144000, 6144000 }
-#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
- 0, 8, 4915200, 9830400 }
-
-#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
-
-#define TMR_0_HZ 25600 /* Frequency of timer 0 */
-
-#define TYPE_PI 0
-#define TYPE_PI2 1
-#define TYPE_TWIN 2
-#define TYPE_S5 3
-#define NUM_TYPES 4
-
-#define MAX_NUM_DEVS 32
-
-
-/* SCC chips supported */
-
-#define Z8530 0
-#define Z85C30 1
-#define Z85230 2
-
-#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
-
-
-/* I/O registers */
-
-/* 8530 registers relative to card base */
-#define SCCB_CMD 0x00
-#define SCCB_DATA 0x01
-#define SCCA_CMD 0x02
-#define SCCA_DATA 0x03
-
-/* 8253/8254 registers relative to card base */
-#define TMR_CNT0 0x00
-#define TMR_CNT1 0x01
-#define TMR_CNT2 0x02
-#define TMR_CTRL 0x03
-
-/* Additional PI/PI2 registers relative to card base */
-#define PI_DREQ_MASK 0x04
-
-/* Additional PackeTwin registers relative to card base */
-#define TWIN_INT_REG 0x08
-#define TWIN_CLR_TMR1 0x09
-#define TWIN_CLR_TMR2 0x0a
-#define TWIN_SPARE_1 0x0b
-#define TWIN_DMA_CFG 0x08
-#define TWIN_SERIAL_CFG 0x09
-#define TWIN_DMA_CLR_FF 0x0a
-#define TWIN_SPARE_2 0x0b
-
-
-/* PackeTwin I/O register values */
-
-/* INT_REG */
-#define TWIN_SCC_MSK 0x01
-#define TWIN_TMR1_MSK 0x02
-#define TWIN_TMR2_MSK 0x04
-#define TWIN_INT_MSK 0x07
-
-/* SERIAL_CFG */
-#define TWIN_DTRA_ON 0x01
-#define TWIN_DTRB_ON 0x02
-#define TWIN_EXTCLKA 0x04
-#define TWIN_EXTCLKB 0x08
-#define TWIN_LOOPA_ON 0x10
-#define TWIN_LOOPB_ON 0x20
-#define TWIN_EI 0x80
-
-/* DMA_CFG */
-#define TWIN_DMA_HDX_T1 0x08
-#define TWIN_DMA_HDX_R1 0x0a
-#define TWIN_DMA_HDX_T3 0x14
-#define TWIN_DMA_HDX_R3 0x16
-#define TWIN_DMA_FDX_T3R1 0x1b
-#define TWIN_DMA_FDX_T1R3 0x1d
-
-
-/* Status values */
-
-#define IDLE 0
-#define TX_HEAD 1
-#define TX_DATA 2
-#define TX_PAUSE 3
-#define TX_TAIL 4
-#define RTS_OFF 5
-#define WAIT 6
-#define DCD_ON 7
-#define RX_ON 8
-#define DCD_OFF 9
-
-
-/* Ioctls */
-
-#define SIOCGSCCPARAM SIOCDEVPRIVATE
-#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
-
-
-/* Data types */
-
-struct scc_param {
- int pclk_hz; /* frequency of BRG input (don't change) */
- int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
- int nrzi; /* 0 (nrz), 1 (nrzi) */
- int clocks; /* see dmascc_cfg documentation */
- int txdelay; /* [1/TMR_0_HZ] */
- int txtimeout; /* [1/HZ] */
- int txtail; /* [1/TMR_0_HZ] */
- int waittime; /* [1/TMR_0_HZ] */
- int slottime; /* [1/TMR_0_HZ] */
- int persist; /* 1 ... 256 */
- int dma; /* -1 (disable), 0, 1, 3 */
- int txpause; /* [1/TMR_0_HZ] */
- int rtsoff; /* [1/TMR_0_HZ] */
- int dcdon; /* [1/TMR_0_HZ] */
- int dcdoff; /* [1/TMR_0_HZ] */
-};
-
-struct scc_hardware {
- char *name;
- int io_region;
- int io_delta;
- int io_size;
- int num_devs;
- int scc_offset;
- int tmr_offset;
- int tmr_hz;
- int pclk_hz;
-};
-
-struct scc_priv {
- int type;
- int chip;
- struct net_device *dev;
- struct scc_info *info;
-
- int channel;
- int card_base, scc_cmd, scc_data;
- int tmr_cnt, tmr_ctrl, tmr_mode;
- struct scc_param param;
- char rx_buf[NUM_RX_BUF][BUF_SIZE];
- int rx_len[NUM_RX_BUF];
- int rx_ptr;
- struct work_struct rx_work;
- int rx_head, rx_tail, rx_count;
- int rx_over;
- char tx_buf[NUM_TX_BUF][BUF_SIZE];
- int tx_len[NUM_TX_BUF];
- int tx_ptr;
- int tx_head, tx_tail, tx_count;
- int state;
- unsigned long tx_start;
- int rr0;
- spinlock_t *register_lock; /* Per scc_info */
- spinlock_t ring_lock;
-};
-
-struct scc_info {
- int irq_used;
- int twin_serial_cfg;
- struct net_device *dev[2];
- struct scc_priv priv[2];
- struct scc_info *next;
- spinlock_t register_lock; /* Per device register lock */
-};
-
-
-/* Function declarations */
-static int setup_adapter(int card_base, int type, int n) __init;
-
-static void write_scc(struct scc_priv *priv, int reg, int val);
-static void write_scc_data(struct scc_priv *priv, int val, int fast);
-static int read_scc(struct scc_priv *priv, int reg);
-static int read_scc_data(struct scc_priv *priv);
-
-static int scc_open(struct net_device *dev);
-static int scc_close(struct net_device *dev);
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
-static int scc_set_mac_address(struct net_device *dev, void *sa);
-
-static inline void tx_on(struct scc_priv *priv);
-static inline void rx_on(struct scc_priv *priv);
-static inline void rx_off(struct scc_priv *priv);
-static void start_timer(struct scc_priv *priv, int t, int r15);
-static inline unsigned char random(void);
-
-static inline void z8530_isr(struct scc_info *info);
-static irqreturn_t scc_isr(int irq, void *dev_id);
-static void rx_isr(struct scc_priv *priv);
-static void special_condition(struct scc_priv *priv, int rc);
-static void rx_bh(struct work_struct *);
-static void tx_isr(struct scc_priv *priv);
-static void es_isr(struct scc_priv *priv);
-static void tm_isr(struct scc_priv *priv);
-
-
-/* Initialization variables */
-
-static int io[MAX_NUM_DEVS] __initdata = { 0, };
-
-/* Beware! hw[] is also used in dmascc_exit(). */
-static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
-
-
-/* Global variables */
-
-static struct scc_info *first;
-static unsigned long rand;
-
-
-MODULE_AUTHOR("Klaus Kudielka");
-MODULE_DESCRIPTION("Driver for high-speed SCC boards");
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_LICENSE("GPL");
-
-static void __exit dmascc_exit(void)
-{
- int i;
- struct scc_info *info;
-
- while (first) {
- info = first;
-
- /* Unregister devices */
- for (i = 0; i < 2; i++)
- unregister_netdev(info->dev[i]);
-
- /* Reset board */
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- release_region(info->dev[0]->base_addr,
- hw[info->priv[0].type].io_size);
-
- for (i = 0; i < 2; i++)
- free_netdev(info->dev[i]);
-
- /* Free memory */
- first = info->next;
- kfree(info);
- }
-}
-
-static int __init dmascc_init(void)
-{
- int h, i, j, n;
- int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
- t1[MAX_NUM_DEVS];
- unsigned t_val;
- unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
- counting[MAX_NUM_DEVS];
-
- /* Initialize random number generator */
- rand = jiffies;
- /* Cards found = 0 */
- n = 0;
- /* Warning message */
- if (!io[0])
- printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
-
- /* Run autodetection for each card type */
- for (h = 0; h < NUM_TYPES; h++) {
-
- if (io[0]) {
- /* User-specified I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- base[i] = 0;
- for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
- j = (io[i] -
- hw[h].io_region) / hw[h].io_delta;
- if (j >= 0 && j < hw[h].num_devs &&
- hw[h].io_region +
- j * hw[h].io_delta == io[i]) {
- base[j] = io[i];
- }
- }
- } else {
- /* Default I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++) {
- base[i] =
- hw[h].io_region + i * hw[h].io_delta;
- }
- }
-
- /* Check valid I/O address regions */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if (!request_region
- (base[i], hw[h].io_size, "dmascc"))
- base[i] = 0;
- else {
- tcmd[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CTRL;
- t0[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT0;
- t1[i] =
- base[i] + hw[h].tmr_offset +
- TMR_CNT1;
- }
- }
-
- /* Start timers */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
- outb(0x36, tcmd[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
- t0[i]);
- outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
- t0[i]);
- /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
- outb(0x70, tcmd[i]);
- outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
- outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
- start[i] = jiffies;
- delay[i] = 0;
- counting[i] = 1;
- /* Timer 2: LSB+MSB, Mode 0 */
- outb(0xb0, tcmd[i]);
- }
- time = jiffies;
- /* Wait until counter registers are loaded */
- udelay(2000000 / TMR_0_HZ);
-
- /* Timing loop */
- while (time_is_after_jiffies(time + 13)) {
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i] && counting[i]) {
- /* Read back Timer 1: latch; read LSB; read MSB */
- outb(0x40, tcmd[i]);
- t_val =
- inb(t1[i]) + (inb(t1[i]) << 8);
- /* Also check whether counter did wrap */
- if (t_val == 0 ||
- t_val > TMR_0_HZ / HZ * 10)
- counting[i] = 0;
- delay[i] = jiffies - start[i];
- }
- }
-
- /* Evaluate measurements */
- for (i = 0; i < hw[h].num_devs; i++)
- if (base[i]) {
- if ((delay[i] >= 9 && delay[i] <= 11) &&
- /* Ok, we have found an adapter */
- (setup_adapter(base[i], h, n) == 0))
- n++;
- else
- release_region(base[i],
- hw[h].io_size);
- }
-
- } /* NUM_TYPES */
-
- /* If any adapter was successfully initialized, return ok */
- if (n)
- return 0;
-
- /* If no adapter found, return error */
- printk(KERN_INFO "dmascc: no adapters found\n");
- return -EIO;
-}
-
-module_init(dmascc_init);
-module_exit(dmascc_exit);
-
-static void __init dev_setup(struct net_device *dev)
-{
- dev->type = ARPHRD_AX25;
- dev->hard_header_len = AX25_MAX_HEADER_LEN;
- dev->mtu = 1500;
- dev->addr_len = AX25_ADDR_LEN;
- dev->tx_queue_len = 64;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- dev_addr_set(dev, (u8 *)&ax25_defaddr);
-}
-
-static const struct net_device_ops scc_netdev_ops = {
- .ndo_open = scc_open,
- .ndo_stop = scc_close,
- .ndo_start_xmit = scc_send_packet,
- .ndo_siocdevprivate = scc_siocdevprivate,
- .ndo_set_mac_address = scc_set_mac_address,
-};
-
-static int __init setup_adapter(int card_base, int type, int n)
-{
- int i, irq, chip, err;
- struct scc_info *info;
- struct net_device *dev;
- struct scc_priv *priv;
- unsigned long time;
- unsigned int irqs;
- int tmr_base = card_base + hw[type].tmr_offset;
- int scc_base = card_base + hw[type].scc_offset;
- char *chipnames[] = CHIPNAMES;
-
- /* Initialize what is necessary for write_scc and write_scc_data */
- info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[0]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out1;
- }
-
- info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
- if (!info->dev[1]) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
- err = -ENOMEM;
- goto out2;
- }
- spin_lock_init(&info->register_lock);
-
- priv = &info->priv[0];
- priv->type = type;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + SCCA_CMD;
- priv->scc_data = scc_base + SCCA_DATA;
- priv->register_lock = &info->register_lock;
-
- /* Reset SCC */
- write_scc(priv, R9, FHWRES | MIE | NV);
-
- /* Determine type of chip by enabling SDLC/HDLC enhancements */
- write_scc(priv, R15, SHDLCE);
- if (!read_scc(priv, R15)) {
- /* WR7' not present. This is an ordinary Z8530 SCC. */
- chip = Z8530;
- } else {
- /* Put one character in TX FIFO */
- write_scc_data(priv, 0, 0);
- if (read_scc(priv, R0) & Tx_BUF_EMP) {
- /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
- chip = Z85230;
- } else {
- /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
- chip = Z85C30;
- }
- }
- write_scc(priv, R15, 0);
-
- /* Start IRQ auto-detection */
- irqs = probe_irq_on();
-
- /* Enable interrupts */
- if (type == TYPE_TWIN) {
- outb(0, card_base + TWIN_DMA_CFG);
- inb(card_base + TWIN_CLR_TMR1);
- inb(card_base + TWIN_CLR_TMR2);
- info->twin_serial_cfg = TWIN_EI;
- outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
- } else {
- write_scc(priv, R15, CTSIE);
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R1, EXT_INT_ENAB);
- }
-
- /* Start timer */
- outb(1, tmr_base + TMR_CNT1);
- outb(0, tmr_base + TMR_CNT1);
-
- /* Wait and detect IRQ */
- time = jiffies;
- while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ));
- irq = probe_irq_off(irqs);
-
- /* Clear pending interrupt, disable interrupts */
- if (type == TYPE_TWIN) {
- inb(card_base + TWIN_CLR_TMR1);
- } else {
- write_scc(priv, R1, 0);
- write_scc(priv, R15, 0);
- write_scc(priv, R0, RES_EXT_INT);
- }
-
- if (irq <= 0) {
- printk(KERN_ERR
- "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
- hw[type].name, card_base, irq);
- err = -ENODEV;
- goto out3;
- }
-
- /* Set up data structures */
- for (i = 0; i < 2; i++) {
- dev = info->dev[i];
- priv = &info->priv[i];
- priv->type = type;
- priv->chip = chip;
- priv->dev = dev;
- priv->info = info;
- priv->channel = i;
- spin_lock_init(&priv->ring_lock);
- priv->register_lock = &info->register_lock;
- priv->card_base = card_base;
- priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
- priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
- priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
- priv->tmr_ctrl = tmr_base + TMR_CTRL;
- priv->tmr_mode = i ? 0xb0 : 0x70;
- priv->param.pclk_hz = hw[type].pclk_hz;
- priv->param.brg_tc = -1;
- priv->param.clocks = TCTRxCP | RCRTxCP;
- priv->param.persist = 256;
- priv->param.dma = -1;
- INIT_WORK(&priv->rx_work, rx_bh);
- dev->ml_priv = priv;
- snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
- dev->base_addr = card_base;
- dev->irq = irq;
- dev->netdev_ops = &scc_netdev_ops;
- dev->header_ops = &ax25_header_ops;
- }
- if (register_netdev(info->dev[0])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[0]->name);
- err = -ENODEV;
- goto out3;
- }
- if (register_netdev(info->dev[1])) {
- printk(KERN_ERR "dmascc: could not register %s\n",
- info->dev[1]->name);
- err = -ENODEV;
- goto out4;
- }
-
-
- info->next = first;
- first = info;
- printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
- hw[type].name, chipnames[chip], card_base, irq);
- return 0;
-
- out4:
- unregister_netdev(info->dev[0]);
- out3:
- if (info->priv[0].type == TYPE_TWIN)
- outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
- write_scc(&info->priv[0], R9, FHWRES);
- free_netdev(info->dev[1]);
- out2:
- free_netdev(info->dev[0]);
- out1:
- kfree(info);
- out:
- return err;
-}
-
-
-/* Driver functions */
-
-static void write_scc(struct scc_priv *priv, int reg, int val)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- outb(val, priv->scc_cmd);
- return;
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- return;
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- outb_p(val, priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return;
- }
-}
-
-
-static void write_scc_data(struct scc_priv *priv, int val, int fast)
-{
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- outb(val, priv->scc_data);
- return;
- case TYPE_TWIN:
- outb_p(val, priv->scc_data);
- return;
- default:
- if (fast)
- outb_p(val, priv->scc_data);
- else {
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- outb_p(val, priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- }
- return;
- }
-}
-
-
-static int read_scc(struct scc_priv *priv, int reg)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- if (reg)
- outb(reg, priv->scc_cmd);
- return inb(priv->scc_cmd);
- case TYPE_TWIN:
- if (reg)
- outb_p(reg, priv->scc_cmd);
- return inb_p(priv->scc_cmd);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- if (reg)
- outb_p(reg, priv->scc_cmd);
- rc = inb_p(priv->scc_cmd);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int read_scc_data(struct scc_priv *priv)
-{
- int rc;
- unsigned long flags;
- switch (priv->type) {
- case TYPE_S5:
- return inb(priv->scc_data);
- case TYPE_TWIN:
- return inb_p(priv->scc_data);
- default:
- spin_lock_irqsave(priv->register_lock, flags);
- outb_p(0, priv->card_base + PI_DREQ_MASK);
- rc = inb_p(priv->scc_data);
- outb(1, priv->card_base + PI_DREQ_MASK);
- spin_unlock_irqrestore(priv->register_lock, flags);
- return rc;
- }
-}
-
-
-static int scc_open(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- /* Request IRQ if not already used by other channel */
- if (!info->irq_used) {
- if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
- return -EAGAIN;
- }
- }
- info->irq_used++;
-
- /* Request DMA if required */
- if (priv->param.dma >= 0) {
- if (request_dma(priv->param.dma, "dmascc")) {
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
- return -EAGAIN;
- } else {
- unsigned long flags = claim_dma_lock();
- clear_dma_ff(priv->param.dma);
- release_dma_lock(flags);
- }
- }
-
- /* Initialize local variables */
- priv->rx_ptr = 0;
- priv->rx_over = 0;
- priv->rx_head = priv->rx_tail = priv->rx_count = 0;
- priv->state = IDLE;
- priv->tx_head = priv->tx_tail = priv->tx_count = 0;
- priv->tx_ptr = 0;
-
- /* Reset channel */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- /* X1 clock, SDLC mode */
- write_scc(priv, R4, SDLC | X1CLK);
- /* DMA */
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* 8 bit RX char, RX disable */
- write_scc(priv, R3, Rx8);
- /* 8 bit TX char, TX disable */
- write_scc(priv, R5, Tx8);
- /* SDLC address field */
- write_scc(priv, R6, 0);
- /* SDLC flag */
- write_scc(priv, R7, FLAG);
- switch (priv->chip) {
- case Z85C30:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* Auto EOM reset */
- write_scc(priv, R7, AUTOEOM);
- write_scc(priv, R15, 0);
- break;
- case Z85230:
- /* Select WR7' */
- write_scc(priv, R15, SHDLCE);
- /* The following bits are set (see 2.5.2.1):
- - Automatic EOM reset
- - Interrupt request if RX FIFO is half full
- This bit should be ignored in DMA mode (according to the
- documentation), but actually isn't. The receiver doesn't work if
- it is set. Thus, we have to clear it in DMA mode.
- - Interrupt/DMA request if TX FIFO is completely empty
- a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
- compatibility).
- b) If cleared, DMA requests may follow each other very quickly,
- filling up the TX FIFO.
- Advantage: TX works even in case of high bus latency.
- Disadvantage: Edge-triggered DMA request circuitry may miss
- a request. No more data is delivered, resulting
- in a TX FIFO underrun.
- Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
- The PackeTwin doesn't. I don't know about the PI, but let's
- assume it behaves like the PI2.
- */
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- write_scc(priv, R7, AUTOEOM | TXFIFOE);
- else
- write_scc(priv, R7, AUTOEOM);
- } else {
- write_scc(priv, R7, AUTOEOM | RXFIFOH);
- }
- write_scc(priv, R15, 0);
- break;
- }
- /* Preset CRC, NRZ(I) encoding */
- write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
-
- /* Configure baud rate generator */
- if (priv->param.brg_tc >= 0) {
- /* Program BR generator */
- write_scc(priv, R12, priv->param.brg_tc & 0xFF);
- write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
- /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
- PackeTwin, not connected on the PI2); set DPLL source to BRG */
- write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
- /* Enable DPLL */
- write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
- } else {
- /* Disable BR generator */
- write_scc(priv, R14, DTRREQ | BRSRC);
- }
-
- /* Configure clocks */
- if (priv->type == TYPE_TWIN) {
- /* Disable external TX clock receiver */
- outb((info->twin_serial_cfg &=
- ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
- write_scc(priv, R11, priv->param.clocks);
- if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
- /* Enable external TX clock receiver */
- outb((info->twin_serial_cfg |=
- (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Configure PackeTwin */
- if (priv->type == TYPE_TWIN) {
- /* Assert DTR, enable interrupts */
- outb((info->twin_serial_cfg |= TWIN_EI |
- (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Read current status */
- priv->rr0 = read_scc(priv, R0);
- /* Enable DCD interrupt */
- write_scc(priv, R15, DCDIE);
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-
-static int scc_close(struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- struct scc_info *info = priv->info;
- int card_base = priv->card_base;
-
- netif_stop_queue(dev);
-
- if (priv->type == TYPE_TWIN) {
- /* Drop DTR */
- outb((info->twin_serial_cfg &=
- (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
- card_base + TWIN_SERIAL_CFG);
- }
-
- /* Reset channel, free DMA and IRQ */
- write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
- if (priv->param.dma >= 0) {
- if (priv->type == TYPE_TWIN)
- outb(0, card_base + TWIN_DMA_CFG);
- free_dma(priv->param.dma);
- }
- if (--info->irq_used == 0)
- free_irq(dev->irq, info);
-
- return 0;
-}
-
-
-static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
-{
- struct scc_priv *priv = dev->ml_priv;
-
- switch (cmd) {
- case SIOCGSCCPARAM:
- if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- case SIOCSSCCPARAM:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (netif_running(dev))
- return -EAGAIN;
- if (copy_from_user(&priv->param, data,
- sizeof(struct scc_param)))
- return -EFAULT;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-
-static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct scc_priv *priv = dev->ml_priv;
- unsigned long flags;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP))
- return ax25_ip_xmit(skb);
-
- /* Temporarily stop the scheduler feeding us packets */
- netif_stop_queue(dev);
-
- /* Transfer data to DMA buffer */
- i = priv->tx_head;
- skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
- priv->tx_len[i] = skb->len - 1;
-
- /* Clear interrupts while we touch our circular buffers */
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move the ring buffer's head */
- priv->tx_head = (i + 1) % NUM_TX_BUF;
- priv->tx_count++;
-
- /* If we just filled up the last buffer, leave queue stopped.
- The higher layers must wait until we have a DMA buffer
- to accept the data. */
- if (priv->tx_count < NUM_TX_BUF)
- netif_wake_queue(dev);
-
- /* Set new TX state */
- if (priv->state == IDLE) {
- /* Assert RTS, start timer */
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- }
-
- /* Turn interrupts back on and free buffer */
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-
-static int scc_set_mac_address(struct net_device *dev, void *sa)
-{
- dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
- return 0;
-}
-
-
-static inline void tx_on(struct scc_priv *priv)
-{
- int i, n;
- unsigned long flags;
-
- if (priv->param.dma >= 0) {
- n = (priv->chip == Z85230) ? 3 : 1;
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
- set_dma_count(priv->param.dma,
- priv->tx_len[priv->tx_tail] - n);
- release_dma_lock(flags);
- /* Enable TX underrun interrupt */
- write_scc(priv, R15, TxUIE);
- /* Configure DREQ */
- if (priv->type == TYPE_TWIN)
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
- priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN |
- WT_RDY_ENAB);
- /* Write first byte(s) */
- spin_lock_irqsave(priv->register_lock, flags);
- for (i = 0; i < n; i++)
- write_scc_data(priv,
- priv->tx_buf[priv->tx_tail][i], 1);
- enable_dma(priv->param.dma);
- spin_unlock_irqrestore(priv->register_lock, flags);
- } else {
- write_scc(priv, R15, TxUIE);
- write_scc(priv, R1,
- EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
- tx_isr(priv);
- }
- /* Reset EOM latch if we do not have the AUTOEOM feature */
- if (priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-}
-
-
-static inline void rx_on(struct scc_priv *priv)
-{
- unsigned long flags;
-
- /* Clear RX FIFO */
- while (read_scc(priv, R0) & Rx_CH_AV)
- read_scc_data(priv);
- priv->rx_over = 0;
- if (priv->param.dma >= 0) {
- /* Program DMA controller */
- flags = claim_dma_lock();
- set_dma_mode(priv->param.dma, DMA_MODE_READ);
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- enable_dma(priv->param.dma);
- /* Configure PackeTwin DMA */
- if (priv->type == TYPE_TWIN) {
- outb((priv->param.dma ==
- 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
- priv->card_base + TWIN_DMA_CFG);
- }
- /* Sp. cond. intr. only, ext int enable, RX DMA enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
- WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
- } else {
- /* Reset current frame */
- priv->rx_ptr = 0;
- /* Intr. on all Rx characters and Sp. cond., ext int enable */
- write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
- WT_FN_RDYFN);
- }
- write_scc(priv, R0, ERR_RES);
- write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
-}
-
-
-static inline void rx_off(struct scc_priv *priv)
-{
- /* Disable receiver */
- write_scc(priv, R3, Rx8);
- /* Disable DREQ / RX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- /* Disable DMA */
- if (priv->param.dma >= 0)
- disable_dma(priv->param.dma);
-}
-
-
-static void start_timer(struct scc_priv *priv, int t, int r15)
-{
- outb(priv->tmr_mode, priv->tmr_ctrl);
- if (t == 0) {
- tm_isr(priv);
- } else if (t > 0) {
- outb(t & 0xFF, priv->tmr_cnt);
- outb((t >> 8) & 0xFF, priv->tmr_cnt);
- if (priv->type != TYPE_TWIN) {
- write_scc(priv, R15, r15 | CTSIE);
- priv->rr0 |= CTS;
- }
- }
-}
-
-
-static inline unsigned char random(void)
-{
- /* See "Numerical Recipes in C", second edition, p. 284 */
- rand = rand * 1664525L + 1013904223L;
- return (unsigned char) (rand >> 24);
-}
-
-static inline void z8530_isr(struct scc_info *info)
-{
- int is, i = 100;
-
- while ((is = read_scc(&info->priv[0], R3)) && i--) {
- if (is & CHARxIP) {
- rx_isr(&info->priv[0]);
- } else if (is & CHATxIP) {
- tx_isr(&info->priv[0]);
- } else if (is & CHAEXT) {
- es_isr(&info->priv[0]);
- } else if (is & CHBRxIP) {
- rx_isr(&info->priv[1]);
- } else if (is & CHBTxIP) {
- tx_isr(&info->priv[1]);
- } else {
- es_isr(&info->priv[1]);
- }
- write_scc(&info->priv[0], R0, RES_H_IUS);
- i++;
- }
- if (i < 0) {
- printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
- is);
- }
- /* Ok, no interrupts pending from this 8530. The INT line should
- be inactive now. */
-}
-
-
-static irqreturn_t scc_isr(int irq, void *dev_id)
-{
- struct scc_info *info = dev_id;
-
- spin_lock(info->priv[0].register_lock);
- /* At this point interrupts are enabled, and the interrupt under service
- is already acknowledged, but masked off.
-
- Interrupt processing: We loop until we know that the IRQ line is
- low. If another positive edge occurs afterwards during the ISR,
- another interrupt will be triggered by the interrupt controller
- as soon as the IRQ level is enabled again (see asm/irq.h).
-
- Bottom-half handlers will be processed after scc_isr(). This is
- important, since we only have small ringbuffers and want new data
- to be fetched/delivered immediately. */
-
- if (info->priv[0].type == TYPE_TWIN) {
- int is, card_base = info->priv[0].card_base;
- while ((is = ~inb(card_base + TWIN_INT_REG)) &
- TWIN_INT_MSK) {
- if (is & TWIN_SCC_MSK) {
- z8530_isr(info);
- } else if (is & TWIN_TMR1_MSK) {
- inb(card_base + TWIN_CLR_TMR1);
- tm_isr(&info->priv[0]);
- } else {
- inb(card_base + TWIN_CLR_TMR2);
- tm_isr(&info->priv[1]);
- }
- }
- } else
- z8530_isr(info);
- spin_unlock(info->priv[0].register_lock);
- return IRQ_HANDLED;
-}
-
-
-static void rx_isr(struct scc_priv *priv)
-{
- if (priv->param.dma >= 0) {
- /* Check special condition and perform error reset. See 2.4.7.5. */
- special_condition(priv, read_scc(priv, R1));
- write_scc(priv, R0, ERR_RES);
- } else {
- /* Check special condition for each character. Error reset not necessary.
- Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
- int rc;
- while (read_scc(priv, R0) & Rx_CH_AV) {
- rc = read_scc(priv, R1);
- if (priv->rx_ptr < BUF_SIZE)
-				priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
-					read_scc_data(priv);
- else {
- priv->rx_over = 2;
- read_scc_data(priv);
- }
- special_condition(priv, rc);
- }
- }
-}
-
-
-static void special_condition(struct scc_priv *priv, int rc)
-{
- int cb;
- unsigned long flags;
-
- /* See Figure 2-15. Only overrun and EOF need to be checked. */
-
- if (rc & Rx_OVR) {
- /* Receiver overrun */
- priv->rx_over = 1;
- if (priv->param.dma < 0)
- write_scc(priv, R0, ERR_RES);
- } else if (rc & END_FR) {
- /* End of frame. Get byte count */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
-			cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
- release_dma_lock(flags);
- } else {
- cb = priv->rx_ptr - 2;
- }
- if (priv->rx_over) {
- /* We had an overrun */
- priv->dev->stats.rx_errors++;
- if (priv->rx_over == 2)
- priv->dev->stats.rx_length_errors++;
- else
- priv->dev->stats.rx_fifo_errors++;
- priv->rx_over = 0;
- } else if (rc & CRC_ERR) {
- /* Count invalid CRC only if packet length >= minimum */
- if (cb >= 15) {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_crc_errors++;
- }
- } else {
- if (cb >= 15) {
- if (priv->rx_count < NUM_RX_BUF - 1) {
- /* Put good frame in FIFO */
- priv->rx_len[priv->rx_head] = cb;
-				priv->rx_head =
-					(priv->rx_head + 1) % NUM_RX_BUF;
- priv->rx_count++;
- schedule_work(&priv->rx_work);
- } else {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_over_errors++;
- }
- }
- }
- /* Get ready for new frame */
- if (priv->param.dma >= 0) {
- flags = claim_dma_lock();
- set_dma_addr(priv->param.dma,
- virt_to_bus(priv->rx_buf[priv->rx_head]));
- set_dma_count(priv->param.dma, BUF_SIZE);
- release_dma_lock(flags);
- } else {
- priv->rx_ptr = 0;
- }
- }
-}
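One subtlety in the byte count above: in DMA mode the frame length is the buffer size minus the DMA residue, minus the two CRC bytes the SCC appends to each HDLC frame. A worked example with purely hypothetical numbers (the buffer size and residue are illustrative, not the driver's actual values):

/* Illustration only -- hypothetical values */
enum { EXAMPLE_BUF_SIZE = 1584 };

int residue = 1284;	/* get_dma_residue(): bytes the DMA never wrote */
int cb = EXAMPLE_BUF_SIZE - residue - 2;	/* 1584 - 1284 - 2 = 298 */
/* cb >= 15 is required before the frame is queued; shorter frames are
 * treated as noise and silently discarded.
 */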
-
-
-static void rx_bh(struct work_struct *ugli_api)
-{
- struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
- int i = priv->rx_tail;
- int cb;
- unsigned long flags;
- struct sk_buff *skb;
- unsigned char *data;
-
- spin_lock_irqsave(&priv->ring_lock, flags);
- while (priv->rx_count) {
- spin_unlock_irqrestore(&priv->ring_lock, flags);
- cb = priv->rx_len[i];
- /* Allocate buffer */
- skb = dev_alloc_skb(cb + 1);
- if (skb == NULL) {
- /* Drop packet */
- priv->dev->stats.rx_dropped++;
- } else {
- /* Fill buffer */
- data = skb_put(skb, cb + 1);
- data[0] = 0;
- memcpy(&data[1], priv->rx_buf[i], cb);
- skb->protocol = ax25_type_trans(skb, priv->dev);
- netif_rx(skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += cb;
- }
- spin_lock_irqsave(&priv->ring_lock, flags);
- /* Move tail */
- priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
- priv->rx_count--;
- }
- spin_unlock_irqrestore(&priv->ring_lock, flags);
-}
-
-
-static void tx_isr(struct scc_priv *priv)
-{
- int i = priv->tx_tail, p = priv->tx_ptr;
-
- /* Suspend TX interrupts if we don't want to send anything.
- See Figure 2-22. */
- if (p == priv->tx_len[i]) {
- write_scc(priv, R0, RES_Tx_P);
- return;
- }
-
- /* Write characters */
- while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
- write_scc_data(priv, priv->tx_buf[i][p++], 0);
- }
-
- /* Reset EOM latch of Z8530 */
- if (!priv->tx_ptr && p && priv->chip == Z8530)
- write_scc(priv, R0, RES_EOM_L);
-
- priv->tx_ptr = p;
-}
-
-
-static void es_isr(struct scc_priv *priv)
-{
- int i, rr0, drr0, res;
- unsigned long flags;
-
- /* Read status, reset interrupt bit (open latches) */
- rr0 = read_scc(priv, R0);
- write_scc(priv, R0, RES_EXT_INT);
- drr0 = priv->rr0 ^ rr0;
- priv->rr0 = rr0;
-
- /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
- it might have already been cleared again by AUTOEOM. */
- if (priv->state == TX_DATA) {
- /* Get remaining bytes */
- i = priv->tx_tail;
- if (priv->param.dma >= 0) {
- disable_dma(priv->param.dma);
- flags = claim_dma_lock();
- res = get_dma_residue(priv->param.dma);
- release_dma_lock(flags);
- } else {
- res = priv->tx_len[i] - priv->tx_ptr;
- priv->tx_ptr = 0;
- }
- /* Disable DREQ / TX interrupt */
- if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
- outb(0, priv->card_base + TWIN_DMA_CFG);
- else
- write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
- if (res) {
- /* Update packet statistics */
- priv->dev->stats.tx_errors++;
- priv->dev->stats.tx_fifo_errors++;
- /* Other underrun interrupts may already be waiting */
- write_scc(priv, R0, RES_EXT_INT);
- write_scc(priv, R0, RES_EXT_INT);
- } else {
- /* Update packet statistics */
- priv->dev->stats.tx_packets++;
- priv->dev->stats.tx_bytes += priv->tx_len[i];
- /* Remove frame from FIFO */
- priv->tx_tail = (i + 1) % NUM_TX_BUF;
- priv->tx_count--;
- /* Inform upper layers */
- netif_wake_queue(priv->dev);
- }
- /* Switch state */
- write_scc(priv, R15, 0);
- if (priv->tx_count &&
- time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) {
- priv->state = TX_PAUSE;
- start_timer(priv, priv->param.txpause, 0);
- } else {
- priv->state = TX_TAIL;
- start_timer(priv, priv->param.txtail, 0);
- }
- }
-
- /* DCD transition */
- if (drr0 & DCD) {
- if (rr0 & DCD) {
- switch (priv->state) {
- case IDLE:
- case WAIT:
- priv->state = DCD_ON;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdon, 0);
- }
- } else {
- switch (priv->state) {
- case RX_ON:
- rx_off(priv);
- priv->state = DCD_OFF;
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.dcdoff, 0);
- }
- }
- }
-
- /* CTS transition */
- if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
- tm_isr(priv);
-
-}
-
-
-static void tm_isr(struct scc_priv *priv)
-{
- switch (priv->state) {
- case TX_HEAD:
- case TX_PAUSE:
- tx_on(priv);
- priv->state = TX_DATA;
- break;
- case TX_TAIL:
- write_scc(priv, R5, TxCRC_ENAB | Tx8);
- priv->state = RTS_OFF;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.rtsoff, 0);
- break;
- case RTS_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- priv->dev->stats.collisions++;
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv, priv->param.waittime, DCDIE);
- }
- break;
- case WAIT:
- if (priv->tx_count) {
- priv->state = TX_HEAD;
- priv->tx_start = jiffies;
- write_scc(priv, R5,
- TxCRC_ENAB | RTS | TxENAB | Tx8);
- write_scc(priv, R15, 0);
- start_timer(priv, priv->param.txdelay, 0);
- } else {
- priv->state = IDLE;
- if (priv->type != TYPE_TWIN)
- write_scc(priv, R15, DCDIE);
- }
- break;
- case DCD_ON:
- case DCD_OFF:
- write_scc(priv, R15, DCDIE);
- priv->rr0 = read_scc(priv, R0);
- if (priv->rr0 & DCD) {
- rx_on(priv);
- priv->state = RX_ON;
- } else {
- priv->state = WAIT;
- start_timer(priv,
- random() / priv->param.persist *
- priv->param.slottime, DCDIE);
- }
- break;
- }
-}
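The WAIT and DCD_ON/DCD_OFF arms implement p-persistent CSMA: the timer argument `random() / persist * slottime` groups left to right, so the 0..255 random byte is first divided down into a slot count. A sketch of the arithmetic with illustrative parameter values:

/* Illustration only: with persist = 64 the channel is seized with
 * probability ~64/256 per round, since a slot count of 0 makes
 * start_timer() fire tm_isr() immediately.
 */
unsigned char r = random();		/* uniform in 0..255, say 200 */
int slots = r / priv->param.persist;	/* 200 / 64 = 3 backoff slots */
int t = slots * priv->param.slottime;	/* ticks handed to start_timer() */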
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index cf69da0e296c..25b38a374e3c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/hyperv.h>
#include <linux/rndis.h>
+#include <linux/jhash.h>
/* RSS related */
#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
@@ -237,6 +238,7 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev);
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp);
unsigned int netvsc_xdp_fraglen(unsigned int len);
@@ -246,6 +248,8 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netvsc_device *nvdev);
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags);
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
@@ -942,12 +946,21 @@ struct nvsc_rsc {
#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
-struct netvsc_stats {
+struct netvsc_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ struct u64_stats_sync syncp;
+};
+
+struct netvsc_stats_rx {
u64 packets;
u64 bytes;
u64 broadcast;
u64 multicast;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
struct u64_stats_sync syncp;
};
@@ -1046,6 +1059,55 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+ * packets. We can use ethtool to change UDP hash level when necessary.
+ */
+static inline u32 netvsc_get_hash(struct sk_buff *skb,
+ const struct net_device_context *ndc)
+{
+ struct flow_keys flow;
+ u32 hash, pkt_proto = 0;
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ return 0;
+
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
+ return skb_get_hash(skb);
+ } else {
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+ return 0;
+
+ __skb_set_sw_hash(skb, hash, false);
+ }
+
+ return hash;
+}
+
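When the flow's protocol is excluded from L4 hashing, the helper above hashes only the IP address pair with the Jenkins hash. A minimal sketch of that fallback for IPv4 (the helper name and sample use are illustrative):

#include <linux/jhash.h>

/* Illustrative helper: hash a v4 address pair the way the fallback does.
 * jhash2() takes the key length in 32-bit words: 2 for IPv4, 8 for the
 * IPv6 branch above.
 */
static u32 example_v4_flow_hash(__be32 saddr, __be32 daddr, u32 hashrnd)
{
	__be32 addrs[2] = { saddr, daddr };

	return jhash2((u32 *)addrs, 2, hashrnd);
}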
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
@@ -1060,9 +1122,10 @@ struct netvsc_channel {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ bool xdp_flush;
- struct netvsc_stats tx_stats;
- struct netvsc_stats rx_stats;
+ struct netvsc_stats_tx tx_stats;
+ struct netvsc_stats_rx rx_stats;
};
/* Per netvsc device */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9442f751ad3a..6e42cb03e226 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
+#include <linux/filter.h>
#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>
@@ -792,9 +793,9 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
int queue_sends;
u64 cmd_rqst;
- cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
+ cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
- netdev_err(ndev, "Incorrect transaction id\n");
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
@@ -805,7 +806,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
- struct netvsc_stats *tx_stats;
+ struct netvsc_stats_tx *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
@@ -854,9 +855,9 @@ static void netvsc_send_completion(struct net_device *ndev,
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
- (u64)desc->trans_id);
+ desc->trans_id);
if (cmd_rqst == VMBUS_RQST_ERROR) {
- netdev_err(ndev, "Invalid transaction id\n");
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
return;
}
@@ -1670,12 +1671,17 @@ int netvsc_poll(struct napi_struct *napi, int budget)
if (!nvchan->desc)
nvchan->desc = hv_pkt_iter_first(channel);
+ nvchan->xdp_flush = false;
+
while (nvchan->desc && work_done < budget) {
work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
ndev, nvchan->desc, budget);
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
+ if (nvchan->xdp_flush)
+ xdp_do_flush();
+
/* Send any pending receive completions */
ret = send_recv_completions(ndev, net_device, nvchan);
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 7856905414eb..4a9522689fa4 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
@@ -23,11 +24,13 @@
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp)
{
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
void *data = nvchan->rsc.data[0];
u32 len = nvchan->rsc.len[0];
struct page *page = NULL;
struct bpf_prog *prog;
u32 act = XDP_PASS;
+ bool drop = true;
xdp->data_hard_start = NULL;
@@ -60,9 +63,34 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
switch (act) {
case XDP_PASS:
case XDP_TX:
+ drop = false;
+ break;
+
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ if (!xdp_do_redirect(ndev, xdp, prog)) {
+ nvchan->xdp_flush = true;
+ drop = false;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+
+ rx_stats->xdp_redirect++;
+ rx_stats->packets++;
+ rx_stats->bytes += nvchan->rsc.pktlen;
+
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ } else {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
@@ -74,7 +102,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
out:
rcu_read_unlock();
- if (page && act != XDP_PASS && act != XDP_TX) {
+ if (page && drop) {
__free_page(page);
xdp->data_hard_start = NULL;
}
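The drop-flag rework follows the usual XDP redirect contract: xdp_do_redirect() only queues the frame on success (so the page must not be freed), and the driver owes one xdp_do_flush() per NAPI poll -- which is what the new xdp_flush bookkeeping in netvsc_poll() provides. A generic sketch of the pattern, with hypothetical helpers standing in for the driver's real RX machinery:

/* Illustration only -- next_rx_xdp_buff() and run_xdp_prog() are made up */
static int example_poll(struct napi_struct *napi, struct bpf_prog *prog,
			int budget)
{
	bool need_flush = false;
	int work_done = 0;
	struct xdp_buff *xdp;

	while (work_done < budget && (xdp = next_rx_xdp_buff(napi))) {
		if (run_xdp_prog(prog, xdp) == XDP_REDIRECT &&
		    !xdp_do_redirect(napi->dev, xdp, prog))
			need_flush = true;	/* queued, not yet sent */
		work_done++;
	}

	if (need_flush)
		xdp_do_flush();	/* one flush per poll batches the transmits */

	return work_done;
}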
@@ -137,7 +165,6 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
struct netdev_bpf xdp;
- bpf_op_t ndo_bpf;
int ret;
ASSERT_RTNL();
@@ -145,8 +172,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
if (!vf_netdev)
return 0;
- ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
- if (!ndo_bpf)
+ if (!vf_netdev->netdev_ops->ndo_bpf)
return 0;
memset(&xdp, 0, sizeof(xdp));
@@ -157,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = ndo_bpf(vf_netdev, &xdp);
+ ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
@@ -199,3 +225,68 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
return -EINVAL;
}
}
+
+static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
+ struct xdp_frame *frame, u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ netvsc_get_hash(skb, netdev_priv(ndev));
+
+ skb_record_rx_queue(skb, q_idx);
+
+ netvsc_xdp_xmit(skb, ndev);
+
+ return 0;
+}
+
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ const struct net_device_ops *vf_ops;
+ struct netvsc_stats_tx *tx_stats;
+ struct netvsc_device *nvsc_dev;
+ struct net_device *vf_netdev;
+ int i, count = 0;
+ u16 q_idx;
+
+ /* Don't transmit if netvsc_device is gone */
+ nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
+ if (unlikely(!nvsc_dev || nvsc_dev->destroy))
+ return 0;
+
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+	 * If netpoll is in use, the VF cannot be used either.
+ */
+ vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
+ vf_netdev->netdev_ops->ndo_xdp_xmit &&
+ ndev_ctx->data_path_is_vf) {
+ vf_ops = vf_netdev->netdev_ops;
+ return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
+ }
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
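For context, .ndo_xdp_xmit returns the number of frames it accepted, and the XDP core is expected to release whatever was not sent. A hedged sketch of the caller-side contract (illustrative, not the actual devmap flush code):

/* Illustration only: how a caller treats the ndo_xdp_xmit() return value */
int sent = netvsc_ndoxdp_xmit(ndev, n, frames, flags);

if (sent < 0)
	sent = 0;	/* hard error: no frames were consumed */
for (int i = sent; i < n; i++)
	xdp_return_frame(frames[i]);	/* free the unsent tail */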
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fde1c492ca02..27f6bbca6619 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -242,56 +242,6 @@ static inline void *init_ppi_data(struct rndis_message *msg,
return ppi + 1;
}
-/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
- * packets. We can use ethtool to change UDP hash level when necessary.
- */
-static inline u32 netvsc_get_hash(
- struct sk_buff *skb,
- const struct net_device_context *ndc)
-{
- struct flow_keys flow;
- u32 hash, pkt_proto = 0;
- static u32 hashrnd __read_mostly;
-
- net_get_random_once(&hashrnd, sizeof(hashrnd));
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
- return 0;
-
- switch (flow.basic.ip_proto) {
- case IPPROTO_TCP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_TCP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_TCP6_L4HASH;
-
- break;
-
- case IPPROTO_UDP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_UDP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_UDP6_L4HASH;
-
- break;
- }
-
- if (pkt_proto & ndc->l4_hash) {
- return skb_get_hash(skb);
- } else {
- if (flow.basic.n_proto == htons(ETH_P_IP))
- hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
- else
- return 0;
-
- __skb_set_sw_hash(skb, hash, false);
- }
-
- return hash;
-}
-
static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sk_buff *skb, int old_idx)
{
@@ -804,7 +754,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
}
/* This function should only be called after skb_record_rx_queue() */
-static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -925,7 +875,7 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
struct xdp_buff xdp;
u32 act;
@@ -934,6 +884,9 @@ int netvsc_recv_callback(struct net_device *net,
act = netvsc_run_xdp(net, nvchan, &xdp);
+ if (act == XDP_REDIRECT)
+ return NVSP_STAT_SUCCESS;
+
if (act != XDP_PASS && act != XDP_TX) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_drop++;
@@ -958,6 +911,9 @@ int netvsc_recv_callback(struct net_device *net,
* statistics will not work correctly.
*/
u64_stats_update_begin(&rx_stats->syncp);
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -1353,28 +1309,29 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
/* fetch percpu stats of netvsc */
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_ethtool_pcpu_stats *this_tot =
&pcpu_tot[nvchan->channel->target_cpu];
u64 packets, bytes;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;
@@ -1406,27 +1363,28 @@ static void netvsc_get_stats64(struct net_device *net,
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
u64 packets, bytes, multicast;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- multicast = stats->multicast + stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ multicast = rx_stats->multicast + rx_stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
@@ -1515,8 +1473,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
+/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1543,12 +1501,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
- const struct netvsc_stats *qstats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_vf_pcpu_stats sum;
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
+ u64 xdp_xmit;
int i, j, cpu;
if (!nvdev)
@@ -1562,26 +1524,32 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
for (j = 0; j < nvdev->num_chn; j++) {
- qstats = &nvdev->chan_table[j].tx_stats;
+ tx_stats = &nvdev->chan_table[j].tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
- qstats = &nvdev->chan_table[j].rx_stats;
+ rx_stats = &nvdev->chan_table[j].rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- xdp_drop = qstats->xdp_drop;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_redirect = rx_stats->xdp_redirect;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
+ data[i++] = xdp_redirect;
+ data[i++] = xdp_tx;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
@@ -1622,9 +1590,12 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
for (i = 0; i < nvdev->num_chn; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
ethtool_sprintf(&p, "rx_queue_%u_packets", i);
ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
}
for_each_present_cpu(cpu) {
@@ -2057,6 +2028,7 @@ static const struct net_device_ops device_ops = {
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
.ndo_bpf = netvsc_bpf,
+ .ndo_xdp_xmit = netvsc_ndoxdp_xmit,
};
/*
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 888e94278a84..e133eb2bebcf 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -130,9 +130,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
*/
if (data->endpoint.config.aggregation) {
limit += SZ_1K * aggr_byte_limit_max(ipa->version);
- if (buffer_size > limit) {
+ if (buffer_size - NET_SKB_PAD > limit) {
dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
- data->endpoint_id, buffer_size, limit);
+ data->endpoint_id,
+ buffer_size - NET_SKB_PAD, limit);
return false;
}
@@ -739,6 +740,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx_data *rx_data;
+ u32 buffer_size;
bool close_eof;
u32 limit;
@@ -746,7 +748,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- limit = ipa_aggr_size_kb(rx_data->buffer_size);
+ buffer_size = rx_data->buffer_size;
+ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
val |= aggr_byte_limit_encoded(version, limit);
limit = IPA_AGGR_TIME_LIMIT;
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index e2273588c75b..944d005d2bd1 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -3,6 +3,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/reset.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
@@ -21,6 +22,10 @@
#define ASPEED_MDIO_CTRL_OP GENMASK(27, 26)
#define MDIO_C22_OP_WRITE 0b01
#define MDIO_C22_OP_READ 0b10
+#define MDIO_C45_OP_ADDR 0b00
+#define MDIO_C45_OP_WRITE 0b01
+#define MDIO_C45_OP_PREAD 0b10
+#define MDIO_C45_OP_READ 0b11
#define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21)
#define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16)
#define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0)
@@ -37,36 +42,38 @@
struct aspeed_mdio {
void __iomem *base;
+ struct reset_control *reset;
};
-static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
+ u16 data)
{
struct aspeed_mdio *ctx = bus->priv;
u32 ctrl;
- u32 data;
- int rc;
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
- regnum);
-
- /* Just clause 22 for the moment */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
+ dev_dbg(&bus->dev, "%s: st: %u op: %u, phyad: %u, regad: %u, data: %u\n",
+ __func__, st, op, phyad, regad, data);
ctrl = ASPEED_MDIO_CTRL_FIRE
- | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
- | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ)
- | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
- | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum);
+ | FIELD_PREP(ASPEED_MDIO_CTRL_ST, st)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_OP, op)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, phyad)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regad)
+ | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data);
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
- rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
!(ctrl & ASPEED_MDIO_CTRL_FIRE),
ASPEED_MDIO_INTERVAL_US,
ASPEED_MDIO_TIMEOUT_US);
- if (rc < 0)
- return rc;
+}
+
+static int aspeed_mdio_get_data(struct mii_bus *bus)
+{
+ struct aspeed_mdio *ctx = bus->priv;
+ u32 data;
+ int rc;
rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
data & ASPEED_MDIO_DATA_IDLE,
@@ -78,31 +85,80 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data);
}
-static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+static int aspeed_mdio_read_c22(struct mii_bus *bus, int addr, int regnum)
{
- struct aspeed_mdio *ctx = bus->priv;
- u32 ctrl;
+ int rc;
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
- __func__, addr, regnum, val);
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ,
+ addr, regnum, 0);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_get_data(bus);
+}
+
+static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE,
+ addr, regnum, val);
+}
+
+static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum)
+{
+ u8 c45_dev = (regnum >> 16) & 0x1F;
+ u16 c45_addr = regnum & 0xFFFF;
+ int rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
+ addr, c45_dev, c45_addr);
+ if (rc < 0)
+ return rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ,
+ addr, c45_dev, 0);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_get_data(bus);
+}
+
+static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ u8 c45_dev = (regnum >> 16) & 0x1F;
+ u16 c45_addr = regnum & 0xFFFF;
+ int rc;
+
+ rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
+ addr, c45_dev, c45_addr);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE,
+ addr, c45_dev, val);
+}
+
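In this kernel generation a clause-45 access is signalled in-band: regnum carries the MII_ADDR_C45 flag, the MMD (device) address in bits 20:16, and the 16-bit register address in the low word -- exactly what the helpers above unpack. An illustrative encoding (the device and register values are made up):

#include <linux/mdio.h>

/* Illustration only: read register 0x0007 of MMD 1 (PMA/PMD) via C45 */
u8 devad = 1;
u16 reg = 0x0007;
int regnum = MII_ADDR_C45 | (devad << 16) | reg;

/* aspeed_mdio_read() then recovers:
 *   c45_dev  = (regnum >> 16) & 0x1F;   -> 1
 *   c45_addr = regnum & 0xFFFF;         -> 0x0007
 */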
+static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
+ regnum);
- /* Just clause 22 for the moment */
if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
+ return aspeed_mdio_read_c45(bus, addr, regnum);
- ctrl = ASPEED_MDIO_CTRL_FIRE
- | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
- | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE)
- | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
- | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum)
- | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val);
+ return aspeed_mdio_read_c22(bus, addr, regnum);
+}
- iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
+ __func__, addr, regnum, val);
- return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
- !(ctrl & ASPEED_MDIO_CTRL_FIRE),
- ASPEED_MDIO_INTERVAL_US,
- ASPEED_MDIO_TIMEOUT_US);
+ if (regnum & MII_ADDR_C45)
+ return aspeed_mdio_write_c45(bus, addr, regnum, val);
+
+ return aspeed_mdio_write_c22(bus, addr, regnum, val);
}
static int aspeed_mdio_probe(struct platform_device *pdev)
@@ -120,15 +176,23 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
+ ctx->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (IS_ERR(ctx->reset))
+ return PTR_ERR(ctx->reset);
+
+ reset_control_deassert(ctx->reset);
+
bus->name = DRV_NAME;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
bus->parent = &pdev->dev;
bus->read = aspeed_mdio_read;
bus->write = aspeed_mdio_write;
+ bus->probe_capabilities = MDIOBUS_C22_C45;
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ reset_control_assert(ctx->reset);
return rc;
}
@@ -139,7 +203,11 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
static int aspeed_mdio_remove(struct platform_device *pdev)
{
- mdiobus_unregister(platform_get_drvdata(pdev));
+ struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev);
+ struct aspeed_mdio *ctx = bus->priv;
+
+ reset_control_assert(ctx->reset);
+ mdiobus_unregister(bus);
return 0;
}
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 582969751b4c..08541007b18a 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -30,6 +31,8 @@
#define MSCC_MIIM_CMD_VLD BIT(31)
#define MSCC_MIIM_REG_DATA 0xC
#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17))
+#define MSCC_MIIM_REG_CFG 0x10
+#define MSCC_MIIM_CFG_PRESCALE_MASK GENMASK(7, 0)
#define MSCC_PHY_REG_PHY_CFG 0x0
#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3))
@@ -50,6 +53,8 @@ struct mscc_miim_dev {
int mii_status_offset;
struct regmap *phy_regs;
const struct mscc_miim_info *info;
+ struct clk *clk;
+ u32 bus_freq;
};
/* When high resolution timers aren't built-in: we can't use usleep_range() as
@@ -241,9 +246,33 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
}
EXPORT_SYMBOL(mscc_miim_setup);
+static int mscc_miim_clk_set(struct mii_bus *bus)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+ unsigned long rate;
+ u32 div;
+
+ /* Keep the current settings */
+ if (!miim->bus_freq)
+ return 0;
+
+ rate = clk_get_rate(miim->clk);
+
+ div = DIV_ROUND_UP(rate, 2 * miim->bus_freq) - 1;
+ if (div == 0 || div & ~MSCC_MIIM_CFG_PRESCALE_MASK) {
+ dev_err(&bus->dev, "Incorrect MDIO clock frequency\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(miim->regs, MSCC_MIIM_REG_CFG,
+ MSCC_MIIM_CFG_PRESCALE_MASK, div);
+}
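The divider follows from the MDIO clock being the bus clock divided by 2 * (div + 1). Working it through with hypothetical rates (both figures are illustrative):

/* Illustration only: 500 MHz system clock, 2.5 MHz target MDIO clock */
unsigned long rate = 500000000;
u32 bus_freq = 2500000;
u32 div = DIV_ROUND_UP(rate, 2 * bus_freq) - 1;	/* 100 - 1 = 99 */

/* 99 fits GENMASK(7, 0), so the check passes and the resulting MDIO
 * clock is rate / (2 * (div + 1)) = 2.5 MHz exactly.
 */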
+
static int mscc_miim_probe(struct platform_device *pdev)
{
struct regmap *mii_regmap, *phy_regmap = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
void __iomem *regs, *phy_regs;
struct mscc_miim_dev *miim;
struct resource *res;
@@ -252,63 +281,87 @@ static int mscc_miim_probe(struct platform_device *pdev)
regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(regs)) {
- dev_err(&pdev->dev, "Unable to map MIIM registers\n");
+ dev_err(dev, "Unable to map MIIM registers\n");
return PTR_ERR(regs);
}
- mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs,
- &mscc_miim_regmap_config);
+ mii_regmap = devm_regmap_init_mmio(dev, regs, &mscc_miim_regmap_config);
if (IS_ERR(mii_regmap)) {
- dev_err(&pdev->dev, "Unable to create MIIM regmap\n");
+ dev_err(dev, "Unable to create MIIM regmap\n");
return PTR_ERR(mii_regmap);
}
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
- phy_regs = devm_ioremap_resource(&pdev->dev, res);
+ phy_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(phy_regs)) {
- dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+ dev_err(dev, "Unable to map internal phy registers\n");
return PTR_ERR(phy_regs);
}
- phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
+ phy_regmap = devm_regmap_init_mmio(dev, phy_regs,
&mscc_miim_phy_regmap_config);
if (IS_ERR(phy_regmap)) {
- dev_err(&pdev->dev, "Unable to create phy register regmap\n");
+ dev_err(dev, "Unable to create phy register regmap\n");
return PTR_ERR(phy_regmap);
}
}
- ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0);
+ ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "Unable to setup the MDIO bus\n");
+ dev_err(dev, "Unable to setup the MDIO bus\n");
return ret;
}
miim = bus->priv;
miim->phy_regs = phy_regmap;
- miim->info = device_get_match_data(&pdev->dev);
+ miim->info = device_get_match_data(dev);
if (!miim->info)
return -EINVAL;
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ miim->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(miim->clk))
+ return PTR_ERR(miim->clk);
+
+ of_property_read_u32(np, "clock-frequency", &miim->bus_freq);
+
+ if (miim->bus_freq && !miim->clk) {
+ dev_err(dev, "cannot use clock-frequency without a clock\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(miim->clk);
+ if (ret)
return ret;
+
+ ret = mscc_miim_clk_set(bus);
+ if (ret)
+ goto out_disable_clk;
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0) {
+ dev_err(dev, "Cannot register MDIO bus (%d)\n", ret);
+ goto out_disable_clk;
}
platform_set_drvdata(pdev, bus);
return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(miim->clk);
+ return ret;
}
static int mscc_miim_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct mscc_miim_dev *miim = bus->priv;
+ clk_disable_unprepare(miim->clk);
mdiobus_unregister(bus);
return 0;
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 378ee779061c..c8f398f5bc5b 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -22,6 +22,7 @@
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <net/fib_notifier.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
@@ -78,7 +79,7 @@ struct nsim_fib_rt {
struct nsim_fib4_rt {
struct nsim_fib_rt common;
struct fib_info *fi;
- u8 tos;
+ dscp_t dscp;
u8 type;
};
@@ -283,7 +284,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data,
fib4_rt->fi = fen_info->fi;
fib_info_hold(fib4_rt->fi);
- fib4_rt->tos = fen_info->tos;
+ fib4_rt->dscp = fen_info->dscp;
fib4_rt->type = fen_info->type;
return fib4_rt;
@@ -322,7 +323,7 @@ nsim_fib4_rt_offload_failed_flag_set(struct net *net,
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
- fri.tos = fen_info->tos;
+ fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
@@ -342,7 +343,7 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net,
fri.tb_id = fib4_rt->common.key.tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
- fri.tos = fib4_rt->tos;
+ fri.dscp = fib4_rt->dscp;
fri.type = fib4_rt->type;
fri.offload = false;
fri.trap = trap;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fc53b71dc872..96840695debd 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -70,6 +70,27 @@
#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+#define KSZ9x31_LMD 0x12
+#define KSZ9x31_LMD_VCT_EN BIT(15)
+#define KSZ9x31_LMD_VCT_DIS_TX BIT(14)
+#define KSZ9x31_LMD_VCT_PAIR(n) (((n) & 0x3) << 12)
+#define KSZ9x31_LMD_VCT_SEL_RESULT 0
+#define KSZ9x31_LMD_VCT_SEL_THRES_HI BIT(10)
+#define KSZ9x31_LMD_VCT_SEL_THRES_LO BIT(11)
+#define KSZ9x31_LMD_VCT_SEL_MASK GENMASK(11, 10)
+#define KSZ9x31_LMD_VCT_ST_NORMAL 0
+#define KSZ9x31_LMD_VCT_ST_OPEN 1
+#define KSZ9x31_LMD_VCT_ST_SHORT 2
+#define KSZ9x31_LMD_VCT_ST_FAIL 3
+#define KSZ9x31_LMD_VCT_ST_MASK GENMASK(9, 8)
+#define KSZ9x31_LMD_VCT_DATA_REFLECTED_INVALID BIT(7)
+#define KSZ9x31_LMD_VCT_DATA_SIG_WAIT_TOO_LONG BIT(6)
+#define KSZ9x31_LMD_VCT_DATA_MASK100 BIT(5)
+#define KSZ9x31_LMD_VCT_DATA_NLP_FLP BIT(4)
+#define KSZ9x31_LMD_VCT_DATA_LO_PULSE_MASK GENMASK(3, 2)
+#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0)
+#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0)
+
/* Lan8814 general Interrupt control/status reg in GPHY specific block. */
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
@@ -280,6 +301,7 @@ struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
int led_mode;
+ u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
@@ -1326,6 +1348,199 @@ static int ksz9031_read_status(struct phy_device *phydev)
return 0;
}
+static int ksz9x31_cable_test_start(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * Prior to running the cable diagnostics, Auto-negotiation should
+ * be disabled, full duplex set and the link speed set to 1000Mbps
+ * via the Basic Control Register.
+ */
+ ret = phy_modify(phydev, MII_BMCR,
+ BMCR_SPEED1000 | BMCR_FULLDPLX |
+ BMCR_ANENABLE | BMCR_SPEED100,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ if (ret)
+ return ret;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * The Master-Slave configuration should be set to Slave by writing
+ * a value of 0x1000 to the Auto-Negotiation Master Slave Control
+ * Register.
+ */
+ ret = phy_read(phydev, MII_CTRL1000);
+ if (ret < 0)
+ return ret;
+
+	/* Cache these bits; they need to be restored once LinkMD finishes. */
+ priv->vct_ctrl1000 = ret & (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret &= ~(CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+ ret |= CTL1000_ENABLE_MASTER;
+
+ return phy_write(phydev, MII_CTRL1000, ret);
+}
+
+static int ksz9x31_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case KSZ9x31_LMD_VCT_ST_FAIL:
+ fallthrough;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool ksz9x31_cable_test_failed(u16 status)
+{
+ int stat = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status);
+
+ return stat == KSZ9x31_LMD_VCT_ST_FAIL;
+}
+
+static bool ksz9x31_cable_test_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) {
+ case KSZ9x31_LMD_VCT_ST_OPEN:
+ fallthrough;
+ case KSZ9x31_LMD_VCT_ST_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat)
+{
+ int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, stat);
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ *
+ * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity
+ */
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131)
+ dt = clamp(dt - 22, 0, 255);
+
+ return (dt * 400) / 10;
+}
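Working the datasheet formula through once makes the units clear -- the helper returns centimetres. With a hypothetical raw reading on a KSZ9131:

/* Illustration only: raw VCT_DATA reading of 122 on a KSZ9131 */
int dt = 122;

dt = clamp(dt - 22, 0, 255);	/* 100 after the datasheet's offset */
/* (dt * 400) / 10 = dt * 40 = 4000, i.e. the fault is ~40 m away */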
+
+static int ksz9x31_cable_test_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = phy_read_poll_timeout(phydev, KSZ9x31_LMD, val,
+ !(val & KSZ9x31_LMD_VCT_EN),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ksz9x31_cable_test_get_pair(int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+
+ return ethtool_pair[pair];
+}
+
+static int ksz9x31_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ int ret, val;
+
+ /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic
+ * To test each individual cable pair, set the cable pair in the Cable
+ * Diagnostics Test Pair (VCT_PAIR[1:0]) field of the LinkMD Cable
+ * Diagnostic Register, along with setting the Cable Diagnostics Test
+ * Enable (VCT_EN) bit. The Cable Diagnostics Test Enable (VCT_EN) bit
+ * will self clear when the test is concluded.
+ */
+ ret = phy_write(phydev, KSZ9x31_LMD,
+ KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(pair));
+ if (ret)
+ return ret;
+
+ ret = ksz9x31_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, KSZ9x31_LMD);
+ if (val < 0)
+ return val;
+
+ if (ksz9x31_cable_test_failed(val))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_result_trans(val));
+ if (ret)
+ return ret;
+
+ if (!ksz9x31_cable_test_fault_length_valid(val))
+ return 0;
+
+ return ethnl_cable_test_fault_length(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ksz9x31_cable_test_fault_length(phydev, val));
+}
+
+static int ksz9x31_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ unsigned long pair_mask = 0xf;
+ int retries = 20;
+ int pair, ret, rv;
+
+ *finished = false;
+
+ /* Try harder if link partner is active */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ksz9x31_cable_test_one_pair(phydev, pair);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+ clear_bit(pair, &pair_mask);
+ }
+		/* If the link partner is in autonegotiation mode it will send
+		 * 2ms of FLPs with at least 6ms of silence in between.
+		 * Add a 2ms sleep to improve the chances of hitting this
+		 * silence.
+ */
+ if (pair_mask)
+ usleep_range(2000, 3000);
+ }
+
+	/* Report the results of any remaining unfinished pairs as unknown. */
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ethnl_cable_test_result(phydev,
+ ksz9x31_cable_test_get_pair(pair),
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ *finished = true;
+
+ /* Restore cached bits from before LinkMD got started. */
+ rv = phy_modify(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER,
+ priv->vct_ctrl1000);
+ if (rv)
+ return rv;
+
+ return ret;
+}
+
static int ksz8873mll_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -2806,6 +3021,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.get_features = ksz9031_get_features,
@@ -2819,6 +3035,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -2853,6 +3071,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -2863,6 +3082,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index c2c0e361fd3d..d4c93d59bc53 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -68,7 +68,12 @@
#define T1_POST_LCK_MUFACT_CFG_REG 0x1C
#define T1_TX_RX_FIFO_CFG_REG 0x02
#define T1_TX_LPF_FIR_CFG_REG 0x55
+#define T1_COEF_CLK_PWR_DN_CFG 0x04
+#define T1_COEF_RW_CTL_CFG 0x0D
#define T1_SQI_CONFIG_REG 0x2E
+#define T1_SQI_CONFIG2_REG 0x4A
+#define T1_DCQ_SQI_REG 0xC3
+#define T1_DCQ_SQI_MSK GENMASK(3, 1)
#define T1_MDIO_CONTROL2_REG 0x10
#define T1_INTERRUPT_SOURCE_REG 0x18
#define T1_INTERRUPT2_SOURCE_REG 0x08
@@ -82,6 +87,9 @@
#define T1_MODE_STAT_REG 0x11
#define T1_LINK_UP_MSK BIT(0)
+/* SQI defines */
+#define LAN87XX_MAX_SQI 0x07
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
@@ -346,9 +354,20 @@ static int lan87xx_phy_init(struct phy_device *phydev)
T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 },
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ /* Setup SQI measurement */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_CLK_PWR_DN_CFG, 0x16d6, 0 },
/* SQI enable */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_SQI_CONFIG_REG, 0x9572, 0 },
+ /* SQI select mode 5 */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SQI_CONFIG2_REG, 0x0001, 0 },
+	/* Throw away the first SQI reading */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_COEF_RW_CTL_CFG, 0x0301, 0 },
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP,
+ T1_DCQ_SQI_REG, 0, 0 },
/* Flag LPS and WUR as idle errors */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
T1_MDIO_CONTROL2_REG, 0x0014, 0 },
@@ -724,6 +743,31 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
}
+static int lan87xx_get_sqi(struct phy_device *phydev)
+{
+ u8 sqi_value = 0;
+ int rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_DSP, T1_COEF_RW_CTL_CFG, 0x0301);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, T1_DCQ_SQI_REG, 0x0);
+ if (rc < 0)
+ return rc;
+
+ sqi_value = FIELD_GET(T1_DCQ_SQI_MSK, rc);
+
+ return sqi_value;
+}
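The SQI value lives in bits 3:1 of T1_DCQ_SQI_REG, so FIELD_GET() masks and shifts in one step. A worked example with a hypothetical raw read:

/* Illustration only: raw T1_DCQ_SQI_REG read of 0x000E */
int rc = 0x000E;
u8 sqi = FIELD_GET(T1_DCQ_SQI_MSK, rc);	/* (0x0E & 0b1110) >> 1 = 7 */
/* 7 == LAN87XX_MAX_SQI, i.e. the best reportable signal quality */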
+
+static int lan87xx_get_sqi_max(struct phy_device *phydev)
+{
+ return LAN87XX_MAX_SQI;
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
@@ -737,6 +781,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.resume = genphy_resume,
.config_aneg = lan87xx_config_aneg,
.read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
},
@@ -746,10 +792,14 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.flags = PHY_POLL_CABLE_TEST,
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
+ .config_intr = lan87xx_phy_config_intr,
+ .handle_interrupt = lan87xx_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.config_aneg = lan87xx_config_aneg,
.read_status = lan87xx_read_status,
+ .get_sqi = lan87xx_get_sqi,
+ .get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 06943889d747..33c285252584 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2778,34 +2778,6 @@ static const struct sfp_upstream_ops sfp_phylink_ops = {
/* Helpers for MAC drivers */
-/**
- * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper
- * @state: a pointer to a &struct phylink_link_state
- *
- * Inspect the interface mode, advertising mask or forced speed and
- * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching
- * the interface mode to suit. @state->interface is appropriately
- * updated, and the advertising mask has the "other" baseX_Full flag
- * cleared.
- */
-void phylink_helper_basex_speed(struct phylink_link_state *state)
-{
- if (phy_interface_mode_is_8023z(state->interface)) {
- bool want_2500 = state->an_enabled ?
- phylink_test(state->advertising, 2500baseX_Full) :
- state->speed == SPEED_2500;
-
- if (want_2500) {
- phylink_clear(state->advertising, 1000baseX_Full);
- state->interface = PHY_INTERFACE_MODE_2500BASEX;
- } else {
- phylink_clear(state->advertising, 2500baseX_Full);
- state->interface = PHY_INTERFACE_MODE_1000BASEX;
- }
- }
-}
-EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
-
static void phylink_decode_c37_word(struct phylink_link_state *state,
uint16_t config_reg, int speed)
{
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3619520340b7..1b41cd9732d7 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1011,8 +1011,7 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
goto end;
}
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &error);
+ skb = skb_recv_datagram(sk, flags, &error);
if (error < 0)
goto end;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9b4dfa3001d6..2de09ad5bac0 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -479,7 +479,7 @@ static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf)
* device MAC address has been updated). Always set MAC address to that of the
* device.
*/
-static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02))
return 1;
@@ -489,6 +489,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
+EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup);
/* Ensure correct link state
*
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 15f91d691bba..cdca00c0dc1f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1492,19 +1492,19 @@ static void cdc_ncm_txpath_bh(struct tasklet_struct *t)
struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh);
struct usbnet *dev = ctx->dev;
- spin_lock_bh(&ctx->mtx);
+ spin_lock(&ctx->mtx);
if (ctx->tx_timer_pending != 0) {
ctx->tx_timer_pending--;
cdc_ncm_tx_timeout_start(ctx);
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
} else if (dev->net != NULL) {
ctx->tx_reason_timeout++; /* count reason for transmitting */
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
netif_tx_unlock_bh(dev->net);
} else {
- spin_unlock_bh(&ctx->mtx);
+ spin_unlock(&ctx->mtx);
}
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3353e761016d..79f8bd849b1a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -190,7 +190,6 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER);
if (!skbn)
return 0;
- skbn->dev = net;
switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
@@ -1351,6 +1350,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
{QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0xc081, 8)}, /* Sierra Wireless EM7590 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 247f58cb0f84..4e70dec30e5a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,10 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
goto halt_fail_and_release;
}
- if (bp[0] & 0x02)
- eth_hw_addr_random(net);
- else
- eth_hw_addr_set(net, bp);
+ eth_hw_addr_set(net, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
@@ -463,6 +460,16 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS);
}
+static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status = rndis_bind(dev, intf);
+
+ if (!status && (dev->net->dev_addr[0] & 0x02))
+ eth_hw_addr_random(dev->net);
+
+ return status;
+}
+
void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct rndis_halt *halt;
@@ -485,10 +492,14 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ bool dst_mac_fixup;
+
/* This check is no longer done by usbnet */
if (skb->len < dev->net->hard_header_len)
return 0;
+ dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP);
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
@@ -523,10 +534,17 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
skb_pull(skb, msg_len - sizeof *hdr);
skb_trim(skb2, data_len);
+
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb2);
+
usbnet_skb_return(dev, skb2);
}
/* caller will usbnet_skb_return the remaining packet */
+ if (unlikely(dst_mac_fixup))
+ usbnet_cdc_zte_rx_fixup(dev, skb);
+
return 1;
}
EXPORT_SYMBOL_GPL(rndis_rx_fixup);
@@ -600,6 +618,17 @@ static const struct driver_info rndis_poll_status_info = {
.tx_fixup = rndis_tx_fixup,
};
+static const struct driver_info zte_rndis_info = {
+ .description = "ZTE RNDIS device",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+ .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP,
+ .bind = zte_rndis_bind,
+ .unbind = rndis_unbind,
+ .status = rndis_status,
+ .rx_fixup = rndis_rx_fixup,
+ .tx_fixup = rndis_tx_fixup,
+};
+
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -614,6 +643,16 @@ static const struct usb_device_id products [] = {
USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long)&rndis_info,
}, {
+ /* ZTE WWAN modules */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
+ /* ZTE WWAN modules, ACM flavour */
+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2,
+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+ .driver_info = (unsigned long)&zte_rndis_info,
+}, {
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_info,
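
Editor's note: the new zte_rndis_info path exists because some ZTE RNDIS modems report a locally administered MAC (0x02 bit set) and send frames whose destination MAC does not match the host address. zte_rndis_bind() randomizes the address only for those devices, and RNDIS_DRIVER_DATA_DST_MAC_FIXUP makes rndis_rx_fixup() run usbnet_cdc_zte_rx_fixup() on each unbatched frame. A hedged sketch of what such a destination-MAC fixup can look like; the real helper lives in cdc_ether.c and may differ, and the name below is illustrative:

	#include <linux/etherdevice.h>
	#include <linux/usb/usbnet.h>

	static void demo_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
	{
		/* Rewrite a bogus locally administered unicast destination
		 * address to our own so the stack does not drop the frame.
		 */
		if (skb->len < ETH_HLEN)
			return;
		if (is_multicast_ether_addr(skb->data))
			return;
		if (skb->data[0] & 0x02)
			memcpy(skb->data, dev->net->dev_addr, ETH_ALEN);
	}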
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
index 18f670251275..952e6f7c0321 100644
--- a/drivers/net/usb/sr9800.h
+++ b/drivers/net/usb/sr9800.h
@@ -163,7 +163,7 @@
#define SR9800_MAX_BULKIN_24K 6
#define SR9800_MAX_BULKIN_32K 7
-struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+static const struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
/* 2k */
{2048, 0x8000, 0x8001},
/* 4k */
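
Editor's note: the sr9800.h change matters because a non-static definition in a header has external linkage, so every translation unit including the header emits its own definition of SR9800_BULKIN_SIZE and the link fails with multiple-definition errors; the table was also needlessly writable. With static const, each includer gets a private read-only copy the compiler can place in .rodata. Illustration (not the driver's code):

	/* demo.h - wrong: external-linkage definition, duplicated per includer */
	/* struct { unsigned short size; } demo_tbl[] = { { 2048 } }; */

	/* demo.h - right: private, immutable copy per translation unit */
	static const struct { unsigned short size; } demo_tbl[] = { { 2048 } };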
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 140780ac1745..dcb069dde66b 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -23,78 +23,6 @@ menuconfig WAN
if WAN
-# There is no way to detect a comtrol sv11 - force it modular for now.
-config HOSTESS_SV11
- tristate "Comtrol Hostess SV-11 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- Driver for Comtrol Hostess SV-11 network card which
- operates on low speed synchronous serial links at up to
- 256Kbps, supporting PPP and Cisco HDLC.
-
- The driver will be compiled as a module: the
- module will be called hostess_sv11.
-
-# The COSA/SRP driver has not been tested as non-modular yet.
-config COSA
- tristate "COSA/SRP sync serial boards support"
- depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
- help
- Driver for COSA and SRP synchronous serial boards.
-
- These boards allow you to connect synchronous serial devices (for example
- base-band modems, or any other device with the X.21, V.24, V.35 or
- V.36 interface) to your Linux box. The cards can work as a
- character device, a synchronous PPP network device, or a Cisco HDLC
- network device.
-
- You will need the user-space utilities for the COSA or SRP boards to
- download the firmware to the cards and to set them up. Look at
- <http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
- read the comment at the top of the <file:drivers/net/wan/cosa.c> for
- details about the cards and the driver itself.
-
- The driver will be compiled as a module: the
- module will be called cosa.
-
-#
-# Lan Media's board. Currently 1000, 1200, 5200, 5245
-#
-config LANMEDIA
- tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
- depends on PCI && VIRT_TO_BUS && HDLC
- help
- Driver for the following Lan Media family of serial boards:
-
- - LMC 1000 board allows you to connect synchronous serial devices
- (for example base-band modems, or any other device with the X.21,
- V.24, V.35 or V.36 interface) to your Linux box.
-
- - LMC 1200 with on board DSU board allows you to connect your Linux
- box directly to a T1 or E1 circuit.
-
- - LMC 5200 board provides a HSSI interface capable of running up to
- 52 Mbits per second.
-
- - LMC 5245 board connects directly to a T3 circuit saving the
- additional external hardware.
-
- To change settings such as the clock source you will need lmcctl.
- It is available at <ftp://ftp.lanmedia.com/> (broken link).
-
- To compile this driver as a module, choose M here: the
- module will be called lmc.
-
-# There is no way to detect a Sealevel board. Force it modular
-config SEALEVEL_4021
- tristate "Sealevel Systems 4021 support"
- depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
- help
- This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
-
- The driver will be compiled as a module: the
- module will be called sealevel.
-
# Generic HDLC
config HDLC
tristate "Generic HDLC layer"
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 480bcd1f6c1c..5bec8fae47f8 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,13 +14,8 @@ obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
-obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
-obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
-obj-$(CONFIG_COSA) += cosa.o
obj-$(CONFIG_FARSYNC) += farsync.o
-obj-$(CONFIG_LANMEDIA) += lmc/
-
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_N2) += n2.o
obj-$(CONFIG_C101) += c101.o
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
deleted file mode 100644
index 1e5672019922..000000000000
--- a/drivers/net/wan/cosa.c
+++ /dev/null
@@ -1,2052 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
-
-/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-/* The driver for the SRP and COSA synchronous serial cards.
- *
- * HARDWARE INFO
- *
- * Both cards are developed at the Institute of Computer Science,
- * Masaryk University (https://www.ics.muni.cz/). The hardware is
- * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
- * and the photo of both cards is available at
- * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
- * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/.
- * For Linux-specific utilities, see below in the "Software info" section.
- * If you want to order the card, contact Jiri Novotny.
- *
- * The SRP (serial port?, the Czech word "srp" means "sickle") card
- * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card
- * with V.24 interfaces up to 80kb/s each.
- *
- * The COSA (communication serial adapter?, the Czech word "kosa" means
- * "scythe") is a next-generation sync/async board with two interfaces
- * - currently any of V.24, X.21, V.35 and V.36 can be selected.
- * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel.
- * The 8-channel version is in development.
- *
- * Both types have downloadable firmware and communicate via ISA DMA.
- * COSA can also be a bus-mastering device.
- *
- * SOFTWARE INFO
- *
- * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/.
- * The CVS tree of Linux driver can be viewed there, as well as the
- * firmware binaries and user-space utilities for downloading the firmware
- * into the card and setting up the card.
- *
- * The Linux driver (unlike the present *BSD drivers :-) can work even
- * for the COSA and SRP in one computer and allows each channel to work
- * in one of the two modes (character or network device).
- *
- * AUTHOR
- *
- * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>.
- *
- * You can mail me bugfixes and even success reports. I am especially
- * interested in the SMP and/or multi-channel success/failure reports
- * (I wonder if I did the locking properly :-).
- *
- * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER
- *
- * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek
- * The skeleton.c by Donald Becker
- * The SDL Riscom/N2 driver by Mike Natale
- * The Comtrol Hostess SV11 driver by Alan Cox
- * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-
-#undef COSA_SLOW_IO /* for testing purposes only */
-
-#include "cosa.h"
-
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Maximum length of the channel name */
-#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1)
-
-/* Per-channel data structure */
-
-struct channel_data {
- int usage; /* Usage count; >0 for chrdev, -1 for netdev */
- int num; /* Number of the channel */
- struct cosa_data *cosa; /* Pointer to the per-card structure */
- int txsize; /* Size of transmitted data */
- char *txbuf; /* Transmit buffer */
- char name[COSA_MAX_NAME]; /* channel name */
-
- /* The HW layer interface */
- /* routine called from the RX interrupt */
- char *(*setup_rx)(struct channel_data *channel, int size);
- /* routine called when the RX is done (from the EOT interrupt) */
- int (*rx_done)(struct channel_data *channel);
- /* routine called when the TX is done (from the EOT interrupt) */
- int (*tx_done)(struct channel_data *channel, int size);
-
- /* Character device parts */
- struct mutex rlock;
- struct semaphore wsem;
- char *rxdata;
- int rxsize;
- wait_queue_head_t txwaitq, rxwaitq;
- int tx_status, rx_status;
-
- /* generic HDLC device parts */
- struct net_device *netdev;
- struct sk_buff *rx_skb, *tx_skb;
-};
-
-/* cosa->firmware_status bits */
-#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? */
-#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */
-#define COSA_FW_START BIT(2) /* Is the microcode running? */
-
-struct cosa_data {
- int num; /* Card number */
- char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */
- unsigned int datareg, statusreg; /* I/O ports */
- unsigned short irq, dma; /* IRQ and DMA number */
- unsigned short startaddr; /* Firmware start address */
- unsigned short busmaster; /* Use busmastering? */
- int nchannels; /* # of channels on this card */
- int driver_status; /* For communicating with firmware */
- int firmware_status; /* Downloaded, reset, etc. */
- unsigned long rxbitmap, txbitmap;/* Bitmaps of channels willing to send/receive data */
- unsigned long rxtx; /* RX or TX in progress? */
- int enabled;
- int usage; /* usage count */
- int txchan, txsize, rxsize;
- struct channel_data *rxchan;
- char *bouncebuf;
- char *txbuf, *rxbuf;
- struct channel_data *chan;
- spinlock_t lock; /* For exclusive operations on this structure */
- char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */
- char *type; /* card type */
-};
-
-/* Define this if you want all the possible ports to be autoprobed.
- * It is here but it probably is not a good idea to use this.
- */
-/* #define COSA_ISA_AUTOPROBE 1*/
-
-/* Character device major number. 117 was allocated for us.
- * The value of 0 means to allocate the first free one.
- */
-static DEFINE_MUTEX(cosa_chardev_mutex);
-static int cosa_major = 117;
-
-/* Encoding of the minor numbers:
- * The lowest CARD_MINOR_BITS bits mean the channel within a single card,
- * the highest bits mean the card number.
- */
-#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
- * for the single card
- */
-/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
- * macro doesn't like anything other than the raw number as an argument :-(
- */
-#define MAX_CARDS 16
-/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */
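
Editor's note: the encoding above is decoded in cosa_open() further down; a sketch of the split, with a hypothetical helper name:

	static inline void demo_decode_minor(unsigned int minor,
					     unsigned int *card, unsigned int *chan)
	{
		*card = minor >> CARD_MINOR_BITS;
		*chan = minor & ((1 << CARD_MINOR_BITS) - 1);
	}

With CARD_MINOR_BITS == 4, minor 0x23 decodes to card 2, channel 3.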
-
-#define DRIVER_RX_READY 0x0001
-#define DRIVER_TX_READY 0x0002
-#define DRIVER_TXMAP_SHIFT 2
-#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
-
-/* for cosa->rxtx - indicates whether either transmit or receive is
- * in progress. These values are the numbers of the respective bits.
- */
-#define TXBIT 0
-#define RXBIT 1
-#define IRQBIT 2
-
-#define COSA_MTU 2000 /* FIXME: I don't know this exactly */
-
-#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */
-#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
-#undef DEBUG_IO //1 /* Dump the I/O traffic */
-
-#define TX_TIMEOUT (5 * HZ)
-
-/* Maybe the following should be allocated dynamically */
-static struct cosa_data cosa_cards[MAX_CARDS];
-static int nr_cards;
-
-#ifdef COSA_ISA_AUTOPROBE
-static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,};
-/* NOTE: DMA is not autoprobed!!! */
-static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,};
-#else
-static int io[MAX_CARDS + 1];
-static int dma[MAX_CARDS + 1];
-#endif
-/* IRQ can be safely autoprobed */
-static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,};
-
-/* for class stuff*/
-static struct class *cosa_class;
-
-#ifdef MODULE
-module_param_hw_array(io, int, ioport, NULL, 0);
-MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards");
-module_param_hw_array(irq, int, irq, NULL, 0);
-MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards");
-module_param_hw_array(dma, int, dma, NULL, 0);
-MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards");
-
-MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>");
-MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card");
-MODULE_LICENSE("GPL");
-#endif
-
-/* I use this mainly for testing purposes */
-#ifdef COSA_SLOW_IO
-#define cosa_outb outb_p
-#define cosa_outw outw_p
-#define cosa_inb inb_p
-#define cosa_inw inw_p
-#else
-#define cosa_outb outb
-#define cosa_outw outw
-#define cosa_inb inb
-#define cosa_inw inw
-#endif
-
-#define is_8bit(cosa) (!((cosa)->datareg & 0x08))
-
-#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg))
-#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg))
-#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg))
-#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg))
-#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg))
-#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg))
-
-/* Initialization stuff */
-static int cosa_probe(int ioaddr, int irq, int dma);
-
-/* HW interface */
-static void cosa_enable_rx(struct channel_data *chan);
-static void cosa_disable_rx(struct channel_data *chan);
-static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
-static void cosa_kick(struct cosa_data *cosa);
-static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
-
-/* Network device stuff */
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity);
-static int cosa_net_open(struct net_device *d);
-static int cosa_net_close(struct net_device *d);
-static void cosa_net_timeout(struct net_device *d, unsigned int txqueue);
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
-static char *cosa_net_setup_rx(struct channel_data *channel, int size);
-static int cosa_net_rx_done(struct channel_data *channel);
-static int cosa_net_tx_done(struct channel_data *channel, int size);
-
-/* Character device */
-static char *chrdev_setup_rx(struct channel_data *channel, int size);
-static int chrdev_rx_done(struct channel_data *channel);
-static int chrdev_tx_done(struct channel_data *channel, int size);
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos);
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static __poll_t cosa_poll(struct file *file, poll_table *poll);
-static int cosa_open(struct inode *inode, struct file *file);
-static int cosa_release(struct inode *inode, struct file *file);
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-#ifdef COSA_FASYNC_WORKING
-static int cosa_fasync(struct inode *inode, struct file *file, int on);
-#endif
-
-static const struct file_operations cosa_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = cosa_read,
- .write = cosa_write,
- .poll = cosa_poll,
- .unlocked_ioctl = cosa_chardev_ioctl,
- .open = cosa_open,
- .release = cosa_release,
-#ifdef COSA_FASYNC_WORKING
- .fasync = cosa_fasync,
-#endif
-};
-
-/* Ioctls */
-static int cosa_start(struct cosa_data *cosa, int address);
-static int cosa_reset(struct cosa_data *cosa);
-static int cosa_download(struct cosa_data *cosa, void __user *a);
-static int cosa_readmem(struct cosa_data *cosa, void __user *a);
-
-/* COSA/SRP ROM monitor */
-static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address);
-static int startmicrocode(struct cosa_data *cosa, int address);
-static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address);
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
-
-/* Auxiliary functions */
-static int get_wait_data(struct cosa_data *cosa);
-static int put_wait_data(struct cosa_data *cosa, int data);
-static int puthexnumber(struct cosa_data *cosa, int number);
-static void put_driver_status(struct cosa_data *cosa);
-static void put_driver_status_nolock(struct cosa_data *cosa);
-
-/* Interrupt handling */
-static irqreturn_t cosa_interrupt(int irq, void *cosa);
-
-/* I/O ops debugging */
-#ifdef DEBUG_IO
-static void debug_data_in(struct cosa_data *cosa, int data);
-static void debug_data_out(struct cosa_data *cosa, int data);
-static void debug_data_cmd(struct cosa_data *cosa, int data);
-static void debug_status_in(struct cosa_data *cosa, int status);
-static void debug_status_out(struct cosa_data *cosa, int status);
-#endif
-
-static inline struct channel_data *dev_to_chan(struct net_device *dev)
-{
- return (struct channel_data *)dev_to_hdlc(dev)->priv;
-}
-
-/* ---------- Initialization stuff ---------- */
-
-static int __init cosa_init(void)
-{
- int i, err = 0;
-
- if (cosa_major > 0) {
- if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
- pr_warn("unable to get major %d\n", cosa_major);
- err = -EIO;
- goto out;
- }
- } else {
- cosa_major = register_chrdev(0, "cosa", &cosa_fops);
- if (cosa_major < 0) {
- pr_warn("unable to register chardev\n");
- err = -EIO;
- goto out;
- }
- }
- for (i = 0; i < MAX_CARDS; i++)
- cosa_cards[i].num = -1;
- for (i = 0; io[i] != 0 && i < MAX_CARDS; i++)
- cosa_probe(io[i], irq[i], dma[i]);
- if (!nr_cards) {
- pr_warn("no devices found\n");
- unregister_chrdev(cosa_major, "cosa");
- err = -ENODEV;
- goto out;
- }
- cosa_class = class_create(THIS_MODULE, "cosa");
- if (IS_ERR(cosa_class)) {
- err = PTR_ERR(cosa_class);
- goto out_chrdev;
- }
- for (i = 0; i < nr_cards; i++)
- device_create(cosa_class, NULL, MKDEV(cosa_major, i), NULL,
- "cosa%d", i);
- err = 0;
- goto out;
-
-out_chrdev:
- unregister_chrdev(cosa_major, "cosa");
-out:
- return err;
-}
-module_init(cosa_init);
-
-static void __exit cosa_exit(void)
-{
- struct cosa_data *cosa;
- int i;
-
- for (i = 0; i < nr_cards; i++)
- device_destroy(cosa_class, MKDEV(cosa_major, i));
- class_destroy(cosa_class);
-
- for (cosa = cosa_cards; nr_cards--; cosa++) {
- /* Clean up the per-channel data */
- for (i = 0; i < cosa->nchannels; i++) {
- /* Chardev driver has no alloc'd per-channel data */
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- /* Clean up the per-card data */
- kfree(cosa->chan);
- kfree(cosa->bouncebuf);
- free_irq(cosa->irq, cosa);
- free_dma(cosa->dma);
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- }
- unregister_chrdev(cosa_major, "cosa");
-}
-module_exit(cosa_exit);
-
-static const struct net_device_ops cosa_ops = {
- .ndo_open = cosa_net_open,
- .ndo_stop = cosa_net_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
- .ndo_tx_timeout = cosa_net_timeout,
-};
-
-static int cosa_probe(int base, int irq, int dma)
-{
- struct cosa_data *cosa = cosa_cards + nr_cards;
- int i, err = 0;
-
- memset(cosa, 0, sizeof(struct cosa_data));
-
- /* Checking validity of parameters: */
- /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
- if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
- pr_info("invalid IRQ %d\n", irq);
- return -1;
- }
- /* I/O address should be between 0x100 and 0x3ff and should be
- * multiple of 8.
- */
- if (base < 0x100 || base > 0x3ff || base & 0x7) {
- pr_info("invalid I/O address 0x%x\n", base);
- return -1;
- }
- /* DMA should be 0,1 or 3-7 */
- if (dma < 0 || dma == 4 || dma > 7) {
- pr_info("invalid DMA %d\n", dma);
- return -1;
- }
- /* and finally, on 16-bit COSA DMA should be 4-7 and
- * I/O base should not be multiple of 0x10
- */
- if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
- pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
- base, dma);
- return -1;
- }
-
- cosa->dma = dma;
- cosa->datareg = base;
- cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2;
- spin_lock_init(&cosa->lock);
-
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa"))
- return -1;
-
- if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
- printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
- err = -1;
- goto err_out;
- }
-
- /* Test the validity of identification string */
- if (!strncmp(cosa->id_string, "SRP", 3)) {
- cosa->type = "srp";
- } else if (!strncmp(cosa->id_string, "COSA", 4)) {
- cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16";
- } else {
-/* Print a warning only if we are not autoprobing */
-#ifndef COSA_ISA_AUTOPROBE
- pr_info("valid signature not found at 0x%x\n", base);
-#endif
- err = -1;
- goto err_out;
- }
- /* Update the name of the region now we know the type of card */
- release_region(base, is_8bit(cosa) ? 2 : 4);
- if (!request_region(base, is_8bit(cosa) ? 2 : 4, cosa->type)) {
- printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
- return -1;
- }
-
- /* Now do IRQ autoprobe */
- if (irq < 0) {
- unsigned long irqs;
-/* pr_info("IRQ autoprobe\n"); */
- irqs = probe_irq_on();
- /* Enable interrupt on tx buffer empty (it sure is)
- * really sure ?
- * FIXME: When this code is not used as module, we should
- * probably call udelay() instead of the interruptible sleep.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- schedule_timeout(msecs_to_jiffies(300));
- irq = probe_irq_off(irqs);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
- /* Empty the received data register */
- cosa_getdata8(cosa);
-
- if (irq < 0) {
- pr_info("multiple interrupts obtained (%d, board at 0x%x)\n",
- irq, cosa->datareg);
- err = -1;
- goto err_out;
- }
- if (irq == 0) {
- pr_info("no interrupt obtained (board at 0x%x)\n",
- cosa->datareg);
- /* return -1; */
- }
- }
-
- cosa->irq = irq;
- cosa->num = nr_cards;
- cosa->usage = 0;
- cosa->nchannels = 2; /* FIXME: how to determine this? */
-
- if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
- err = -1;
- goto err_out;
- }
- if (request_dma(cosa->dma, cosa->type)) {
- err = -1;
- goto err_out1;
- }
-
- cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA);
- if (!cosa->bouncebuf) {
- err = -ENOMEM;
- goto err_out2;
- }
- sprintf(cosa->name, "cosa%d", cosa->num);
-
- /* Initialize the per-channel data */
- cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
- if (!cosa->chan) {
- err = -ENOMEM;
- goto err_out3;
- }
-
- for (i = 0; i < cosa->nchannels; i++) {
- struct channel_data *chan = &cosa->chan[i];
-
- chan->cosa = cosa;
- chan->num = i;
- sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
-
- /* Initialize the chardev data structures */
- mutex_init(&chan->rlock);
- sema_init(&chan->wsem, 1);
-
- /* Register the network interface */
- chan->netdev = alloc_hdlcdev(chan);
- if (!chan->netdev) {
- pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
- err = -ENOMEM;
- goto err_hdlcdev;
- }
- dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
- dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
- chan->netdev->netdev_ops = &cosa_ops;
- chan->netdev->watchdog_timeo = TX_TIMEOUT;
- chan->netdev->base_addr = chan->cosa->datareg;
- chan->netdev->irq = chan->cosa->irq;
- chan->netdev->dma = chan->cosa->dma;
- err = register_hdlc_device(chan->netdev);
- if (err) {
- netdev_warn(chan->netdev,
- "register_hdlc_device() failed\n");
- free_netdev(chan->netdev);
- goto err_hdlcdev;
- }
- }
-
- pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
- cosa->num, cosa->id_string, cosa->type,
- cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
-
- return nr_cards++;
-
-err_hdlcdev:
- while (i-- > 0) {
- unregister_hdlc_device(cosa->chan[i].netdev);
- free_netdev(cosa->chan[i].netdev);
- }
- kfree(cosa->chan);
-err_out3:
- kfree(cosa->bouncebuf);
-err_out2:
- free_dma(cosa->dma);
-err_out1:
- free_irq(cosa->irq, cosa);
-err_out:
- release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
- pr_notice("cosa%d: allocating resources failed\n", cosa->num);
- return err;
-}
-
-/*---------- network device ---------- */
-
-static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static int cosa_net_open(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- int err;
- unsigned long flags;
-
- if (!(chan->cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- chan->cosa->name, chan->cosa->firmware_status);
- return -EPERM;
- }
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->usage != 0) {
- pr_warn("%s: cosa_net_open called with usage count %d\n",
- chan->name, chan->usage);
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return -EBUSY;
- }
- chan->setup_rx = cosa_net_setup_rx;
- chan->tx_done = cosa_net_tx_done;
- chan->rx_done = cosa_net_rx_done;
- chan->usage = -1;
- chan->cosa->usage++;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
-
- err = hdlc_open(dev);
- if (err) {
- spin_lock_irqsave(&chan->cosa->lock, flags);
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return err;
- }
-
- netif_start_queue(dev);
- cosa_enable_rx(chan);
- return 0;
-}
-
-static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- netif_stop_queue(dev);
-
- chan->tx_skb = skb;
- cosa_start_tx(chan, skb->data, skb->len);
- return NETDEV_TX_OK;
-}
-
-static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct channel_data *chan = dev_to_chan(dev);
-
- if (test_bit(RXBIT, &chan->cosa->rxtx)) {
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_missed_errors++;
- } else {
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- }
- cosa_kick(chan->cosa);
- if (chan->tx_skb) {
- dev_kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- netif_wake_queue(dev);
-}
-
-static int cosa_net_close(struct net_device *dev)
-{
- struct channel_data *chan = dev_to_chan(dev);
- unsigned long flags;
-
- netif_stop_queue(dev);
- hdlc_close(dev);
- cosa_disable_rx(chan);
- spin_lock_irqsave(&chan->cosa->lock, flags);
- if (chan->rx_skb) {
- kfree_skb(chan->rx_skb);
- chan->rx_skb = NULL;
- }
- if (chan->tx_skb) {
- kfree_skb(chan->tx_skb);
- chan->tx_skb = NULL;
- }
- chan->usage = 0;
- chan->cosa->usage--;
- spin_unlock_irqrestore(&chan->cosa->lock, flags);
- return 0;
-}
-
-static char *cosa_net_setup_rx(struct channel_data *chan, int size)
-{
- /* We can safely fall back to non-dma-able memory, because we have
- * the cosa->bouncebuf pre-allocated.
- */
- kfree_skb(chan->rx_skb);
- chan->rx_skb = dev_alloc_skb(size);
- if (!chan->rx_skb) {
- pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
- chan->netdev->stats.rx_dropped++;
- return NULL;
- }
- netif_trans_update(chan->netdev);
- return skb_put(chan->rx_skb, size);
-}
-
-static int cosa_net_rx_done(struct channel_data *chan)
-{
- if (!chan->rx_skb) {
- pr_warn("%s: rx_done with empty skb!\n", chan->name);
- chan->netdev->stats.rx_errors++;
- chan->netdev->stats.rx_frame_errors++;
- return 0;
- }
- chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
- chan->rx_skb->dev = chan->netdev;
- skb_reset_mac_header(chan->rx_skb);
- chan->netdev->stats.rx_packets++;
- chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
- netif_rx(chan->rx_skb);
- chan->rx_skb = NULL;
- return 0;
-}
-
-/* ARGSUSED */
-static int cosa_net_tx_done(struct channel_data *chan, int size)
-{
- if (!chan->tx_skb) {
- pr_warn("%s: tx_done with empty skb!\n", chan->name);
- chan->netdev->stats.tx_errors++;
- chan->netdev->stats.tx_aborted_errors++;
- return 1;
- }
- dev_consume_skb_irq(chan->tx_skb);
- chan->tx_skb = NULL;
- chan->netdev->stats.tx_packets++;
- chan->netdev->stats.tx_bytes += size;
- netif_wake_queue(chan->netdev);
- return 1;
-}
-
-/*---------- Character device ---------- */
-
-static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (mutex_lock_interruptible(&chan->rlock))
- return -ERESTARTSYS;
-
- chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL);
- if (!chan->rxdata) {
- mutex_unlock(&chan->rlock);
- return -ENOMEM;
- }
-
- chan->rx_status = 0;
- cosa_enable_rx(chan);
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->rxwaitq, &wait);
- while (!chan->rx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->rx_status == 0) {
- chan->rx_status = 1;
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->rxwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- kbuf = chan->rxdata;
- count = chan->rxsize;
- spin_unlock_irqrestore(&cosa->lock, flags);
- mutex_unlock(&chan->rlock);
-
- if (copy_to_user(buf, kbuf, count)) {
- kfree(kbuf);
- return -EFAULT;
- }
- kfree(kbuf);
- return count;
-}
-
-static char *chrdev_setup_rx(struct channel_data *chan, int size)
-{
- /* Expect size <= COSA_MTU */
- chan->rxsize = size;
- return chan->rxdata;
-}
-
-static int chrdev_rx_done(struct channel_data *chan)
-{
- if (chan->rx_status) { /* Reader has died */
- kfree(chan->rxdata);
- up(&chan->wsem);
- }
- chan->rx_status = 1;
- wake_up_interruptible(&chan->rxwaitq);
- return 1;
-}
-
-static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct channel_data *chan = file->private_data;
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
- char *kbuf;
-
- if (!(cosa->firmware_status & COSA_FW_START)) {
- pr_notice("%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- if (down_interruptible(&chan->wsem))
- return -ERESTARTSYS;
-
- if (count > COSA_MTU)
- count = COSA_MTU;
-
- /* Allocate the buffer */
- kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA);
- if (!kbuf) {
- up(&chan->wsem);
- return -ENOMEM;
- }
- if (copy_from_user(kbuf, buf, count)) {
- up(&chan->wsem);
- kfree(kbuf);
- return -EFAULT;
- }
- chan->tx_status = 0;
- cosa_start_tx(chan, kbuf, count);
-
- spin_lock_irqsave(&cosa->lock, flags);
- add_wait_queue(&chan->txwaitq, &wait);
- while (!chan->tx_status) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&cosa->lock, flags);
- schedule();
- spin_lock_irqsave(&cosa->lock, flags);
- if (signal_pending(current) && chan->tx_status == 0) {
- chan->tx_status = 1;
- remove_wait_queue(&chan->txwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- spin_unlock_irqrestore(&cosa->lock, flags);
- up(&chan->wsem);
- kfree(kbuf);
- return -ERESTARTSYS;
- }
- }
- remove_wait_queue(&chan->txwaitq, &wait);
- __set_current_state(TASK_RUNNING);
- up(&chan->wsem);
- spin_unlock_irqrestore(&cosa->lock, flags);
- kfree(kbuf);
- return count;
-}
-
-static int chrdev_tx_done(struct channel_data *chan, int size)
-{
- if (chan->tx_status) { /* Writer was interrupted */
- kfree(chan->txbuf);
- up(&chan->wsem);
- }
- chan->tx_status = 1;
- wake_up_interruptible(&chan->txwaitq);
- return 1;
-}
-
-static __poll_t cosa_poll(struct file *file, poll_table *poll)
-{
- pr_info("cosa_poll is here\n");
- return 0;
-}
-
-static int cosa_open(struct inode *inode, struct file *file)
-{
- struct cosa_data *cosa;
- struct channel_data *chan;
- unsigned long flags;
- int n;
- int ret = 0;
-
- mutex_lock(&cosa_chardev_mutex);
- n = iminor(file_inode(file)) >> CARD_MINOR_BITS;
- if (n >= nr_cards) {
- ret = -ENODEV;
- goto out;
- }
- cosa = cosa_cards + n;
-
- n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1);
- if (n >= cosa->nchannels) {
- ret = -ENODEV;
- goto out;
- }
- chan = cosa->chan + n;
-
- file->private_data = chan;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- if (chan->usage < 0) { /* in netdev mode */
- spin_unlock_irqrestore(&cosa->lock, flags);
- ret = -EBUSY;
- goto out;
- }
- cosa->usage++;
- chan->usage++;
-
- chan->tx_done = chrdev_tx_done;
- chan->setup_rx = chrdev_setup_rx;
- chan->rx_done = chrdev_rx_done;
- spin_unlock_irqrestore(&cosa->lock, flags);
-out:
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
-
-static int cosa_release(struct inode *inode, struct file *file)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- unsigned long flags;
-
- cosa = channel->cosa;
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->usage--;
- channel->usage--;
- spin_unlock_irqrestore(&cosa->lock, flags);
- return 0;
-}
-
-#ifdef COSA_FASYNC_WORKING
-static struct fasync_struct *fasync[256] = { NULL, };
-
-/* To be done ... */
-static int cosa_fasync(struct inode *inode, struct file *file, int on)
-{
- int port = iminor(inode);
-
- return fasync_helper(inode, file, on, &fasync[port]);
-}
-#endif
-
-/* ---------- Ioctls ---------- */
-
-/* Ioctl subroutines can safely be made inline, because they are called
- * only from cosa_ioctl().
- */
-static inline int cosa_reset(struct cosa_data *cosa)
-{
- char idstring[COSA_MAX_ID_STRING];
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START);
- if (cosa_reset_and_read_id(cosa, idstring) < 0) {
- pr_notice("cosa%d: reset failed\n", cosa->num);
- return -EIO;
- }
- pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to download data into COSA memory. Calls download() */
-static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->name, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
- if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
- return -EINVAL;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD);
-
- i = download(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: microcode download failed: %d\n",
- cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD;
- return 0;
-}
-
-/* High-level function to read COSA memory. Calls readmem() */
-static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
-{
- struct cosa_download d;
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
- if (!(cosa->firmware_status & COSA_FW_RESET)) {
- pr_notice("%s: reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
-
- if (copy_from_user(&d, arg, sizeof(d)))
- return -EFAULT;
-
- /* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~COSA_FW_RESET;
-
- i = readmem(cosa, d.code, d.len, d.addr);
- if (i < 0) {
- pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i);
- return -EIO;
- }
- pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
- cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET;
- return 0;
-}
-
-/* High-level function to start microcode. Calls startmicrocode(). */
-static inline int cosa_start(struct cosa_data *cosa, int address)
-{
- int i;
-
- if (cosa->usage > 1)
- pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
- cosa->num, cosa->usage);
-
- if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD))
- != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) {
- pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
- cosa->name, cosa->firmware_status);
- return -EPERM;
- }
- cosa->firmware_status &= ~COSA_FW_RESET;
- i = startmicrocode(cosa, address);
- if (i < 0) {
- pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
- cosa->num, address, i);
- return -EIO;
- }
- pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address);
- cosa->startaddr = address;
- cosa->firmware_status |= COSA_FW_START;
- return 0;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->id_string) + 1;
-
- if (copy_to_user(string, cosa->id_string, l))
- return -EFAULT;
- return l;
-}
-
-/* Buffer of size at least COSA_MAX_ID_STRING is expected */
-static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
-{
- int l = strlen(cosa->type) + 1;
-
- if (copy_to_user(string, cosa->type, l))
- return -EFAULT;
- return l;
-}
-
-static int cosa_ioctl_common(struct cosa_data *cosa,
- struct channel_data *channel, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case COSAIORSET: /* Reset the device */
- if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return cosa_reset(cosa);
- case COSAIOSTRT: /* Start the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_start(cosa, arg);
- case COSAIODOWNLD: /* Download the firmware */
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
-
- return cosa_download(cosa, argp);
- case COSAIORMEM:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return cosa_readmem(cosa, argp);
- case COSAIORTYPE:
- return cosa_gettype(cosa, argp);
- case COSAIORIDSTR:
- return cosa_getidstr(cosa, argp);
- case COSAIONRCARDS:
- return nr_cards;
- case COSAIONRCHANS:
- return cosa->nchannels;
- case COSAIOBMSET:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- if (is_8bit(cosa))
- return -EINVAL;
- if (arg != COSA_BM_OFF && arg != COSA_BM_ON)
- return -EINVAL;
- cosa->busmaster = arg;
- return 0;
- case COSAIOBMGET:
- return cosa->busmaster;
- }
- return -ENOIOCTLCMD;
-}
-
-static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct channel_data *channel = file->private_data;
- struct cosa_data *cosa;
- long ret;
-
- mutex_lock(&cosa_chardev_mutex);
- cosa = channel->cosa;
- ret = cosa_ioctl_common(cosa, channel, cmd, arg);
- mutex_unlock(&cosa_chardev_mutex);
- return ret;
-}
-
-/*---------- HW layer interface ---------- */
-
-/* The higher layer can bind itself to the HW layer by setting the callbacks
- * in the channel_data structure and by using these routines.
- */
-static void cosa_enable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (!test_and_set_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-static void cosa_disable_rx(struct channel_data *chan)
-{
- struct cosa_data *cosa = chan->cosa;
-
- if (test_and_clear_bit(chan->num, &cosa->rxbitmap))
- put_driver_status(cosa);
-}
-
-/* FIXME: This routine probably should check for cosa_start_tx() called when
- * the previous transmit is still unfinished. In this case the non-zero
- * return value should indicate to the caller that queuing up
- * the transmit has failed.
- */
-static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
-{
- struct cosa_data *cosa = chan->cosa;
- unsigned long flags;
-#ifdef DEBUG_DATA
- int i;
-
- pr_info("cosa%dc%d: starting tx(0x%x)",
- chan->cosa->num, chan->num, len);
- for (i = 0; i < len; i++)
- pr_cont(" %02x", buf[i]&0xff);
- pr_cont("\n");
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- chan->txbuf = buf;
- chan->txsize = len;
- if (len > COSA_MTU)
- chan->txsize = COSA_MTU;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- /* Tell the firmware we are ready */
- set_bit(chan->num, &cosa->txbitmap);
- put_driver_status(cosa);
-
- return 0;
-}
-
-static void put_driver_status(struct cosa_data *cosa)
-{
- unsigned long flags;
- int status;
-
- spin_lock_irqsave(&cosa->lock, flags);
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
- if (!cosa->rxtx) {
- if (cosa->rxbitmap | cosa->txbitmap) {
- if (!cosa->enabled) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- }
- } else if (cosa->enabled) {
- cosa->enabled = 0;
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
- }
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
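
Editor's note: a worked example of the status byte built above, assuming the DRIVER_* definitions earlier in this file. With rxbitmap == 0 and txbitmap == 0x1 (only channel 0 has data queued):

	status = DRIVER_TX_READY			/* 0x02 */
	       | (~(0x1 << DRIVER_TXMAP_SHIFT)
		  & DRIVER_TXMAP_MASK);			/* 0x08 */

so status == 0x0a: TX ready, and because the TX map is inverted, the cleared bit 2 marks channel 0 as the channel that wants to transmit.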
-
-static void put_driver_status_nolock(struct cosa_data *cosa)
-{
- int status;
-
- status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
- | (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
- & DRIVER_TXMAP_MASK : 0);
-
- if (cosa->rxbitmap | cosa->txbitmap) {
- cosa_putstatus(cosa, SR_RX_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_INT_ENA);
-#endif
- cosa->enabled = 1;
- } else {
- cosa_putstatus(cosa, 0);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
-#endif
- cosa->enabled = 0;
- }
- cosa_putdata8(cosa, status);
-#ifdef DEBUG_IO
- debug_data_cmd(cosa, status);
-#endif
-}
-
-/* The "kickme" function: When the DMA times out, this is called to
- * clean up the driver status.
- * FIXME: Preliminary support, the interface is probably wrong.
- */
-static void cosa_kick(struct cosa_data *cosa)
-{
- unsigned long flags, flags1;
- char *s = "(probably) IRQ";
-
- if (test_bit(RXBIT, &cosa->rxtx))
- s = "RX DMA";
- if (test_bit(TXBIT, &cosa->rxtx))
- s = "TX DMA";
-
- pr_info("%s: %s timeout - restarting\n", cosa->name, s);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa->rxtx = 0;
-
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
-
- /* FIXME: Anything else? */
- udelay(100);
- cosa_putstatus(cosa, 0);
- udelay(100);
- (void)cosa_getdata8(cosa);
- udelay(100);
- cosa_putdata8(cosa, 0);
- udelay(100);
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-/* Check if the whole buffer is DMA-able, i.e. below 16M of
- * physical memory and not spanning a 64k boundary. For now it seems
- * SKBs never do this, but we'll check anyway.
- */
-static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
-{
- static int count;
- unsigned long b = (unsigned long)buf;
-
- if (b + len >= MAX_DMA_ADDRESS)
- return 0;
- if ((b ^ (b + len)) & 0x10000) {
- if (count++ < 5)
- pr_info("%s: packet spanning a 64k boundary\n",
- chan->name);
- return 0;
- }
- return 1;
-}
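
Editor's note: a worked example for the boundary check above: with buf == 0x1fff0 and len == 0x20, b + len == 0x20010, so (b ^ (b + len)) & 0x10000 == 0x10000 and the buffer crosses a 64 KiB boundary. The ISA DMA controller's address counter wraps within a 64 KiB page, so such a buffer (like anything at or above the 16 MB ISA limit) cannot be streamed directly, and the driver falls back to the pre-allocated bounce buffer instead.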
-
-/* ---------- The SRP/COSA ROM monitor functions ---------- */
-
-/* Downloading SRP microcode: say "w" to the SRP monitor; it answers with "w=".
- * The driver then has to send a 4-digit hex number meaning the start address
- * of the microcode, followed by a single space. The monitor replies with " =".
- * Now the driver has to write a 4-digit hex number meaning the last byte
- * address, followed by a single space. The monitor has to reply with a space.
- * Now the download begins. After the download the monitor replies with
- * "\r\n." (CR LF dot).
- */
-static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address)
-{
- int i;
-
- if (put_wait_data(cosa, 'w') == -1)
- return -1;
- if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;}
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -10;
- if (get_wait_data(cosa) != ' ')
- return -11;
- if (get_wait_data(cosa) != '=')
- return -12;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -13;
- if (put_wait_data(cosa, ' ') == -1)
- return -18;
- if (get_wait_data(cosa) != ' ')
- return -19;
-
- while (length--) {
- char c;
-#ifndef SRP_DOWNLOAD_AT_BOOT
- if (get_user(c, microcode))
- return -23; /* ??? */
-#else
- c = *microcode;
-#endif
- if (put_wait_data(cosa, c) == -1)
- return -20;
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
-#endif
- return 0;
-}
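
Editor's note: a concrete run of the monitor chat implemented above, with illustrative values (downloading 0x100 bytes to address 0x0400; the monitor echoes each hex digit, which puthexnumber() verifies):

	host:    'w'
	monitor: 'w' '='
	host:    '0' '4' '0' '0' ' '	(start address, each digit echoed back)
	monitor: ' ' '='
	host:    '0' '4' 'F' 'F' ' '	(last byte address, each digit echoed back)
	monitor: ' '
	host:    256 raw data bytes
	monitor: '\r' '\n' '.'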
-
-/* Starting microcode is done via the "g" command of the SRP monitor.
- * The chat should be the following: "g" "g=" "<addr><CR>"
- * "<CR><CR><LF><CR><LF>".
- */
-static int startmicrocode(struct cosa_data *cosa, int address)
-{
- if (put_wait_data(cosa, 'g') == -1)
- return -1;
- if (get_wait_data(cosa) != 'g')
- return -2;
- if (get_wait_data(cosa) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, '\r') == -1)
- return -5;
-
- if (get_wait_data(cosa) != '\r')
- return -6;
- if (get_wait_data(cosa) != '\r')
- return -7;
- if (get_wait_data(cosa) != '\n')
- return -8;
- if (get_wait_data(cosa) != '\r')
- return -9;
- if (get_wait_data(cosa) != '\n')
- return -10;
-#if 0
- printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
-#endif
- return 0;
-}
-
-/* Reading memory is done via the "r" command of the SRP monitor.
- * The chat is the following "r" "r=" "<addr> " " =" "<last_byte> " " "
- * Then driver can read the data and the conversation is finished
- * by SRP monitor sending "<CR><LF>." (dot at the end).
- *
- * This routine is not needed during the normal operation and serves
- * for debugging purposes only.
- */
-static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
-{
- if (put_wait_data(cosa, 'r') == -1)
- return -1;
- if ((get_wait_data(cosa)) != 'r')
- return -2;
- if ((get_wait_data(cosa)) != '=')
- return -3;
-
- if (puthexnumber(cosa, address) < 0)
- return -4;
- if (put_wait_data(cosa, ' ') == -1)
- return -5;
- if (get_wait_data(cosa) != ' ')
- return -6;
- if (get_wait_data(cosa) != '=')
- return -7;
-
- if (puthexnumber(cosa, address + length - 1) < 0)
- return -8;
- if (put_wait_data(cosa, ' ') == -1)
- return -9;
- if (get_wait_data(cosa) != ' ')
- return -10;
-
- while (length--) {
- char c;
- int i;
-
- i = get_wait_data(cosa);
- if (i == -1) {
- pr_info("0x%04x bytes remaining\n", length);
- return -11;
- }
- c = i;
-#if 1
- if (put_user(c, microcode))
- return -23; /* ??? */
-#else
- *microcode = c;
-#endif
- microcode++;
- }
-
- if (get_wait_data(cosa) != '\r')
- return -21;
- if (get_wait_data(cosa) != '\n')
- return -22;
- if (get_wait_data(cosa) != '.')
- return -23;
-#if 0
- printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
-#endif
- return 0;
-}
-
-/* This function resets the device and reads the initial prompt
- * of the device's ROM monitor.
- */
-static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
-{
- int i = 0, id = 0, prev = 0, curr = 0;
-
- /* Reset the card ... */
- cosa_putstatus(cosa, 0);
- cosa_getdata8(cosa);
- cosa_putstatus(cosa, SR_RST);
- msleep(500);
- /* Disable all IRQs from the card */
- cosa_putstatus(cosa, 0);
-
- /* Try to read the ID string. The card then prints out the
- * identification string ended by the "\n\x2e".
- *
- * The following loop is indexed through i (instead of id)
- * to avoid looping forever when for any reason
- * the port returns '\r', '\n' or '\x2e' permanently.
- */
- for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) {
- curr = get_wait_data(cosa);
- if (curr == -1)
- return -1;
-
- curr &= 0xff;
- if (curr != '\r' && curr != '\n' && curr != 0x2e)
- idstring[id++] = curr;
- if (curr == 0x2e && prev == '\n')
- break;
- }
- /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */
- idstring[id] = '\0';
- return id;
-}
-
-/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
-
-/* This routine gets the data byte from the card waiting for the SR_RX_RDY
- * bit to be set in a loop. It should be used in the exceptional cases
- * only (for example when resetting the card or downloading the firmware).
- */
-static int get_wait_data(struct cosa_data *cosa)
-{
- int retries = 1000;
-
- while (--retries) {
- /* read data and return them */
- if (cosa_getstatus(cosa) & SR_RX_RDY) {
- short r;
-
- r = cosa_getdata8(cosa);
-#if 0
- pr_info("get_wait_data returning after %d retries\n",
- 999 - retries);
-#endif
- return r;
- }
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
- }
- pr_info("timeout in get_wait_data (status 0x%x)\n",
- cosa_getstatus(cosa));
- return -1;
-}
-
-/* This routine puts the data byte to the card waiting for the SR_TX_RDY
- * bit to be set in a loop. It should be used in the exceptional cases
- * only (for example when resetting the card or downloading the firmware).
- */
-static int put_wait_data(struct cosa_data *cosa, int data)
-{
- int retries = 1000;
-
- while (--retries) {
- /* write the data once the card is ready to accept it */
- if (cosa_getstatus(cosa) & SR_TX_RDY) {
- cosa_putdata8(cosa, data);
-#if 0
- pr_info("Putdata: %d retries\n", 999 - retries);
-#endif
- return 0;
- }
-#if 0
- /* sleep if not ready to read */
- schedule_timeout_interruptible(1);
-#endif
- }
- pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n",
- cosa->num, cosa_getstatus(cosa));
- return -1;
-}
-
-/* The following routine puts the hexadecimal number into the SRP monitor
- * and verifies the proper echo of the sent bytes. Returns 0 on success,
- * negative number on failure (-1,-3,-5,-7) means that put_wait_data() failed,
- * (-2,-4,-6,-8) means that reading echo failed.
- */
-static int puthexnumber(struct cosa_data *cosa, int number)
-{
- char temp[5];
- int i;
-
- /* Well, I should probably replace this by something faster. */
- sprintf(temp, "%04X", number);
- for (i = 0; i < 4; i++) {
- if (put_wait_data(cosa, temp[i]) == -1) {
- pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
- cosa->num, i);
- return -1 - 2 * i;
- }
- if (get_wait_data(cosa) != temp[i]) {
- pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n",
- cosa->num, i);
- return -2 - 2 * i;
- }
- }
- return 0;
-}
-
-/* ---------- Interrupt routines ---------- */
-
-/* There are three types of interrupt:
- * At the beginning of transmit - this is handled in tx_interrupt(),
- * at the beginning of receive - it is in rx_interrupt() and
- * at the end of transmit/receive - it is the eot_interrupt() function.
- * These functions are multiplexed by cosa_interrupt() according to the
- * COSA status byte. I have moved the rx/tx/eot interrupt handling into
- * separate functions to make it more readable. These functions are inline,
- * so there should be no overhead of function call.
- *
- * In the COSA bus-master mode, we need to tell the card the address of a
- * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
- * It's time to use the bottom half :-(
- */
-
-/* Transmit interrupt routine - called when COSA is willing to obtain
- * data from the OS. The trickiest part of the routine is the selection
- * of the channel we (the OS) want to send a packet for. For SRP we should
- * probably use a round-robin approach. The newer COSA firmwares have a
- * simple flow control - bits 2 and 3 set to 1 in the status word mean that
- * channel 0 or 1 doesn't want to receive data.
- *
- * It seems there is a bug in COSA firmware (need to trace it further):
- * When the driver status says that the kernel has no more data for transmit
- * (e.g. at the end of TX DMA) and then the kernel changes its mind
- * (e.g. new packet is queued to hard_start_xmit()), the card issues
- * the TX interrupt but does not mark the channel as ready-to-transmit.
- * The fix seems to be to push the packet to COSA despite its request.
- * We first try to obey the card's opinion, and then fall back to forced TX.
- */
-static inline void tx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status);
-#endif
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(TXBIT, &cosa->rxtx);
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- /* flow control, see the comment above */
- int i = 0;
-
- if (!cosa->txbitmap) {
- pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
- cosa->name);
- put_driver_status_nolock(cosa);
- clear_bit(TXBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- }
- while (1) {
- cosa->txchan++;
- i++;
- if (cosa->txchan >= cosa->nchannels)
- cosa->txchan = 0;
- if (!(cosa->txbitmap & (1 << cosa->txchan)))
- continue;
- if (~status &
- (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT)))
- break;
- /* in second pass, accept first ready-to-TX channel */
- if (i > cosa->nchannels) {
- /* Can be safely ignored */
-#ifdef DEBUG_IRQS
- printk(KERN_DEBUG "%s: Forcing TX "
- "to not-ready channel %d\n",
- cosa->name, cosa->txchan);
-#endif
- break;
- }
- }
-
- cosa->txsize = cosa->chan[cosa->txchan].txsize;
- if (cosa_dma_able(cosa->chan + cosa->txchan,
- cosa->chan[cosa->txchan].txbuf,
- cosa->txsize)) {
- cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
- } else {
- memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
- cosa->txsize);
- cosa->txbuf = cosa->bouncebuf;
- }
- }
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) |
- ((cosa->txsize >> 8) & 0x1f));
- debug_data_in(cosa, cosa_getdata8(cosa));
-#else
- cosa_getdata8(cosa);
-#endif
- set_bit(IRQBIT, &cosa->rxtx);
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa_putstatus(cosa, 0);
- cosa_putdata8(cosa, cosa->txsize & 0xff);
-#ifdef DEBUG_IO
- debug_status_out(cosa, 0);
- debug_data_out(cosa, cosa->txsize & 0xff);
-#endif
- }
- } else {
- cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000)
- | (cosa->txsize & 0x1fff));
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) |
- (cosa->txsize & 0x1fff));
- debug_data_in(cosa, cosa_getdata8(cosa));
- debug_status_out(cosa, 0);
-#else
- cosa_getdata8(cosa);
-#endif
- cosa_putstatus(cosa, 0);
- }
-
- if (cosa->busmaster) {
- unsigned long addr = virt_to_bus(cosa->txbuf);
- int count = 0;
-
- pr_info("busmaster IRQ\n");
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- udelay(10);
- if (count > 1000)
- break;
- }
- pr_info("status %x\n", cosa_getstatus(cosa));
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, (addr >> 16) & 0xffff);
-
- count = 0;
- while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
- count++;
- if (count > 1000)
- break;
- udelay(10);
- }
- pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, addr & 0xffff);
- flags1 = claim_dma_lock();
- set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- } else {
- /* start the DMA */
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_WRITE);
- set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf));
- set_dma_count(cosa->dma, cosa->txsize);
- enable_dma(cosa->dma);
- release_dma_lock(flags1);
- }
- cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
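-
-/* Editor's sketch, not part of the original driver: the 16-bit word
- * exchanged with the card above packs the channel number into the top
- * three bits and the byte count into the low thirteen, matching the
- * 0xe000/0x1fff masks in tx_interrupt() and rx_interrupt(). The helper
- * names are hypothetical.
- */
-static inline u16 cosa_mk_xfer_word(unsigned int chan, unsigned int len)
-{
-	return ((chan << 13) & 0xe000) | (len & 0x1fff);
-}
-
-static inline unsigned int cosa_xfer_chan(u16 w) { return (w & 0xe000) >> 13; }
-static inline unsigned int cosa_xfer_len(u16 w) { return w & 0x1fff; }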
-
-static inline void rx_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags;
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num);
-#endif
-
- spin_lock_irqsave(&cosa->lock, flags);
- set_bit(RXBIT, &cosa->rxtx);
-
- if (is_8bit(cosa)) {
- if (!test_bit(IRQBIT, &cosa->rxtx)) {
- set_bit(IRQBIT, &cosa->rxtx);
- put_driver_status_nolock(cosa);
- cosa->rxsize = cosa_getdata8(cosa) << 8;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize >> 8);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
- return;
- } else {
- clear_bit(IRQBIT, &cosa->rxtx);
- cosa->rxsize |= cosa_getdata8(cosa) & 0xff;
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize & 0xff);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- } else {
- cosa->rxsize = cosa_getdata16(cosa);
-#ifdef DEBUG_IO
- debug_data_in(cosa, cosa->rxsize);
-#endif
-#if 0
- pr_info("cosa%d: receive rxsize = (0x%04x)\n",
- cosa->num, cosa->rxsize);
-#endif
- }
- if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
- pr_warn("%s: rx for unknown channel (0x%04x)\n",
- cosa->name, cosa->rxsize);
- spin_unlock_irqrestore(&cosa->lock, flags);
- goto reject;
- }
- cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13);
- cosa->rxsize &= 0x1fff;
- spin_unlock_irqrestore(&cosa->lock, flags);
-
- cosa->rxbuf = NULL;
- if (cosa->rxchan->setup_rx)
- cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize);
-
- if (!cosa->rxbuf) {
-reject: /* Reject the packet */
- pr_info("cosa%d: rejecting packet on channel %d\n",
- cosa->num, cosa->rxchan->num);
- cosa->rxbuf = cosa->bouncebuf;
- }
-
- /* start the DMA */
- flags = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- set_dma_mode(cosa->dma, DMA_MODE_READ);
- if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff))
- set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
- else
- set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
-
- set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff));
- enable_dma(cosa->dma);
- release_dma_lock(flags);
- spin_lock_irqsave(&cosa->lock, flags);
- cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- cosa_putdata8(cosa, DRIVER_RX_READY);
-#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
- if (!is_8bit(cosa) && (status & SR_TX_RDY))
- debug_data_cmd(cosa, DRIVER_RX_READY);
-#endif
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static inline void eot_interrupt(struct cosa_data *cosa, int status)
-{
- unsigned long flags, flags1;
-
- spin_lock_irqsave(&cosa->lock, flags);
- flags1 = claim_dma_lock();
- disable_dma(cosa->dma);
- clear_dma_ff(cosa->dma);
- release_dma_lock(flags1);
- if (test_bit(TXBIT, &cosa->rxtx)) {
- struct channel_data *chan = cosa->chan + cosa->txchan;
-
- if (chan->tx_done)
- if (chan->tx_done(chan, cosa->txsize))
- clear_bit(chan->num, &cosa->txbitmap);
- } else if (test_bit(RXBIT, &cosa->rxtx)) {
-#ifdef DEBUG_DATA
- {
- int i;
-
- pr_info("cosa%dc%d: done rx(0x%x)",
- cosa->num, cosa->rxchan->num, cosa->rxsize);
- for (i = 0; i < cosa->rxsize; i++)
- pr_cont(" %02x", cosa->rxbuf[i]&0xff);
- pr_cont("\n");
- }
-#endif
- /* Packet for unknown channel? */
- if (cosa->rxbuf == cosa->bouncebuf)
- goto out;
- if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize))
- memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize);
- if (cosa->rxchan->rx_done)
- if (cosa->rxchan->rx_done(cosa->rxchan))
- clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
- } else {
- pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
- }
- /* Clear the RXBIT, TXBIT and IRQBIT (the last of these should
- * already be clear). We should do this as soon as possible so
- * that we can tell the COSA we are done and give it time to
- * recover.
- */
-out:
- cosa->rxtx = 0;
- put_driver_status_nolock(cosa);
- spin_unlock_irqrestore(&cosa->lock, flags);
-}
-
-static irqreturn_t cosa_interrupt(int irq, void *cosa_)
-{
- unsigned status;
- int count = 0;
- struct cosa_data *cosa = cosa_;
-again:
- status = cosa_getstatus(cosa);
-#ifdef DEBUG_IRQS
- pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff);
-#endif
-#ifdef DEBUG_IO
- debug_status_in(cosa, status);
-#endif
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_DOWN_REQUEST:
- tx_interrupt(cosa, status);
- break;
- case SR_UP_REQUEST:
- rx_interrupt(cosa, status);
- break;
- case SR_END_OF_TRANSFER:
- eot_interrupt(cosa, status);
- break;
- default:
- /* We may be too fast for SRP. Try to wait a bit more. */
- if (count++ < 100) {
- udelay(100);
- goto again;
- }
- pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
- cosa->num, status & 0xff, count);
- }
-#ifdef DEBUG_IRQS
- if (count)
- pr_info("%s: %d-times got unknown status in IRQ\n",
- cosa->name, count);
- else
- pr_info("%s: returning from IRQ\n", cosa->name);
-#endif
- return IRQ_HANDLED;
-}
-
-/* ---------- I/O debugging routines ---------- */
-/* These routines can be used to monitor COSA/SRP I/O and to printk()
- * the data being transferred on the data and status I/O port in a
- * readable way.
- */
-
-#ifdef DEBUG_IO
-static void debug_status_in(struct cosa_data *cosa, int status)
-{
- char *s;
-
- switch (status & SR_CMD_FROM_SRP_MASK) {
- case SR_UP_REQUEST:
- s = "RX_REQ";
- break;
- case SR_DOWN_REQUEST:
- s = "TX_REQ";
- break;
- case SR_END_OF_TRANSFER:
- s = "ET_REQ";
- break;
- default:
- s = "NO_REQ";
- break;
- }
- pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_USR_RQ ? "USR_RQ|" : "",
- status & SR_TX_RDY ? "TX_RDY|" : "",
- status & SR_RX_RDY ? "RX_RDY|" : "",
- s);
-}
-
-static void debug_status_out(struct cosa_data *cosa, int status)
-{
- pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
- cosa->name,
- status,
- status & SR_RX_DMA_ENA ? "RXDMA|" : "!rxdma|",
- status & SR_TX_DMA_ENA ? "TXDMA|" : "!txdma|",
- status & SR_RST ? "RESET|" : "",
- status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|",
- status & SR_TX_INT_ENA ? "TXINT|" : "!txint|",
- status & SR_RX_INT_ENA ? "RXINT" : "!rxint");
-}
-
-static void debug_data_in(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_out(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data);
-}
-
-static void debug_data_cmd(struct cosa_data *cosa, int data)
-{
- pr_info("%s: IO: data <- 0x%04x (%s|%s)\n",
- cosa->name, data,
- data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
- data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
-}
-#endif
-
-/* EOF -- this file has not been truncated */
diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h
deleted file mode 100644
index f57e0af9d56a..000000000000
--- a/drivers/net/wan/cosa.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */
-
-/*
- * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
- */
-
-#ifndef COSA_H__
-#define COSA_H__
-
-#include <linux/ioctl.h>
-
-#ifdef __KERNEL__
-/* status register - output bits */
-#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */
-#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */
-#define SR_RST 0x10 /* SRP reset */
-#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */
-#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */
-#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */
-
-/* status register - input bits */
-#define SR_USR_RQ 0x20 /* user interrupt request pending */
-#define SR_TX_RDY 0x40 /* transmitter empty (ready) */
-#define SR_RX_RDY 0x80 /* receiver data ready */
-
-#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data
- up to PC */
-#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down
- from PC to SRP */
-#define SR_END_OF_TRANSFER 0x03 /* SRP signals end of
- transfer (up or down) */
-
-#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */
-
-/* bits in driver status byte definitions : */
-#define SR_RDY_RCV 0x01 /* ready to receive packet */
-#define SR_RDY_SND 0x02 /* ready to send packet */
-#define SR_CMD_PND 0x04 /* command pending (not currently used) */
-
-/* ???? */
-#define SR_PKT_UP 0x01 /* transfer of packet up in progress */
-#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */
-
-#endif /* __KERNEL__ */
-
-#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */
-#define SR_START_ADDR 0x4400 /* SRP microcode start address */
-
-#define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */
-#define COSA_MAX_FIRMWARE_SIZE 0x10000
-
-/* ioctls */
-struct cosa_download {
- int addr, len;
- char __user *code;
-};
-
-/* Reset the device */
-#define COSAIORSET _IO('C',0xf0)
-
-/* Start microcode at given address */
-#define COSAIOSTRT _IOW('C',0xf1, int)
-
-/* Read the block from the device memory */
-#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *)
- /* actually the struct cosa_download itself; this is to keep
- * the ioctl number the same as in 2.4 in order to keep the user-space
- * utils compatible. */
-
-/* Write the block to the device memory (i.e. download the microcode) */
-#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *)
- /* actually the struct cosa_download itself; this is to keep
- * the ioctl number the same as in 2.4 in order to keep the user-space
- * utils compatible. */
-
-/* Read the device type (one of "srp", "cosa", and "cosa8" for now) */
-#define COSAIORTYPE _IOR('C',0xf3, char *)
-
-/* Read the device identification string */
-#define COSAIORIDSTR _IOR('C',0xf4, char *)
-/* Maximum length of the identification string. */
-#define COSA_MAX_ID_STRING 128
-
-/* Increment/decrement the module usage count :-) */
-/* #define COSAIOMINC _IO('C',0xf5) */
-/* #define COSAIOMDEC _IO('C',0xf6) */
-
-/* Get the total number of cards installed */
-#define COSAIONRCARDS _IO('C',0xf7)
-
-/* Get the number of channels on this card */
-#define COSAIONRCHANS _IO('C',0xf8)
-
-/* Set the driver for the bus-master operations */
-#define COSAIOBMSET _IOW('C', 0xf9, unsigned short)
-
-#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */
-#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */
-
-/* Gets the busmaster status */
-#define COSAIOBMGET _IO('C', 0xfa)
-
-#endif /* !COSA_H__ */
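-
-/* Editor's sketch, not from the original sources: the user-space download
- * sequence the ioctls above imply - reset the card, push the microcode
- * with COSAIODOWNLD, then start it with COSAIOSTRT. The device node name
- * and the use of COSA_LOAD_ADDR as the start address are assumptions, and
- * a user-space build is assumed to define __user away.
- */
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/ioctl.h>
-#include "cosa.h"
-
-static int cosa_download_fw(const char *dev, char *code, int len)
-{
-	struct cosa_download d = { .addr = COSA_LOAD_ADDR, .len = len, .code = code };
-	int start = COSA_LOAD_ADDR;
-	int fd = open(dev, O_RDWR);	/* e.g. "/dev/cosa0" - assumed name */
-
-	if (fd < 0)
-		return -1;
-	if (ioctl(fd, COSAIORSET) < 0 ||
-	    ioctl(fd, COSAIODOWNLD, &d) < 0 ||
-	    ioctl(fd, COSAIOSTRT, &start) < 0) {
-		close(fd);
-		return -1;
-	}
-	return close(fd);
-}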
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
deleted file mode 100644
index e985e54ba75d..000000000000
--- a/drivers/net/wan/hostess_sv11.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Comtrol SV11 card driver
- *
- * This is a slightly odd Z85230 synchronous driver. All you need to
- * know, basically, is:
- *
- * It's a genuine Z85230
- *
- * It supports DMA using two DMA channels in SYNC mode. The driver doesn't
- * use these facilities
- *
- * The control port is at io+1, the data at io+3 and turning off the DMA
- * is done by writing 0 to io+4
- *
- * The hardware does the bus handling to avoid the need for delays between
- * touching control registers.
- *
- * Port B isn't wired (why - beats me)
- *
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-static int dma;
-
-/* Network driver support routines
- */
-
-static inline struct z8530_dev *dev_to_sv(struct net_device *dev)
-{
- return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- /* Send it to the PPP layer. We don't have time to process
- * it right now.
- */
- netif_rx(skb);
-}
-
-/* We've been placed in the UP state
- */
-
-static int hostess_open(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- int err = -1;
-
- /* Link layer up
- */
- switch (dma) {
- case 0:
- err = z8530_sync_open(d, &sv11->chanA);
- break;
- case 1:
- err = z8530_sync_dma_open(d, &sv11->chanA);
- break;
- case 2:
- err = z8530_sync_txdma_open(d, &sv11->chanA);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return err;
- }
- sv11->chanA.rx_function = hostess_input;
-
- /*
- * Go go go
- */
-
- netif_start_queue(d);
- return 0;
-}
-
-static int hostess_close(struct net_device *d)
-{
- struct z8530_dev *sv11 = dev_to_sv(d);
- /* Discard new frames
- */
- sv11->chanA.rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (dma) {
- case 0:
- z8530_sync_close(d, &sv11->chanA);
- break;
- case 1:
- z8530_sync_dma_close(d, &sv11->chanA);
- break;
- case 2:
- z8530_sync_txdma_close(d, &sv11->chanA);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind.
- */
-
-static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
-}
-
-static int hostess_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-/* Description block for a Comtrol Hostess SV11 card
- */
-
-static const struct net_device_ops hostess_ops = {
- .ndo_open = hostess_open,
- .ndo_stop = hostess_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static struct z8530_dev *sv11_init(int iobase, int irq)
-{
- struct z8530_dev *sv;
- struct net_device *netdev;
- /* Get the needed I/O space
- */
-
- if (!request_region(iobase, 8, "Comtrol SV11")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
- if (!sv)
- goto err_kzalloc;
-
- /* Stuff in the I/O addressing
- */
-
- sv->active = 0;
-
- sv->chanA.ctrlio = iobase + 1;
- sv->chanA.dataio = iobase + 3;
- sv->chanB.ctrlio = -1;
- sv->chanB.dataio = -1;
- sv->chanA.irqs = &z8530_nop;
- sv->chanB.irqs = &z8530_nop;
-
- outb(0, iobase + 4); /* DMA off */
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "Hostess SV11", sv) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_irq;
- }
-
- sv->irq = irq;
- sv->chanA.private = sv;
- sv->chanA.dev = sv;
- sv->chanB.dev = sv;
-
- if (dma) {
- /* You can have DMA off, or channels 1 and 3 - that's the lot
- * on the Comtrol.
- */
- sv->chanA.txdma = 3;
- sv->chanA.rxdma = 1;
- outb(0x03 | 0x08, iobase + 4); /* DMA on */
- if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
- goto err_txdma;
-
- if (dma == 1)
- if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
- goto err_rxdma;
- }
-
- /* Kill our private IRQ line - the hostess can end up chattering
- * until the configuration is set
- */
- disable_irq(irq);
-
- /* Begin normal initialisation
- */
-
- if (z8530_init(sv)) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_dma;
- }
- z8530_channel_load(&sv->chanB, z8530_dead_port);
- if (sv->type == Z85C30)
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
- else
- z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
-
- enable_irq(irq);
-
- /* Now we can take the IRQ
- */
-
- sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
- if (!netdev)
- goto free_dma;
-
- dev_to_hdlc(netdev)->attach = hostess_attach;
- dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
- netdev->netdev_ops = &hostess_ops;
- netdev->base_addr = iobase;
- netdev->irq = irq;
-
- if (register_hdlc_device(netdev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(netdev);
- goto free_dma;
- }
-
- z8530_describe(sv, "I/O", iobase);
- sv->active = 1;
- return sv;
-
-free_dma:
- if (dma == 1)
- free_dma(sv->chanA.rxdma);
-err_rxdma:
- if (dma)
- free_dma(sv->chanA.txdma);
-err_txdma:
- free_irq(irq, sv);
-err_irq:
- kfree(sv);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void sv11_shutdown(struct z8530_dev *dev)
-{
- unregister_hdlc_device(dev->chanA.netdevice);
- z8530_shutdown(dev);
- free_irq(dev->irq, dev);
- if (dma) {
- if (dma == 1)
- free_dma(dev->chanA.rxdma);
- free_dma(dev->chanA.txdma);
- }
- release_region(dev->chanA.ctrlio - 1, 8);
- free_netdev(dev->chanA.netdevice);
- kfree(dev);
-}
-
-static int io = 0x200;
-static int irq = 9;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card");
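-
-/* Editor's note, not in the original source: with the parameters above, a
- * card at a non-default location could be brought up with, for example,
- *	modprobe hostess_sv11 io=0x280 irq=11 dma=1
- * (illustrative values); the built-in defaults are io=0x200, irq=9 and
- * DMA off.
- */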
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
-
-static struct z8530_dev *sv11_unit;
-
-static int sv11_module_init(void)
-{
- sv11_unit = sv11_init(io, irq);
- if (!sv11_unit)
- return -ENODEV;
- return 0;
-}
-module_init(sv11_module_init);
-
-static void sv11_module_cleanup(void)
-{
- if (sv11_unit)
- sv11_shutdown(sv11_unit);
-}
-module_exit(sv11_module_cleanup);
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
deleted file mode 100644
index f00fe4491d69..000000000000
--- a/drivers/net/wan/lmc/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Lan Media 21140 based WAN cards
-# Specifically the 1000,1200,5200,5245
-#
-
-obj-$(CONFIG_LANMEDIA) += lmc.o
-
-lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
-
-# Like above except every packet gets echoed to KERN_DEBUG
-# in hex
-#
-# DBGDEF = \
-# -DDEBUG \
-# -DLMC_PACKET_LOG
-
-ccflags-y := $(DBGDEF)
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
deleted file mode 100644
index d7d59b4595f9..000000000000
--- a/drivers/net/wan/lmc/lmc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_H_
-#define _LMC_H_
-
-#include "lmc_var.h"
-
-/*
- * prototypes for everyone
- */
-int lmc_probe(struct net_device * dev);
-unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
- devaddr, unsigned regno);
-void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
- unsigned regno, unsigned data);
-void lmc_led_on(lmc_softc_t * const, u32);
-void lmc_led_off(lmc_softc_t * const, u32);
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
-
-int lmc_ioctl(struct net_device *dev, struct if_settings *ifs);
-
-extern lmc_media_t lmc_ds3_media;
-extern lmc_media_t lmc_ssi_media;
-extern lmc_media_t lmc_t1_media;
-extern lmc_media_t lmc_hssi_media;
-
-#ifdef _DBG_EVENTLOG
-static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-#endif
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
deleted file mode 100644
index 2b6051bda3fb..000000000000
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/interrupt.h>
-
-#include "lmc_debug.h"
-
-/*
- * Prints out len, max to 80 octets using printk, 20 per line
- */
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
-{
- int iNewLine = 1;
- char str[80], *pstr;
-
- sprintf(str, KERN_DEBUG "lmc: %s: ", type);
- pstr = str+strlen(str);
-
- if(iLen > 240){
- printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
- iLen = 240;
- }
- else{
- printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
- }
-
- while(iLen > 0)
- {
- sprintf(pstr, "%02x ", *ucData);
- pstr+=3;
- ucData++;
- if( !(iNewLine % 20))
- {
- sprintf(pstr, "\n");
- printk("%s", str);
- sprintf(str, KERN_DEBUG "lmc: %s: ", type);
- pstr=str+strlen(str);
- }
- iNewLine++;
- iLen--;
- }
- sprintf(pstr, "\n");
- printk("%s", str);
-}
-#endif
-#endif
-
-#ifdef DEBUG
-u32 lmcEventLogIndex;
-u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
-{
- lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
- lmcEventLogBuf[lmcEventLogIndex++] = arg2;
- lmcEventLogBuf[lmcEventLogIndex++] = arg3;
- lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
-
- lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
-}
-#endif /* DEBUG */
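-
-/* Editor's sketch, not in the original sources: a hypothetical dump helper
- * showing the ring layout above - lmcEventLogIndex points at the oldest
- * 4-word record, and the power-of-2 mask makes the index wrap in place.
- */
-#ifdef DEBUG
-void lmcEventLogDump(void)
-{
-	u32 i = lmcEventLogIndex;	/* oldest record comes first */
-	int n;
-
-	for (n = 0; n < LMC_EVENTLOGSIZE; n++) {
-		printk(KERN_DEBUG "lmc: event %u args %u %u at %u jiffies\n",
-		       lmcEventLogBuf[i], lmcEventLogBuf[i + 1],
-		       lmcEventLogBuf[i + 2], lmcEventLogBuf[i + 3]);
-		i = (i + LMC_EVENTLOGARGS) &
-		    ((LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1);
-	}
-}
-#endif /* DEBUG */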
-
-/* --------------------------- end if_lmc_linux.c ------------------------ */
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
deleted file mode 100644
index cfae9eddf003..000000000000
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_DEBUG_H_
-#define _LMC_DEBUG_H_
-
-#ifdef DEBUG
-#ifdef LMC_PACKET_LOG
-#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z))
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-#else
-#define LMC_CONSOLE_LOG(x,y,z)
-#endif
-
-
-
-/* Debug --- Event log definitions --- */
-/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */
-#define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */
-#define LMC_EVENTLOGARGS 4 /* number of args for each event */
-
-/* event indicators */
-#define LMC_EVENT_XMT 1
-#define LMC_EVENT_XMTEND 2
-#define LMC_EVENT_XMTINT 3
-#define LMC_EVENT_RCVINT 4
-#define LMC_EVENT_RCVEND 5
-#define LMC_EVENT_INT 6
-#define LMC_EVENT_XMTINTTMO 7
-#define LMC_EVENT_XMTPRCTMO 8
-#define LMC_EVENT_INTEND 9
-#define LMC_EVENT_RESET1 10
-#define LMC_EVENT_RESET2 11
-#define LMC_EVENT_FORCEDRESET 12
-#define LMC_EVENT_WATCHDOG 13
-#define LMC_EVENT_BADPKTSURGE 14
-#define LMC_EVENT_TBUSY0 15
-#define LMC_EVENT_TBUSY1 16
-
-
-#ifdef DEBUG
-extern u32 lmcEventLogIndex;
-extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
-#else
-#define LMC_EVENT_LOG(x,y,z)
-#endif /* DEBUG */
-
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
-void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
deleted file mode 100644
index 8c65e2176e94..000000000000
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_IOCTL_H_
-#define _LMC_IOCTL_H_
-/* $Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $ */
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-#define LMCIOCGINFO SIOCDEVPRIVATE+3 /* get current state */
-#define LMCIOCSINFO SIOCDEVPRIVATE+4 /* set state to user values */
-#define LMCIOCGETLMCSTATS SIOCDEVPRIVATE+5
-#define LMCIOCCLEARLMCSTATS SIOCDEVPRIVATE+6
-#define LMCIOCDUMPEVENTLOG SIOCDEVPRIVATE+7
-#define LMCIOCGETXINFO SIOCDEVPRIVATE+8
-#define LMCIOCSETCIRCUIT SIOCDEVPRIVATE+9
-#define LMCIOCUNUSEDATM SIOCDEVPRIVATE+10
-#define LMCIOCRESET SIOCDEVPRIVATE+11
-#define LMCIOCT1CONTROL SIOCDEVPRIVATE+12
-#define LMCIOCIFTYPE SIOCDEVPRIVATE+13
-#define LMCIOCXILINX SIOCDEVPRIVATE+14
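-
-/* Editor's sketch, not from the original lmcctl sources: the LMCIOC*
- * numbers above are SIOCDEVPRIVATE ioctls, so they are issued through a
- * socket with the interface name in struct ifreq and the argument passed
- * via ifr_data. lmc_ctl_t comes from lmc_var.h (not shown here).
- */
-#include <string.h>
-#include <unistd.h>
-#include <sys/socket.h>
-#include <sys/ioctl.h>
-#include <net/if.h>
-
-static int lmc_get_info(const char *ifname, lmc_ctl_t *ctl)
-{
-	struct ifreq ifr;
-	int fd = socket(AF_INET, SOCK_DGRAM, 0);
-	int ret;
-
-	if (fd < 0)
-		return -1;
-	memset(&ifr, 0, sizeof(ifr));
-	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
-	ifr.ifr_data = (char *)ctl;	/* filled in by LMCIOCGINFO */
-	ret = ioctl(fd, LMCIOCGINFO, &ifr);
-	close(fd);
-	return ret;
-}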
-
-#define LMC_CARDTYPE_UNKNOWN -1
-#define LMC_CARDTYPE_HSSI 1 /* probed card is an HSSI card */
-#define LMC_CARDTYPE_DS3 2 /* probed card is a DS3 card */
-#define LMC_CARDTYPE_SSI 3 /* probed card is an SSI card */
-#define LMC_CARDTYPE_T1 4 /* probed card is a T1 card */
-
-#define LMC_CTL_CARDTYPE_LMC5200 0 /* HSSI */
-#define LMC_CTL_CARDTYPE_LMC5245 1 /* DS3 */
-#define LMC_CTL_CARDTYPE_LMC1000 2 /* SSI, V.35 */
-#define LMC_CTL_CARDTYPE_LMC1200 3 /* DS1 */
-
-#define LMC_CTL_OFF 0 /* generic OFF value */
-#define LMC_CTL_ON 1 /* generic ON value */
-
-#define LMC_CTL_CLOCK_SOURCE_EXT 0 /* clock off line */
-#define LMC_CTL_CLOCK_SOURCE_INT 1 /* internal clock */
-
-#define LMC_CTL_CRC_LENGTH_16 16
-#define LMC_CTL_CRC_LENGTH_32 32
-#define LMC_CTL_CRC_BYTESIZE_2 2
-#define LMC_CTL_CRC_BYTESIZE_4 4
-
-
-#define LMC_CTL_CABLE_LENGTH_LT_100FT 0 /* DS3 cable < 100 feet */
-#define LMC_CTL_CABLE_LENGTH_GT_100FT 1 /* DS3 cable >= 100 feet */
-
-#define LMC_CTL_CIRCUIT_TYPE_E1 0
-#define LMC_CTL_CIRCUIT_TYPE_T1 1
-
-/*
- * IFTYPE defines
- */
-#define LMC_PPP 1 /* use generic HDLC interface */
-#define LMC_NET 2 /* use direct net interface */
-#define LMC_RAW 3 /* use direct net interface */
-
-/*
- * These are not in the least IOCTL related, but I want them common.
- */
-/*
- * assignments for the GPIO register on the DEC chip (common)
- */
-#define LMC_GEP_INIT 0x01 /* 0: */
-#define LMC_GEP_RESET 0x02 /* 1: */
-#define LMC_GEP_MODE 0x10 /* 4: */
-#define LMC_GEP_DP 0x20 /* 5: */
-#define LMC_GEP_DATA 0x40 /* 6: serial out */
-#define LMC_GEP_CLK 0x80 /* 7: serial clock */
-
-/*
- * HSSI GPIO assignments
- */
-#define LMC_GEP_HSSI_ST 0x04 /* 2: receive timing sense (deprecated) */
-#define LMC_GEP_HSSI_CLOCK 0x08 /* 3: clock source */
-
-/*
- * T1 GPIO assignments
- */
-#define LMC_GEP_SSI_GENERATOR 0x04 /* 2: enable prog freq gen serial i/f */
-#define LMC_GEP_SSI_TXCLOCK 0x08 /* 3: provide clock on TXCLOCK output */
-
-/*
- * Common MII16 bits
- */
-#define LMC_MII16_LED0 0x0080
-#define LMC_MII16_LED1 0x0100
-#define LMC_MII16_LED2 0x0200
-#define LMC_MII16_LED3 0x0400 /* Error, and the red one */
-#define LMC_MII16_LED_ALL 0x0780 /* LED bit mask */
-#define LMC_MII16_FIFO_RESET 0x0800
-
-/*
- * definitions for HSSI
- */
-#define LMC_MII16_HSSI_TA 0x0001
-#define LMC_MII16_HSSI_CA 0x0002
-#define LMC_MII16_HSSI_LA 0x0004
-#define LMC_MII16_HSSI_LB 0x0008
-#define LMC_MII16_HSSI_LC 0x0010
-#define LMC_MII16_HSSI_TM 0x0020
-#define LMC_MII16_HSSI_CRC 0x0040
-
-/*
- * assignments for the MII register 16 (DS3)
- */
-#define LMC_MII16_DS3_ZERO 0x0001
-#define LMC_MII16_DS3_TRLBK 0x0002
-#define LMC_MII16_DS3_LNLBK 0x0004
-#define LMC_MII16_DS3_RAIS 0x0008
-#define LMC_MII16_DS3_TAIS 0x0010
-#define LMC_MII16_DS3_BIST 0x0020
-#define LMC_MII16_DS3_DLOS 0x0040
-#define LMC_MII16_DS3_CRC 0x1000
-#define LMC_MII16_DS3_SCRAM 0x2000
-#define LMC_MII16_DS3_SCRAM_LARS 0x4000
-
-/* Note: 2 pairs of LEDs were swapped by mistake
- * in the Xilinx code for the DS3 & DS1 adapters */
-#define LMC_DS3_LED0 0x0100 /* bit 08 yellow */
-#define LMC_DS3_LED1 0x0080 /* bit 07 blue */
-#define LMC_DS3_LED2 0x0400 /* bit 10 green */
-#define LMC_DS3_LED3 0x0200 /* bit 09 red */
-
-/*
- * framer register 0 and 7 (7 is latched and reset on read)
- */
-#define LMC_FRAMER_REG0_DLOS 0x80 /* digital loss of service */
-#define LMC_FRAMER_REG0_OOFS 0x40 /* out of frame sync */
-#define LMC_FRAMER_REG0_AIS 0x20 /* alarm indication signal */
-#define LMC_FRAMER_REG0_CIS 0x10 /* channel idle */
-#define LMC_FRAMER_REG0_LOC 0x08 /* loss of clock */
-
-/*
- * Framer register 9 contains the blue alarm signal
- */
-#define LMC_FRAMER_REG9_RBLUE 0x02 /* Blue alarm failure */
-
-/*
- * Framer register 0x10 contains xbit error
- */
-#define LMC_FRAMER_REG10_XBIT 0x01 /* X bit error alarm failure */
-
-/*
- * And SSI, LMC1000
- */
-#define LMC_MII16_SSI_DTR 0x0001 /* DTR output RW */
-#define LMC_MII16_SSI_DSR 0x0002 /* DSR input RO */
-#define LMC_MII16_SSI_RTS 0x0004 /* RTS output RW */
-#define LMC_MII16_SSI_CTS 0x0008 /* CTS input RO */
-#define LMC_MII16_SSI_DCD 0x0010 /* DCD input RO */
-#define LMC_MII16_SSI_RI 0x0020 /* RI input RO */
-#define LMC_MII16_SSI_CRC 0x1000 /* CRC select - RW */
-
-/*
- * bits 0x0080 through 0x0800 are generic, and described
- * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET
- */
-#define LMC_MII16_SSI_LL 0x1000 /* LL output RW */
-#define LMC_MII16_SSI_RL 0x2000 /* RL output RW */
-#define LMC_MII16_SSI_TM 0x4000 /* TM input RO */
-#define LMC_MII16_SSI_LOOP 0x8000 /* loopback enable RW */
-
-/*
- * Some of the MII16 bits are mirrored in the MII17 register as well,
- * but let's keep things separate for now, and get only the cable from
- * the MII17.
- */
-#define LMC_MII17_SSI_CABLE_MASK 0x0038 /* mask to extract the cable type */
-#define LMC_MII17_SSI_CABLE_SHIFT 3 /* shift to extract the cable type */
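-
-/* For illustration (editor's addition, not in the original source):
- * extracting the cable code from a register 17 read looks like
- *
- *	cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
- */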
-
-/*
- * And T1, LMC1200
- */
-#define LMC_MII16_T1_UNUSED1 0x0003
-#define LMC_MII16_T1_XOE 0x0004
-#define LMC_MII16_T1_RST 0x0008 /* T1 chip reset - RW */
-#define LMC_MII16_T1_Z 0x0010 /* output impedance T1=1, E1=0 output - RW */
-#define LMC_MII16_T1_INTR 0x0020 /* interrupt from 8370 - RO */
-#define LMC_MII16_T1_ONESEC 0x0040 /* one second square wave - RO */
-
-#define LMC_MII16_T1_LED0 0x0100
-#define LMC_MII16_T1_LED1 0x0080
-#define LMC_MII16_T1_LED2 0x0400
-#define LMC_MII16_T1_LED3 0x0200
-#define LMC_MII16_T1_FIFO_RESET 0x0800
-
-#define LMC_MII16_T1_CRC 0x1000 /* CRC select - RW */
-#define LMC_MII16_T1_UNUSED2 0xe000
-
-
-/* 8370 framer registers */
-
-#define T1FRAMER_ALARM1_STATUS 0x47
-#define T1FRAMER_ALARM2_STATUS 0x48
-#define T1FRAMER_FERR_LSB 0x50
-#define T1FRAMER_FERR_MSB 0x51 /* framing bit error counter */
-#define T1FRAMER_LCV_LSB 0x54
-#define T1FRAMER_LCV_MSB 0x55 /* line code violation counter */
-#define T1FRAMER_AERR 0x5A
-
-/* mask for the above AERR register */
-#define T1FRAMER_LOF_MASK (0x0f0) /* receive loss of frame */
-#define T1FRAMER_COFA_MASK (0x0c0) /* change of frame alignment */
-#define T1FRAMER_SEF_MASK (0x03) /* severely errored frame */
-
-/* 8370 framer register ALM1 (0x47) values
- * used to determine link status
- */
-
-#define T1F_SIGFRZ 0x01 /* signaling freeze */
-#define T1F_RLOF 0x02 /* receive loss of frame alignment */
-#define T1F_RLOS 0x04 /* receive loss of signal */
-#define T1F_RALOS 0x08 /* receive analog loss of signal or RCKI loss of clock */
-#define T1F_RAIS 0x10 /* receive alarm indication signal */
-#define T1F_UNUSED 0x20
-#define T1F_RYEL 0x40 /* receive yellow alarm */
-#define T1F_RMYEL 0x80 /* receive multiframe yellow alarm */
-
-#define LMC_T1F_WRITE 0
-#define LMC_T1F_READ 1
-
-typedef struct lmc_st1f_control {
- int command;
- int address;
- int value;
- char __user *data;
-} lmc_t1f_control;
-
-enum lmc_xilinx_c {
- lmc_xilinx_reset = 1,
- lmc_xilinx_load_prom = 2,
- lmc_xilinx_load = 3
-};
-
-struct lmc_xilinx_control {
- enum lmc_xilinx_c command;
- int len;
- char __user *data;
-};
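-
-/* Editor's sketch, not from the original utilities: driving the
- * LMCIOCXILINX reload path with the struct above, given a socket fd and
- * an ifreq with the interface name filled in as in the LMCIOCGINFO
- * example earlier. The driver clocks one fabric bit per byte of the
- * buffer, so `bits` must hold literal 0/1 byte values, not packed bits
- * or ASCII digits; a user-space build is assumed to define __user away.
- */
-static int lmc_xilinx_reload(int fd, struct ifreq *ifr, char *bits, int nbits)
-{
-	struct lmc_xilinx_control xc = {
-		.command = lmc_xilinx_load,
-		.len = nbits,
-		.data = bits,
-	};
-
-	ifr->ifr_data = (char *)&xc;
-	return ioctl(fd, LMCIOCXILINX, ifr);
-}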
-
-/* ------------------ end T1 defs ------------------- */
-
-#define LMC_MII_LedMask 0x0780
-#define LMC_MII_LedBitPos 7
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
deleted file mode 100644
index 76c6b4f89890..000000000000
--- a/drivers/net/wan/lmc/lmc_main.c
+++ /dev/null
@@ -1,2009 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- *
- * With Help By:
- * David Boggs
- * Ron Crane
- * Alan Cox
- *
- * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
- *
- * To control link specific options lmcctl is required.
- * It can be obtained from ftp.lanmedia.com.
- *
- * Linux driver notes:
- * Linux uses the device struct lmc_private to pass private information
- * around.
- *
- * The initialization portion of this driver consists of the lmc_reset()
- * and lmc_dec_reset() functions, as well as the LED controls and the
- * lmc_initcsrs() function.
- *
- * The watchdog function runs every second and checks to see if
- * we still have link, and that the timing source is what we expected
- * it to be. If link is lost, the interface is marked down, and
- * we no longer can transmit.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-#include <linux/jiffies.h>
-//#include <asm/spinlock.h>
-
-#define DRIVER_MAJOR_VERSION 1
-#define DRIVER_MINOR_VERSION 34
-#define DRIVER_SUB_VERSION 0
-
-#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-#include "lmc_proto.h"
-
-static int LMC_PKT_BUF_SZ = 1542;
-
-static const struct pci_device_id lmc_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_VENDOR_ID_LMC, PCI_ANY_ID },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_ANY_ID, PCI_VENDOR_ID_LMC },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
-MODULE_LICENSE("GPL v2");
-
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static int lmc_rx (struct net_device *dev);
-static int lmc_open(struct net_device *dev);
-static int lmc_close(struct net_device *dev);
-static struct net_device_stats *lmc_get_stats(struct net_device *dev);
-static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
-static void lmc_softreset(lmc_softc_t * const);
-static void lmc_running_reset(struct net_device *dev);
-static int lmc_ifdown(struct net_device * const);
-static void lmc_watchdog(struct timer_list *t);
-static void lmc_reset(lmc_softc_t * const sc);
-static void lmc_dec_reset(lmc_softc_t * const sc);
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
-
-/*
- * linux reserves 16 device specific IOCTLs. We call them
- * LMCIOC* to control various bits of our world.
- */
-static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- lmc_ctl_t ctl;
- int ret = -EOPNOTSUPP;
- u16 regVal;
- unsigned long flags;
-
- /*
- * Most functions mess with the structure.
- * Disable interrupts while we do the polling.
- */
-
- switch (cmd) {
- /*
- * Return the current driver state. Since we keep this up
- * to date internally, just copy it out to the user.
- */
- case LMCIOCGINFO: /*fold01*/
- if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t)))
- ret = -EFAULT;
- else
- ret = 0;
- break;
-
- case LMCIOCSINFO: /*fold01*/
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- if(dev->flags & IFF_UP){
- ret = -EBUSY;
- break;
- }
-
- if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
- ret = -EFAULT;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_media->set_status (sc, &ctl);
-
- if(ctl.crc_length != sc->ictl.crc_length) {
- sc->lmc_media->set_crc_length(sc, ctl.crc_length);
- if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
- sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
- else
- sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0;
- break;
-
- case LMCIOCIFTYPE: /*fold01*/
- {
- u16 old_type = sc->if_type;
- u16 new_type;
-
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- if (copy_from_user(&new_type, data, sizeof(u16))) {
- ret = -EFAULT;
- break;
- }
-
-
- if (new_type == old_type)
- {
- ret = 0 ;
- break; /* no change */
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_proto_close(sc);
-
- sc->if_type = new_type;
- lmc_proto_attach(sc);
- ret = lmc_proto_open(sc);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- break;
- }
-
- case LMCIOCGETXINFO: /*fold01*/
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
-
- sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
- sc->lmc_xinfo.PciSlotNumber = 0;
- sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
- sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
- sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
- sc->lmc_xinfo.XilinxRevisionNumber =
- lmc_mii_readreg (sc, 0, 3) & 0xf;
- sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
- sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
- sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
-
- if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo)))
- ret = -EFAULT;
- else
- ret = 0;
-
- break;
-
- case LMCIOCGETLMCSTATS:
- spin_lock_irqsave(&sc->lmc_lock, flags);
- if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
- sc->extra_stats.framingBitErrorCount +=
- lmc_mii_readreg(sc, 0, 18) & 0xff;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
- sc->extra_stats.framingBitErrorCount +=
- (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
- sc->extra_stats.lineCodeViolationCount +=
- lmc_mii_readreg(sc, 0, 18) & 0xff;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
- sc->extra_stats.lineCodeViolationCount +=
- (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
- lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
- regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
-
- sc->extra_stats.lossOfFrameCount +=
- (regVal & T1FRAMER_LOF_MASK) >> 4;
- sc->extra_stats.changeOfFrameAlignmentCount +=
- (regVal & T1FRAMER_COFA_MASK) >> 2;
- sc->extra_stats.severelyErroredFrameCount +=
- regVal & T1FRAMER_SEF_MASK;
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- if (copy_to_user(data, &sc->lmc_device->stats,
- sizeof(sc->lmc_device->stats)) ||
- copy_to_user(data + sizeof(sc->lmc_device->stats),
- &sc->extra_stats, sizeof(sc->extra_stats)))
- ret = -EFAULT;
- else
- ret = 0;
- break;
-
- case LMCIOCCLEARLMCSTATS:
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
- memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
- sc->extra_stats.check = STATCHECK;
- sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
- sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
- sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- ret = 0;
- break;
-
- case LMCIOCSETCIRCUIT: /*fold01*/
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- if(dev->flags & IFF_UP){
- ret = -EBUSY;
- break;
- }
-
- if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) {
- ret = -EFAULT;
- break;
- }
- spin_lock_irqsave(&sc->lmc_lock, flags);
- sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
- sc->ictl.circuit_type = ctl.circuit_type;
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- ret = 0;
-
- break;
-
- case LMCIOCRESET: /*fold01*/
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- /* Reset driver and bring back to current state */
- printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
- lmc_running_reset (dev);
- printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
-
- LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0;
- break;
-
-#ifdef DEBUG
- case LMCIOCDUMPEVENTLOG:
- if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) {
- ret = -EFAULT;
- break;
- }
- if (copy_to_user(data + sizeof(u32), lmcEventLogBuf,
- sizeof(lmcEventLogBuf)))
- ret = -EFAULT;
- else
- ret = 0;
-
- break;
-#endif /* DEBUG */
- case LMCIOCT1CONTROL: /*fold01*/
- if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
- ret = -EOPNOTSUPP;
- break;
- }
- break;
- case LMCIOCXILINX: /*fold01*/
- {
- struct lmc_xilinx_control xc; /*fold02*/
-
- if (!capable(CAP_NET_ADMIN)){
- ret = -EPERM;
- break;
- }
-
- /*
- * Stop the transmitter while we restart the hardware
- */
- netif_stop_queue(dev);
-
- if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) {
- ret = -EFAULT;
- break;
- }
- switch(xc.command){
- case lmc_xilinx_reset: /*fold02*/
- {
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_mii_readreg (sc, 0, 16);
-
- /*
- * Drive all of them to 0 and make them inputs
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * make the reset output
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
- /*
- * RESET low to force configuration. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
-
- sc->lmc_gpio &= ~LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- sc->lmc_gpio |= LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /* Reset the framer hardware */
- sc->lmc_media->set_link_status (sc, 1);
- sc->lmc_media->set_status (sc, NULL);
-// lmc_softreset(sc);
-
- {
- int i;
- for(i = 0; i < 5; i++){
- lmc_led_on(sc, LMC_DS3_LED0);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED0);
- lmc_led_on(sc, LMC_DS3_LED1);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED1);
- lmc_led_on(sc, LMC_DS3_LED3);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED3);
- lmc_led_on(sc, LMC_DS3_LED2);
- mdelay(100);
- lmc_led_off(sc, LMC_DS3_LED2);
- }
- }
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
-
-
- ret = 0x0;
-
- }
-
- break;
- case lmc_xilinx_load_prom: /*fold02*/
- {
- int timeout = 500000;
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_mii_readreg (sc, 0, 16);
-
- /*
- * Make all of them 0 and make input
- */
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * make the reset output
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * RESET low to force configuration. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
-
- sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- /*
- * busy wait for the chip to reset
- */
- while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
- (timeout-- > 0))
- cpu_relax();
-
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, 0xff);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- ret = 0x0;
-
-
- break;
-
- }
-
- case lmc_xilinx_load: /*fold02*/
- {
- char *data;
- int pos;
- int timeout = 500000;
-
- if (!xc.data) {
- ret = -EINVAL;
- break;
- }
-
- data = memdup_user(xc.data, xc.len);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- break;
- }
-
- printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
- lmc_gpio_mkinput(sc, 0xff);
-
- /*
- * Clear the Xilinx and start programming from the DEC
- */
-
- /*
- * Set output as:
- * Reset: 0 (active)
- * DP: 0 (active)
- * Mode: 1
- *
- */
- sc->lmc_gpio = 0x00;
- sc->lmc_gpio &= ~LMC_GEP_DP;
- sc->lmc_gpio &= ~LMC_GEP_RESET;
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * Wait at least 10 us (20 to be safe)
- */
- udelay(50);
-
- /*
- * Clear reset and activate programming lines
- * Reset: Input
- * DP: Input
- * Clock: Output
- * Data: Output
- * Mode: Output
- */
- lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
-
- /*
- * Set LOAD, DATA, Clock to 1
- */
- sc->lmc_gpio = 0x00;
- sc->lmc_gpio |= LMC_GEP_MODE;
- sc->lmc_gpio |= LMC_GEP_DATA;
- sc->lmc_gpio |= LMC_GEP_CLK;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
-
- /*
- * busy wait for the chip to reset
- */
- while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
- (timeout-- > 0))
- cpu_relax();
-
- printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
-
- for(pos = 0; pos < xc.len; pos++){
- switch(data[pos]){
- case 0:
- sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
- break;
- case 1:
- sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
- break;
- default:
- printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
- sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
- }
- sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
- udelay(1);
-
- sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
- sc->lmc_gpio |= LMC_GEP_MODE;
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
- udelay(1);
- }
- if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
- printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
- }
- else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
- printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
- }
- else {
- printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
- }
-
- lmc_gpio_mkinput(sc, 0xff);
-
- sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- kfree(data);
-
- ret = 0;
-
- break;
- }
- default: /*fold02*/
- ret = -EBADE;
- break;
- }
-
- netif_wake_queue(dev);
- sc->lmc_txfull = 0;
-
- }
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-
-/* the watchdog process that cruises around */
-static void lmc_watchdog(struct timer_list *t) /*fold00*/
-{
- lmc_softc_t *sc = from_timer(sc, t, timer);
- struct net_device *dev = sc->lmc_device;
- int link_status;
- u32 ticks;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- if(sc->check != 0xBEAFCAFE){
- printk("LMC: Corrupt net_device struct, breaking out\n");
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
- return;
- }
-
-
- /* Make sure the tx jabber and rx watchdog are off,
- * and the transmit and receive processes are running.
- */
-
- LMC_CSR_WRITE (sc, csr_15, 0x00000011);
- sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- if (sc->lmc_ok == 0)
- goto kick_timer;
-
- LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
-
- /* --- begin time out check -----------------------------------
- * check for a transmit interrupt timeout
- * i.e. have packets been transmitted without the TX interrupt being serviced? */
- if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd == 0)
- {
-
- /* wait for the watchdog to come around again */
- sc->tx_TimeoutInd = 1;
- }
- else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
- sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
- sc->tx_TimeoutInd)
- {
-
- LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
-
- sc->tx_TimeoutDisplay = 1;
- sc->extra_stats.tx_TimeoutCnt++;
-
- /* DEC chip is stuck, hit it with a RESET!!!! */
- lmc_running_reset (dev);
-
-
- /* look at receive & transmit process state to make sure they are running */
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
-
- /* look at: DSR - 02 for Reg 16
- * CTS - 08
- * DCD - 10
- * RI - 20
- * for Reg 17
- */
- LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
-
- /* reset the transmit timeout detection flag */
- sc->tx_TimeoutInd = 0;
- sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
- } else {
- sc->tx_TimeoutInd = 0;
- sc->lastlmc_taint_tx = sc->lmc_taint_tx;
- sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
- }
-
- /* --- end time out check ----------------------------------- */
-
-
- link_status = sc->lmc_media->get_link_status (sc);
-
- /*
- * hardware level link lost, but the interface is marked as up.
- * Mark it as down.
- */
- if ((link_status == 0) && (sc->last_link_status != 0)) {
- printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
- sc->last_link_status = 0;
- /* lmc_reset (sc); Why reset??? The link can go down ok */
-
- /* Inform the world that link has been lost */
- netif_carrier_off(dev);
- }
-
- /*
- * hardware link is up, but the interface is marked as down.
- * Bring it back up again.
- */
- if (link_status != 0 && sc->last_link_status == 0) {
- printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
- sc->last_link_status = 1;
- /* lmc_reset (sc); Again why reset??? */
-
- netif_carrier_on(dev);
- }
-
- /* Call media specific watchdog functions */
- sc->lmc_media->watchdog(sc);
-
- /*
- * Poke the transmitter to make sure it
- * never stops, even if we run out of mem
- */
- LMC_CSR_WRITE(sc, csr_rxpoll, 0);
-
- /*
- * Check for code that failed
- * and try and fix it as appropriate
- */
- if(sc->failed_ring == 1){
- /*
- * Failed to set up the recv/xmit rings.
- * Try again.
- */
- sc->failed_ring = 0;
- lmc_softreset(sc);
- }
- if(sc->failed_recv_alloc == 1){
- /*
- * We failed to alloc mem in the
- * interrupt handler, go through the rings
- * and rebuild them
- */
- sc->failed_recv_alloc = 0;
- lmc_softreset(sc);
- }
-
-
- /*
- * remember the timer value
- */
-kick_timer:
-
- ticks = LMC_CSR_READ (sc, csr_gp_timer);
- LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
- sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
- /*
- * restart this timer.
- */
- sc->timer.expires = jiffies + (HZ);
- add_timer (&sc->timer);
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
-
-static int lmc_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static const struct net_device_ops lmc_ops = {
- .ndo_open = lmc_open,
- .ndo_stop = lmc_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
- .ndo_siocdevprivate = lmc_siocdevprivate,
- .ndo_tx_timeout = lmc_driver_timeout,
- .ndo_get_stats = lmc_get_stats,
-};
-
-static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- lmc_softc_t *sc;
- struct net_device *dev;
- u16 subdevice;
- u16 AdapModelNum;
- int err;
- static int cards_found;
-
- err = pcim_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
- return err;
- }
-
- err = pci_request_regions(pdev, "lmc");
- if (err) {
- printk(KERN_ERR "lmc: pci_request_region failed\n");
- return err;
- }
-
- /*
- * Allocate our own device structure
- */
- sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
- if (!sc)
- return -ENOMEM;
-
- dev = alloc_hdlcdev(sc);
- if (!dev) {
- printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
- return -ENOMEM;
- }
-
-
- dev->type = ARPHRD_HDLC;
- dev_to_hdlc(dev)->xmit = lmc_start_xmit;
- dev_to_hdlc(dev)->attach = lmc_attach;
- dev->netdev_ops = &lmc_ops;
- dev->watchdog_timeo = HZ; /* 1 second */
- dev->tx_queue_len = 100;
- sc->lmc_device = dev;
- sc->name = dev->name;
- sc->if_type = LMC_PPP;
- sc->check = 0xBEAFCAFE;
- dev->base_addr = pci_resource_start(pdev, 0);
- dev->irq = pdev->irq;
- pci_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- /*
- * This will get the protocol layer ready and do any one-time inits.
- * It must have valid sc and dev structures.
- */
- lmc_proto_attach(sc);
-
- /* Init the spin lock so we can use it later */
-
- spin_lock_init(&sc->lmc_lock);
- pci_set_master(pdev);
-
- printk(KERN_INFO "hdlc: detected at %lx, irq %d\n",
- dev->base_addr, dev->irq);
-
- err = register_hdlc_device(dev);
- if (err) {
- printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
- free_netdev(dev);
- return err;
- }
-
- sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
-
- /*
- *
- * Check either the subvendor or the subdevice: some systems reverse
- * the setting in the BIOS; this seems to be version- and arch-dependent.
- * Fix the error by exchanging the two values.
- */
- if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
- subdevice = pdev->subsystem_vendor;
-
- switch (subdevice) {
- case PCI_DEVICE_ID_LMC_HSSI:
- printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
- sc->lmc_media = &lmc_hssi_media;
- break;
- case PCI_DEVICE_ID_LMC_DS3:
- printk(KERN_INFO "%s: LMC DS3\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_DS3;
- sc->lmc_media = &lmc_ds3_media;
- break;
- case PCI_DEVICE_ID_LMC_SSI:
- printk(KERN_INFO "%s: LMC SSI\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_SSI;
- sc->lmc_media = &lmc_ssi_media;
- break;
- case PCI_DEVICE_ID_LMC_T1:
- printk(KERN_INFO "%s: LMC T1\n", dev->name);
- sc->lmc_cardtype = LMC_CARDTYPE_T1;
- sc->lmc_media = &lmc_t1_media;
- break;
- default:
- printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
- unregister_hdlc_device(dev);
- return -EIO;
- }
-
- lmc_initcsrs (sc, dev->base_addr, 8);
-
- lmc_gpio_mkinput (sc, 0xff);
- sc->lmc_gpio = 0; /* drive no signals yet */
-
- sc->lmc_media->defaults (sc);
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
- /* verify that the PCI Sub System ID matches the Adapter Model number
- * from the MII register
- */
- AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
-
- if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
- subdevice != PCI_DEVICE_ID_LMC_T1) &&
- (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
- subdevice != PCI_DEVICE_ID_LMC_SSI) &&
- (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
- subdevice != PCI_DEVICE_ID_LMC_DS3) &&
- (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
- subdevice != PCI_DEVICE_ID_LMC_HSSI))
- printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
- " Subsystem ID = 0x%04x\n",
- dev->name, AdapModelNum, subdevice);
-
- /*
- * reset clock
- */
- LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
-
- sc->board_idx = cards_found++;
- sc->extra_stats.check = STATCHECK;
- sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
- sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
- sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
-
- sc->lmc_ok = 0;
- sc->last_link_status = 0;
-
- return 0;
-}
-
-/*
- * Called from pci when removing module.
- */
-static void lmc_remove_one(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- printk(KERN_DEBUG "%s: removing...\n", dev->name);
- unregister_hdlc_device(dev);
- free_netdev(dev);
- }
-}
-
-/* After this is called, packets can be sent.
- * Does not initialize the addresses
- */
-static int lmc_open(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- int err;
-
- lmc_led_on(sc, LMC_DS3_LED0);
-
- lmc_dec_reset(sc);
- lmc_reset(sc);
-
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
- lmc_mii_readreg(sc, 0, 17));
-
- if (sc->lmc_ok)
- return 0;
-
- lmc_softreset (sc);
-
-	/* Since we have to use the PCI bus, this should work on x86, alpha and ppc */
- if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
- printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
- return -EAGAIN;
- }
- sc->got_irq = 1;
-
- /* Assert Terminal Active */
- sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
- sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
-
- /*
- * reset to last state.
- */
- sc->lmc_media->set_status (sc, NULL);
-
- /* setup default bits to be used in tulip_desc_t transmit descriptor
- * -baz */
- sc->TxDescriptControlInit = (
- LMC_TDES_INTERRUPT_ON_COMPLETION
- | LMC_TDES_FIRST_SEGMENT
- | LMC_TDES_LAST_SEGMENT
- | LMC_TDES_SECOND_ADDR_CHAINED
- | LMC_TDES_DISABLE_PADDING
- );
-
- if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
- /* disable 32 bit CRC generated by ASIC */
- sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
- }
- sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
-	/* Acknowledge Terminal Active and light the LEDs */
-
- /* dev->flags |= IFF_UP; */
-
- if ((err = lmc_proto_open(sc)) != 0)
- return err;
-
- netif_start_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
- /*
- * select what interrupts we want to get
- */
- sc->lmc_intrmask = 0;
- /* Should be using the default interrupt mask defined in the .h file. */
- sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
- | TULIP_STS_RXINTR
- | TULIP_STS_TXINTR
- | TULIP_STS_ABNRMLINTR
- | TULIP_STS_SYSERROR
- | TULIP_STS_TXSTOPPED
- | TULIP_STS_TXUNDERFLOW
- | TULIP_STS_RXSTOPPED
- | TULIP_STS_RXNOBUF
- );
- LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
- sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
- sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- sc->lmc_ok = 1; /* Run watchdog */
-
- /*
- * Set the if up now - pfb
- */
-
- sc->last_link_status = 1;
-
-	/*
-	 * Set up the watchdog timer and start it running. Note that
-	 * lmc_ok was set above, so the first run (one second out) will
-	 * do real work rather than being a NOP.
-	 */
- timer_setup(&sc->timer, lmc_watchdog, 0);
- sc->timer.expires = jiffies + HZ;
- add_timer (&sc->timer);
-
- return 0;
-}
-
-/* Total reset to compensate for the AdTran DSU doing bad things
- * under heavy load
- */
-
-static void lmc_running_reset (struct net_device *dev) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
-
- /* stop interrupts */
- /* Clear the interrupt mask */
- LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
- lmc_dec_reset (sc);
- lmc_reset (sc);
- lmc_softreset (sc);
- /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
- sc->lmc_media->set_link_status (sc, 1);
- sc->lmc_media->set_status (sc, NULL);
-
- netif_wake_queue(dev);
-
- sc->lmc_txfull = 0;
- sc->extra_stats.tx_tbusy0++;
-
- sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
- LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
-
- sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
- LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-}
-
-
-/* This is what is called when you ifconfig down a device.
- * This disables the timer for the watchdog and keepalives,
- * and disables the irq for dev.
- */
-static int lmc_close(struct net_device *dev)
-{
- /* not calling release_region() as we should */
- lmc_softc_t *sc = dev_to_sc(dev);
-
- sc->lmc_ok = 0;
- sc->lmc_media->set_link_status (sc, 0);
- del_timer (&sc->timer);
- lmc_proto_close(sc);
- lmc_ifdown (dev);
-
- return 0;
-}
-
-/* Ends the transfer of packets */
-/* When the interface goes down, this is called */
-static int lmc_ifdown (struct net_device *dev) /*fold00*/
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr6;
- int i;
-
- /* Don't let anything else go on right now */
- // dev->start = 0;
- netif_stop_queue(dev);
- sc->extra_stats.tx_tbusy1++;
-
- /* stop interrupts */
- /* Clear the interrupt mask */
- LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
-
- /* Stop Tx and Rx on the chip */
- csr6 = LMC_CSR_READ (sc, csr_command);
- csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
- csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
- LMC_CSR_WRITE (sc, csr_command, csr6);
-
- sc->lmc_device->stats.rx_missed_errors +=
- LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
- /* release the interrupt */
- if(sc->got_irq == 1){
- free_irq (dev->irq, dev);
- sc->got_irq = 0;
- }
-
- /* free skbuffs in the Rx queue */
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- struct sk_buff *skb = sc->lmc_rxq[i];
- sc->lmc_rxq[i] = NULL;
- sc->lmc_rxring[i].status = 0;
- sc->lmc_rxring[i].length = 0;
- sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
-		if (skb != NULL)
-			dev_kfree_skb(skb);
- }
-
- for (i = 0; i < LMC_TXDESCS; i++)
- {
- if (sc->lmc_txq[i] != NULL)
- dev_kfree_skb(sc->lmc_txq[i]);
- sc->lmc_txq[i] = NULL;
- }
-
- lmc_led_off (sc, LMC_MII16_LED_ALL);
-
- netif_wake_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
- return 0;
-}
-
-/* Interrupt handling routine. This will take an incoming packet, or clean
- * up after a transmit.
- */
-static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
-{
- struct net_device *dev = (struct net_device *) dev_instance;
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr;
- int i;
- s32 stat;
-	unsigned int badtx;
-	u32 firstcsr;
- int max_work = LMC_RXDESCS;
- int handled = 0;
-
- spin_lock(&sc->lmc_lock);
-
- /*
- * Read the csr to find what interrupts we have (if any)
- */
- csr = LMC_CSR_READ (sc, csr_status);
-
- /*
- * Make sure this is our interrupt
- */
- if ( ! (csr & sc->lmc_intrmask)) {
- goto lmc_int_fail_out;
- }
-
-	/* remember the first status word for the event log below */
-	firstcsr = csr;
-
-	/* always go through this loop at least once */
- while (csr & sc->lmc_intrmask) {
- handled = 1;
-
- /*
-		 * Clear the interrupt bits; we handle all cases below
- */
- LMC_CSR_WRITE (sc, csr_status, csr);
-
-		/*
-		 * One of
-		 * - Transmit process stopped CSR5<1>
-		 * - Transmit jabber timeout CSR5<3>
-		 * - Transmit underflow CSR5<5>
-		 * - Receive buffer unavailable CSR5<7>
-		 * - Receive process stopped CSR5<8>
-		 * - Receive watchdog timeout CSR5<9>
-		 * - Early transmit interrupt CSR5<10>
-		 *
-		 * Is this really right? Should we do a running reset for jabber?
-		 * (being a WAN card and all)
-		 */
- if (csr & TULIP_STS_ABNRMLINTR){
- lmc_running_reset (dev);
- break;
- }
-
- if (csr & TULIP_STS_RXINTR)
- lmc_rx (dev);
-
- if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
-
- int n_compl = 0 ;
- /* reset the transmit timeout detection flag -baz */
- sc->extra_stats.tx_NoCompleteCnt = 0;
-
- badtx = sc->lmc_taint_tx;
- i = badtx % LMC_TXDESCS;
-
- while ((badtx < sc->lmc_next_tx)) {
- stat = sc->lmc_txring[i].status;
-
- LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
- sc->lmc_txring[i].length);
- /*
-				 * If bit 31 is 1, the Tulip still owns it; break out of the loop
- */
- if (stat & 0x80000000)
- break;
-
- n_compl++ ; /* i.e., have an empty slot in ring */
- /*
-				 * If we have no skbuff or have already
-				 * cleared it, continue to the next buffer
- */
- if (sc->lmc_txq[i] == NULL)
- continue;
-
- /*
- * Check the total error summary to look for any errors
- */
- if (stat & 0x8000) {
- sc->lmc_device->stats.tx_errors++;
- if (stat & 0x4104)
- sc->lmc_device->stats.tx_aborted_errors++;
- if (stat & 0x0C00)
- sc->lmc_device->stats.tx_carrier_errors++;
- if (stat & 0x0200)
- sc->lmc_device->stats.tx_window_errors++;
- if (stat & 0x0002)
- sc->lmc_device->stats.tx_fifo_errors++;
- } else {
- sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
-
- sc->lmc_device->stats.tx_packets++;
- }
-
- dev_consume_skb_irq(sc->lmc_txq[i]);
- sc->lmc_txq[i] = NULL;
-
- badtx++;
- i = badtx % LMC_TXDESCS;
- }
-
- if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
- {
- printk ("%s: out of sync pointer\n", dev->name);
- badtx += LMC_TXDESCS;
- }
- LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
- sc->lmc_txfull = 0;
- netif_wake_queue(dev);
- sc->extra_stats.tx_tbusy0++;
-
-
-#ifdef DEBUG
- sc->extra_stats.dirtyTx = badtx;
- sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
- sc->extra_stats.lmc_txfull = sc->lmc_txfull;
-#endif
- sc->lmc_taint_tx = badtx;
-
- /*
- * Why was there a break here???
- */
- } /* end handle transmit interrupt */
-
- if (csr & TULIP_STS_SYSERROR) {
- u32 error;
- printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
- error = csr>>23 & 0x7;
- switch(error){
- case 0x000:
- printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
- break;
- case 0x001:
- printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
- break;
- case 0x002:
- printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
- break;
- default:
- printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
- }
- lmc_dec_reset (sc);
- lmc_reset (sc);
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2,
- lmc_mii_readreg (sc, 0, 16),
- lmc_mii_readreg (sc, 0, 17));
-
- }
-
-
- if(max_work-- <= 0)
- break;
-
- /*
- * Get current csr status to make sure
- * we've cleared all interrupts
- */
- csr = LMC_CSR_READ (sc, csr_status);
- } /* end interrupt loop */
- LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
-
-lmc_int_fail_out:
-
- spin_unlock(&sc->lmc_lock);
-
- return IRQ_RETVAL(handled);
-}
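-
-/*
- * Note on the loop budget above: max_work starts at LMC_RXDESCS and is
- * decremented once per pass, so the handler re-reads CSR5 and loops at
- * most LMC_RXDESCS times before bailing out; anything still pending is
- * simply picked up by the next interrupt.
- */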
-
-static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 flag;
- int entry;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- /* normal path, tbusy known to be zero */
-
- entry = sc->lmc_next_tx % LMC_TXDESCS;
-
- sc->lmc_txq[entry] = skb;
- sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
-
- LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
-
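-	/*
-	 * A note on the magic "flag" values below: they appear to be Tulip
-	 * TDES1 control bits (an inference from the LMC_TDES_* constants
-	 * used in lmc_open(), not something this file states). 0x60000000
-	 * would be LAST_SEGMENT | FIRST_SEGMENT, while 0xe0000000 adds
-	 * INTERRUPT_ON_COMPLETION, so the intent is to request a TX
-	 * completion interrupt only around the ring-half-full mark; note,
-	 * though, that TxDescriptControlInit is OR'ed in below as well.
-	 */
-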
-#ifndef GCOM
- /* If the queue is less than half full, don't interrupt */
- if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
- {
- /* Do not interrupt on completion of this packet */
- flag = 0x60000000;
- netif_wake_queue(dev);
- }
- else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
- {
- /* This generates an interrupt on completion of this packet */
- flag = 0xe0000000;
- netif_wake_queue(dev);
- }
- else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
- {
- /* Do not interrupt on completion of this packet */
- flag = 0x60000000;
- netif_wake_queue(dev);
- }
- else
- {
- /* This generates an interrupt on completion of this packet */
- flag = 0xe0000000;
- sc->lmc_txfull = 1;
- netif_stop_queue(dev);
- }
-#else
- flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
-
- if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
- { /* ring full, go busy */
- sc->lmc_txfull = 1;
- netif_stop_queue(dev);
- sc->extra_stats.tx_tbusy1++;
- LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
- }
-#endif
-
-
- if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */
- flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */
-
- /* don't pad small packets either */
- flag = sc->lmc_txring[entry].length = (skb->len) | flag |
- sc->TxDescriptControlInit;
-
- /* set the transmit timeout flag to be checked in
- * the watchdog timer handler. -baz
- */
-
- sc->extra_stats.tx_NoCompleteCnt++;
- sc->lmc_next_tx++;
-
- /* give ownership to the chip */
- LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
- sc->lmc_txring[entry].status = 0x80000000;
-
- /* send now! */
- LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-
-static int lmc_rx(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- int i;
- int rx_work_limit = LMC_RXDESCS;
- int rxIntLoopCnt; /* debug -baz */
- int localLengthErrCnt = 0;
- long stat;
- struct sk_buff *skb, *nsb;
- u16 len;
-
- lmc_led_on(sc, LMC_DS3_LED3);
-
- rxIntLoopCnt = 0; /* debug -baz */
-
- i = sc->lmc_next_rx % LMC_RXDESCS;
-
- while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
- {
- rxIntLoopCnt++; /* debug -baz */
- len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
- if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
- if ((stat & 0x0000ffff) != 0x7fff) {
- /* Oversized frame */
- sc->lmc_device->stats.rx_length_errors++;
- goto skip_packet;
- }
- }
-
- if (stat & 0x00000008) { /* Catch a dribbling bit error */
- sc->lmc_device->stats.rx_errors++;
- sc->lmc_device->stats.rx_frame_errors++;
- goto skip_packet;
- }
-
-
- if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
- sc->lmc_device->stats.rx_errors++;
- sc->lmc_device->stats.rx_crc_errors++;
- goto skip_packet;
- }
-
- if (len > LMC_PKT_BUF_SZ) {
- sc->lmc_device->stats.rx_length_errors++;
- localLengthErrCnt++;
- goto skip_packet;
- }
-
- if (len < sc->lmc_crcSize + 2) {
- sc->lmc_device->stats.rx_length_errors++;
- sc->extra_stats.rx_SmallPktCnt++;
- localLengthErrCnt++;
- goto skip_packet;
- }
-
- if(stat & 0x00004000){
- printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
- }
-
- len -= sc->lmc_crcSize;
-
- skb = sc->lmc_rxq[i];
-
-		/*
-		 * We ran out of memory at some point;
-		 * just allocate an skb and continue.
-		 */
-
- if (!skb) {
- nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if (nsb) {
- sc->lmc_rxq[i] = nsb;
- nsb->dev = dev;
- sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
- }
- sc->failed_recv_alloc = 1;
- goto skip_packet;
- }
-
- sc->lmc_device->stats.rx_packets++;
- sc->lmc_device->stats.rx_bytes += len;
-
- LMC_CONSOLE_LOG("recv", skb->data, len);
-
-		/*
-		 * I'm not sure of the sanity of this:
-		 * packets could be arriving at a constant
-		 * 44.210 Mbit/s and we're going to copy
-		 * them into a new buffer??
-		 */
-
- if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
- /*
- * If it's a large packet don't copy it just hand it up
- */
- give_it_anyways:
-
- sc->lmc_rxq[i] = NULL;
- sc->lmc_rxring[i].buffer1 = 0x0;
-
- skb_put (skb, len);
- skb->protocol = lmc_proto_type(sc, skb);
- skb_reset_mac_header(skb);
- /* skb_reset_network_header(skb); */
- skb->dev = dev;
- lmc_proto_netif(sc, skb);
-
- /*
- * This skb will be destroyed by the upper layers, make a new one
- */
- nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if (nsb) {
- sc->lmc_rxq[i] = nsb;
- nsb->dev = dev;
- sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
- /* Transferred to 21140 below */
- }
- else {
-			/*
-			 * We've run out of memory; stop trying to allocate
-			 * memory and exit the interrupt handler.
-			 *
-			 * The chip may run out of receive descriptors and stop,
-			 * in which case we'll try to allocate the buffer
-			 * again (once a second).
-			 */
- sc->extra_stats.rx_BuffAllocErr++;
- LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
- sc->failed_recv_alloc = 1;
- goto skip_out_of_mem;
- }
- }
- else {
- nsb = dev_alloc_skb(len);
- if(!nsb) {
- goto give_it_anyways;
- }
- skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
-
- nsb->protocol = lmc_proto_type(sc, nsb);
- skb_reset_mac_header(nsb);
- /* skb_reset_network_header(nsb); */
- nsb->dev = dev;
- lmc_proto_netif(sc, nsb);
- }
-
- skip_packet:
- LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
- sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
-
- sc->lmc_next_rx++;
- i = sc->lmc_next_rx % LMC_RXDESCS;
- rx_work_limit--;
- if (rx_work_limit < 0)
- break;
- }
-
- /* detect condition for LMC1000 where DSU cable attaches and fills
- * descriptors with bogus packets
- *
- if (localLengthErrCnt > LMC_RXDESCS - 3) {
- sc->extra_stats.rx_BadPktSurgeCnt++;
- LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
- sc->extra_stats.rx_BadPktSurgeCnt);
- } */
-
- /* save max count of receive descriptors serviced */
- if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
- sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
-
-#ifdef DEBUG
- if (rxIntLoopCnt == 0)
- {
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
- != DESC_OWNED_BY_DC21X4)
- {
- rxIntLoopCnt++;
- }
- }
- LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
- }
-#endif
-
-
- lmc_led_off(sc, LMC_DS3_LED3);
-
-skip_out_of_mem:
- return 0;
-}
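-
-/*
- * The receive path above is a copybreak scheme: frames larger than 3/4
- * of LMC_MTU are handed upstream in their original ring buffer and the
- * slot is refilled with a fresh skb, while smaller frames are copied
- * into a right-sized skb so the full-size ring buffer can be reused in
- * place. Condensed pseudocode of the idea (hand_up() is illustrative,
- * not a real helper):
- *
- *	if (len > LMC_MTU - (LMC_MTU >> 2)) {
- *		hand_up(skb);				// pass the ring buffer upstream
- *		ring[i] = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);	// refill the slot
- *	} else {
- *		nsb = dev_alloc_skb(len);		// small frame: copy out
- *		skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
- *		hand_up(nsb);				// ring buffer stays put
- *	}
- */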
-
-static struct net_device_stats *lmc_get_stats(struct net_device *dev)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
- sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- return &sc->lmc_device->stats;
-}
-
-static struct pci_driver lmc_driver = {
- .name = "lmc",
- .id_table = lmc_pci_tbl,
- .probe = lmc_init_one,
- .remove = lmc_remove_one,
-};
-
-module_pci_driver(lmc_driver);
-
-unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
-{
- int i;
- int command = (0xf6 << 10) | (devaddr << 5) | regno;
- int retval = 0;
-
- LMC_MII_SYNC (sc);
-
- for (i = 15; i >= 0; i--)
- {
- int dataval = (command & (1 << i)) ? 0x20000 : 0;
-
- LMC_CSR_WRITE (sc, csr_9, dataval);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- }
-
- for (i = 19; i > 0; i--)
- {
- LMC_CSR_WRITE (sc, csr_9, 0x40000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
- LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- }
-
- return (retval >> 1) & 0xffff;
-}
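-
-/*
- * The two loops above bit-bang an MDIO read frame on CSR9: a 16-bit
- * command word carrying the read opcode, the device (PHY) address and
- * the register number is shifted out MSB-first, then 19 clock cycles
- * are run while sampling the incoming data bit; the final
- * (retval >> 1) & 0xffff drops the trailing bit and keeps the 16 data
- * bits. For example, lmc_mii_readreg(sc, 0, 16) fetches the board's
- * MII16 control word, as the media code below does.
- */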
-
-void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
-{
-	int i;
- int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
-
- LMC_MII_SYNC (sc);
-
- i = 31;
- while (i >= 0)
- {
- int datav;
-
- if (command & (1 << i))
- datav = 0x20000;
- else
- datav = 0x00000;
-
- LMC_CSR_WRITE (sc, csr_9, datav);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- i--;
- }
-
- i = 2;
- while (i > 0)
- {
- LMC_CSR_WRITE (sc, csr_9, 0x40000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- LMC_CSR_WRITE (sc, csr_9, 0x50000);
- lmc_delay ();
- /* __SLOW_DOWN_IO; */
- i--;
- }
-}
-
-static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
-{
- int i;
-
- /* Initialize the receive rings and buffers. */
- sc->lmc_txfull = 0;
- sc->lmc_next_rx = 0;
- sc->lmc_next_tx = 0;
- sc->lmc_taint_rx = 0;
- sc->lmc_taint_tx = 0;
-
- /*
- * Setup each one of the receiver buffers
- * allocate an skbuff for each one, setup the descriptor table
- * and point each buffer at the next one
- */
-
- for (i = 0; i < LMC_RXDESCS; i++)
- {
- struct sk_buff *skb;
-
- if (sc->lmc_rxq[i] == NULL)
- {
- skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
- if(skb == NULL){
- printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
- sc->failed_ring = 1;
- break;
- }
- else{
- sc->lmc_rxq[i] = skb;
- }
- }
- else
- {
- skb = sc->lmc_rxq[i];
- }
-
- skb->dev = sc->lmc_device;
-
- /* owned by 21140 */
- sc->lmc_rxring[i].status = 0x80000000;
-
-		/* used to be PKT_BUF_SZ; now uses the skb's tailroom, since we lose some space to headroom */
- sc->lmc_rxring[i].length = skb_tailroom(skb);
-
-		/* used to be skb->tail, which looks odd (why write to the end
-		 * of the packet?), but nothing has been received yet, so
-		 * tail == data
-		 */
- sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
-
- /* This is fair since the structure is static and we have the next address */
- sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
-
- }
-
- /*
- * Sets end of ring
- */
- if (i != 0) {
- sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
- sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
- }
- LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
-
- /* Initialize the transmit rings and buffers */
- for (i = 0; i < LMC_TXDESCS; i++)
- {
- if (sc->lmc_txq[i] != NULL){ /* have buffer */
- dev_kfree_skb(sc->lmc_txq[i]); /* free it */
- sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
- }
- sc->lmc_txq[i] = NULL;
- sc->lmc_txring[i].status = 0x00000000;
- sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
- }
- sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
- LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
-}
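-
-/*
- * Ring layout note: both rings run in the Tulip's second-address-chained
- * mode (cf. LMC_TDES_SECOND_ADDR_CHAINED above), i.e. descriptor i's
- * buffer2 holds the bus address of descriptor i + 1 and the last
- * descriptor's buffer2 points back at descriptor 0, so the chip walks
- * the ring without ever computing descriptor addresses itself.
- */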
-
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
- sc->lmc_gpio_io &= ~bits;
- LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
-
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
-{
- sc->lmc_gpio_io |= bits;
- LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
-}
-
-void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
- if ((~sc->lmc_miireg16) & led) /* Already on! */
- return;
-
- sc->lmc_miireg16 &= ~led;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
-
-void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
-{
-	if (sc->lmc_miireg16 & led) /* Already off; don't do anything */
- return;
-
- sc->lmc_miireg16 |= led;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
-{
- sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
- lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
-
- /*
- * make some of the GPIO pins be outputs
- */
- lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
-
- /*
- * RESET low to force state reset. This also forces
- * the transmitter clock to be internal, but we expect to reset
- * that later anyway.
- */
- sc->lmc_gpio &= ~(LMC_GEP_RESET);
- LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
-
- /*
- * hold for more than 10 microseconds
- */
- udelay(50);
-
- /*
- * stop driving Xilinx-related signals
- */
- lmc_gpio_mkinput(sc, LMC_GEP_RESET);
-
- /*
- * Call media specific init routine
- */
- sc->lmc_media->init(sc);
-
- sc->extra_stats.resetCount++;
-}
-
-static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
-{
- u32 val;
-
- /*
- * disable all interrupts
- */
- sc->lmc_intrmask = 0;
- LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
-
-	/*
-	 * Reset the chip with a software reset command.
-	 * The spec asks for 50 PCI cycles, which at 33 MHz is only
-	 * about 1.5 microseconds, but wait a bit longer anyway.
-	 */
- LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
- udelay(25);
-#ifdef __sparc__
- sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
- sc->lmc_busmode = 0x00100000;
- sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
- LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
-#endif
- sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
-
- /*
- * We want:
- * no ethernet address in frames we write
- * disable padding (txdesc, padding disable)
- * ignore runt frames (rdes0 bit 15)
- * no receiver watchdog or transmitter jabber timer
- * (csr15 bit 0,14 == 1)
- * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
- */
-
- sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
- | TULIP_CMD_FULLDUPLEX
- | TULIP_CMD_PASSBADPKT
- | TULIP_CMD_NOHEARTBEAT
- | TULIP_CMD_PORTSELECT
- | TULIP_CMD_RECEIVEALL
- | TULIP_CMD_MUSTBEONE
- );
- sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
- | TULIP_CMD_THRESHOLDCTL
- | TULIP_CMD_STOREFWD
- | TULIP_CMD_TXTHRSHLDCTL
- );
-
- LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
-
- /*
- * disable receiver watchdog and transmit jabber
- */
- val = LMC_CSR_READ(sc, csr_sia_general);
- val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
- LMC_CSR_WRITE(sc, csr_sia_general, val);
-}
-
-static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
- size_t csr_size)
-{
- sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size;
- sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size;
- sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size;
- sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size;
- sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size;
- sc->lmc_csrs.csr_status = csr_base + 5 * csr_size;
- sc->lmc_csrs.csr_command = csr_base + 6 * csr_size;
- sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size;
- sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size;
- sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size;
- sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size;
- sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size;
- sc->lmc_csrs.csr_12 = csr_base + 12 * csr_size;
- sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size;
- sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size;
- sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size;
-}
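-
-/*
- * Worked example: lmc_init_one() calls lmc_initcsrs(sc, dev->base_addr, 8),
- * so with that 8-byte stride csr_status lands at base + 5 * 8 = base + 0x28
- * and csr_command at base + 0x30, matching the DC21x4x's 8-byte CSR
- * spacing in I/O space.
- */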
-
-static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
-{
- lmc_softc_t *sc = dev_to_sc(dev);
- u32 csr6;
- unsigned long flags;
-
- spin_lock_irqsave(&sc->lmc_lock, flags);
-
-	printk("%s: Transmitter busy\n", dev->name);
-
- sc->extra_stats.tx_tbusy_calls++;
- if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT))
- goto bug_out;
-
-	/*
-	 * Chip seems to have locked up.
-	 * Reset it: this wipes out our whole descriptor
-	 * table and starts from scratch.
-	 */
-
- LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
- LMC_CSR_READ (sc, csr_status),
- sc->extra_stats.tx_ProcTimeout);
-
- lmc_running_reset (dev);
-
- LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
- LMC_EVENT_LOG(LMC_EVENT_RESET2,
- lmc_mii_readreg (sc, 0, 16),
- lmc_mii_readreg (sc, 0, 17));
-
- /* restart the tx processes */
- csr6 = LMC_CSR_READ (sc, csr_command);
- LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
- LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
-
- /* immediate transmit */
- LMC_CSR_WRITE (sc, csr_txpoll, 0);
-
- sc->lmc_device->stats.tx_errors++;
- sc->extra_stats.tx_ProcTimeout++; /* -baz */
-
- netif_trans_update(dev); /* prevent tx timeout */
-
-bug_out:
-
- spin_unlock_irqrestore(&sc->lmc_lock, flags);
-}
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
deleted file mode 100644
index ec1ac7b1f3fd..000000000000
--- a/drivers/net/wan/lmc/lmc_media.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/uaccess.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_ioctl.h"
-#include "lmc_debug.h"
-
-#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-/*
- * protocol independent method.
- */
-static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-static void lmc_ds3_init (lmc_softc_t * const);
-static void lmc_ds3_default (lmc_softc_t * const);
-static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ds3_set_100ft (lmc_softc_t * const, int);
-static int lmc_ds3_get_link_status (lmc_softc_t * const);
-static void lmc_ds3_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ds3_set_scram (lmc_softc_t * const, int);
-static void lmc_ds3_watchdog (lmc_softc_t * const);
-
-static void lmc_hssi_init (lmc_softc_t * const);
-static void lmc_hssi_default (lmc_softc_t * const);
-static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_hssi_set_clock (lmc_softc_t * const, int);
-static int lmc_hssi_get_link_status (lmc_softc_t * const);
-static void lmc_hssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_hssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_hssi_watchdog (lmc_softc_t * const);
-
-static void lmc_ssi_init (lmc_softc_t * const);
-static void lmc_ssi_default (lmc_softc_t * const);
-static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static void lmc_ssi_set_clock (lmc_softc_t * const, int);
-static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_ssi_get_link_status (lmc_softc_t * const);
-static void lmc_ssi_set_link_status (lmc_softc_t * const, int);
-static void lmc_ssi_set_crc_length (lmc_softc_t * const, int);
-static void lmc_ssi_watchdog (lmc_softc_t * const);
-
-static void lmc_t1_init (lmc_softc_t * const);
-static void lmc_t1_default (lmc_softc_t * const);
-static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *);
-static int lmc_t1_get_link_status (lmc_softc_t * const);
-static void lmc_t1_set_circuit_type (lmc_softc_t * const, int);
-static void lmc_t1_set_crc_length (lmc_softc_t * const, int);
-static void lmc_t1_set_clock (lmc_softc_t * const, int);
-static void lmc_t1_watchdog (lmc_softc_t * const);
-
-static void lmc_dummy_set_1 (lmc_softc_t * const, int);
-static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
-
-static inline void write_av9110_bit (lmc_softc_t *, int);
-static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
-
-lmc_media_t lmc_ds3_media = {
- .init = lmc_ds3_init, /* special media init stuff */
- .defaults = lmc_ds3_default, /* reset to default state */
- .set_status = lmc_ds3_set_status, /* reset status to state provided */
- .set_clock_source = lmc_dummy_set_1, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
- .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
- .get_link_status = lmc_ds3_get_link_status, /* get link status */
- .set_link_status = lmc_dummy_set_1, /* set link status */
- .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_ds3_watchdog
-};
-
-lmc_media_t lmc_hssi_media = {
- .init = lmc_hssi_init, /* special media init stuff */
- .defaults = lmc_hssi_default, /* reset to default state */
- .set_status = lmc_hssi_set_status, /* reset status to state provided */
- .set_clock_source = lmc_hssi_set_clock, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_hssi_get_link_status, /* get link status */
- .set_link_status = lmc_hssi_set_link_status, /* set link status */
- .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_hssi_watchdog
-};
-
-lmc_media_t lmc_ssi_media = {
- .init = lmc_ssi_init, /* special media init stuff */
- .defaults = lmc_ssi_default, /* reset to default state */
- .set_status = lmc_ssi_set_status, /* reset status to state provided */
- .set_clock_source = lmc_ssi_set_clock, /* set clock source */
- .set_speed = lmc_ssi_set_speed, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_ssi_get_link_status, /* get link status */
- .set_link_status = lmc_ssi_set_link_status, /* set link status */
- .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
- .watchdog = lmc_ssi_watchdog
-};
-
-lmc_media_t lmc_t1_media = {
- .init = lmc_t1_init, /* special media init stuff */
- .defaults = lmc_t1_default, /* reset to default state */
- .set_status = lmc_t1_set_status, /* reset status to state provided */
- .set_clock_source = lmc_t1_set_clock, /* set clock source */
- .set_speed = lmc_dummy_set2_1, /* set line speed */
- .set_cable_length = lmc_dummy_set_1, /* set cable length */
- .set_scrambler = lmc_dummy_set_1, /* set scrambler */
- .get_link_status = lmc_t1_get_link_status, /* get link status */
- .set_link_status = lmc_dummy_set_1, /* set link status */
- .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
- .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
- .watchdog = lmc_t1_watchdog
-};
-
-static void
-lmc_dummy_set_1 (lmc_softc_t * const sc, int a)
-{
-}
-
-static void
-lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a)
-{
-}
-
-/*
- * HSSI methods
- */
-
-static void
-lmc_hssi_init (lmc_softc_t * const sc)
-{
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200;
-
- lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK);
-}
-
-static void
-lmc_hssi_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in clock source
- */
- if (ctl->clock_source && !sc->ictl.clock_source)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
- }
- else if (!ctl->clock_source && sc->ictl.clock_source)
- {
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- }
-
- lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = sc->ictl.clock_source;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_hssi_get_link_status (lmc_softc_t * const sc)
-{
- /*
- * We're using the same code as SSI since
- * they're practically the same
- */
- return lmc_ssi_get_link_status(sc);
-}
-
-static void
-lmc_hssi_set_link_status (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_LINK_UP)
- sc->lmc_miireg16 |= LMC_MII16_HSSI_TA;
- else
- sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA;
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_hssi_watchdog (lmc_softc_t * const sc)
-{
- /* HSSI is blank */
-}
-
-/*
- * DS3 methods
- */
-
-/*
- * Set cable length
- */
-static void
-lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT)
- {
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO;
- sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT;
- }
- else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT)
- {
- sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO;
- sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT;
- }
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
- sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
- sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in cable length setting
- */
- if (ctl->cable_length && !sc->ictl.cable_length)
- lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
- else if (!ctl->cable_length && sc->ictl.cable_length)
- lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
-
- /*
- * Check for change in scrambler setting (requires reset)
- */
- if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
- lmc_ds3_set_scram (sc, LMC_CTL_ON);
- else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
- lmc_ds3_set_scram (sc, LMC_CTL_OFF);
-
- lmc_set_protocol (sc, ctl);
-}
-
-static void
-lmc_ds3_init (lmc_softc_t * const sc)
-{
- int i;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;
-
- /* writes zeros everywhere */
- for (i = 0; i < 21; i++)
- {
- lmc_mii_writereg (sc, 0, 17, i);
- lmc_mii_writereg (sc, 0, 18, 0);
- }
-
- /* set some essential bits */
- lmc_mii_writereg (sc, 0, 17, 1);
- lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */
-
- lmc_mii_writereg (sc, 0, 17, 5);
- lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */
-
- lmc_mii_writereg (sc, 0, 17, 14);
- lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */
-
- /* clear counters and latched bits */
- for (i = 0; i < 21; i++)
- {
- lmc_mii_writereg (sc, 0, 17, i);
- lmc_mii_readreg (sc, 0, 18);
- }
-}
-
-/*
- * 1 == DS3 payload scrambled, 0 == not scrambled
- */
-static void
-lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_ON)
- {
- sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
- sc->ictl.scrambler_onoff = LMC_CTL_ON;
- }
- else
- {
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
- sc->ictl.scrambler_onoff = LMC_CTL_OFF;
- }
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ds3_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status, link_status_11;
- int ret = 1;
-
- lmc_mii_writereg (sc, 0, 17, 7);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
- /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
- * led0 yellow = far-end adapter is in Red alarm condition
- * led1 blue = received an Alarm Indication signal
- * (upstream failure)
- * led2 Green = power to adapter, Gate Array loaded & driver
- * attached
- * led3 red = Loss of Signal (LOS) or out of frame (OOF)
- * conditions detected on T3 receive signal
- */
-
- lmc_led_on(sc, LMC_DS3_LED2);
-
- if ((link_status & LMC_FRAMER_REG0_DLOS) ||
- (link_status & LMC_FRAMER_REG0_OOFS)){
- ret = 0;
- if(sc->last_led_err[3] != 1){
- u16 r1;
- lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
- r1 = lmc_mii_readreg (sc, 0, 18);
- r1 &= 0xfe;
- lmc_mii_writereg(sc, 0, 18, r1);
- printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */
- sc->last_led_err[3] = 1;
- }
- else {
-		lmc_led_off(sc, LMC_DS3_LED3); /* turn off red LED */
- if(sc->last_led_err[3] == 1){
- u16 r1;
- lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
- r1 = lmc_mii_readreg (sc, 0, 18);
- r1 |= 0x01;
- lmc_mii_writereg(sc, 0, 18, r1);
- }
- sc->last_led_err[3] = 0;
- }
-
- lmc_mii_writereg(sc, 0, 17, 0x10);
- link_status_11 = lmc_mii_readreg(sc, 0, 18);
- if((link_status & LMC_FRAMER_REG0_AIS) ||
- (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
- ret = 0;
- if(sc->last_led_err[0] != 1){
- printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
- printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 0;
- }
-
- lmc_mii_writereg (sc, 0, 17, 9);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
- if(link_status & LMC_FRAMER_REG9_RBLUE){
- ret = 0;
- if(sc->last_led_err[1] != 1){
- printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 1;
- }
- else {
- lmc_led_off(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 0;
- }
-
- return ret;
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-static void
-lmc_ds3_watchdog (lmc_softc_t * const sc)
-{
-
-}
-
-
-/*
- * SSI methods
- */
-
-static void lmc_ssi_init(lmc_softc_t * const sc)
-{
- u16 mii17;
- int cable;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
-
- mii17 = lmc_mii_readreg(sc, 0, 17);
-
- cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
- sc->ictl.cable_type = cable;
-
- lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
-}
-
-static void
-lmc_ssi_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
-
- /*
- * make TXCLOCK always be an output
- */
- lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
-
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_media->set_speed (sc, NULL);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
- sc->lmc_media->set_speed (sc, &sc->ictl);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-
- /*
- * check for change in clock source
- */
- if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT
- && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
- }
- else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT
- && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT)
- {
- sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
- sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
- }
-
- if (ctl->clock_rate != sc->ictl.clock_rate)
- sc->lmc_media->set_speed (sc, ctl);
-
- lmc_set_protocol (sc, ctl);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_ssi_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old;
- old = ie;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(ie != old)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(ie != old)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-static void
-lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- lmc_ctl_t *ictl = &sc->ictl;
- lmc_av9110_t *av;
-
-	/* The original settings for a clock rate of 100 kHz (8,25,0,0,2)
-	 * were incorrect; they should have been 80,125,1,3,3.
-	 * There are 17 parameter combinations that produce this frequency.
-	 * For 1.5 MHz use 120,100,1,1,2 (226 parameter combinations).
-	 */
- if (ctl == NULL)
- {
- av = &ictl->cardspec.ssi;
- ictl->clock_rate = 1500000;
- av->f = ictl->clock_rate;
- av->n = 120;
- av->m = 100;
- av->v = 1;
- av->x = 1;
- av->r = 2;
-
- write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
- return;
- }
-
- av = &ctl->cardspec.ssi;
-
- if (av->f == 0)
- return;
-
-	ictl->clock_rate = av->f; /* this is the rate we are actually running at */
- ictl->cardspec.ssi = *av;
-
- write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
-}
-
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_ssi_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status;
- u32 ticks;
- int ret = 1;
- int hw_hdsk = 1;
-
- /*
- * missing CTS? Hmm. If we require CTS on, we may never get the
- * link to come up, so omit it in this test.
- *
- * Also, it seems that with a loopback cable, DCD isn't asserted,
- * so just check for things like this:
- * DSR _must_ be asserted.
- * One of DCD or CTS must be asserted.
- */
-
- /* LMC 1000 (SSI) LED definitions
- * led0 Green = power to adapter, Gate Array loaded &
- * driver attached
- * led1 Green = DSR and DTR and RTS and CTS are set
- * led2 Green = Cable detected
- * led3 red = No timing is available from the
- * cable or the on-board frequency
- * generator.
- */
-
- link_status = lmc_mii_readreg (sc, 0, 16);
-
- /* Is the transmit clock still available */
- ticks = LMC_CSR_READ (sc, csr_gp_timer);
- ticks = 0x0000ffff - (ticks & 0x0000ffff);
-
- lmc_led_on (sc, LMC_MII16_LED0);
-
- /* ====== transmit clock determination ===== */
- if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) {
- lmc_led_off(sc, LMC_MII16_LED3);
- }
- else if (ticks == 0 ) { /* no clock found ? */
- ret = 0;
- if (sc->last_led_err[3] != 1) {
- sc->extra_stats.tx_lossOfClockCnt++;
- printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
- }
- sc->last_led_err[3] = 1;
- lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
- }
- else {
- if(sc->last_led_err[3] == 1)
- printk(KERN_WARNING "%s: Clock Returned\n", sc->name);
- sc->last_led_err[3] = 0;
- lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */
- }
-
- if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */
- ret = 0;
- hw_hdsk = 0;
- }
-
-#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE
- if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){
- ret = 0;
- hw_hdsk = 0;
- }
-#endif
-
- if(hw_hdsk == 0){
- if(sc->last_led_err[1] != 1)
- printk(KERN_WARNING "%s: DSR not asserted\n", sc->name);
- sc->last_led_err[1] = 1;
- lmc_led_off(sc, LMC_MII16_LED1);
- }
- else {
- if(sc->last_led_err[1] != 0)
- printk(KERN_WARNING "%s: DSR now asserted\n", sc->name);
- sc->last_led_err[1] = 0;
- lmc_led_on(sc, LMC_MII16_LED1);
- }
-
- if(ret == 1) {
- lmc_led_on(sc, LMC_MII16_LED2); /* Over all good status? */
- }
-
- return ret;
-}
-
-static void
-lmc_ssi_set_link_status (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_LINK_UP)
- {
- sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
- printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
- printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS);
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-
-}
-
-/*
- * 0 == 16bit, 1 == 32bit
- */
-static void
-lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_SSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * These are bits to program the ssi frequency generator
- */
-static inline void
-write_av9110_bit (lmc_softc_t * sc, int c)
-{
- /*
- * set the data bit as we need it.
- */
- sc->lmc_gpio &= ~(LMC_GEP_CLK);
- if (c & 0x01)
- sc->lmc_gpio |= LMC_GEP_DATA;
- else
- sc->lmc_gpio &= ~(LMC_GEP_DATA);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * set the clock to high
- */
- sc->lmc_gpio |= LMC_GEP_CLK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * set the clock to low again.
- */
- sc->lmc_gpio &= ~(LMC_GEP_CLK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-}
-
-static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
-{
- int i;
-
-#if 0
- printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n",
- LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r);
-#endif
-
- sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR;
- sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK
- * as outputs.
- */
- lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK
- | LMC_GEP_SSI_GENERATOR));
-
- sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
-
- /*
- * a shifting we will go...
- */
- for (i = 0; i < 7; i++)
- write_av9110_bit (sc, n >> i);
- for (i = 0; i < 7; i++)
- write_av9110_bit (sc, m >> i);
- for (i = 0; i < 1; i++)
- write_av9110_bit (sc, v >> i);
- for (i = 0; i < 2; i++)
- write_av9110_bit (sc, x >> i);
- for (i = 0; i < 2; i++)
- write_av9110_bit (sc, r >> i);
- for (i = 0; i < 5; i++)
- write_av9110_bit (sc, 0x17 >> i);
-
- /*
- * stop driving serial-related signals
- */
- lmc_gpio_mkinput (sc,
- (LMC_GEP_DATA | LMC_GEP_CLK
- | LMC_GEP_SSI_GENERATOR));
-}
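-
-/*
- * Usage example, from lmc_ssi_set_speed() above: the default 1.5 MHz
- * line rate is programmed with write_av9110(sc, 120, 100, 1, 1, 2).
- * The bits go out LSB-first as 7 bits of N, 7 of M, 1 of V, 2 of X,
- * 2 of R, and finally the fixed 5-bit 0x17 control pattern: 24 bits
- * total clocked into the AV9110 through the GPIO pins.
- */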
-
-static void lmc_ssi_watchdog(lmc_softc_t * const sc)
-{
- u16 mii17 = lmc_mii_readreg(sc, 0, 17);
- if (((mii17 >> 3) & 7) == 7)
- lmc_led_off(sc, LMC_MII16_LED2);
- else
- lmc_led_on(sc, LMC_MII16_LED2);
-}
-
-/*
- * T1 methods
- */
-
-/*
- * The framer regs are multiplexed through MII regs 17 & 18:
- * write the register address to MII reg 17 and the data to
- * MII reg 18.
- */
-static void
-lmc_t1_write (lmc_softc_t * const sc, int a, int d)
-{
- lmc_mii_writereg (sc, 0, 17, a);
- lmc_mii_writereg (sc, 0, 18, d);
-}
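-
-/*
- * Example: lmc_t1_write(sc, 0x01, 0x1B) in lmc_t1_init() below expands
- * to lmc_mii_writereg(sc, 0, 17, 0x01) followed by
- * lmc_mii_writereg(sc, 0, 18, 0x1B), i.e. "set framer register CR0
- * (primary control) to 0x1B" via the two multiplexing MII registers.
- */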
-
-/* Save a warning
-static int
-lmc_t1_read (lmc_softc_t * const sc, int a)
-{
- lmc_mii_writereg (sc, 0, 17, a);
- return lmc_mii_readreg (sc, 0, 18);
-}
-*/
-
-
-static void
-lmc_t1_init (lmc_softc_t * const sc)
-{
- u16 mii16;
- int i;
-
- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
- mii16 = lmc_mii_readreg (sc, 0, 16);
-
- /* reset 8370 */
- mii16 &= ~LMC_MII16_T1_RST;
- lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST);
- lmc_mii_writereg (sc, 0, 16, mii16);
-
-	/* set T1 or E1 line. The function uses sc->lmc_miireg16, so keep it updated */
- sc->lmc_miireg16 = mii16;
- lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1);
- mii16 = sc->lmc_miireg16;
-
- lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */
- lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */
- lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */
- lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */
- lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */
- lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */
- lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */
- lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */
- lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */
- lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */
- lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */
- lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */
- lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */
- lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */
- lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */
- lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */
- lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */
- lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */
- lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */
- lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */
- lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */
- lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */
- lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */
- lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */
- lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */
- lmc_t1_write (sc, 0xD0, 0x47); /* SBI_CR - sys bus iface config */
- lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */
- lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */
- for (i = 0; i < 32; i++)
- {
- lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus per-channel ctl */
- lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */
- lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */
- }
- for (i = 1; i < 25; i++)
- {
- lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */
- }
-
- mii16 |= LMC_MII16_T1_XOE;
- lmc_mii_writereg (sc, 0, 16, mii16);
- sc->lmc_miireg16 = mii16;
-}
-
-static void
-lmc_t1_default (lmc_softc_t * const sc)
-{
- sc->lmc_miireg16 = LMC_MII16_LED_ALL;
- sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
- sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
- sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
-	/* Right now we can only clock from our internal source */
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
-}
-
-/*
- * Given a user provided state, set ourselves up to match it. This will
- * always reset the card if needed.
- */
-static void
-lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (ctl == NULL)
- {
- sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type);
- lmc_set_protocol (sc, NULL);
-
- return;
- }
-	/*
-	 * check for change in circuit type
-	 */
-	if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1
-	    && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1)
-		sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
-	else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1
-		 && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1)
-		sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1);
-
-	lmc_set_protocol (sc, ctl);
-}
-/*
- * return hardware link status.
- * 0 == link is down, 1 == link is up.
- */
-static int
-lmc_t1_get_link_status (lmc_softc_t * const sc)
-{
- u16 link_status;
- int ret = 1;
-
- /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
- * led0 yellow = far-end adapter is in Red alarm condition
- * led1 blue = received an Alarm Indication signal
- * (upstream failure)
- * led2 Green = power to adapter, Gate Array loaded & driver
- * attached
- * led3 red = Loss of Signal (LOS) or out of frame (OOF)
- * conditions detected on T3 receive signal
- */
- lmc_led_on(sc, LMC_DS3_LED2);
-
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
- link_status = lmc_mii_readreg (sc, 0, 18);
-
-
- if (link_status & T1F_RAIS) { /* turn on blue LED */
- ret = 0;
- if(sc->last_led_err[1] != 1){
- printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 1;
- }
- else {
- if(sc->last_led_err[1] != 0){
- printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name);
- }
- lmc_led_off (sc, LMC_DS3_LED1);
- sc->last_led_err[1] = 0;
- }
-
-	/*
-	 * Yellow Alarm is nasty evil stuff: it looks at data patterns
-	 * inside the channel and confuses them with HDLC framing, so
-	 * ignore all yellow alarms.
-	 *
-	 * Do listen to the MultiFrame Yellow alarm, which, while
-	 * implemented in different ways, isn't in the channel and is
-	 * hence somewhat more reliable.
-	 */
-
- if (link_status & T1F_RMYEL) {
- ret = 0;
- if(sc->last_led_err[0] != 1){
- printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 1;
- }
- else {
- if(sc->last_led_err[0] != 0){
- printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name);
- }
- lmc_led_off(sc, LMC_DS3_LED0);
- sc->last_led_err[0] = 0;
- }
-
-	/*
-	 * Loss of signal and loss of frame.
-	 * Use the green bit to identify which one lit the LED.
-	 */
- if(link_status & T1F_RLOF){
- ret = 0;
- if(sc->last_led_err[3] != 1){
- printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3);
- sc->last_led_err[3] = 1;
-
- }
- else {
- if(sc->last_led_err[3] != 0){
- printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name);
- }
- if( ! (link_status & T1F_RLOS))
- lmc_led_off(sc, LMC_DS3_LED3);
- sc->last_led_err[3] = 0;
- }
-
- if(link_status & T1F_RLOS){
- ret = 0;
- if(sc->last_led_err[2] != 1){
- printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name);
- }
- lmc_led_on(sc, LMC_DS3_LED3);
- sc->last_led_err[2] = 1;
-
- }
- else {
- if(sc->last_led_err[2] != 0){
- printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name);
- }
- if( ! (link_status & T1F_RLOF))
- lmc_led_off(sc, LMC_DS3_LED3);
- sc->last_led_err[2] = 0;
- }
-
- sc->lmc_xinfo.t1_alarm1_status = link_status;
-
- lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
- sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
-
- return ret;
-}
-
-/*
- * 1 == T1 circuit type, 0 == E1 circuit type
- */
-static void
-lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie)
-{
- if (ie == LMC_CTL_CIRCUIT_TYPE_T1) {
- sc->lmc_miireg16 |= LMC_MII16_T1_Z;
- sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1;
- printk(KERN_INFO "%s: In T1 Mode\n", sc->name);
- }
- else {
- sc->lmc_miireg16 &= ~LMC_MII16_T1_Z;
- sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1;
- printk(KERN_INFO "%s: In E1 Mode\n", sc->name);
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-
-}
-
-/*
- * 0 == 16 bit, 1 == 32 bit
- */
-static void
-lmc_t1_set_crc_length (lmc_softc_t * const sc, int state)
-{
- if (state == LMC_CTL_CRC_LENGTH_32)
- {
- /* 32 bit */
- sc->lmc_miireg16 |= LMC_MII16_T1_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
-
- }
- else
- {
- /* 16 bit */
- sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC;
- sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
- sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
-
- }
-
- lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
-}
-
-/*
- * 1 == internal, 0 == external
- */
-static void
-lmc_t1_set_clock (lmc_softc_t * const sc, int ie)
-{
- int old = ie;
- if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
- {
- sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
- }
- else
- {
- sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
- LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
- sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
- if(old != ie)
- printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
- }
-}
-
-static void
-lmc_t1_watchdog (lmc_softc_t * const sc)
-{
-}
-
-static void
-lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl)
-{
- if (!ctl)
- sc->ictl.keepalive_onoff = LMC_CTL_ON;
-}
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
deleted file mode 100644
index e5487616a816..000000000000
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- *
- * With Help By:
- * David Boggs
- * Ron Crane
- * Allan Cox
- *
- * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/workqueue.h>
-#include <linux/proc_fs.h>
-#include <linux/bitops.h>
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/smp.h>
-
-#include "lmc.h"
-#include "lmc_var.h"
-#include "lmc_debug.h"
-#include "lmc_ioctl.h"
-#include "lmc_proto.h"
-
-// attach
-void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
-{
- if (sc->if_type == LMC_NET) {
- struct net_device *dev = sc->lmc_device;
- /*
- * Set a few basics because this mode doesn't use HDLC
- */
- dev->flags |= IFF_POINTOPOINT;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- }
-}
-
-int lmc_proto_open(lmc_softc_t *sc)
-{
- int ret = 0;
-
- if (sc->if_type == LMC_PPP) {
- ret = hdlc_open(sc->lmc_device);
- if (ret < 0)
- printk(KERN_WARNING "%s: HDLC open failed: %d\n",
- sc->name, ret);
- }
- return ret;
-}
-
-void lmc_proto_close(lmc_softc_t *sc)
-{
- if (sc->if_type == LMC_PPP)
- hdlc_close(sc->lmc_device);
-}
-
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
- switch(sc->if_type){
- case LMC_PPP:
- return hdlc_type_trans(skb, sc->lmc_device);
- case LMC_NET:
- return htons(ETH_P_802_2);
- case LMC_RAW: /* Packet type for the skbuff is kind of useless here */
- return htons(ETH_P_802_2);
- default:
- printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
- return htons(ETH_P_802_2);
- }
-}
-
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
-{
- switch(sc->if_type){
- case LMC_PPP:
- case LMC_NET:
- default:
- netif_rx(skb);
- break;
- case LMC_RAW:
- break;
- }
-}
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
deleted file mode 100644
index e56e7072de44..000000000000
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LMC_PROTO_H_
-#define _LMC_PROTO_H_
-
-#include <linux/hdlc.h>
-
-void lmc_proto_attach(lmc_softc_t *sc);
-int lmc_proto_open(lmc_softc_t *sc);
-void lmc_proto_close(lmc_softc_t *sc);
-__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
-void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
-
-static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
-{
- return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
-}
-
-#endif
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
deleted file mode 100644
index 99f0aa787a35..000000000000
--- a/drivers/net/wan/lmc/lmc_var.h
+++ /dev/null
@@ -1,468 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LMC_VAR_H_
-#define _LMC_VAR_H_
-
- /*
- * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
- * All rights reserved. www.lanmedia.com
- *
- * This code is written by:
- * Andrew Stanley-Jones (asj@cban.com)
- * Rob Braun (bbraun@vix.com),
- * Michael Graff (explorer@vix.com) and
- * Matt Thomas (matt@3am-software.com).
- */
-
-#include <linux/timer.h>
-
-/*
- * basic definitions used in lmc include files
- */
-
-typedef struct lmc___softc lmc_softc_t;
-typedef struct lmc___media lmc_media_t;
-typedef struct lmc___ctl lmc_ctl_t;
-
-#define lmc_csrptr_t unsigned long
-
-#define LMC_REG_RANGE 0x80
-
-#define LMC_PRINTF_FMT "%s"
-#define LMC_PRINTF_ARGS (sc->lmc_device->name)
-
-#define TX_TIMEOUT (2*HZ)
-
-#define LMC_TXDESCS 32
-#define LMC_RXDESCS 32
-
-#define LMC_LINK_UP 1
-#define LMC_LINK_DOWN 0
-
-/* These macros for generic read and write to and from the dec chip */
-#define LMC_CSR_READ(sc, csr) \
- inl((sc)->lmc_csrs.csr)
-#define LMC_CSR_WRITE(sc, reg, val) \
- outl((val), (sc)->lmc_csrs.reg)
-
-//#ifdef _LINUX_DELAY_H
-// #define SLOW_DOWN_IO udelay(2);
-// #undef __SLOW_DOWN_IO
-// #define __SLOW_DOWN_IO udelay(2);
-//#endif
-
-#define DELAY(n) SLOW_DOWN_IO
-
-#define lmc_delay() inl(sc->lmc_csrs.csr_9)
-
-/* This macro syncs up with the MII so that reads and writes can take place */
-#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \
- LMC_CSR_WRITE((sc), csr_9, 0x20000); \
- lmc_delay(); \
- LMC_CSR_WRITE((sc), csr_9, 0x30000); \
- lmc_delay(); \
- n--; }} while(0)
-
-struct lmc_regfile_t {
- lmc_csrptr_t csr_busmode; /* CSR0 */
- lmc_csrptr_t csr_txpoll; /* CSR1 */
- lmc_csrptr_t csr_rxpoll; /* CSR2 */
- lmc_csrptr_t csr_rxlist; /* CSR3 */
- lmc_csrptr_t csr_txlist; /* CSR4 */
- lmc_csrptr_t csr_status; /* CSR5 */
- lmc_csrptr_t csr_command; /* CSR6 */
- lmc_csrptr_t csr_intr; /* CSR7 */
- lmc_csrptr_t csr_missed_frames; /* CSR8 */
- lmc_csrptr_t csr_9; /* CSR9 */
- lmc_csrptr_t csr_10; /* CSR10 */
- lmc_csrptr_t csr_11; /* CSR11 */
- lmc_csrptr_t csr_12; /* CSR12 */
- lmc_csrptr_t csr_13; /* CSR13 */
- lmc_csrptr_t csr_14; /* CSR14 */
- lmc_csrptr_t csr_15; /* CSR15 */
-};
-
-#define csr_enetrom csr_9 /* 21040 */
-#define csr_reserved csr_10 /* 21040 */
-#define csr_full_duplex csr_11 /* 21040 */
-#define csr_bootrom csr_10 /* 21041/21140A/?? */
-#define csr_gp csr_12 /* 21140* */
-#define csr_watchdog csr_15 /* 21140* */
-#define csr_gp_timer csr_11 /* 21041/21140* */
-#define csr_srom_mii csr_9 /* 21041/21140* */
-#define csr_sia_status csr_12 /* 2104x */
-#define csr_sia_connectivity csr_13 /* 2104x */
-#define csr_sia_tx_rx csr_14 /* 2104x */
-#define csr_sia_general csr_15 /* 2104x */
-
-/* tulip length/control transmit descriptor definitions
- * used to define bits in the second tulip_desc_t field (length)
- * for the transmit descriptor -baz */
-
-#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF))
-#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800))
-#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000))
-#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000))
-#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000))
-#define LMC_TDES_END_OF_RING ((u32)(0x02000000))
-#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000))
-#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000))
-#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000))
-#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000))
-#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000))
-#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
-
-#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
-#define TDES_COLLISION_COUNT_BIT_NUMBER 3
-
-/* Constants for the RCV descriptor RDES */
-
-#define LMC_RDES_OVERFLOW ((u32)(0x00000001))
-#define LMC_RDES_CRC_ERROR ((u32)(0x00000002))
-#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004))
-#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008))
-#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
-#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020))
-#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040))
-#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080))
-#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100))
-#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200))
-#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400))
-#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800))
-#define LMC_RDES_DATA_TYPE ((u32)(0x00003000))
-#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000))
-#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000))
-#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000))
-#define LMC_RDES_OWN_BIT ((u32)(0x80000000))
-
-#define RDES_FRAME_LENGTH_BIT_NUMBER 16
-
-#define LMC_RDES_ERROR_MASK ( (u32)( \
- LMC_RDES_OVERFLOW \
- | LMC_RDES_DRIBBLING_BIT \
- | LMC_RDES_REPORT_ON_MII_ERR \
- | LMC_RDES_COLLISION_SEEN ) )
-
-
-/*
- * Ioctl info
- */
-
-typedef struct {
- u32 n;
- u32 m;
- u32 v;
- u32 x;
- u32 r;
- u32 f;
- u32 exact;
-} lmc_av9110_t;
-
-/*
- * Common structure passed to the ioctl code.
- */
-struct lmc___ctl {
- u32 cardtype;
- u32 clock_source; /* HSSI, T1 */
- u32 clock_rate; /* T1 */
- u32 crc_length;
- u32 cable_length; /* DS3 */
- u32 scrambler_onoff; /* DS3 */
- u32 cable_type; /* T1 */
- u32 keepalive_onoff; /* protocol */
- u32 ticks; /* ticks/sec */
- union {
- lmc_av9110_t ssi;
- } cardspec;
- u32 circuit_type; /* T1 or E1 */
-};
-
-
-/*
- * Careful, look at the data sheet, there's more to this
- * structure than meets the eye. It should probably be:
- *
- * struct tulip_desc_t {
- * u8 own:1;
- * u32 status:31;
- * u32 control:10;
- * u32 buffer1;
- * u32 buffer2;
- * };
- * You could also expand status control to provide more bit information
- */
-
-struct tulip_desc_t {
- s32 status;
- s32 length;
- u32 buffer1;
- u32 buffer2;
-};
-
-/*
- * media independent methods to check on media status, link, light LEDs,
- * etc.
- */
-struct lmc___media {
- void (* init)(lmc_softc_t * const);
- void (* defaults)(lmc_softc_t * const);
- void (* set_status)(lmc_softc_t * const, lmc_ctl_t *);
- void (* set_clock_source)(lmc_softc_t * const, int);
- void (* set_speed)(lmc_softc_t * const, lmc_ctl_t *);
- void (* set_cable_length)(lmc_softc_t * const, int);
- void (* set_scrambler)(lmc_softc_t * const, int);
- int (* get_link_status)(lmc_softc_t * const);
- void (* set_link_status)(lmc_softc_t * const, int);
- void (* set_crc_length)(lmc_softc_t * const, int);
- void (* set_circuit_type)(lmc_softc_t * const, int);
- void (* watchdog)(lmc_softc_t * const);
-};
-
-
-#define STATCHECK 0xBEEFCAFE
-
-struct lmc_extra_statistics
-{
- u32 version_size;
- u32 lmc_cardtype;
-
- u32 tx_ProcTimeout;
- u32 tx_IntTimeout;
- u32 tx_NoCompleteCnt;
- u32 tx_MaxXmtsB4Int;
- u32 tx_TimeoutCnt;
- u32 tx_OutOfSyncPtr;
- u32 tx_tbusy0;
- u32 tx_tbusy1;
- u32 tx_tbusy_calls;
- u32 resetCount;
- u32 lmc_txfull;
- u32 tbusy;
- u32 dirtyTx;
- u32 lmc_next_tx;
- u32 otherTypeCnt;
- u32 lastType;
- u32 lastTypeOK;
- u32 txLoopCnt;
- u32 usedXmtDescripCnt;
- u32 txIndexCnt;
- u32 rxIntLoopCnt;
-
- u32 rx_SmallPktCnt;
- u32 rx_BadPktSurgeCnt;
- u32 rx_BuffAllocErr;
- u32 tx_lossOfClockCnt;
-
- /* T1 error counters */
- u32 framingBitErrorCount;
- u32 lineCodeViolationCount;
-
- u32 lossOfFrameCount;
- u32 changeOfFrameAlignmentCount;
- u32 severelyErroredFrameCount;
-
- u32 check;
-};
-
-typedef struct lmc_xinfo {
- u32 Magic0; /* BEEFCAFE */
-
- u32 PciCardType;
- u32 PciSlotNumber; /* PCI slot number */
-
- u16 DriverMajorVersion;
- u16 DriverMinorVersion;
- u16 DriverSubVersion;
-
- u16 XilinxRevisionNumber;
- u16 MaxFrameSize;
-
- u16 t1_alarm1_status;
- u16 t1_alarm2_status;
-
- int link_status;
- u32 mii_reg16;
-
- u32 Magic1; /* DEADBEEF */
-} LMC_XINFO;
-
-
-/*
- * forward decl
- */
-struct lmc___softc {
- char *name;
- u8 board_idx;
- struct lmc_extra_statistics extra_stats;
- struct net_device *lmc_device;
-
- int hang, rxdesc, bad_packet, some_counter;
- u32 txgo;
- struct lmc_regfile_t lmc_csrs;
- volatile u32 lmc_txtick;
- volatile u32 lmc_rxtick;
- u32 lmc_flags;
- u32 lmc_intrmask; /* our copy of csr_intr */
- u32 lmc_cmdmode; /* our copy of csr_cmdmode */
- u32 lmc_busmode; /* our copy of csr_busmode */
- u32 lmc_gpio_io; /* state of in/out settings */
- u32 lmc_gpio; /* state of outputs */
- struct sk_buff* lmc_txq[LMC_TXDESCS];
- struct sk_buff* lmc_rxq[LMC_RXDESCS];
- volatile
- struct tulip_desc_t lmc_rxring[LMC_RXDESCS];
- volatile
- struct tulip_desc_t lmc_txring[LMC_TXDESCS];
- unsigned int lmc_next_rx, lmc_next_tx;
- volatile
- unsigned int lmc_taint_tx, lmc_taint_rx;
- int lmc_tx_start, lmc_txfull;
- int lmc_txbusy;
- u16 lmc_miireg16;
- int lmc_ok;
- int last_link_status;
- int lmc_cardtype;
- u32 last_frameerr;
- lmc_media_t *lmc_media;
- struct timer_list timer;
- lmc_ctl_t ictl;
- u32 TxDescriptControlInit;
-
- int tx_TimeoutInd; /* additional driver state */
- int tx_TimeoutDisplay;
- unsigned int lastlmc_taint_tx;
- int lasttx_packets;
- u32 tx_clockState;
- u32 lmc_crcSize;
- LMC_XINFO lmc_xinfo;
- char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
- char lmc_timing; /* for HSSI and SSI */
- int got_irq;
-
- char last_led_err[4];
-
- u32 last_int;
- u32 num_int;
-
- spinlock_t lmc_lock;
- u16 if_type; /* HDLC/PPP or NET */
-
- /* Failure cases */
- u8 failed_ring;
- u8 failed_recv_alloc;
-
- /* Structure check */
- u32 check;
-};
-
-#define LMC_PCI_TIME 1
-#define LMC_EXT_TIME 0
-
-#define PKT_BUF_SZ 1542 /* was 1536 */
-
-/* CSR5 settings */
-#define TIMER_INT 0x00000800
-#define TP_LINK_FAIL 0x00001000
-#define TP_LINK_PASS 0x00000010
-#define NORMAL_INT 0x00010000
-#define ABNORMAL_INT 0x00008000
-#define RX_JABBER_INT 0x00000200
-#define RX_DIED 0x00000100
-#define RX_NOBUFF 0x00000080
-#define RX_INT 0x00000040
-#define TX_FIFO_UNDER 0x00000020
-#define TX_JABBER 0x00000008
-#define TX_NOBUFF 0x00000004
-#define TX_DIED 0x00000002
-#define TX_INT 0x00000001
-
-/* CSR6 settings */
-#define OPERATION_MODE 0x00000200 /* Full Duplex */
-#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */
-#define RECEIVE_ALL 0x40000000 /* Receive All */
-#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */
-
-/* Dec control registers CSR6 as well */
-#define LMC_DEC_ST 0x00002000
-#define LMC_DEC_SR 0x00000002
-
-/* CSR15 settings */
-#define RECV_WATCHDOG_DISABLE 0x00000010
-#define JABBER_DISABLE 0x00000001
-
-/* More settings */
-/*
- * CSR6 -- Command (Operation Mode) Register
- */
-#define TULIP_CMD_RECEIVEALL 0x40000000L /* (RW) Receive all frames? */
-#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */
-#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */
-#define TULIP_CMD_STOREFWD 0x00200000L /* (RW) Store and Forward (21140) */
-#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */
-#define TULIP_CMD_PORTSELECT 0x00040000L /* (RW) Port Select (100Mb) (21140) */
-#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */
-#define TULIP_CMD_OPERMODE 0x00000C00L /* (RW) Operating Mode */
-#define TULIP_CMD_PROMISCUOUS 0x00000041L /* (RW) Promiscuous Mode */
-#define TULIP_CMD_PASSBADPKT 0x00000008L /* (RW) Pass Bad Frames */
-#define TULIP_CMD_THRESHOLDCTL 0x0000C000L /* (RW) Threshold Control */
-
-#define TULIP_GP_PINSET 0x00000100L
-#define TULIP_BUSMODE_SWRESET 0x00000001L
-#define TULIP_WATCHDOG_TXDISABLE 0x00000001L
-#define TULIP_WATCHDOG_RXDISABLE 0x00000010L
-
-#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */
-#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */
-#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */
-#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */
-#define TULIP_STS_GTE 0x00000800L /* (RW) General Purpose Timer Exp */
-#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */
-#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */
-#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */
-#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */
-#define TULIP_STS_RXINTR 0x00000040L /* (RW) Receive Interrupt */
-#define TULIP_STS_TXUNDERFLOW 0x00000020L /* (RW) Transmit Underflow */
-#define TULIP_STS_TXJABER 0x00000008L /* (RW) Jabber timeout */
-#define TULIP_STS_TXNOBUF 0x00000004L
-#define TULIP_STS_TXSTOPPED 0x00000002L /* (RW) Transmit Process Stopped */
-#define TULIP_STS_TXINTR 0x00000001L /* (RW) Transmit Interrupt */
-
-#define TULIP_STS_RXS_STOPPED 0x00000000L /* 000 - Stopped */
-
-#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receive Process Stopped */
-#define TULIP_STS_RXNOBUF 0x00000080L
-
-#define TULIP_CMD_TXRUN 0x00002000L /* (RW) Start/Stop Transmitter */
-#define TULIP_CMD_RXRUN 0x00000002L /* (RW) Start/Stop Receive Filtering */
-#define TULIP_DSTS_TxDEFERRED 0x00000001 /* Initially Deferred */
-#define TULIP_DSTS_OWNER 0x80000000 /* Owner (1 = 21040) */
-#define TULIP_DSTS_RxMIIERR 0x00000008
-#define LMC_DSTS_ERRSUM (TULIP_DSTS_RxMIIERR)
-
-#define TULIP_DEFAULT_INTR_MASK (TULIP_STS_NORMALINTR \
- | TULIP_STS_RXINTR \
- | TULIP_STS_TXINTR \
- | TULIP_STS_ABNRMLINTR \
- | TULIP_STS_SYSERROR \
- | TULIP_STS_TXSTOPPED \
- | TULIP_STS_TXUNDERFLOW\
- | TULIP_STS_RXSTOPPED )
-
-#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000))
-#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000))
-
-#ifndef TULIP_CMD_RECEIVEALL
-#define TULIP_CMD_RECEIVEALL 0x40000000L
-#endif
-
-/* Adapter module number */
-#define LMC_ADAP_HSSI 2
-#define LMC_ADAP_DS3 3
-#define LMC_ADAP_SSI 4
-#define LMC_ADAP_T1 5
-
-#define LMC_MTU 1500
-
-#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
-#define LMC_CRC_LEN_32 4
-
-#endif /* _LMC_VAR_H_ */
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
deleted file mode 100644
index eddd20aab691..000000000000
--- a/drivers/net/wan/sealevel.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Sealevel Systems 4021 driver.
- *
- * (c) Copyright 1999, 2001 Alan Cox
- * (c) Copyright 2001 Red Hat Inc.
- * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include "z85230.h"
-
-struct slvl_device {
- struct z8530_channel *chan;
- int channel;
-};
-
-struct slvl_board {
- struct slvl_device dev[2];
- struct z8530_dev board;
- int iobase;
-};
-
- /* Network driver support routines */
-
-static inline struct slvl_device *dev_to_chan(struct net_device *dev)
-{
- return (struct slvl_device *)dev_to_hdlc(dev)->priv;
-}
-
-/* Frame receive. Simple for our card as we do HDLC and there
- * is no funny garbage involved
- */
-
-static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
-{
- /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
- skb_trim(skb, skb->len - 2);
- skb->protocol = hdlc_type_trans(skb, c->netdevice);
- skb_reset_mac_header(skb);
- skb->dev = c->netdevice;
- netif_rx(skb);
-}
-
- /* We've been placed in the UP state */
-
-static int sealevel_open(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int err = -1;
- int unit = slvl->channel;
-
- /* Link layer up. */
-
- switch (unit) {
- case 0:
- err = z8530_sync_dma_open(d, slvl->chan);
- break;
- case 1:
- err = z8530_sync_open(d, slvl->chan);
- break;
- }
-
- if (err)
- return err;
-
- err = hdlc_open(d);
- if (err) {
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return err;
- }
-
- slvl->chan->rx_function = sealevel_input;
-
- netif_start_queue(d);
- return 0;
-}
-
-static int sealevel_close(struct net_device *d)
-{
- struct slvl_device *slvl = dev_to_chan(d);
- int unit = slvl->channel;
-
- /* Discard new frames */
-
- slvl->chan->rx_function = z8530_null_rx;
-
- hdlc_close(d);
- netif_stop_queue(d);
-
- switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
- }
- return 0;
-}
-
-/* Passed network frames, fire them downwind. */
-
-static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
-{
- return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
-}
-
-static int sealevel_attach(struct net_device *dev, unsigned short encoding,
- unsigned short parity)
-{
- if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
- return 0;
- return -EINVAL;
-}
-
-static const struct net_device_ops sealevel_ops = {
- .ndo_open = sealevel_open,
- .ndo_stop = sealevel_close,
- .ndo_start_xmit = hdlc_start_xmit,
- .ndo_siocwandev = hdlc_ioctl,
-};
-
-static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
-{
- struct net_device *dev = alloc_hdlcdev(sv);
-
- if (!dev)
- return -1;
-
- dev_to_hdlc(dev)->attach = sealevel_attach;
- dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
- dev->netdev_ops = &sealevel_ops;
- dev->base_addr = iobase;
- dev->irq = irq;
-
- if (register_hdlc_device(dev)) {
- pr_err("unable to register HDLC device\n");
- free_netdev(dev);
- return -1;
- }
-
- sv->chan->netdevice = dev;
- return 0;
-}
-
-/* Allocate and setup Sealevel board. */
-
-static __init struct slvl_board *slvl_init(int iobase, int irq,
- int txdma, int rxdma, int slow)
-{
- struct z8530_dev *dev;
- struct slvl_board *b;
-
- /* Get the needed I/O space */
-
- if (!request_region(iobase, 8, "Sealevel 4021")) {
- pr_warn("I/O 0x%X already in use\n", iobase);
- return NULL;
- }
-
- b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
- if (!b)
- goto err_kzalloc;
-
- b->dev[0].chan = &b->board.chanA;
- b->dev[0].channel = 0;
-
- b->dev[1].chan = &b->board.chanB;
- b->dev[1].channel = 1;
-
- dev = &b->board;
-
- /* Stuff in the I/O addressing */
-
- dev->active = 0;
-
- b->iobase = iobase;
-
- /* Select 8530 delays for the old board */
-
- if (slow)
- iobase |= Z8530_PORT_SLEEP;
-
- dev->chanA.ctrlio = iobase + 1;
- dev->chanA.dataio = iobase;
- dev->chanB.ctrlio = iobase + 3;
- dev->chanB.dataio = iobase + 2;
-
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
-
- /* Assert DTR, enable DMA */
-
- outb(3 | (1 << 7), b->iobase + 4);
-
- /* We want a fast IRQ for this device. Actually we'd like an even faster
- * IRQ ;) - This is one driver RtLinux is made for
- */
-
- if (request_irq(irq, z8530_interrupt, 0,
- "SeaLevel", dev) < 0) {
- pr_warn("IRQ %d already in use\n", irq);
- goto err_request_irq;
- }
-
- dev->irq = irq;
- dev->chanA.private = &b->dev[0];
- dev->chanB.private = &b->dev[1];
- dev->chanA.dev = dev;
- dev->chanB.dev = dev;
-
- dev->chanA.txdma = 3;
- dev->chanA.rxdma = 1;
- if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
- goto err_dma_tx;
-
- if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
- goto err_dma_rx;
-
- disable_irq(irq);
-
- /* Begin normal initialisation */
-
- if (z8530_init(dev) != 0) {
- pr_err("Z8530 series device not found\n");
- enable_irq(irq);
- goto free_hw;
- }
- if (dev->type == Z85C30) {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
- } else {
- z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
- z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
- }
-
- /* Now we can take the IRQ */
-
- enable_irq(irq);
-
- if (slvl_setup(&b->dev[0], iobase, irq))
- goto free_hw;
- if (slvl_setup(&b->dev[1], iobase, irq))
- goto free_netdev0;
-
- z8530_describe(dev, "I/O", iobase);
- dev->active = 1;
- return b;
-
-free_netdev0:
- unregister_hdlc_device(b->dev[0].chan->netdevice);
- free_netdev(b->dev[0].chan->netdevice);
-free_hw:
- free_dma(dev->chanA.rxdma);
-err_dma_rx:
- free_dma(dev->chanA.txdma);
-err_dma_tx:
- free_irq(irq, dev);
-err_request_irq:
- kfree(b);
-err_kzalloc:
- release_region(iobase, 8);
- return NULL;
-}
-
-static void __exit slvl_shutdown(struct slvl_board *b)
-{
- int u;
-
- z8530_shutdown(&b->board);
-
- for (u = 0; u < 2; u++) {
- struct net_device *d = b->dev[u].chan->netdevice;
-
- unregister_hdlc_device(d);
- free_netdev(d);
- }
-
- free_irq(b->board.irq, &b->board);
- free_dma(b->board.chanA.rxdma);
- free_dma(b->board.chanA.txdma);
- /* DMA off on the card, drop DTR */
- outb(0, b->iobase);
- release_region(b->iobase, 8);
- kfree(b);
-}
-
-static int io = 0x238;
-static int txdma = 1;
-static int rxdma = 3;
-static int irq = 5;
-static bool slow;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
-module_param_hw(txdma, int, dma, 0);
-MODULE_PARM_DESC(txdma, "Transmit DMA channel");
-module_param_hw(rxdma, int, dma, 0);
-MODULE_PARM_DESC(rxdma, "Receive DMA channel");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
-module_param(slow, bool, 0);
-MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
-
-static struct slvl_board *slvl_unit;
-
-static int __init slvl_init_module(void)
-{
- slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
-
- return slvl_unit ? 0 : -ENODEV;
-}
-
-static void __exit slvl_cleanup_module(void)
-{
- if (slvl_unit)
- slvl_shutdown(slvl_unit);
-}
-
-module_init(slvl_init_module);
-module_exit(slvl_cleanup_module);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
deleted file mode 100644
index 982a03488a00..000000000000
--- a/drivers/net/wan/z85230.c
+++ /dev/null
@@ -1,1641 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- * (c) Copyright 2000, 2001 Red Hat Inc
- *
- * Development of this driver was funded by Equiinet Ltd
- * http://www.equiinet.com
- *
- * ChangeLog:
- *
- * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
- * unification of all the Z85x30 asynchronous drivers for real.
- *
- * DMA now uses get_free_page as kmalloc buffers may span a 64K
- * boundary.
- *
- * Modified for SMP safety and SMP locking by Alan Cox
- * <alan@lxorguk.ukuu.org.uk>
- *
- * Performance
- *
- * Z85230:
- * Without DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
- * X.25 is not unrealistic on all machines. DMA mode can in theory
- * handle T1/E1 quite nicely. In practice the limit seems to be about
- * 512Kbit->1Mbit depending on motherboard.
- *
- * Z85C30:
- * 64K will take DMA, 9600 baud X.25 should be ok.
- *
- * Z8530:
- * Synchronous mode without DMA is unlikely to pass about 2400 baud.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/hdlc.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#define RT_LOCK
-#define RT_UNLOCK
-#include <linux/spinlock.h>
-
-#include "z85230.h"
-
-/**
- * z8530_read_port - Architecture specific interface function
- * @p: port to read
- *
- * Provides the port access methods. The Comtrol SV11 requires no delays
- * between accesses and uses PC I/O. Some drivers may need a 5uS delay
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dreaded 5uS sanity delay.
- *
- * The caller must hold sufficient locks to avoid violating the horrible
- * 5uS delay rule.
- */
-
-static inline int z8530_read_port(unsigned long p)
-{
- u8 r = inb(Z8530_PORT_OF(p));
-
- if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
- udelay(5);
- return r;
-}
-
-/**
- * z8530_write_port - Architecture specific interface function
- * @p: port to write
- * @d: value to write
- *
- * Write a value to a port with delays if need be. Note that the
- * caller must hold locks to avoid read/writes from other contexts
- * violating the 5uS rule
- *
- * In the longer term this should become an architecture specific
- * section so that this can become a generic driver interface for all
- * platforms. For now we only handle PC I/O ports with or without the
- * dreaded 5uS sanity delay.
- */
-
-static inline void z8530_write_port(unsigned long p, u8 d)
-{
- outb(d, Z8530_PORT_OF(p));
- if (p & Z8530_PORT_SLEEP)
- udelay(5);
-}
-
-static void z8530_rx_done(struct z8530_channel *c);
-static void z8530_tx_done(struct z8530_channel *c);
-
-/**
- * read_zsreg - Read a register from a Z85230
- * @c: Z8530 channel to read from (2 per chip)
- * @reg: Register to read
- * FIXME: Use a spinlock.
- *
- * Most of the Z8530 registers are indexed off the control registers.
- * A read is done by writing to the control register and reading the
- * register back. The caller must hold the lock
- */
-
-static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- return z8530_read_port(c->ctrlio);
-}
-
-/**
- * read_zsdata - Read the data port of a Z8530 channel
- * @c: The Z8530 channel to read the data port from
- *
- * The data port provides fast access to some things. We still
- * have all the 5uS delays to worry about.
- */
-
-static inline u8 read_zsdata(struct z8530_channel *c)
-{
- u8 r;
-
- r = z8530_read_port(c->dataio);
- return r;
-}
-
-/**
- * write_zsreg - Write to a Z8530 channel register
- * @c: The Z8530 channel
- * @reg: Register number
- * @val: Value to write
- *
- * Write a value to an indexed register. The caller must hold the lock
- * to honour the irritating delay rules. We know about register 0
- * being fast to access.
- *
- * Assumes c->lock is held.
- */
-static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
-{
- if (reg)
- z8530_write_port(c->ctrlio, reg);
- z8530_write_port(c->ctrlio, val);
-}
-
-/**
- * write_zsctrl - Write to a Z8530 control register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the control register on the Z8530
- */
-
-static inline void write_zsctrl(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->ctrlio, val);
-}
-
-/**
- * write_zsdata - Write to a Z8530 data register
- * @c: The Z8530 channel
- * @val: Value to write
- *
- * Write directly to the data register on the Z8530
- */
-static inline void write_zsdata(struct z8530_channel *c, u8 val)
-{
- z8530_write_port(c->dataio, val);
-}
-
-/* Register loading parameters for a dead port
- */
-
-u8 z8530_dead_port[] = {
- 255
-};
-EXPORT_SYMBOL(z8530_dead_port);
-
-/* Register loading parameters for currently supported circuit types
- */
-
-/* Data clocked by telco end. This is the correct data for the UK
- * "kilostream" service, and most other similar services.
- */
-
-u8 z8530_hdlc_kilostream[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream);
-
-/* As above but for enhanced chips.
- */
-
-u8 z8530_hdlc_kilostream_85230[] = {
- 4, SYNC_ENAB | SDLC | X1CLK,
- 2, 0, /* No vector */
- 1, 0,
- 3, ENT_HM | RxCRC_ENAB | Rx8,
- 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
- 9, 0, /* Disable interrupts */
- 6, 0xFF,
- 7, FLAG,
- 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
- 11, TCTRxCP,
- 14, DISDPLL,
- 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
- 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
- 9, NV | MIE | NORESET,
- 23, 3, /* Extended mode AUTO TX and EOM*/
-
- 255
-};
-EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
-
-/**
- * z8530_flush_fifo - Flush on chip RX FIFO
- * @c: Channel to flush
- *
- * Flush the receive FIFO. There is no specific option for this, we
- * blindly read bytes and discard them. Reading when there is no data
- * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
- *
- * All locking is handled for the caller. On return data may still be
- * present if it arrived during the flush.
- */
-
-static void z8530_flush_fifo(struct z8530_channel *c)
-{
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- if (c->dev->type == Z85230) {
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- read_zsreg(c, R1);
- }
-}
-
-/**
- * z8530_rtsdtr - Control the outgoing DTR/RTS lines
- * @c: The Z8530 channel to control
- * @set: 1 to set, 0 to clear
- *
- * Sets or clears DTR/RTS on the requested line. All locking is handled
- * by the caller. For now we assume all boards use the actual RTS/DTR
- * on the chip. Apparently one or two don't. We'll scream about them
- * later.
- */
-
-static void z8530_rtsdtr(struct z8530_channel *c, int set)
-{
- if (set)
- c->regs[5] |= (RTS | DTR);
- else
- c->regs[5] &= ~(RTS | DTR);
- write_zsreg(c, R5, c->regs[5]);
-}
-
-/**
- * z8530_rx - Handle a PIO receive event
- * @c: Z8530 channel to process
- *
- * Receive handler for receiving in PIO mode. This is much like the
- * async one but not quite the same or as complex
- *
- * Note: It's intended that this handler can easily be separated from
- * the main code to run realtime. That'll be needed for some machines
- * (eg to ever clock 64kbits on a sparc ;)).
- *
- * The RT_LOCK macros don't do anything now. Keep the code covered
- * by them as short as possible in all circumstances - clocks cost
- * baud. The interrupt handler is assumed to be atomic w.r.t.
- * other code - this is true in the RT case too.
- *
- * We only cover the sync cases for this. If you want 2Mbit async
- * do it yourself but consider medical assistance first. This non DMA
- * synchronous mode is portable code. The DMA mode assumes PCI like
- * ISA DMA
- *
- * Called with the device lock held
- */
-
-static void z8530_rx(struct z8530_channel *c)
-{
- u8 ch, stat;
-
- while (1) {
- /* FIFO empty ? */
- if (!(read_zsreg(c, R0) & 1))
- break;
- ch = read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- /* Overrun ?
- */
- if (c->count < c->max) {
- *c->dptr++ = ch;
- c->count++;
- }
-
- if (stat & END_FR) {
- /* Error ?
- */
- if (stat & (Rx_OVR | CRC_ERR)) {
- /* Rewind the buffer and return */
- if (c->skb)
- c->dptr = c->skb->data;
- c->count = 0;
- if (stat & Rx_OVR) {
- pr_warn("%s: overrun\n", c->dev->name);
- c->rx_overrun++;
- }
- if (stat & CRC_ERR) {
- c->rx_crc_err++;
- /* printk("crc error\n"); */
- }
- /* Shove the frame upstream */
- } else {
- /* Drop the lock for RX processing, or
- * there are deadlocks
- */
- z8530_rx_done(c);
- write_zsctrl(c, RES_Rx_CRC);
- }
- }
- }
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx - Handle a PIO transmit event
- * @c: Z8530 channel to process
- *
- * Z8530 transmit interrupt handler for the PIO mode. The basic
- * idea is to attempt to keep the FIFO fed. We fill as many bytes
- * in as possible; it's quite possible that we won't keep up with the
- * data rate otherwise.
- */
-
-static void z8530_tx(struct z8530_channel *c)
-{
- while (c->txcount) {
- /* FIFO full ? */
- if (!(read_zsreg(c, R0) & 4))
- return;
- c->txcount--;
- /* Shovel out the byte
- */
- write_zsreg(c, R8, *c->tx_ptr++);
- write_zsctrl(c, RES_H_IUS);
- /* We are about to underflow */
- if (c->txcount == 0) {
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- }
- }
-
- /* End of frame TX - fire another one
- */
-
- write_zsctrl(c, RES_Tx_P);
-
- z8530_tx_done(c);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status - Handle a PIO status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred in PIO synchronous mode. There are several
- * reasons the chip will bother us here. A transmit underrun means we
- * failed to feed the chip fast enough and just broke a packet. A DCD
- * change is a line up or down.
- */
-
-static void z8530_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (status & TxEOM) {
-/* printk("%s: Tx underrun.\n", chan->dev->name); */
- chan->netdevice->stats.tx_fifo_errors++;
- write_zsctrl(chan, ERR_RES);
- z8530_tx_done(chan);
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_sync = {
- .rx = z8530_rx,
- .tx = z8530_tx,
- .status = z8530_status,
-};
-EXPORT_SYMBOL(z8530_sync);
-
-/**
- * z8530_dma_rx - Handle a DMA RX event
- * @chan: Channel to handle
- *
- * Non bus mastering DMA interfaces for the Z8x30 devices. This
- * is really pretty PC specific. The DMA mode means that most receive
- * events are handled by the DMA hardware. We get a kick here only if
- * a frame ended.
- */
-
-static void z8530_dma_rx(struct z8530_channel *chan)
-{
- if (chan->rxdma_on) {
- /* Special condition check only */
- u8 status;
-
- read_zsreg(chan, R7);
- read_zsreg(chan, R6);
-
- status = read_zsreg(chan, R1);
-
- if (status & END_FR)
- z8530_rx_done(chan); /* Fire up the next one */
-
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_H_IUS);
- } else {
- /* DMA is off right now, drain the slow way */
- z8530_rx(chan);
- }
-}
-
-/**
- * z8530_dma_tx - Handle a DMA TX event
- * @chan: The Z8530 channel to handle
- *
- * We have received an interrupt while doing DMA transmissions. It
- * shouldn't happen. Scream loudly if it does.
- */
-static void z8530_dma_tx(struct z8530_channel *chan)
-{
- if (!chan->dma_tx) {
- pr_warn("Hey who turned the DMA off?\n");
- z8530_tx(chan);
- return;
- }
- /* This shouldn't occur in DMA mode */
- pr_err("DMA tx - bogus event!\n");
- z8530_tx(chan);
-}
-
-/**
- * z8530_dma_status - Handle a DMA status exception
- * @chan: Z8530 channel to process
- *
- * A status event occurred on the Z8530. We receive these for two reasons
- * when in DMA mode. Firstly if we finished a packet transfer we get one
- * and kick the next packet out. Secondly we may see a DCD change.
- *
- */
-static void z8530_dma_status(struct z8530_channel *chan)
-{
- u8 status, altered;
-
- status = read_zsreg(chan, R0);
- altered = chan->status ^ status;
-
- chan->status = status;
-
- if (chan->dma_tx) {
- if (status & TxEOM) {
- unsigned long flags;
-
- flags = claim_dma_lock();
- disable_dma(chan->txdma);
- clear_dma_ff(chan->txdma);
- chan->txdma_on = 0;
- release_dma_lock(flags);
- z8530_tx_done(chan);
- }
- }
-
- if (altered & chan->dcdcheck) {
- if (status & chan->dcdcheck) {
- pr_info("%s: DCD raised\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
- if (chan->netdevice)
- netif_carrier_on(chan->netdevice);
- } else {
- pr_info("%s: DCD lost\n", chan->dev->name);
- write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
- z8530_flush_fifo(chan);
- if (chan->netdevice)
- netif_carrier_off(chan->netdevice);
- }
- }
-
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-static struct z8530_irqhandler z8530_dma_sync = {
- .rx = z8530_dma_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-static struct z8530_irqhandler z8530_txdma_sync = {
- .rx = z8530_rx,
- .tx = z8530_dma_tx,
- .status = z8530_dma_status,
-};
-
-/**
- * z8530_rx_clear - Handle RX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_rx_clear(struct z8530_channel *c)
-{
- /* Data and status bytes
- */
- u8 stat;
-
- read_zsdata(c);
- stat = read_zsreg(c, R1);
-
- if (stat & END_FR)
- write_zsctrl(c, RES_Rx_CRC);
- /* Clear irq
- */
- write_zsctrl(c, ERR_RES);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_tx_clear - Handle TX events from a stopped chip
- * @c: Z8530 channel to shut up
- *
- * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_tx_clear(struct z8530_channel *c)
-{
- write_zsctrl(c, RES_Tx_P);
- write_zsctrl(c, RES_H_IUS);
-}
-
-/**
- * z8530_status_clear - Handle status events from a stopped chip
- * @chan: Z8530 channel to shut up
- *
- * Status interrupt vectors for a Z8530 that is in 'parked' mode.
- * For machines with PCI Z85x30 cards, or level triggered interrupts
- * (eg the MacII) we must clear the interrupt cause or die.
- */
-
-static void z8530_status_clear(struct z8530_channel *chan)
-{
- u8 status = read_zsreg(chan, R0);
-
- if (status & TxEOM)
- write_zsctrl(chan, ERR_RES);
- write_zsctrl(chan, RES_EXT_INT);
- write_zsctrl(chan, RES_H_IUS);
-}
-
-struct z8530_irqhandler z8530_nop = {
- .rx = z8530_rx_clear,
- .tx = z8530_tx_clear,
- .status = z8530_status_clear,
-};
-EXPORT_SYMBOL(z8530_nop);
-
-/**
- * z8530_interrupt - Handle an interrupt from a Z8530
- * @irq: Interrupt number
- * @dev_id: The Z8530 device that is interrupting.
- *
- * A Z85[2]30 device has stuck its hand in the air for attention.
- * We scan both the channels on the chip for events and then call
- * the channel specific call backs for each channel that has events.
- * We have to use callback functions because the two channels can be
- * in different modes.
- *
- * Locking is done for the handlers. Note that locking is done
- * at the chip level (the 5uS delay issue is per chip not per
- * channel). c->lock for both channels points to dev->lock
- */
-
-irqreturn_t z8530_interrupt(int irq, void *dev_id)
-{
- struct z8530_dev *dev = dev_id;
- u8 intr;
- static volatile int locker = 0;
- int work = 0;
- struct z8530_irqhandler *irqs;
-
- if (locker) {
- pr_err("IRQ re-enter\n");
- return IRQ_NONE;
- }
- locker = 1;
-
- spin_lock(&dev->lock);
-
- while (++work < 5000) {
- intr = read_zsreg(&dev->chanA, R3);
- if (!(intr &
- (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
- break;
-
- /* This holds the IRQ status. On the 8530 you must read it
- * from chan A even though it applies to the whole chip
- */
-
- /* Now walk the chip and see what it is wanting - it may be
- * an IRQ for someone else remember
- */
-
- irqs = dev->chanA.irqs;
-
- if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
- if (intr & CHARxIP)
- irqs->rx(&dev->chanA);
- if (intr & CHATxIP)
- irqs->tx(&dev->chanA);
- if (intr & CHAEXT)
- irqs->status(&dev->chanA);
- }
-
- irqs = dev->chanB.irqs;
-
- if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
- if (intr & CHBRxIP)
- irqs->rx(&dev->chanB);
- if (intr & CHBTxIP)
- irqs->tx(&dev->chanB);
- if (intr & CHBEXT)
- irqs->status(&dev->chanB);
- }
- }
- spin_unlock(&dev->lock);
- if (work == 5000)
- pr_err("%s: interrupt jammed - abort(0x%X)!\n",
- dev->name, intr);
- /* Ok all done */
- locker = 0;
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(z8530_interrupt);
-
-static const u8 reg_init[16] = {
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0x55, 0, 0, 0
-};
-
-/**
- * z8530_sync_open - Open a Z8530 channel for PIO
- * @dev: The network interface we are using
- * @c: The Z8530 channel to open in synchronous PIO mode
- *
- * Switch a Z8530 into synchronous mode without DMA assist. We
- * raise the RTS/DTR and commence network operation.
- */
-int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
- c->irqs = &z8530_sync;
-
- /* This loads the double buffer up */
- z8530_rx_done(c); /* Load the frame ring */
- z8530_rx_done(c); /* Load the backup frame */
- z8530_rtsdtr(c, 1);
- c->dma_tx = 0;
- c->regs[R1] |= TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_open);
-
-/**
- * z8530_sync_close - Close a PIO Z8530 channel
- * @dev: Network device to close
- * @c: Z8530 channel to disassociate and move to idle
- *
- * Close down a Z8530 interface and switch its interrupt handlers
- * to discard future events.
- */
-int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_close);
-
-/**
- * z8530_sync_dma_open - Open a Z8530 for DMA I/O
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA in both directions. Two
- * ISA DMA channels must be available for this to work. We assume ISA
- * DMA driven I/O and PC limits on access.
- */
-int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Load the DMA interfaces up
- */
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs 1500 mtu or less on wan links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->rx_buf[0])
- return -ENOBUFS;
- c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- return -ENOBUFS;
- }
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- c->tx_dma_used = 0;
- c->dma_tx = 1;
- c->dma_num = 0;
- c->dma_ready = 1;
-
- /* Enable DMA control mode
- */
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* TX DMA via DIR/REQ
- */
-
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* RX DMA via W/Req
- */
-
- c->regs[R1] |= WT_FN_RDYFN;
- c->regs[R1] |= WT_RDY_RT;
- c->regs[R1] |= INT_ERR_Rx;
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] |= WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* DMA interrupts
- */
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
- set_dma_count(c->rxdma, c->mtu);
- enable_dma(c->rxdma);
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 1;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_dma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_open);
-
-/**
- * z8530_sync_dma_close - Close down DMA I/O
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA mode synchronous interface. Halt the DMA, and
- * free the buffers.
- */
-int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
-{
- u8 chk;
- unsigned long flags;
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- flags = claim_dma_lock();
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
-
- c->rxdma_on = 0;
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- release_dma_lock(flags);
-
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- spin_lock_irqsave(c->lock, flags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->rx_buf[0]) {
- free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0] = NULL;
- }
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_dma_close);
-
-/**
- * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
- * @dev: The network device to attach
- * @c: The Z8530 channel to configure in sync DMA mode.
- *
- * Set up a Z85x30 device for synchronous DMA transmission. One
- * ISA DMA channel must be available for this to work. The receive
- * side is run in PIO mode, but then it has the bigger FIFO.
- */
-
-int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long cflags, dflags;
-
- printk("Opening sync interface for TX-DMA\n");
- c->sync = 1;
- c->mtu = dev->mtu + 64;
- c->count = 0;
- c->skb = NULL;
- c->skb2 = NULL;
-
- /* Allocate the DMA flip buffers. Limit by page size.
- * Everyone runs 1500 mtu or less on wan links so this
- * should be fine.
- */
-
- if (c->mtu > PAGE_SIZE / 2)
- return -EMSGSIZE;
-
- c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!c->tx_dma_buf[0])
- return -ENOBUFS;
-
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
-
- spin_lock_irqsave(c->lock, cflags);
-
- /* Load the PIO receive ring
- */
-
- z8530_rx_done(c);
- z8530_rx_done(c);
-
- /* Load the DMA interfaces up
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 0;
-
- c->tx_dma_used = 0;
- c->dma_num = 0;
- c->dma_ready = 1;
- c->dma_tx = 1;
-
- /* Enable DMA control mode
- */
-
- /* TX DMA via DIR/REQ
- */
- c->regs[R14] |= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1] &= ~TxINT_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /* Set up the DMA configuration
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- set_dma_mode(c->txdma, DMA_MODE_WRITE);
- disable_dma(c->txdma);
-
- release_dma_lock(dflags);
-
- /* Select the DMA interrupt handlers
- */
-
- c->rxdma_on = 0;
- c->txdma_on = 1;
- c->tx_dma_used = 1;
-
- c->irqs = &z8530_txdma_sync;
- z8530_rtsdtr(c, 1);
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
- spin_unlock_irqrestore(c->lock, cflags);
-
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_open);
-
-/**
- * z8530_sync_txdma_close - Close down a TX driven DMA channel
- * @dev: Network device to detach
- * @c: Z8530 channel to move into discard mode
- *
- * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
- * and free the buffers.
- */
-
-int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
-{
- unsigned long dflags, cflags;
- u8 chk;
-
- spin_lock_irqsave(c->lock, cflags);
-
- c->irqs = &z8530_nop;
- c->max = 0;
- c->sync = 0;
-
- /* Disable the PC DMA channels
- */
-
- dflags = claim_dma_lock();
-
- disable_dma(c->txdma);
- clear_dma_ff(c->txdma);
- c->txdma_on = 0;
- c->tx_dma_used = 0;
-
- release_dma_lock(dflags);
-
- /* Disable DMA control mode
- */
-
- c->regs[R1] &= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
- c->regs[R1] |= INT_ALL_Rx;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14] &= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if (c->tx_dma_buf[0]) {
- free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0] = NULL;
- }
- chk = read_zsreg(c, R0);
- write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c, 0);
-
- spin_unlock_irqrestore(c->lock, cflags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_sync_txdma_close);
-
-/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
- * it exists...
- */
-static const char * const z8530_type_name[] = {
- "Z8530",
- "Z85C30",
- "Z85230"
-};
-
-/**
- * z8530_describe - Uniformly describe a Z8530 port
- * @dev: Z8530 device to describe
- * @mapping: string holding mapping type (eg "I/O" or "Mem")
- * @io: the port value in question
- *
- * Describe a Z8530 in a standard format. We must pass the I/O as
- * the port offset isn't predictable. The main reason for this function
- * is to try and get a common format of report.
- */
-
-void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
-{
- pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
- dev->name,
- z8530_type_name[dev->type],
- mapping,
- Z8530_PORT_OF(io),
- dev->irq);
-}
-EXPORT_SYMBOL(z8530_describe);
-
-/* Locked operation part of the z8530 init code
- */
-static inline int do_z8530_init(struct z8530_dev *dev)
-{
- /* NOP the interrupt handlers first - we might get a
- * floating IRQ transition when we reset the chip
- */
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- dev->chanA.dcdcheck = DCD;
- dev->chanB.dcdcheck = DCD;
-
- /* Reset the chip */
- write_zsreg(&dev->chanA, R9, 0xC0);
- udelay(200);
- /* Now check it's valid */
- write_zsreg(&dev->chanA, R12, 0xAA);
- if (read_zsreg(&dev->chanA, R12) != 0xAA)
- return -ENODEV;
- write_zsreg(&dev->chanA, R12, 0x55);
- if (read_zsreg(&dev->chanA, R12) != 0x55)
- return -ENODEV;
-
- dev->type = Z8530;
-
- /* See the application note.
- */
-
- write_zsreg(&dev->chanA, R15, 0x01);
-
- /* If we can set the low bit of R15 then
- * the chip is enhanced.
- */
-
- if (read_zsreg(&dev->chanA, R15) == 0x01) {
- /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
- /* Put a char in the fifo */
- write_zsreg(&dev->chanA, R8, 0);
- if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
- dev->type = Z85230; /* Has a FIFO */
- else
- dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
- }
-
- /* The code assumes R7' and friends are
- * off. Use write_zsext() for these and keep
- * this bit clear.
- */
-
- write_zsreg(&dev->chanA, R15, 0);
-
- /* At this point it looks like the chip is behaving
- */
-
- memcpy(dev->chanA.regs, reg_init, 16);
- memcpy(dev->chanB.regs, reg_init, 16);
-
- return 0;
-}
-
-/**
- * z8530_init - Initialise a Z8530 device
- * @dev: Z8530 device to initialise.
- *
- * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
- * is present, identify the type and then program it to hopefully
- * keep quiet and behave. This matters a lot, a Z8530 in the wrong
- * state will sometimes get into stupid modes generating 10kHz
- * interrupt streams and the like.
- *
- * We set the interrupt handler up to discard any events, in case
- * we get them during reset or setup.
- *
- * Return 0 for success, or a negative value indicating the problem
- * in errno form.
- */
-
-int z8530_init(struct z8530_dev *dev)
-{
- unsigned long flags;
- int ret;
-
- /* Set up the chip level lock */
- spin_lock_init(&dev->lock);
- dev->chanA.lock = &dev->lock;
- dev->chanB.lock = &dev->lock;
-
- spin_lock_irqsave(&dev->lock, flags);
- ret = do_z8530_init(dev);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(z8530_init);
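To show how the pieces fit together, here is a minimal and entirely hypothetical probe sketch; the device name, I/O ports and IRQ are made up, and a real board driver would also wire up channel handlers and a network device:

static struct z8530_dev example_scc;	/* hypothetical board instance */

static int __init example_probe(void)
{
	int ret;

	example_scc.name = "hwdev0";		/* hypothetical */
	example_scc.irq = 5;			/* hypothetical */
	example_scc.chanA.ctrlio = 0x220;	/* hypothetical ports */
	example_scc.chanA.dataio = 0x221;
	example_scc.chanB.ctrlio = 0x222;
	example_scc.chanB.dataio = 0x223;

	/* Claim the interrupt before poking the chip */
	if (request_irq(example_scc.irq, z8530_interrupt, 0,
			"example", &example_scc))
		return -EBUSY;

	ret = z8530_init(&example_scc);	/* reset, verify, identify */
	if (ret) {
		free_irq(example_scc.irq, &example_scc);
		return ret;
	}
	z8530_describe(&example_scc, "I/O", example_scc.chanA.ctrlio);
	return 0;
}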
-
-/**
- * z8530_shutdown - Shutdown a Z8530 device
- * @dev: The Z8530 chip to shutdown
- *
- * We set the interrupt handlers to silence any interrupts. We then
- * reset the chip and wait 100us to be sure the reset completed. Just
- * in case the caller then tries to do stuff.
- *
- * This is called without the lock held
- */
-int z8530_shutdown(struct z8530_dev *dev)
-{
- unsigned long flags;
- /* Reset the chip */
-
- spin_lock_irqsave(&dev->lock, flags);
- dev->chanA.irqs = &z8530_nop;
- dev->chanB.irqs = &z8530_nop;
- write_zsreg(&dev->chanA, R9, 0xC0);
- /* We must hold the lock across the udelay, the chip is off-limits here */
- udelay(100);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_shutdown);
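The matching teardown for the hypothetical probe sketch above would call z8530_shutdown(&example_scc) first and free_irq(example_scc.irq, &example_scc) afterwards, so the chip is already reset and silenced by the time its handler is removed.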
-
-/**
- * z8530_channel_load - Load channel data
- * @c: Z8530 channel to configure
- * @rtable: table of register, value pairs
- * FIXME: ioctl to allow user uploaded tables
- *
- * Load a Z8530 channel up from the system data. We use +16 to
- * indicate the "prime" registers. The value 255 terminates the
- * table.
- */
-
-int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(c->lock, flags);
-
- while (*rtable != 255) {
- int reg = *rtable++;
-
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] | 1);
- write_zsreg(c, reg & 0x0F, *rtable);
- if (reg > 0x0F)
- write_zsreg(c, R15, c->regs[15] & ~1);
- c->regs[reg] = *rtable++;
- }
- c->rx_function = z8530_null_rx;
- c->skb = NULL;
- c->tx_skb = NULL;
- c->tx_next_skb = NULL;
- c->mtu = 1500;
- c->max = 0;
- c->count = 0;
- c->status = read_zsreg(c, R0);
- c->sync = 1;
- write_zsreg(c, R3, c->regs[R3] | RxENABLE);
-
- spin_unlock_irqrestore(c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(z8530_channel_load);
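A hypothetical table in the format this function consumes: (register, value) byte pairs, with a register number above 0x0F selecting the shadow "prime" bank and 255 terminating the list. The standard tables declared in z85230.h (z8530_hdlc_kilostream and friends) follow the same layout:

/* Illustrative only: a cut-down SDLC setup table */
static u8 example_sdlc_table[] = {
	R4, SDLC | X1CLK,		/* SDLC framing, x1 clock */
	R3, RxCRC_ENAB | Rx8,		/* Rx CRC on, 8 bits/char */
	R5, TxCRC_ENAB | Tx8 | RTS | DTR, /* Tx CRC, 8 bits, modem lines */
	R10, NRZ | CRCPS,		/* NRZ coding, CRC preset */
	255				/* end of table */
};

/* Loaded once the channel is attached, e.g.: */
z8530_channel_load(&example_scc.chanA, example_sdlc_table);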
-
-/**
- * z8530_tx_begin - Begin packet transmission
- * @c: The Z8530 channel to kick
- *
- * This is the speed sensitive side of transmission. If we are called
- * and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
- *
- * Note: We are handling this code path in the interrupt path, keep it
- * fast or bad things will happen.
- *
- * Called with the lock held.
- */
-
-static void z8530_tx_begin(struct z8530_channel *c)
-{
- unsigned long flags;
-
- if (c->tx_skb)
- return;
-
- c->tx_skb = c->tx_next_skb;
- c->tx_next_skb = NULL;
- c->tx_ptr = c->tx_next_ptr;
-
- if (!c->tx_skb) {
- /* Idle on */
- if (c->dma_tx) {
- flags = claim_dma_lock();
- disable_dma(c->txdma);
- /* Check if we crapped out.
- */
- if (get_dma_residue(c->txdma)) {
- c->netdevice->stats.tx_dropped++;
- c->netdevice->stats.tx_fifo_errors++;
- }
- release_dma_lock(flags);
- }
- c->txcount = 0;
- } else {
- c->txcount = c->tx_skb->len;
-
- if (c->dma_tx) {
- /* FIXME. DMA is broken for the original 8530,
- * on the older parts we need to set a flag and
- * wait for a further TX interrupt to fire this
- * stage off
- */
-
- flags = claim_dma_lock();
- disable_dma(c->txdma);
-
- /* These two are needed by the 8530/85C30
- * and must be issued when idling.
- */
- if (c->dev->type != Z85230) {
- write_zsctrl(c, RES_Tx_CRC);
- write_zsctrl(c, RES_EOM_L);
- }
- write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
- clear_dma_ff(c->txdma);
- set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
- set_dma_count(c->txdma, c->txcount);
- enable_dma(c->txdma);
- release_dma_lock(flags);
- write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R5, c->regs[R5] | TxENAB);
- } else {
- /* ABUNDER off */
- write_zsreg(c, R10, c->regs[10]);
- write_zsctrl(c, RES_Tx_CRC);
-
- while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
- write_zsreg(c, R8, *c->tx_ptr++);
- c->txcount--;
- }
- }
- }
- /* Since we emptied tx_skb we can ask for more
- */
- netif_wake_queue(c->netdevice);
-}
-
-/**
- * z8530_tx_done - TX complete callback
- * @c: The channel that completed a transmit.
- *
- * This is called when we complete a packet send. We wake the queue,
- * start the next packet going and then free the buffer of the existing
- * packet. This code is fairly timing sensitive.
- *
- * Called with the register lock held.
- */
-
-static void z8530_tx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
-
- /* Actually this can happen. */
- if (!c->tx_skb)
- return;
-
- skb = c->tx_skb;
- c->tx_skb = NULL;
- z8530_tx_begin(c);
- c->netdevice->stats.tx_packets++;
- c->netdevice->stats.tx_bytes += skb->len;
- dev_consume_skb_irq(skb);
-}
-
-/**
- * z8530_null_rx - Discard a packet
- * @c: The channel the packet arrived on
- * @skb: The buffer
- *
- * We point the receive handler at this function when idle. Instead
- * of processing the frames we get to throw them away.
- */
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
-{
- dev_kfree_skb_any(skb);
-}
-EXPORT_SYMBOL(z8530_null_rx);
-
-/**
- * z8530_rx_done - Receive completion callback
- * @c: The channel that completed a receive
- *
- * A new packet is complete. Our goal here is to get back into receive
- * mode as fast as possible. On the Z85230 we could change to using
- * ESCC mode, but on the older chips we have no choice. We flip to the
- * new buffer immediately in DMA mode so that the DMA of the next
- * frame can occur while we are copying the previous buffer to an sk_buff.
- *
- * Called with the lock held
- */
-static void z8530_rx_done(struct z8530_channel *c)
-{
- struct sk_buff *skb;
- int ct;
-
- /* Is our receive engine in DMA mode
- */
- if (c->rxdma_on) {
- /* Save the ready state and the buffer currently
- * being used as the DMA target
- */
- int ready = c->dma_ready;
- unsigned char *rxb = c->rx_buf[c->dma_num];
- unsigned long flags;
-
- /* Complete this DMA. Necessary to find the length
- */
- flags = claim_dma_lock();
-
- disable_dma(c->rxdma);
- clear_dma_ff(c->rxdma);
- c->rxdma_on = 0;
- ct = c->mtu - get_dma_residue(c->rxdma);
- if (ct < 0)
- ct = 2; /* Shit happens.. */
- c->dma_ready = 0;
-
- /* Normal case: the other slot is free, start the next DMA
- * into it immediately.
- */
-
- if (ready) {
- c->dma_num ^= 1;
- set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
- set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
- set_dma_count(c->rxdma, c->mtu);
- c->rxdma_on = 1;
- enable_dma(c->rxdma);
- /* Stop any frames that we missed the head of
- * from passing
- */
- write_zsreg(c, R0, RES_Rx_CRC);
- } else {
- /* Can't occur as we don't re-enable the DMA irq until
- * after the flip is done
- */
- netdev_warn(c->netdevice, "DMA flip overrun!\n");
- }
-
- release_dma_lock(flags);
-
- /* Shove the old buffer into an sk_buff. We can't DMA
- * directly into one on a PC - it might be above the 16MB
- * boundary. Optimisation - we could check to see if we
- * can avoid the copy. Optimisation 2 - make the memcpy
- * a copychecksum.
- */
-
- skb = dev_alloc_skb(ct);
- if (!skb) {
- c->netdevice->stats.rx_dropped++;
- netdev_warn(c->netdevice, "Memory squeeze\n");
- } else {
- skb_put(skb, ct);
- skb_copy_to_linear_data(skb, rxb, ct);
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- c->dma_ready = 1;
- } else {
- RT_LOCK;
- skb = c->skb;
-
- /* The game we play for non-DMA is similar. We want to
- * get the controller set up for the next packet as fast
- * as possible. We potentially only have one byte + the
- * fifo length for this. Thus we want to flip to the new
- * buffer and then mess around copying and allocating
- * things. For the current case it doesn't matter but
- * if you build a system where the sync irq isn't blocked
- * by the kernel IRQ disable then you need only block the
- * sync IRQ for the RT_LOCK area.
- *
- */
- ct = c->count;
-
- c->skb = c->skb2;
- c->count = 0;
- c->max = c->mtu;
- if (c->skb) {
- c->dptr = c->skb->data;
- c->max = c->mtu;
- } else {
- c->count = 0;
- c->max = 0;
- }
- RT_UNLOCK;
-
- c->skb2 = dev_alloc_skb(c->mtu);
- if (c->skb2)
- skb_put(c->skb2, c->mtu);
-
- c->netdevice->stats.rx_packets++;
- c->netdevice->stats.rx_bytes += ct;
- }
- /* If we received a frame we must now process it.
- */
- if (skb) {
- skb_trim(skb, ct);
- c->rx_function(c, skb);
- } else {
- c->netdevice->stats.rx_dropped++;
- netdev_err(c->netdevice, "Lost a frame\n");
- }
-}
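The rx_function hook is where a port driver replaces z8530_null_rx() with real receive handling. A plausible sketch for a generic-HDLC port, modelled on how such drivers typically feed frames upstream (names illustrative, error handling trimmed):

static void example_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	skb_trim(skb, skb->len - 2);	/* drop the trailing CRC bytes */
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);
}

It would be installed at open time with c->rx_function = example_rx;.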
-
-/**
- * spans_boundary - Check a packet can be ISA DMA'd
- * @skb: The buffer to check
- *
- * Returns true if the buffer crosses a DMA boundary on a PC. The poor
- * thing can only DMA within a 64K block, not across its edges.
- */
-
-static inline int spans_boundary(struct sk_buff *skb)
-{
- unsigned long a = (unsigned long)skb->data;
-
- a ^= (a + skb->len);
- if (a & 0x00010000) /* If the 64K bit is different.. */
- return 1;
- return 0;
-}
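Worked example with hypothetical addresses: a 16-byte buffer at 0xFFF8 ends at 0x10008, and 0xFFF8 ^ 0x10008 = 0x1FFF0 has bit 16 set, so the buffer straddles a 64K page and z8530_queue_xmit() below will bounce it through a flip buffer.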
-
-/**
- * z8530_queue_xmit - Queue a packet
- * @c: The channel to use
- * @skb: The packet to kick down the channel
- *
- * Queue a packet for transmission. Because we have rather
- * hard to hit interrupt latencies for the Z85230 per packet
- * even in DMA mode we do the flip to DMA buffer if needed here
- * not in the IRQ.
- *
- * Called from the network code. The lock is not held at this
- * point.
- */
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
-{
- unsigned long flags;
-
- netif_stop_queue(c->netdevice);
- if (c->tx_next_skb)
- return NETDEV_TX_BUSY;
-
- /* PC SPECIFIC - DMA limits */
- /* If we will DMA the transmit and it's gone over the ISA bus
- * limit, then copy to the flip buffer
- */
-
- if (c->dma_tx &&
- ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
- 16 * 1024 * 1024 || spans_boundary(skb))) {
- /* Send the flip buffer, and flip the flippy bit.
- * We don't care which one is used, just so long as
- * we never use the same buffer twice in a row. Since
- * only one buffer can be going out at a time the other
- * has to be safe.
- */
- c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
- c->tx_dma_used ^= 1; /* Flip temp buffer */
- skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
- } else {
- c->tx_next_ptr = skb->data;
- }
- RT_LOCK;
- c->tx_next_skb = skb;
- RT_UNLOCK;
-
- spin_lock_irqsave(c->lock, flags);
- z8530_tx_begin(c);
- spin_unlock_irqrestore(c->lock, flags);
-
- return NETDEV_TX_OK;
-}
-EXPORT_SYMBOL(z8530_queue_xmit);
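Its callers were the board drivers' ndo_start_xmit hooks; a hypothetical wrapper follows, where dev_to_chan() stands in for however a driver recovers its channel from the net_device:

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct z8530_channel *ch = dev_to_chan(dev);	/* hypothetical */

	return z8530_queue_xmit(ch, skb);
}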
-
-/* Module support
- */
-static const char banner[] __initconst =
- KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
-
-static int __init z85230_init_driver(void)
-{
- printk(banner);
- return 0;
-}
-module_init(z85230_init_driver);
-
-static void __exit z85230_cleanup_driver(void)
-{
-}
-module_exit(z85230_cleanup_driver);
-
-MODULE_AUTHOR("Red Hat Inc.");
-MODULE_DESCRIPTION("Z85x30 synchronous driver core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
deleted file mode 100644
index 462cb620bc5d..000000000000
--- a/drivers/net/wan/z85230.h
+++ /dev/null
@@ -1,407 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Description of the Z8530, Z85C30 and Z85230 communications chips
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
- */
-
-#ifndef _Z8530_H
-#define _Z8530_H
-
-#include <linux/tty.h>
-#include <linux/interrupt.h>
-
-/* Conversion routines to/from brg time constants from/to bits
- * per second.
- */
-#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
-#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
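For example, with a hypothetical 4.9152 MHz PCLK: BPS_TO_BRG(9600, 4915200) = (4924800 / 19200) - 2 = 254, and BRG_TO_BPS(254, 4915200) = 4915200 / 2 / 256 = 9600, so the two conversions round-trip exactly at that rate.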
-
-/* The Zilog register set */
-
-#define FLAG 0x7e
-
-/* Write Register 0 */
-#define R0 0 /* Register selects */
-#define R1 1
-#define R2 2
-#define R3 3
-#define R4 4
-#define R5 5
-#define R6 6
-#define R7 7
-#define R8 8
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-
-#define RPRIME 16 /* Indicate a prime register access on 230 */
-
-#define NULLCODE 0 /* Null Code */
-#define POINT_HIGH 0x8 /* Select upper half of registers */
-#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
-#define SEND_ABORT 0x18 /* HDLC Abort */
-#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
-#define RES_Tx_P 0x28 /* Reset TxINT Pending */
-#define ERR_RES 0x30 /* Error Reset */
-#define RES_H_IUS 0x38 /* Reset highest IUS */
-
-#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
-#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
-#define RES_EOM_L 0xC0 /* Reset EOM latch */
-
-/* Write Register 1 */
-
-#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
-#define TxINT_ENAB 0x2 /* Tx Int Enable */
-#define PAR_SPEC 0x4 /* Parity is special condition */
-
-#define RxINT_DISAB 0 /* Rx Int Disable */
-#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
-#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
-#define INT_ERR_Rx 0x18 /* Int on error only */
-
-#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
-#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
-#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
-
-/* Write Register #2 (Interrupt Vector) */
-
-/* Write Register 3 */
-
-#define RxENABLE 0x1 /* Rx Enable */
-#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
-#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
-#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
-#define ENT_HM 0x10 /* Enter Hunt Mode */
-#define AUTO_ENAB 0x20 /* Auto Enables */
-#define Rx5 0x0 /* Rx 5 Bits/Character */
-#define Rx7 0x40 /* Rx 7 Bits/Character */
-#define Rx6 0x80 /* Rx 6 Bits/Character */
-#define Rx8 0xc0 /* Rx 8 Bits/Character */
-
-/* Write Register 4 */
-
-#define PAR_ENA 0x1 /* Parity Enable */
-#define PAR_EVEN 0x2 /* Parity Even/Odd* */
-
-#define SYNC_ENAB 0 /* Sync Modes Enable */
-#define SB1 0x4 /* 1 stop bit/char */
-#define SB15 0x8 /* 1.5 stop bits/char */
-#define SB2 0xc /* 2 stop bits/char */
-
-#define MONSYNC 0 /* 8 Bit Sync character */
-#define BISYNC 0x10 /* 16 bit sync character */
-#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
-#define EXTSYNC 0x30 /* External Sync Mode */
-
-#define X1CLK 0x0 /* x1 clock mode */
-#define X16CLK 0x40 /* x16 clock mode */
-#define X32CLK 0x80 /* x32 clock mode */
-#define X64CLK 0xC0 /* x64 clock mode */
-
-/* Write Register 5 */
-
-#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
-#define RTS 0x2 /* RTS */
-#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
-#define TxENAB 0x8 /* Tx Enable */
-#define SND_BRK 0x10 /* Send Break */
-#define Tx5 0x0 /* Tx 5 bits (or less)/character */
-#define Tx7 0x20 /* Tx 7 bits/character */
-#define Tx6 0x40 /* Tx 6 bits/character */
-#define Tx8 0x60 /* Tx 8 bits/character */
-#define DTR 0x80 /* DTR */
-
-/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
-
-/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
-
-/* Write Register 8 (transmit buffer) */
-
-/* Write Register 9 (Master interrupt control) */
-#define VIS 1 /* Vector Includes Status */
-#define NV 2 /* No Vector */
-#define DLC 4 /* Disable Lower Chain */
-#define MIE 8 /* Master Interrupt Enable */
-#define STATHI 0x10 /* Status high */
-#define NORESET 0 /* No reset on write to R9 */
-#define CHRB 0x40 /* Reset channel B */
-#define CHRA 0x80 /* Reset channel A */
-#define FHWRES 0xc0 /* Force hardware reset */
-
-/* Write Register 10 (misc control bits) */
-#define BIT6 1 /* 6 bit/8bit sync */
-#define LOOPMODE 2 /* SDLC Loop mode */
-#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
-#define MARKIDLE 8 /* Mark/flag on idle */
-#define GAOP 0x10 /* Go active on poll */
-#define NRZ 0 /* NRZ mode */
-#define NRZI 0x20 /* NRZI mode */
-#define FM1 0x40 /* FM1 (transition = 1) */
-#define FM0 0x60 /* FM0 (transition = 0) */
-#define CRCPS 0x80 /* CRC Preset I/O */
-
-/* Write Register 11 (Clock Mode control) */
-#define TRxCXT 0 /* TRxC = Xtal output */
-#define TRxCTC 1 /* TRxC = Transmit clock */
-#define TRxCBR 2 /* TRxC = BR Generator Output */
-#define TRxCDP 3 /* TRxC = DPLL output */
-#define TRxCOI 4 /* TRxC O/I */
-#define TCRTxCP 0 /* Transmit clock = RTxC pin */
-#define TCTRxCP 8 /* Transmit clock = TRxC pin */
-#define TCBR 0x10 /* Transmit clock = BR Generator output */
-#define TCDPLL 0x18 /* Transmit clock = DPLL output */
-#define RCRTxCP 0 /* Receive clock = RTxC pin */
-#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
-#define RCBR 0x40 /* Receive clock = BR Generator output */
-#define RCDPLL 0x60 /* Receive clock = DPLL output */
-#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
-
-/* Write Register 12 (lower byte of baud rate generator time constant) */
-
-/* Write Register 13 (upper byte of baud rate generator time constant) */
-
-/* Write Register 14 (Misc control bits) */
-#define BRENABL 1 /* Baud rate generator enable */
-#define BRSRC 2 /* Baud rate generator source */
-#define DTRREQ 4 /* DTR/Request function */
-#define AUTOECHO 8 /* Auto Echo */
-#define LOOPBAK 0x10 /* Local loopback */
-#define SEARCH 0x20 /* Enter search mode */
-#define RMC 0x40 /* Reset missing clock */
-#define DISDPLL 0x60 /* Disable DPLL */
-#define SSBR 0x80 /* Set DPLL source = BR generator */
-#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
-#define SFMM 0xc0 /* Set FM mode */
-#define SNRZI 0xe0 /* Set NRZI mode */
-
-/* Write Register 15 (external/status interrupt control) */
-#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */
-#define ZCIE 2 /* Zero count IE */
-#define FIFOE 4 /* Z85230 only */
-#define DCDIE 8 /* DCD IE */
-#define SYNCIE 0x10 /* Sync/hunt IE */
-#define CTSIE 0x20 /* CTS IE */
-#define TxUIE 0x40 /* Tx Underrun/EOM IE */
-#define BRKIE 0x80 /* Break/Abort IE */
-
-
-/* Read Register 0 */
-#define Rx_CH_AV 0x1 /* Rx Character Available */
-#define ZCOUNT 0x2 /* Zero count */
-#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
-#define DCD 0x8 /* DCD */
-#define SYNC_HUNT 0x10 /* Sync/hunt */
-#define CTS 0x20 /* CTS */
-#define TxEOM 0x40 /* Tx underrun */
-#define BRK_ABRT 0x80 /* Break/Abort */
-
-/* Read Register 1 */
-#define ALL_SNT 0x1 /* All sent */
-/* Residue Data for 8 Rx bits/char programmed */
-#define RES3 0x8 /* 0/3 */
-#define RES4 0x4 /* 0/4 */
-#define RES5 0xc /* 0/5 */
-#define RES6 0x2 /* 0/6 */
-#define RES7 0xa /* 0/7 */
-#define RES8 0x6 /* 0/8 */
-#define RES18 0xe /* 1/8 */
-#define RES28 0x0 /* 2/8 */
-/* Special Rx Condition Interrupts */
-#define PAR_ERR 0x10 /* Parity error */
-#define Rx_OVR 0x20 /* Rx Overrun Error */
-#define CRC_ERR 0x40 /* CRC/Framing Error */
-#define END_FR 0x80 /* End of Frame (SDLC) */
-
-/* Read Register 2 (channel b only) - Interrupt vector */
-
-/* Read Register 3 (interrupt pending register) ch a only */
-#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
-#define CHBTxIP 0x2 /* Channel B Tx IP */
-#define CHBRxIP 0x4 /* Channel B Rx IP */
-#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
-#define CHATxIP 0x10 /* Channel A Tx IP */
-#define CHARxIP 0x20 /* Channel A Rx IP */
-
-/* Read Register 8 (receive data register) */
-
-/* Read Register 10 (misc status bits) */
-#define ONLOOP 2 /* On loop */
-#define LOOPSEND 0x10 /* Loop sending */
-#define CLK2MIS 0x40 /* Two clocks missing */
-#define CLK1MIS 0x80 /* One clock missing */
-
-/* Read Register 12 (lower byte of baud rate generator constant) */
-
-/* Read Register 13 (upper byte of baud rate generator constant) */
-
-/* Read Register 15 (value of WR 15) */
-
-
-/*
- * Interrupt handling functions for this SCC
- */
-
-struct z8530_channel;
-
-struct z8530_irqhandler
-{
- void (*rx)(struct z8530_channel *);
- void (*tx)(struct z8530_channel *);
- void (*status)(struct z8530_channel *);
-};
-
-/*
- * A channel of the Z8530
- */
-
-struct z8530_channel
-{
- struct z8530_irqhandler *irqs; /* IRQ handlers */
- /*
- * Synchronous
- */
- u16 count; /* Bytes received */
- u16 max; /* Most we can receive this frame */
- u16 mtu; /* MTU of the device */
- u8 *dptr; /* Pointer into rx buffer */
- struct sk_buff *skb; /* Buffer dptr points into */
- struct sk_buff *skb2; /* Pending buffer */
- u8 status; /* Current DCD */
- u8 dcdcheck; /* which bit to check for line */
- u8 sync; /* Set if in sync mode */
-
- u8 regs[32]; /* Register map for the chip */
- u8 pendregs[32]; /* Pending register values */
-
- struct sk_buff *tx_skb; /* Buffer being transmitted */
- struct sk_buff *tx_next_skb; /* Next transmit buffer */
- u8 *tx_ptr; /* Byte pointer into the buffer */
- u8 *tx_next_ptr; /* Next pointer to use */
- u8 *tx_dma_buf[2]; /* TX flip buffers for DMA */
- u8 tx_dma_used; /* Flip buffer usage toggler */
- u16 txcount; /* Count of bytes to transmit */
-
- void (*rx_function)(struct z8530_channel *, struct sk_buff *);
-
- /*
- * Sync DMA
- */
-
- u8 rxdma; /* DMA channels */
- u8 txdma;
- u8 rxdma_on; /* DMA active if flag set */
- u8 txdma_on;
- u8 dma_num; /* Buffer we are DMAing into */
- u8 dma_ready; /* Is the other buffer free */
- u8 dma_tx; /* TX is to use DMA */
- u8 *rx_buf[2]; /* The flip buffers */
-
- /*
- * System
- */
-
- struct z8530_dev *dev; /* Z85230 chip instance we are from */
- unsigned long ctrlio; /* I/O ports */
- unsigned long dataio;
-
- /*
- * For PC we encode this way.
- */
-#define Z8530_PORT_SLEEP 0x80000000
-#define Z8530_PORT_OF(x) ((x)&0xFFFF)
-
- u32 rx_overrun; /* Overruns - not done yet */
- u32 rx_crc_err;
-
- /*
- * Bound device pointers
- */
-
- void *private; /* For our owner */
- struct net_device *netdevice; /* Network layer device */
-
- spinlock_t *lock; /* Device lock */
-};
-
-/*
- * Each Z853x0 device.
- */
-
-struct z8530_dev
-{
- char *name; /* Device instance name */
- struct z8530_channel chanA; /* SCC channel A */
- struct z8530_channel chanB; /* SCC channel B */
- int type;
-#define Z8530 0 /* NMOS dinosaur */
-#define Z85C30 1 /* CMOS - better */
-#define Z85230 2 /* CMOS with real FIFO */
- int irq; /* Interrupt for the device */
- int active; /* Soft interrupt enable - the Mac doesn't
- always have a hard disable on its 8530s... */
- spinlock_t lock;
-};
-
-
-/*
- * Functions
- */
-
-extern u8 z8530_dead_port[];
-extern u8 z8530_hdlc_kilostream_85230[];
-extern u8 z8530_hdlc_kilostream[];
-irqreturn_t z8530_interrupt(int, void *);
-void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-int z8530_init(struct z8530_dev *);
-int z8530_shutdown(struct z8530_dev *);
-int z8530_sync_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-int z8530_channel_load(struct z8530_channel *, u8 *);
-netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
-void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
-
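Taken together, a hypothetical PIO-mode open path using these entry points and one of the standard tables above (dev_to_chan() and the error handling are illustrative):

static int example_open(struct net_device *dev)
{
	struct z8530_channel *ch = dev_to_chan(dev);	/* hypothetical */
	int err;

	err = z8530_sync_open(dev, ch);
	if (err)
		return err;

	err = z8530_channel_load(ch, z8530_hdlc_kilostream);
	if (err) {
		z8530_sync_close(dev, ch);
		return err;
	}
	netif_start_queue(dev);
	return 0;
}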
-
-/*
- * Standard interrupt vector sets
- */
-
-extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
-
-/*
- * Asynchronous Interfacing
- */
-
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-
-#define SERIAL_XMIT_SIZE 4096
-#define WAKEUP_CHARS 256
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define RS_EVENT_WRITE_WAKEUP 0
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */
-#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
-#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
-#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
-#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */
-#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-
-#endif /* !(_Z8530_H) */
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 9cabd342d156..7527a382327f 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1500,7 +1500,7 @@ static int ar5523_load_firmware(struct usb_device *dev)
return -ENOENT;
}
- txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+ txblock = kzalloc(sizeof(*txblock), GFP_KERNEL);
if (!txblock)
goto out;
@@ -1512,7 +1512,6 @@ static int ar5523_load_firmware(struct usb_device *dev)
if (!fwbuf)
goto out_free_rxblock;
- memset(txblock, 0, sizeof(struct ar5523_fwblock));
txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
txblock->total = cpu_to_be32(fw->size);
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index 5b62cf3b3c42..fad642f9ffd8 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -33,6 +33,7 @@ static struct dentry *wwan_hwsim_debugfs_devcreate;
static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
static LIST_HEAD(wwan_hwsim_devs);
static unsigned int wwan_hwsim_dev_idx;
+static struct workqueue_struct *wwan_wq;
struct wwan_hwsim_dev {
struct list_head list;
@@ -371,7 +372,7 @@ static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
* waiting this callback to finish in the debugfs_remove() call. So,
* use workqueue.
*/
- schedule_work(&port->del_work);
+ queue_work(wwan_wq, &port->del_work);
return count;
}
@@ -416,7 +417,7 @@ static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
* waiting this callback to finish in the debugfs_remove() call. So,
* use workqueue.
*/
- schedule_work(&dev->del_work);
+ queue_work(wwan_wq, &dev->del_work);
return count;
}
@@ -506,9 +507,15 @@ static int __init wwan_hwsim_init(void)
if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
return -EINVAL;
+ wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+ if (!wwan_wq)
+ return -ENOMEM;
+
wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class))
- return PTR_ERR(wwan_hwsim_class);
+ if (IS_ERR(wwan_hwsim_class)) {
+ err = PTR_ERR(wwan_hwsim_class);
+ goto err_wq_destroy;
+ }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -523,9 +530,13 @@ static int __init wwan_hwsim_init(void)
return 0;
err_clean_devs:
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
+ flush_workqueue(wwan_wq); /* Wait deletion works completion */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+err_wq_destroy:
+ destroy_workqueue(wwan_wq);
return err;
}
@@ -534,9 +545,10 @@ static void __exit wwan_hwsim_exit(void)
{
debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
- flush_scheduled_work(); /* Wait deletion works completion */
+ flush_workqueue(wwan_wq); /* Wait deletion works completion */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
+ destroy_workqueue(wwan_wq);
}
module_init(wwan_hwsim_init);