Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/dsa/b53/b53_common.c15
-rw-r--r--drivers/net/dsa/bcm_sf2.c84
-rw-r--r--drivers/net/dsa/bcm_sf2.h4
-rw-r--r--drivers/net/dsa/dsa_loop.c56
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c59
-rw-r--r--drivers/net/dsa/ocelot/Kconfig1
-rw-r--r--drivers/net/dsa/ocelot/felix.c28
-rw-r--r--drivers/net/dsa/ocelot/felix.h20
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c374
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c21
-rw-r--r--drivers/net/dsa/realtek-smi-core.h4
-rw-r--r--drivers/net/dsa/rtl8366.c277
-rw-r--r--drivers/net/ethernet/3com/typhoon.c61
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c17
-rw-r--r--drivers/net/ethernet/8390/lib8390.c14
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c6
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c77
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c48
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c57
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c48
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c12
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c40
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c9
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h8
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/Kconfig37
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/Makefile3
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile8
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c857
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h58
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile6
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h580
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c2327
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h222
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c426
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c1907
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c641
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.h4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c400
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c89
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.c318
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.h114
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h20
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c83
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c187
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c81
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c138
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c10
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c11
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c466
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c275
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c39
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c174
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c212
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c112
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c)110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c152
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h17
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c73
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c85
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c32
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c29
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c79
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h71
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c829
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h108
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c52
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c48
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c149
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig5
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c259
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c116
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c38
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c10
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/farch.c9
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/siena.c1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c71
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c191
-rw-r--r--drivers/net/ethernet/sun/sungem.c5
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c43
-rw-r--r--drivers/net/ethernet/ti/cpts.c42
-rw-r--r--drivers/net/gtp.c60
-rw-r--r--drivers/net/ipa/ipa.h1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c8
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/mdio/Kconfig241
-rw-r--r--drivers/net/mdio/Makefile27
-rw-r--r--drivers/net/mdio/mdio-aspeed.c (renamed from drivers/net/phy/mdio-aspeed.c)0
-rw-r--r--drivers/net/mdio/mdio-bcm-iproc.c (renamed from drivers/net/phy/mdio-bcm-iproc.c)0
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c (renamed from drivers/net/phy/mdio-bcm-unimac.c)0
-rw-r--r--drivers/net/mdio/mdio-bitbang.c (renamed from drivers/net/phy/mdio-bitbang.c)0
-rw-r--r--drivers/net/mdio/mdio-cavium.c (renamed from drivers/net/phy/mdio-cavium.c)0
-rw-r--r--drivers/net/mdio/mdio-cavium.h (renamed from drivers/net/phy/mdio-cavium.h)0
-rw-r--r--drivers/net/mdio/mdio-gpio.c (renamed from drivers/net/phy/mdio-gpio.c)0
-rw-r--r--drivers/net/mdio/mdio-hisi-femac.c (renamed from drivers/net/phy/mdio-hisi-femac.c)0
-rw-r--r--drivers/net/mdio/mdio-i2c.c (renamed from drivers/net/phy/mdio-i2c.c)3
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c (renamed from drivers/net/phy/mdio-ipq4019.c)0
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c (renamed from drivers/net/phy/mdio-ipq8064.c)0
-rw-r--r--drivers/net/mdio/mdio-moxart.c (renamed from drivers/net/phy/mdio-moxart.c)0
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c (renamed from drivers/net/phy/mdio-mscc-miim.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c (renamed from drivers/net/phy/mdio-mux-bcm-iproc.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c (renamed from drivers/net/phy/mdio-mux-gpio.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c (renamed from drivers/net/phy/mdio-mux-meson-g12a.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c (renamed from drivers/net/phy/mdio-mux-mmioreg.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c (renamed from drivers/net/phy/mdio-mux-multiplexer.c)0
-rw-r--r--drivers/net/mdio/mdio-mux.c (renamed from drivers/net/phy/mdio-mux.c)0
-rw-r--r--drivers/net/mdio/mdio-mvusb.c (renamed from drivers/net/phy/mdio-mvusb.c)0
-rw-r--r--drivers/net/mdio/mdio-octeon.c (renamed from drivers/net/phy/mdio-octeon.c)0
-rw-r--r--drivers/net/mdio/mdio-sun4i.c (renamed from drivers/net/phy/mdio-sun4i.c)0
-rw-r--r--drivers/net/mdio/mdio-thunder.c (renamed from drivers/net/phy/mdio-thunder.c)0
-rw-r--r--drivers/net/mdio/mdio-xgene.c (renamed from drivers/net/phy/mdio-xgene.c)2
-rw-r--r--drivers/net/pcs/Kconfig22
-rw-r--r--drivers/net/pcs/Makefile5
-rw-r--r--drivers/net/pcs/pcs-lynx.c312
-rw-r--r--drivers/net/pcs/pcs-xpcs.c (renamed from drivers/net/phy/mdio-xpcs.c)2
-rw-r--r--drivers/net/phy/Kconfig404
-rw-r--r--drivers/net/phy/Makefile37
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/dp83640.c70
-rw-r--r--drivers/net/phy/dp83822.c225
-rw-r--r--drivers/net/phy/dp83867.c45
-rw-r--r--drivers/net/phy/mdio-i2c.h16
-rw-r--r--drivers/net/phy/mdio-xgene.h130
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c2
-rw-r--r--drivers/net/phy/phylink.c48
-rw-r--r--drivers/net/phy/sfp.c2
-rw-r--r--drivers/net/phy/smsc.c67
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/smsc95xx.c475
-rw-r--r--drivers/net/veth.c6
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c1
-rw-r--r--drivers/net/wan/sbni.c101
-rw-r--r--drivers/net/wan/slic_ds26522.c2
-rw-r--r--drivers/net/wireguard/netlink.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c8
256 files changed, 13708 insertions, 3908 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1368d1d6a114..c3dbe64e628e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -473,6 +473,10 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/mdio/Kconfig"
+
+source "drivers/net/pcs/Kconfig"
+
source "drivers/net/plip/Kconfig"
source "drivers/net/ppp/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 94b60800887a..72e18d505d1a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += mdio/
+obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e731db900ee0..26fcff85d881 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -17,8 +17,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
@@ -767,8 +765,11 @@ static int b53_switch_reset(struct b53_device *dev)
usleep_range(1000, 2000);
} while (timeout-- > 0);
- if (timeout == 0)
+ if (timeout == 0) {
+ dev_err(dev->dev,
+ "Timeout waiting for SW_RST to clear!\n");
return -ETIMEDOUT;
+ }
}
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -2620,8 +2621,9 @@ int b53_switch_detect(struct b53_device *dev)
dev->chip_id = id32;
break;
default:
- pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
- id8, id32);
+ dev_err(dev->dev,
+ "unsupported switch detected (BCM53%02x/BCM%x)\n",
+ id8, id32);
return -ENODEV;
}
}
@@ -2651,7 +2653,8 @@ int b53_switch_register(struct b53_device *dev)
if (ret)
return ret;
- pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->name, dev->core_rev);
return dsa_register_switch(dev->ds);
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 5ebff986a1ac..7a74e4d73415 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -14,6 +14,7 @@
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
+#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
@@ -31,6 +32,49 @@
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
+/* Return the number of active ports, not counting the IMP (CPU) port */
+static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port, count = 0;
+
+ for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+ if (dsa_is_cpu_port(ds, port))
+ continue;
+ if (priv->port_sts[port].enabled)
+ count++;
+ }
+
+ return count;
+}
+
+static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned long new_rate;
+ unsigned int ports_active;
+ /* Frequency in Hz */
+ const unsigned long rate_table[] = {
+ 59220000,
+ 60820000,
+ 62500000,
+ 62500000,
+ };
+
+ ports_active = bcm_sf2_num_active_ports(ds);
+ if (ports_active == 0 || !priv->clk_mdiv)
+ return;
+
+ /* If we overflow our table, just use the recommended operational
+ * frequency
+ */
+ if (ports_active > ARRAY_SIZE(rate_table))
+ new_rate = 90000000;
+ else
+ new_rate = rate_table[ports_active - 1];
+ clk_set_rate(priv->clk_mdiv, new_rate);
+}
+
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -82,6 +126,8 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
reg &= ~(RX_DIS | TX_DIS);
core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
+
+ priv->port_sts[port].enabled = true;
}
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
@@ -167,6 +213,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
if (!dsa_is_user_port(ds, port))
return 0;
+ priv->port_sts[port].enabled = true;
+
+ bcm_sf2_recalc_clock(ds);
+
/* Clear the memory power down */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
@@ -260,6 +310,10 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg |= P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ priv->port_sts[port].enabled = false;
+
+ bcm_sf2_recalc_clock(ds);
}
@@ -750,6 +804,9 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
bcm_sf2_port_disable(ds, port);
}
+ if (!priv->wol_ports_mask)
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -758,6 +815,9 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret;
+ if (!priv->wol_ports_mask)
+ clk_prepare_enable(priv->clk);
+
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("%s: failed to software reset switch\n", __func__);
@@ -1189,10 +1249,24 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
base++;
}
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ clk_prepare_enable(priv->clk);
+
+ priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
+ if (IS_ERR(priv->clk_mdiv)) {
+ ret = PTR_ERR(priv->clk_mdiv);
+ goto out_clk;
+ }
+
+ clk_prepare_enable(priv->clk_mdiv);
+
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("unable to software reset switch: %d\n", ret);
- return ret;
+ goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
@@ -1200,7 +1274,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
- return ret;
+ goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, false);
@@ -1267,6 +1341,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
out_mdio:
bcm_sf2_mdio_unregister(priv);
+out_clk_mdiv:
+ clk_disable_unprepare(priv->clk_mdiv);
+out_clk:
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -1280,6 +1358,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
dsa_unregister_switch(priv->dev->ds);
bcm_sf2_cfp_exit(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
+ clk_disable_unprepare(priv->clk_mdiv);
+ clk_disable_unprepare(priv->clk);
if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
reset_control_assert(priv->rcdev);
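
Note: the bcm_sf2_recalc_clock() hunk above scales the switch MDIV clock with the number of active non-IMP ports: the first four counts index the rate table, anything beyond that falls back to the recommended 90 MHz operational frequency, and zero active ports leaves the clock untouched. The following is a minimal standalone C sketch of that selection logic, using the rates from the patch; the helper name and the userspace wrapper are illustrative only, not part of the patch.

#include <stdio.h>

/* Rates in Hz, copied from the bcm_sf2_recalc_clock() hunk above */
static const unsigned long sf2_rate_table[] = {
	59220000,
	60820000,
	62500000,
	62500000,
};

/* Illustrative helper: the rate passed to clk_set_rate() for N active ports */
static unsigned long sf2_pick_mdiv_rate(unsigned int ports_active)
{
	if (ports_active == 0)
		return 0; /* driver returns early, no clk_set_rate() call */
	if (ports_active > sizeof(sf2_rate_table) / sizeof(sf2_rate_table[0]))
		return 90000000; /* recommended operational frequency */
	return sf2_rate_table[ports_active - 1];
}

int main(void)
{
	for (unsigned int n = 0; n <= 5; n++)
		printf("%u active ports -> %lu Hz\n", n, sf2_pick_mdiv_rate(n));
	return 0;
}
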
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index de386dd96d66..1ed901a68536 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -45,6 +45,7 @@ struct bcm_sf2_hw_params {
struct bcm_sf2_port_status {
unsigned int link;
+ bool enabled;
};
struct bcm_sf2_cfp_priv {
@@ -93,6 +94,9 @@ struct bcm_sf2_priv {
/* Mask of ports enabled for Wake-on-LAN */
u32 wol_ports_mask;
+ struct clk *clk;
+ struct clk *clk_mdiv;
+
/* MoCA port location */
int moca_port;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index eb600b3dbf26..b588614d1e5e 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -28,6 +28,53 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
static struct phy_device *phydevs[PHY_MAX_ADDR];
+enum dsa_loop_devlink_resource_id {
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+};
+
+static u64 dsa_loop_devlink_vtu_get(void *priv)
+{
+ struct dsa_loop_priv *ps = priv;
+ unsigned int i, count = 0;
+ struct dsa_loop_vlan *vl;
+
+ for (i = 0; i < ARRAY_SIZE(ps->vlans); i++) {
+ vl = &ps->vlans[i];
+ if (vl->members)
+ count++;
+ }
+
+ return count;
+}
+
+static int dsa_loop_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct dsa_loop_priv *ps = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params, ARRAY_SIZE(ps->vlans),
+ ARRAY_SIZE(ps->vlans),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VTU", ARRAY_SIZE(ps->vlans),
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ dsa_loop_devlink_vtu_get, ps);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -48,7 +95,12 @@ static int dsa_loop_setup(struct dsa_switch *ds)
dev_dbg(ds->dev, "%s\n", __func__);
- return 0;
+ return dsa_loop_setup_devlink_resources(ds);
+}
+
+static void dsa_loop_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
}
static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
@@ -243,6 +295,7 @@ static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port)
static const struct dsa_switch_ops dsa_loop_driver = {
.get_tag_protocol = dsa_loop_get_protocol,
.setup = dsa_loop_setup,
+ .teardown = dsa_loop_teardown,
.get_strings = dsa_loop_get_strings,
.get_ethtool_stats = dsa_loop_get_ethtool_stats,
.get_sset_count = dsa_loop_get_sset_count,
@@ -290,6 +343,7 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
ds->dev = &mdiodev->dev;
ds->ops = &dsa_loop_driver;
ds->priv = ps;
+ ds->configure_vlan_while_not_filtering = true;
ps->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, ds);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f0dbc05e30a4..15b97a4f8d93 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3329,12 +3329,6 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return 0;
}
-static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
- { .compatible = "marvell,mv88e6xxx-mdio-external",
- .data = (void *)true },
- { },
-};
-
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
@@ -3354,7 +3348,6 @@ static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
struct device_node *np)
{
- const struct of_device_id *match;
struct device_node *child;
int err;
@@ -3372,8 +3365,8 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
* bus.
*/
for_each_available_child_of_node(np, child) {
- match = of_match_node(mv88e6xxx_mdio_external_match, child);
- if (match) {
+ if (of_device_is_compatible(
+ child, "marvell,mv88e6xxx-mdio-external")) {
err = mv88e6xxx_mdio_register(chip, child, true);
if (err) {
mv88e6xxx_mdios_unregister(chip);
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index a4c488b12e8f..094d17a1d037 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -211,49 +211,20 @@ int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
-EFAULT : 0;
}
-/* Get the start of the PTP header in this skb */
-static u8 *parse_ptp_header(struct sk_buff *skb, unsigned int type)
-{
- u8 *data = skb_mac_header(skb);
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return NULL;
- }
-
- /* Ensure that the entire header is present in this packet. */
- if (skb->len + ETH_HLEN < offset + 34)
- return NULL;
-
- return data + offset;
-}
-
/* Returns a pointer to the PTP header if the caller should time stamp,
* or NULL if the caller should not.
*/
-static u8 *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip, int port,
- struct sk_buff *skb, unsigned int type)
+static struct ptp_header *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip,
+ int port, struct sk_buff *skb,
+ unsigned int type)
{
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- u8 *hdr;
+ struct ptp_header *hdr;
if (!chip->info->ptp_support)
return NULL;
- hdr = parse_ptp_header(skb, type);
+ hdr = ptp_parse_header(skb, type);
if (!hdr)
return NULL;
@@ -275,12 +246,11 @@ static int mv88e6xxx_ts_valid(u16 status)
static int seq_match(struct sk_buff *skb, u16 ts_seqid)
{
unsigned int type = SKB_PTP_TYPE(skb);
- u8 *hdr = parse_ptp_header(skb, type);
- __be16 *seqid;
+ struct ptp_header *hdr;
- seqid = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
+ hdr = ptp_parse_header(skb, type);
- return ts_seqid == ntohs(*seqid);
+ return ts_seqid == ntohs(hdr->sequence_id);
}
static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
@@ -357,9 +327,9 @@ static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
&ps->rx_queue2);
}
-static int is_pdelay_resp(u8 *msgtype)
+static int is_pdelay_resp(const struct ptp_header *hdr)
{
- return (*msgtype & 0xf) == 3;
+ return (hdr->tsmt & 0xf) == 3;
}
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
@@ -367,7 +337,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_port_hwtstamp *ps;
struct mv88e6xxx_chip *chip;
- u8 *hdr;
+ struct ptp_header *hdr;
chip = ds->priv;
ps = &chip->port_hwtstamp[port];
@@ -503,8 +473,7 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- __be16 *seq_ptr;
- u8 *hdr;
+ struct ptp_header *hdr;
if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
return false;
@@ -513,15 +482,13 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
if (!hdr)
return false;
- seq_ptr = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
-
if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
&ps->state))
return false;
ps->tx_skb = clone;
ps->tx_tstamp_start = jiffies;
- ps->tx_seq_id = be16_to_cpup(seq_ptr);
+ ps->tx_seq_id = be16_to_cpu(hdr->sequence_id);
ptp_schedule_worker(chip->ptp_clock, 0);
return true;
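
Note: the hwtstamp conversion above drops the driver-local parse_ptp_header() in favour of the generic ptp_parse_header() helper and direct struct ptp_header field access: the low nibble of tsmt carries the PTP messageType, and sequence_id is a big-endian sequence number. Below is a short kernel-style sketch of that extraction, mirroring what seq_match() and is_pdelay_resp() now do; the helper name is illustrative, the header is assumed to be linux/ptp_classify.h (where the generic PTP parsing helpers live), and skb/type are assumed to come from the tagging path as in the driver.

#include <linux/errno.h>
#include <linux/ptp_classify.h>	/* ptp_parse_header(), struct ptp_header */
#include <linux/skbuff.h>

/* Illustrative helper: pull messageType and sequenceId out of a PTP frame */
static int example_ptp_fields(struct sk_buff *skb, unsigned int type,
			      u8 *msgtype, u16 *seqid)
{
	struct ptp_header *hdr = ptp_parse_header(skb, type);

	if (!hdr)
		return -EINVAL;	/* not a (complete) PTP header */

	*msgtype = hdr->tsmt & 0xf;		/* low nibble: messageType */
	*seqid = be16_to_cpu(hdr->sequence_id);	/* big-endian on the wire */
	return 0;
}
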
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 2d23ccef7d0e..e19718d4a7d4 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -8,6 +8,7 @@ config NET_DSA_MSCC_FELIX
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
+ select PCS_LYNX
help
This driver supports network switches from the Vitesse /
Microsemi / Microchip Ocelot family of switching cores that are
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 04bfa6e465ff..a1e1d3824110 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -19,6 +19,7 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
@@ -196,27 +197,16 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
felix->info->phylink_validate(ocelot, port, supported, state);
}
-static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
-
- if (felix->info->pcs_link_state)
- felix->info->pcs_link_state(ocelot, port, state);
-
- return 0;
-}
-
static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int link_an_mode,
const struct phylink_link_state *state)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_to_port(ds, port);
- if (felix->info->pcs_config)
- felix->info->pcs_config(ocelot, port, link_an_mode, state);
+ if (felix->pcs[port])
+ phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -306,10 +296,6 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
- if (felix->info->pcs_link_up)
- felix->info->pcs_link_up(ocelot, port, link_an_mode, interface,
- speed, duplex);
-
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
}
@@ -627,11 +613,6 @@ static int felix_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
ds->configure_vlan_while_not_filtering = true;
- /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
- * isn't instantiated for the Felix PF.
- * In-band AN may take a few ms to complete, so we need to poll.
- */
- ds->pcs_poll = true;
return 0;
}
@@ -787,7 +768,6 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_validate = felix_phylink_validate,
- .phylink_mac_link_state = felix_phylink_mac_pcs_get_state,
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 98f14621ac23..9bceb994b7db 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -28,15 +28,6 @@ struct felix_info {
int imdio_pci_bar;
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
- void (*pcs_config)(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state);
- void (*pcs_link_up)(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex);
- void (*pcs_link_state)(struct ocelot *ocelot, int port,
- struct phylink_link_state *state);
void (*phylink_validate)(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state);
@@ -59,20 +50,11 @@ struct felix {
const struct felix_info *info;
struct ocelot ocelot;
struct mii_bus *imdio;
- struct phy_device **pcs;
+ struct lynx_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
};
-void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
- struct phylink_link_state *state);
-void vsc9959_pcs_config(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state);
-void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex);
void vsc9959_mdio_bus_free(struct ocelot *ocelot);
#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 9b720c8ddfc3..126a53a811f7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -9,6 +9,7 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
+#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
@@ -766,347 +767,6 @@ static int vsc9959_reset(struct ocelot *ocelot)
return 0;
}
-/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
- * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
- * into the PCS, which is retrieved out-of-band over MDIO. This also has the
- * benefit of working with SGMII fixed-links, like downstream switches, where
- * both link partners attempt to operate as AN slaves and therefore AN never
- * completes. But it also has the disadvantage that some PHY chips don't pass
- * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
- * setting MLO_AN_INBAND is actually required for those.
- */
-static void vsc9959_pcs_config_sgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- int bmsr, bmcr;
-
- /* Some PHYs like VSC8234 don't like it when AN restarts on
- * their system side and they restart line side AN too, going
- * into an endless link up/down loop. Don't restart PCS AN if
- * link is up already.
- * We do check that AN is enabled just in case this is the 1st
- * call, PCS detects a carrier but AN is disabled from power on
- * or by boot loader.
- */
- bmcr = phy_read(pcs, MII_BMCR);
- if (bmcr < 0)
- return;
-
- bmsr = phy_read(pcs, MII_BMSR);
- if (bmsr < 0)
- return;
-
- if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
- return;
-
- /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
- * for the MAC PCS in order to acknowledge the AN.
- */
- phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
- ADVERTISE_LPACK);
-
- phy_write(pcs, ENETC_PCS_IF_MODE,
- ENETC_PCS_IF_MODE_SGMII_EN |
- ENETC_PCS_IF_MODE_USE_SGMII_AN);
-
- /* Adjust link timer for SGMII */
- phy_write(pcs, ENETC_PCS_LINK_TIMER1,
- ENETC_PCS_LINK_TIMER1_VAL);
- phy_write(pcs, ENETC_PCS_LINK_TIMER2,
- ENETC_PCS_LINK_TIMER2_VAL);
-
- phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-static void vsc9959_pcs_config_usxgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- /* Configure device ability for the USXGMII Replicator */
- phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
- MDIO_USXGMII_2500FULL |
- MDIO_USXGMII_LINK |
- ADVERTISE_SGMII |
- ADVERTISE_LPACK);
-}
-
-void vsc9959_pcs_config(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- /* The PCS does not implement the BMSR register fully, so capability
- * detection via genphy_read_abilities does not work. Since we can get
- * the PHY config word from the LPA register though, there is still
- * value in using the generic phy_resolve_aneg_linkmode function. So
- * populate the supported and advertising link modes manually here.
- */
- linkmode_set_bit_array(phy_basic_ports_array,
- ARRAY_SIZE(phy_basic_ports_array),
- pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
- if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
- pcs->interface == PHY_INTERFACE_MODE_USXGMII)
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
- pcs->supported);
- if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- pcs->supported);
- phy_advertise_supported(pcs);
-
- if (!phylink_autoneg_inband(link_an_mode))
- return;
-
- switch (pcs->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_config_sgmii(pcs, link_an_mode, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- vsc9959_pcs_config_usxgmii(pcs, link_an_mode, state);
- break;
- default:
- dev_err(ocelot->dev, "Unsupported link mode %s\n",
- phy_modes(pcs->interface));
- }
-}
-
-static void vsc9959_pcs_link_up_sgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- int speed, int duplex)
-{
- u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN;
-
- switch (speed) {
- case SPEED_1000:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_1000);
- break;
- case SPEED_100:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_100);
- break;
- case SPEED_10:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_10);
- break;
- default:
- phydev_err(pcs, "Invalid PCS speed %d\n", speed);
- return;
- }
-
- if (duplex == DUPLEX_HALF)
- if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
-
- phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
- phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
- * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
- * auto-negotiation of any link parameters. Electrically it is compatible with
- * a single lane of XAUI.
- * The hardware reference manual wants to call this mode SGMII, but it isn't
- * really, since the fundamental features of SGMII:
- * - Downgrading the link speed by duplicating symbols
- * - Auto-negotiation
- * are not there.
- * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers
- * because the clock frequency is actually given by a PLL configured in the
- * Reset Configuration Word (RCW).
- * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
- * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
- * lower link speed on line side, the system-side interface remains fixed at
- * 2500 Mbps and we do rate adaptation through pause frames.
- */
-static void vsc9959_pcs_link_up_2500basex(struct phy_device *pcs,
- unsigned int link_an_mode,
- int speed, int duplex)
-{
- u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) |
- ENETC_PCS_IF_MODE_SGMII_EN;
-
- if (duplex == DUPLEX_HALF)
- if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
-
- phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
- phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- if (phylink_autoneg_inband(link_an_mode))
- return;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_link_up_sgmii(pcs, link_an_mode, speed, duplex);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- vsc9959_pcs_link_up_2500basex(pcs, link_an_mode, speed,
- duplex);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
- break;
- default:
- dev_err(ocelot->dev, "Unsupported link mode %s\n",
- phy_modes(pcs->interface));
- }
-}
-
-static void vsc9959_pcs_link_state_resolve(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- state->an_complete = pcs->autoneg_complete;
- state->an_enabled = pcs->autoneg;
- state->link = pcs->link;
- state->duplex = pcs->duplex;
- state->speed = pcs->speed;
- /* SGMII AN does not negotiate flow control, but that's ok,
- * since phylink already knows that, and does:
- * link_state.pause |= pl->phy_state.pause;
- */
- state->pause = MLO_PAUSE_NONE;
-
- phydev_dbg(pcs,
- "mode=%s/%s/%s adv=%*pb lpa=%*pb link=%u an_enabled=%u an_complete=%u\n",
- phy_modes(pcs->interface),
- phy_speed_to_str(pcs->speed),
- phy_duplex_to_str(pcs->duplex),
- __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->lp_advertising,
- pcs->link, pcs->autoneg, pcs->autoneg_complete);
-}
-
-static void vsc9959_pcs_link_state_sgmii(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int err;
-
- err = genphy_update_link(pcs);
- if (err < 0)
- return;
-
- if (pcs->autoneg_complete) {
- u16 lpa = phy_read(pcs, MII_LPA);
-
- mii_lpa_to_linkmode_lpa_sgmii(pcs->lp_advertising, lpa);
-
- phy_resolve_aneg_linkmode(pcs);
- }
-}
-
-static void vsc9959_pcs_link_state_2500basex(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int err;
-
- err = genphy_update_link(pcs);
- if (err < 0)
- return;
-
- pcs->speed = SPEED_2500;
- pcs->asym_pause = true;
- pcs->pause = true;
-}
-
-static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int status, lpa;
-
- status = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_BMSR);
- if (status < 0)
- return;
-
- pcs->autoneg = true;
- pcs->autoneg_complete = !!(status & BMSR_ANEGCOMPLETE);
- pcs->link = !!(status & BMSR_LSTATUS);
-
- if (!pcs->link || !pcs->autoneg_complete)
- return;
-
- lpa = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_LPA);
- if (lpa < 0)
- return;
-
- switch (lpa & MDIO_USXGMII_SPD_MASK) {
- case MDIO_USXGMII_10:
- pcs->speed = SPEED_10;
- break;
- case MDIO_USXGMII_100:
- pcs->speed = SPEED_100;
- break;
- case MDIO_USXGMII_1000:
- pcs->speed = SPEED_1000;
- break;
- case MDIO_USXGMII_2500:
- pcs->speed = SPEED_2500;
- break;
- default:
- break;
- }
-
- if (lpa & MDIO_USXGMII_FULL_DUPLEX)
- pcs->duplex = DUPLEX_FULL;
- else
- pcs->duplex = DUPLEX_HALF;
-}
-
-void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
- struct phylink_link_state *state)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- pcs->speed = SPEED_UNKNOWN;
- pcs->duplex = DUPLEX_UNKNOWN;
- pcs->pause = 0;
- pcs->asym_pause = 0;
-
- switch (pcs->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_link_state_sgmii(pcs, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- vsc9959_pcs_link_state_2500basex(pcs, state);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- vsc9959_pcs_link_state_usxgmii(pcs, state);
- break;
- default:
- return;
- }
-
- vsc9959_pcs_link_state_resolve(pcs, state);
-}
-
static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1195,7 +855,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct phy_device *),
+ sizeof(struct lynx_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
@@ -1246,18 +906,26 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct phy_device *pcs;
- bool is_c45 = false;
+ struct mdio_device *pcs;
+ struct lynx_pcs *lynx;
+
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
- if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_USXGMII)
- is_c45 = true;
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
+ continue;
- pcs = get_phy_device(felix->imdio, port, is_c45);
+ pcs = mdio_device_create(felix->imdio, port);
if (IS_ERR(pcs))
continue;
- pcs->interface = ocelot_port->phy_mode;
- felix->pcs[port] = pcs;
+ lynx = lynx_pcs_create(pcs);
+ if (!lynx) {
+ mdio_device_free(pcs);
+ continue;
+ }
+
+ felix->pcs[port] = lynx;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
@@ -1271,12 +939,13 @@ void vsc9959_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct phy_device *pcs = felix->pcs[port];
+ struct lynx_pcs *pcs = felix->pcs[port];
if (!pcs)
continue;
- put_device(&pcs->mdio.dev);
+ mdio_device_free(pcs->mdio);
+ lynx_pcs_destroy(pcs);
}
mdiobus_unregister(felix->imdio);
}
@@ -1499,9 +1168,6 @@ static const struct felix_info felix_info_vsc9959 = {
.imdio_pci_bar = 0,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
- .pcs_config = vsc9959_pcs_config,
- .pcs_link_up = vsc9959_pcs_link_up,
- .pcs_link_state = vsc9959_pcs_link_state,
.phylink_validate = vsc9959_phylink_validate,
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
.port_setup_tc = vsc9959_port_setup_tc,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 625b1891d955..2d6a5f5758f8 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -7,6 +7,7 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/of_platform.h>
+#include <linux/pcs-lynx.h>
#include <linux/packing.h>
#include <linux/iopoll.h>
#include "felix.h"
@@ -960,18 +961,27 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct phy_device *pcs;
int addr = port + 4;
+ struct mdio_device *pcs;
+ struct lynx_pcs *lynx;
+
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = get_phy_device(felix->imdio, addr, false);
+ pcs = mdio_device_create(felix->imdio, addr);
if (IS_ERR(pcs))
continue;
- pcs->interface = ocelot_port->phy_mode;
- felix->pcs[port] = pcs;
+ lynx = lynx_pcs_create(pcs);
+ if (!lynx) {
+ mdio_device_free(pcs);
+ continue;
+ }
+
+ felix->pcs[port] = lynx;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@@ -1013,9 +1023,6 @@ static const struct felix_info seville_info_vsc9953 = {
.num_ports = 10,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
- .pcs_config = vsc9959_pcs_config,
- .pcs_link_up = vsc9959_pcs_link_up,
- .pcs_link_state = vsc9959_pcs_link_state,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
.xmit_template_populate = vsc9953_xmit_template_populate,
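
Note: both mdio_bus_alloc() conversions above (felix_vsc9959.c and seville_vsc9953.c) attach a Lynx PCS to each used port with the same three steps: create a plain mdio_device on the internal MDIO bus, wrap it with lynx_pcs_create(), and free the mdio_device again if wrapping fails. The condensed sketch below shows that sequence with error handling reduced to the essentials; the helper name is illustrative.

#include <linux/err.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/pcs-lynx.h>

/* Illustrative per-port PCS setup as used by the felix/seville drivers */
static struct lynx_pcs *example_pcs_setup(struct mii_bus *imdio, int addr)
{
	struct mdio_device *mdio;
	struct lynx_pcs *lynx;

	mdio = mdio_device_create(imdio, addr);	/* MDIO device, not a PHY */
	if (IS_ERR(mdio))
		return NULL;

	lynx = lynx_pcs_create(mdio);	/* wrap it as a phylink PCS */
	if (!lynx) {
		mdio_device_free(mdio);
		return NULL;
	}

	/* phylink_mac_config() later does phylink_set_pcs(dp->pl, &lynx->pcs) */
	return lynx;
}

Teardown mirrors this in vsc9959_mdio_bus_free(): mdio_device_free(pcs->mdio) followed by lynx_pcs_destroy(pcs).
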
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index 9a63b51e1d82..6f2dab7e33d6 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
const char *name;
};
+/**
+ * struct rtl8366_vlan_mc - Virtual LAN member configuration
+ */
struct rtl8366_vlan_mc {
u16 vid;
u16 untag;
@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid);
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 8f40fbf70a82..2dcde7a91721 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
}
EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+/**
+ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
+ * @smi: the Realtek SMI device instance
+ * @vid: the VLAN ID to look up or allocate
+ * @vlanmc: filled in with a valid member config if the lookup or
+ * allocation succeeds
+ * @return: index of the existing or newly created member config, or a
+ * negative error number
+ */
+static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Try to find an existing member config entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vid == vlanmc->vid)
+ return i;
+ }
+
+ /* We have no MC entry for this VID, try to find an empty one */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vlanmc->vid == 0 && vlanmc->member == 0) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret) {
+ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ /* MC table is full, try to find an unused entry and replace it */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ int used;
+
+ ret = rtl8366_mc_is_used(smi, i, &used);
+ if (ret)
+ return ret;
+
+ if (!used) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ return -ENOSPC;
+}
+
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid)
{
+ struct rtl8366_vlan_mc vlanmc;
struct rtl8366_vlan_4k vlan4k;
+ int mc;
int ret;
- int i;
+
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
dev_dbg(smi->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
- /* Try to find an existing MC entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- struct rtl8366_vlan_mc vlanmc;
-
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vid == vlanmc.vid) {
- /* update the MC entry */
- vlanmc.member |= member;
- vlanmc.untag |= untag;
- vlanmc.fid = fid;
-
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
- dev_dbg(smi->dev,
- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
- vid, vlanmc.member, vlanmc.untag);
+ /* Update the MC entry */
+ vlanmc.member |= member;
+ vlanmc.untag |= untag;
+ vlanmc.fid = fid;
- break;
- }
- }
+ /* Commit updates to the MC entry */
+ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ if (ret)
+ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ mc, vid);
+ else
+ dev_dbg(smi->dev,
+ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlanmc.member, vlanmc.untag);
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
-{
- struct rtl8366_vlan_mc vlanmc;
- int ret;
- int index;
-
- ret = smi->ops->get_mc_index(smi, port, &index);
- if (ret)
- return ret;
-
- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
- if (ret)
- return ret;
-
- *val = vlanmc.vid;
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
-
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
- struct rtl8366_vlan_4k vlan4k;
+ int mc;
int ret;
- int i;
-
- /* Try to find an existing MC entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vid == vlanmc.vid) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
- }
-
- /* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vlanmc.vid == 0 && vlanmc.member == 0) {
- /* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
- return ret;
-
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
- }
-
- /* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- int used;
-
- ret = rtl8366_mc_is_used(smi, i, &used);
- if (ret)
- return ret;
-
- if (!used) {
- /* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
- return ret;
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
+ ret = smi->ops->set_mc_index(smi, port, mc);
+ if (ret) {
+ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ mc, port);
+ return ret;
}
- dev_err(smi->dev,
- "all VLAN member configurations are in use\n");
+ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ port, vid, mc);
- return -ENOSPC;
+ return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (!smi->ops->is_vlan_valid(smi, vid))
return;
- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid_begin,
port,
untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID");
@@ -398,34 +425,26 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
dev_err(smi->dev, "port is DSA or CPU port\n");
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int pvid_val = 0;
-
- dev_info(smi->dev, "add VLAN %04x\n", vid);
member |= BIT(port);
if (untagged)
untag |= BIT(port);
- /* To ensure that we have a valid MC entry for this VLAN,
- * initialize the port VLAN ID here.
- */
- ret = rtl8366_get_pvid(smi, port, &pvid_val);
- if (ret < 0) {
- dev_err(smi->dev, "could not lookup PVID for port %d\n",
- port);
- return;
- }
- if (pvid_val == 0) {
- ret = rtl8366_set_pvid(smi, port, vid);
- if (ret < 0)
- return;
- }
-
ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
if (ret)
dev_err(smi->dev,
"failed to set up VLAN %04x",
vid);
+
+ ret = rtl8366_set_pvid(smi, port, vid);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set PVID on port %d to VLAN %04x",
+ port, vid);
+
+ if (!ret)
+ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
+ vid, port);
}
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
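
Note on the rtl8366_set_pvid() rework above: the three open-coded scans of the member-config (MC) table are replaced by a single call to rtl8366_obtain_mc(), whose body is introduced by this patch in an earlier hunk that is not quoted here. The sketch below is illustrative only -- it is not code from the patch -- and merely restates, under that assumption, what such a find-or-allocate helper has to do, mirroring the first two passes of the removed code (the third pass, evicting an unused entry, is omitted):

/* Illustrative sketch, not a hunk from this series: find an MC index that
 * already carries @vid, or seed an empty one from the 4K VLAN table, and
 * return its index so callers such as rtl8366_set_pvid() can point the
 * port's MC index at it.
 */
static int rtl8366_obtain_mc_sketch(struct realtek_smi *smi, int vid,
				    struct rtl8366_vlan_mc *vlanmc)
{
	struct rtl8366_vlan_4k vlan4k;
	int ret, i;

	/* Pass 1: an MC entry already set up for this VID */
	for (i = 0; i < smi->num_vlan_mc; i++) {
		ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
		if (ret)
			return ret;
		if (vlanmc->vid == vid)
			return i;
	}

	/* Pass 2: an empty entry, initialized from the 4K table */
	for (i = 0; i < smi->num_vlan_mc; i++) {
		ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
		if (ret)
			return ret;
		if (vlanmc->vid == 0 && vlanmc->member == 0) {
			ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
			if (ret)
				return ret;
			vlanmc->vid = vid;
			vlanmc->member = vlan4k.member;
			vlanmc->untag = vlan4k.untag;
			vlanmc->fid = vlan4k.fid;
			ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
			return ret ? ret : i;
		}
	}

	/* Pass 3 (reclaiming an unused entry) omitted for brevity */
	return -ENOSPC;
}
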
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index d3b30bacc94e..f11474cac59f 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -789,8 +789,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* it with zeros to ETH_ZLEN for us.
*/
if (skb_shinfo(skb)->nr_frags == 0) {
- skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(skb->len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -800,8 +800,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
int i, len;
len = skb_headlen(skb);
- skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -818,8 +818,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
len = skb_frag_size(frag);
frag_addr = skb_frag_address(frag);
- skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, frag_addr,
+ len, DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -1349,12 +1349,12 @@ typhoon_download_firmware(struct typhoon *tp)
image_data = typhoon_fw->data;
fHdr = (struct typhoon_file_header *) image_data;
- /* Cannot just map the firmware image using pci_map_single() as
+ /* Cannot just map the firmware image using dma_map_single() as
* the firmware is vmalloc()'d and may not be physically contiguous,
- * so we allocate some consistent memory to copy the sections into.
+ * so we allocate some coherent memory to copy the sections into.
*/
err = -ENOMEM;
- dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
+ dpage = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dpage_dma, GFP_ATOMIC);
if (!dpage) {
netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
@@ -1460,7 +1460,7 @@ err_out_irq:
iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
- pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dpage, dpage_dma);
err_out:
return err;
@@ -1527,8 +1527,8 @@ typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
*/
skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
dma_len = le16_to_cpu(tx->len);
- pci_unmap_single(tp->pdev, skb_dma, dma_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev, skb_dma, dma_len,
+ DMA_TO_DEVICE);
}
tx->flags = 0;
@@ -1609,8 +1609,8 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
skb_reserve(skb, 2);
#endif
- dma_addr = pci_map_single(tp->pdev, skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_addr = dma_map_single(&tp->pdev->dev, skb->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
/* Since no card does 64 bit DAC, the high bits will never
* change from zero.
@@ -1665,20 +1665,19 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
if (pkt_len < rx_copybreak &&
(new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
skb_reserve(new_skb, 2);
- pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
- PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
- pci_dma_sync_single_for_device(tp->pdev, dma_addr,
- PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
skb_put(new_skb, pkt_len);
typhoon_recycle_rx_skb(tp, idx);
} else {
new_skb = skb;
skb_put(new_skb, pkt_len);
- pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
typhoon_alloc_rx_skb(tp, idx);
}
new_skb->protocol = eth_type_trans(new_skb, tp->dev);
@@ -1792,8 +1791,8 @@ typhoon_free_rx_rings(struct typhoon *tp)
for (i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
if (rxb->skb) {
- pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, rxb->dma_addr,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb(rxb->skb);
rxb->skb = NULL;
}
@@ -2306,7 +2305,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_disable;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err < 0) {
err_msg = "No usable DMA configuration";
goto error_out_mwi;
@@ -2355,8 +2354,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* allocate pci dma space for rx and tx descriptor rings
*/
- shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
- &shared_dma);
+ shared = dma_alloc_coherent(&pdev->dev, sizeof(struct typhoon_shared),
+ &shared_dma, GFP_KERNEL);
if (!shared) {
err_msg = "could not allocate DMA memory";
err = -ENOMEM;
@@ -2509,8 +2508,8 @@ error_out_reset:
typhoon_reset(ioaddr, NoWait);
error_out_dma:
- pci_free_consistent(pdev, sizeof(struct typhoon_shared),
- shared, shared_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared), shared,
+ shared_dma);
error_out_remap:
pci_iounmap(pdev, ioaddr);
error_out_regions:
@@ -2537,8 +2536,8 @@ typhoon_remove_one(struct pci_dev *pdev)
pci_restore_state(pdev);
typhoon_reset(tp->ioaddr, NoWait);
pci_iounmap(pdev, tp->ioaddr);
- pci_free_consistent(pdev, sizeof(struct typhoon_shared),
- tp->shared, tp->shared_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared),
+ tp->shared, tp->shared_dma);
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
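
The typhoon changes above, and the starfire, atl1c/atl1e/atl1/atl2 and Chelsio cxgb conversions that follow, are the same mechanical switch from the deprecated pci_* DMA wrappers to the generic DMA API: the struct pci_dev argument becomes &pdev->dev, PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE, pci_set_dma_mask()/pci_set_consistent_dma_mask() become dma_set_mask()/dma_set_coherent_mask(), and the consistent-memory helpers gain an explicit gfp_t (the old pci_alloc_consistent() always implied GFP_ATOMIC, which is why some call sites above keep GFP_ATOMIC while probe paths use GFP_KERNEL). A minimal sketch of the pattern; pdev, buf, len, size and handle are placeholder names rather than fields from any one of these drivers:

	/* Sketch of the conversion pattern.  Old form:
	 *	m = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	 *	v = pci_alloc_consistent(pdev, size, &handle);
	 * New form, with the matching error check and teardown:
	 */
	m = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, m))	/* was pci_dma_mapping_error() */
		return -ENOMEM;
	v = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);

	/* ... hardware uses the buffers ... */

	dma_free_coherent(&pdev->dev, size, v, handle);
	dma_unmap_single(&pdev->dev, m, len, DMA_TO_DEVICE);
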
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index a00b36f91d9f..2488bfdb9133 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -657,8 +657,10 @@ static void block_input(struct net_device *dev, int count,
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
insw(nic_base + AXNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT);
+ xfer_count++;
+ }
}
@@ -1270,10 +1272,12 @@ static void ei_tx_intr(struct net_device *dev)
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
netif_trans_update(dev);
- ei_local->tx2 = -1,
+ ei_local->tx2 = -1;
ei_local->lasttx = 2;
+ } else {
+ ei_local->lasttx = 20;
+ ei_local->txing = 0;
}
- else ei_local->lasttx = 20, ei_local->txing = 0;
}
else if (ei_local->tx2 < 0)
{
@@ -1289,9 +1293,10 @@ static void ei_tx_intr(struct net_device *dev)
netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
+ } else {
+ ei_local->lasttx = 10;
+ ei_local->txing = 0;
}
- else
- ei_local->lasttx = 10, ei_local->txing = 0;
}
// else
// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index babc92e2692e..1f48d7f6365c 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -597,10 +597,12 @@ static void ei_tx_intr(struct net_device *dev)
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
netif_trans_update(dev);
- ei_local->tx2 = -1,
+ ei_local->tx2 = -1;
ei_local->lasttx = 2;
- } else
- ei_local->lasttx = 20, ei_local->txing = 0;
+ } else {
+ ei_local->lasttx = 20;
+ ei_local->txing = 0;
+ }
} else if (ei_local->tx2 < 0) {
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
@@ -612,8 +614,10 @@ static void ei_tx_intr(struct net_device *dev)
netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
- } else
- ei_local->lasttx = 10, ei_local->txing = 0;
+ } else {
+ ei_local->lasttx = 10;
+ ei_local->txing = 0;
+ }
} /* else
netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
ei_local->lasttx);
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 164c3ed550bf..9d3b1e0e425c 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1178,8 +1178,10 @@ static void dma_block_input(struct net_device *dev, int count,
outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
insw(nic_base + PCNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT);
+ xfer_count++;
+ }
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index ba0055bb1614..555299737b51 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -886,7 +886,9 @@ static int netdev_open(struct net_device *dev)
tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
- np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
+ np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
+ np->queue_mem_size,
+ &np->queue_mem_dma, GFP_ATOMIC);
if (np->queue_mem == NULL) {
free_irq(irq, dev);
return -ENOMEM;
@@ -1136,9 +1138,11 @@ static void init_ring(struct net_device *dev)
np->rx_info[i].skb = skb;
if (skb == NULL)
break;
- np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_info[i].mapping)) {
+ np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
dev_kfree_skb(skb);
np->rx_info[i].skb = NULL;
break;
@@ -1217,18 +1221,19 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ skb_first_frag_len(skb),
+ DMA_TO_DEVICE);
} else {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
status |= skb_frag_size(this_frag);
np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev,
+ dma_map_single(&np->pci_dev->dev,
skb_frag_address(this_frag),
skb_frag_size(this_frag),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- if (pci_dma_mapping_error(np->pci_dev,
- np->tx_info[entry].mapping)) {
+ if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
dev->stats.tx_dropped++;
goto err_out;
}
@@ -1271,18 +1276,16 @@ err_out:
entry = prev_tx % TX_RING_SIZE;
np->tx_info[entry].skb = NULL;
if (i > 0) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
- skb_first_frag_len(skb),
- PCI_DMA_TODEVICE);
+ skb_first_frag_len(skb), DMA_TO_DEVICE);
np->tx_info[entry].mapping = 0;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
for (j = 1; j < i; j++) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
- skb_frag_size(
- &skb_shinfo(skb)->frags[j-1]),
- PCI_DMA_TODEVICE);
+ skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
+ DMA_TO_DEVICE);
entry++;
}
}
@@ -1356,20 +1359,20 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
struct sk_buff *skb = np->tx_info[entry].skb;
np->tx_info[entry].skb = NULL;
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_first_frag_len(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
np->tx_info[entry].mapping = 0;
np->dirty_tx += np->tx_info[entry].used_slots;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
{
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
np->dirty_tx++;
entry++;
}
@@ -1461,16 +1464,18 @@ static int __netdev_rx(struct net_device *dev, int *quota)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(np->pci_dev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ pkt_len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ pkt_len, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
- pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
skb = np->rx_info[entry].skb;
skb_put(skb, pkt_len);
np->rx_info[entry].skb = NULL;
@@ -1588,9 +1593,9 @@ static void refill_rx_ring(struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
- pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_info[entry].mapping)) {
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
dev_kfree_skb(skb);
np->rx_info[entry].skb = NULL;
break;
@@ -1963,7 +1968,9 @@ static int netdev_close(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
if (np->rx_info[i].skb != NULL) {
- pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_info[i].mapping,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_info[i].skb);
}
np->rx_info[i].skb = NULL;
@@ -1973,9 +1980,8 @@ static int netdev_close(struct net_device *dev)
struct sk_buff *skb = np->tx_info[i].skb;
if (skb == NULL)
continue;
- pci_unmap_single(np->pci_dev,
- np->tx_info[i].mapping,
- skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
+ skb_first_frag_len(skb), DMA_TO_DEVICE);
np->tx_info[i].mapping = 0;
dev_kfree_skb(skb);
np->tx_info[i].skb = NULL;
@@ -2018,7 +2024,8 @@ static void starfire_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (np->queue_mem)
- pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
+ dma_free_coherent(&pdev->dev, np->queue_mem_size,
+ np->queue_mem, np->queue_mem_dma);
/* XXX: add wakeup code -- requires firmware for MagicPacket */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index d35a338120cf..643f5e646740 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/prefetch.h>
@@ -26,7 +27,6 @@
#include "xgene_enet_hw.h"
#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
-#include "../../../phy/mdio-xgene.h"
#define ETHER_MIN_PACKET 64
#define ETHER_STD_PACKET 1518
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index decab9a8e4a8..c7288e1fa3a2 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -826,16 +826,16 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
return;
if (buffer_info->dma) {
if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
- pci_driection = PCI_DMA_FROMDEVICE;
+ pci_driection = DMA_FROM_DEVICE;
else
- pci_driection = PCI_DMA_TODEVICE;
+ pci_driection = DMA_TO_DEVICE;
if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
- pci_unmap_single(pdev, buffer_info->dma,
- buffer_info->length, pci_driection);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, pci_driection);
else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, pci_driection);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, pci_driection);
}
if (buffer_info->skb)
dev_consume_skb_any(buffer_info->skb);
@@ -933,9 +933,8 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- pci_free_consistent(pdev, adapter->ring_header.size,
- adapter->ring_header.desc,
- adapter->ring_header.dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ adapter->ring_header.desc, adapter->ring_header.dma);
adapter->ring_header.desc = NULL;
/* Note: just free tdp_ring.buffer_info,
@@ -1717,10 +1716,9 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- mapping = pci_map_single(pdev, vir_addr,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+ mapping = dma_map_single(&pdev->dev, vir_addr,
+ buffer_info->length, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
buffer_info->length = 0;
@@ -1831,8 +1829,8 @@ rrs_checked:
rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
RRS_RX_RFD_INDEX_MASK;
buffer_info = &rfd_ring->buffer_info[rfd_index];
- pci_unmap_single(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
skb = buffer_info->skb;
} else {
/* TODO */
@@ -2106,10 +2104,10 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = map_len;
- buffer_info->dma = pci_map_single(adapter->pdev,
- skb->data, hdr_len, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)))
+ buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, hdr_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
@@ -2131,10 +2129,10 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = buf_len - mapped_len;
buffer_info->dma =
- pci_map_single(adapter->pdev, skb->data + mapped_len,
- buffer_info->length, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)))
+ dma_map_single(&adapter->pdev->dev,
+ skb->data + mapped_len,
+ buffer_info->length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
@@ -2542,8 +2540,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
+ (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 223ef846123e..fb78f6c31708 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -654,11 +654,13 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
tx_buffer = &tx_ring->tx_buffer[index];
if (tx_buffer->dma) {
if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
- pci_unmap_single(pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
- pci_unmap_page(pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
tx_buffer->dma = 0;
}
}
@@ -774,8 +776,8 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
atl1e_clean_rx_ring(adapter);
if (adapter->ring_vir_addr) {
- pci_free_consistent(pdev, adapter->ring_size,
- adapter->ring_vir_addr, adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
adapter->ring_vir_addr = NULL;
}
@@ -810,11 +812,12 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
/* real ring DMA buffer */
size = adapter->ring_size;
- adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size,
- &adapter->ring_dma);
+ adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev,
+ adapter->ring_size,
+ &adapter->ring_dma, GFP_KERNEL);
if (adapter->ring_vir_addr == NULL) {
netdev_err(adapter->netdev,
- "pci_alloc_consistent failed, size = D%d\n", size);
+ "dma_alloc_coherent failed, size = D%d\n", size);
return -ENOMEM;
}
@@ -870,8 +873,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
return 0;
failed:
if (adapter->ring_vir_addr != NULL) {
- pci_free_consistent(pdev, adapter->ring_size,
- adapter->ring_vir_addr, adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
adapter->ring_vir_addr = NULL;
}
return err;
@@ -1233,11 +1236,15 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
tx_buffer = &tx_ring->tx_buffer[next_to_clean];
if (tx_buffer->dma) {
if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
- pci_unmap_single(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
- pci_unmap_page(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
tx_buffer->dma = 0;
}
@@ -1710,8 +1717,9 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
tx_buffer->length = map_len;
- tx_buffer->dma = pci_map_single(adapter->pdev,
- skb->data, hdr_len, PCI_DMA_TODEVICE);
+ tx_buffer->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, hdr_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
return -ENOSPC;
@@ -1739,8 +1747,9 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
MAX_TX_BUF_LEN : (buf_len - mapped_len);
tx_buffer->dma =
- pci_map_single(adapter->pdev, skb->data + mapped_len,
- map_len, PCI_DMA_TODEVICE);
+ dma_map_single(&adapter->pdev->dev,
+ skb->data + mapped_len, map_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
/* We need to unwind the mappings we've done */
@@ -1749,8 +1758,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
while (adapter->tx_ring.next_to_use != ring_end) {
tpd = atl1e_get_tpd(adapter);
tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
- pci_unmap_single(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
}
/* Reset the tx rings next pointer */
adapter->tx_ring.next_to_use = ring_start;
@@ -2300,8 +2311,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
+ (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b35fcfcd692d..60f8aa79deb2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1050,11 +1050,11 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
+ sizeof(struct stats_msg_block)
+ 40;
- ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
- &ring_header->dma);
+ ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
if (netif_msg_drv(adapter))
- dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
+ dev_err(&pdev->dev, "dma_alloc_coherent failed\n");
goto err_nomem;
}
@@ -1136,8 +1136,8 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
for (i = 0; i < rfd_ring->count; i++) {
buffer_info = &rfd_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -1175,8 +1175,8 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
for (i = 0; i < tpd_ring->count; i++) {
buffer_info = &tpd_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
}
@@ -1217,8 +1217,8 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
atl1_clean_rx_ring(adapter);
kfree(tpd_ring->buffer_info);
- pci_free_consistent(pdev, ring_header->size, ring_header->desc,
- ring_header->dma);
+ dma_free_coherent(&pdev->dev, ring_header->size, ring_header->desc,
+ ring_header->dma);
tpd_ring->buffer_info = NULL;
tpd_ring->desc = NULL;
@@ -1866,9 +1866,9 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(pdev, page, offset,
+ buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -1992,8 +1992,8 @@ rrd_ok:
}
/* Good Receive */
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb = buffer_info->skb;
length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
@@ -2062,8 +2062,8 @@ static int atl1_intr_tx(struct atl1_adapter *adapter)
while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -2210,9 +2210,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(adapter->pdev, page,
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2235,9 +2235,10 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
offset = offset_in_page(skb->data +
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
- buffer_info->dma = pci_map_page(adapter->pdev,
- page, offset, buffer_info->length,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ page, offset,
+ buffer_info->length,
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2247,8 +2248,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->length = buf_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(adapter->pdev, page,
- offset, buf_len, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
+ offset, buf_len,
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2922,7 +2924,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_dma;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index c915852b8892..e2526c0fb7cf 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -281,8 +281,8 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
adapter->txs_ring_size * 4 + 7 + /* dword align */
adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */
- adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
- &adapter->ring_dma);
+ adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev, size,
+ &adapter->ring_dma, GFP_KERNEL);
if (!adapter->ring_vir_addr)
return -ENOMEM;
@@ -663,8 +663,8 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
static void atl2_free_ring_resources(struct atl2_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
- adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
}
/**
@@ -1328,8 +1328,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* until the kernel has the proper infrastructure to support 64-bit DMA
* on these devices.
*/
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) &&
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
err = -EIO;
goto err_dma;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0762d5d1a810..0fdd19d99d99 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -20,6 +20,7 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
+#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -186,6 +187,11 @@ static int bcm_sysport_set_features(struct net_device *dev,
netdev_features_t features)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* Read CRC forward */
if (!priv->is_lite)
@@ -197,6 +203,8 @@ static int bcm_sysport_set_features(struct net_device *dev,
bcm_sysport_set_rx_csum(dev, features);
bcm_sysport_set_tx_csum(dev, features);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -1940,6 +1948,8 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
+ clk_prepare_enable(priv->clk);
+
/* Reset UniMAC */
umac_reset(priv);
@@ -1970,7 +1980,8 @@ static int bcm_sysport_open(struct net_device *dev)
0, priv->phy_interface);
if (!phydev) {
netdev_err(dev, "could not attach to PHY\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_clk_disable;
}
/* Reset house keeping link status */
@@ -2048,6 +2059,8 @@ out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
phy_disconnect(phydev);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2106,6 +2119,8 @@ static int bcm_sysport_stop(struct net_device *dev)
/* Disconnect from PHY */
phy_disconnect(dev->phydev);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -2487,6 +2502,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize private members */
priv = netdev_priv(dev);
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
sizeof(struct bcm_sysport_tx_ring),
@@ -2566,6 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (!ret)
device_set_wakeup_capable(&pdev->dev, 1);
+ priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
+ if (IS_ERR(priv->wol_clk))
+ return PTR_ERR(priv->wol_clk);
+
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb);
@@ -2590,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
+ clk_prepare_enable(priv->clk);
+
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
"Broadcom SYSTEMPORT%s " REV_FMT
@@ -2598,6 +2623,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
priv->irq0, priv->irq1, txq, rxq);
+ clk_disable_unprepare(priv->clk);
+
return 0;
err_deregister_notifier:
@@ -2751,8 +2778,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
bcm_sysport_fini_rx_ring(priv);
/* Get prepared for Wake-on-LAN */
- if (device_may_wakeup(d) && priv->wolopts)
+ if (device_may_wakeup(d) && priv->wolopts) {
+ clk_prepare_enable(priv->wol_clk);
ret = bcm_sysport_suspend_to_wol(priv);
+ }
+
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2767,6 +2798,10 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
+ clk_prepare_enable(priv->clk);
+ if (priv->wolopts)
+ clk_disable_unprepare(priv->wol_clk);
+
umac_reset(priv);
/* Disable the UniMAC RX/TX */
@@ -2846,6 +2881,7 @@ out_free_rx_ring:
out_free_tx_rings:
for (i = 0; i < dev->num_tx_queues; i++)
bcm_sysport_fini_tx_ring(priv, i);
+ clk_disable_unprepare(priv->clk);
return ret;
}
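
The SYSTEMPORT hunks above add clock handling around every path that touches the block's registers. devm_clk_get_optional() returns a NULL (no-op) clock when the firmware does not provide one, so the clk_prepare_enable()/clk_disable_unprepare() pairs can be called unconditionally. A minimal sketch of the bracketing used above (error handling shortened; priv->clk is the field added to bcmsysport.h in the next hunk):

static int bcm_sysport_hw_op_sketch(struct bcm_sysport_priv *priv)
{
	int ret;

	ret = clk_prepare_enable(priv->clk);	/* gate the block on before MMIO */
	if (ret)
		return ret;

	/* ... read/write SYSTEMPORT registers here ... */

	clk_disable_unprepare(priv->clk);	/* balanced with the enable above */
	return 0;
}
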
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 6d80735fbc7f..3a5cb6f128f5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -770,6 +770,8 @@ struct bcm_sysport_priv {
u32 wolopts;
u8 sopass[SOPASS_MAX];
unsigned int wol_irq_disabled:1;
+ struct clk *clk;
+ struct clk *wol_clk;
/* MIB related fields */
struct bcm_sysport_mib mib;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 017169023cca..cf4fe5b17f8a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -280,13 +280,10 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->recv_buf_list = (struct octeon_recv_buffer *)
- vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
- numa_node);
+ droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
+ numa_node);
if (!droq->recv_buf_list)
- droq->recv_buf_list = (struct octeon_recv_buffer *)
- vzalloc(array_size(droq->max_count,
- OCT_DROQ_RECVBUF_SIZE));
+ droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE));
if (!droq->recv_buf_list) {
dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
goto init_droq_fail;
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index f6f3ef9a93cf..87cc0ef68b31 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -134,4 +134,6 @@ config CHELSIO_LIB
help
Common library for Chelsio drivers.
+source "drivers/net/ethernet/chelsio/inline_crypto/Kconfig"
+
endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
index c0f978d2e8a7..1a6fd8b2bb7d 100644
--- a/drivers/net/ethernet/chelsio/Makefile
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_CHELSIO_T4) += cxgb4/
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
obj-$(CONFIG_CHELSIO_LIB) += libcxgb/
+obj-$(CONFIG_CHELSIO_INLINE_CRYPTO) += inline_crypto/
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 99736796e1a0..0e4a0f413960 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -997,17 +997,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("%s: unable to obtain 64-bit DMA for "
- "consistent allocations\n", pci_name(pdev));
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
+ pci_name(pdev));
err = -ENODEV;
goto out_disable_pdev;
}
- } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+ } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47b5c8e2104b..21016de20b2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -509,9 +509,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx];
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
dev_kfree_skb(ce->skb);
ce->skb = NULL;
if (++cidx == q->size)
@@ -529,8 +528,8 @@ static void free_rx_resources(struct sge *sge)
if (sge->respQ.entries) {
size = sizeof(struct respQ_e) * sge->respQ.size;
- pci_free_consistent(pdev, size, sge->respQ.entries,
- sge->respQ.dma_addr);
+ dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
+ sge->respQ.dma_addr);
}
for (i = 0; i < SGE_FREELQ_N; i++) {
@@ -542,8 +541,8 @@ static void free_rx_resources(struct sge *sge)
}
if (q->entries) {
size = sizeof(struct freelQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
+ dma_free_coherent(&pdev->dev, size, q->entries,
+ q->dma_addr);
}
}
}
@@ -564,7 +563,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
q->size = p->freelQ_size[i];
q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
size = sizeof(struct freelQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ q->entries = dma_alloc_coherent(&pdev->dev, size,
+ &q->dma_addr, GFP_KERNEL);
if (!q->entries)
goto err_no_mem;
@@ -601,7 +601,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
sge->respQ.credits = 0;
size = sizeof(struct respQ_e) * sge->respQ.size;
sge->respQ.entries =
- pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+ dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
+ GFP_KERNEL);
if (!sge->respQ.entries)
goto err_no_mem;
return 0;
@@ -624,9 +625,10 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
ce = &q->centries[cidx];
while (n--) {
if (likely(dma_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (q->sop)
q->sop = 0;
}
@@ -663,8 +665,8 @@ static void free_tx_resources(struct sge *sge)
}
if (q->entries) {
size = sizeof(struct cmdQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
+ dma_free_coherent(&pdev->dev, size, q->entries,
+ q->dma_addr);
}
}
}
@@ -689,7 +691,8 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
q->stop_thres = 0;
spin_lock_init(&q->lock);
size = sizeof(struct cmdQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ q->entries = dma_alloc_coherent(&pdev->dev, size,
+ &q->dma_addr, GFP_KERNEL);
if (!q->entries)
goto err_no_mem;
@@ -837,8 +840,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
break;
skb_reserve(skb, q->dma_offset);
- mapping = pci_map_single(pdev, skb->data, dma_len,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
+ DMA_FROM_DEVICE);
skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb;
@@ -1049,15 +1052,15 @@ static inline struct sk_buff *get_packet(struct adapter *adapter,
goto use_orig_buf;
skb_put(skb, len);
- pci_dma_sync_single_for_cpu(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len);
- pci_dma_sync_single_for_device(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ DMA_FROM_DEVICE);
recycle_fl_buf(fl, fl->cidx);
return skb;
}
@@ -1068,8 +1071,8 @@ use_orig_buf:
return NULL;
}
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
skb = ce->skb;
prefetch(skb->data);
@@ -1091,8 +1094,9 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb;
- pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
@@ -1209,8 +1213,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e = e1 = &q->entries[pidx];
ce = &q->centries[pidx];
- mapping = pci_map_single(adapter->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&adapter->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
desc_mapping = mapping;
desc_len = skb_headlen(skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 6dabbf1502c7..ee6188dea705 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2372,10 +2372,7 @@ no_mem:
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
- prefetch(addr);
-#if L1_CACHE_BYTES < 128
- prefetch(addr + L1_CACHE_BYTES);
-#endif
+ net_prefetch(addr);
__refill_fl(adap, fl);
if (lro > 0) {
lro_add_page(adap, qs, fl,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9cb8b229c1b3..e5d5c0fb7f47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1196,6 +1196,9 @@ struct adapter {
struct cxgb4_tc_u32_table *tc_u32;
struct chcr_ktls chcr_ktls;
struct chcr_stats_debug chcr_stats;
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ struct ch_ipsec_stats_debug ch_ipsec_stats;
+#endif
/* TC flower offload */
bool tc_flower_initialized;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 05f33b7e3677..42112e8ad687 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3542,14 +3542,17 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
- seq_printf(seq, "IPSec PDU: %10u\n",
- atomic_read(&adap->chcr_stats.ipsec_cnt));
seq_printf(seq, "TLS PDU Tx: %10u\n",
atomic_read(&adap->chcr_stats.tls_pdu_tx));
seq_printf(seq, "TLS PDU Rx: %10u\n",
atomic_read(&adap->chcr_stats.tls_pdu_rx));
seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
atomic_read(&adap->chcr_stats.tls_key));
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ seq_puts(seq, "\nChelsio Inline IPsec Crypto Accelerator Stats\n");
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->ch_ipsec_stats.ipsec_cnt));
+#endif
#ifdef CONFIG_CHELSIO_TLS_DEVICE
seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
seq_printf(seq, "Tx TLS offload refcount: %20u\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 650db92cb11c..f6c1ec140e09 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -604,17 +604,14 @@ int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
/* If the new rule wants to get inserted into
* HPFILTER region, but its prio is greater
* than the rule with the highest prio in HASH
- * region, then reject the rule.
- */
- if (t->tc_hash_tids_max_prio &&
- tc_prio > t->tc_hash_tids_max_prio)
- break;
-
- /* If there's not enough slots available
- * in HPFILTER region, then move on to
- * normal FILTER region immediately.
+ * region, or if there's not enough slots
+ * available in HPFILTER region, then skip
+ * trying to insert this rule into HPFILTER
+ * region and directly go to the next region.
*/
- if (ftid + n > t->nhpftids) {
+ if ((t->tc_hash_tids_max_prio &&
+ tc_prio > t->tc_hash_tids_max_prio) ||
+ (ftid + n) > t->nhpftids) {
ftid = t->nhpftids;
continue;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index a963fd0b4540..83c8189e4088 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -302,6 +302,7 @@ enum cxgb4_uld {
CXGB4_ULD_ISCSI,
CXGB4_ULD_ISCSIT,
CXGB4_ULD_CRYPTO,
+ CXGB4_ULD_IPSEC,
CXGB4_ULD_TLS,
CXGB4_ULD_MAX
};
@@ -368,7 +369,6 @@ struct chcr_stats_debug {
atomic_t complete;
atomic_t error;
atomic_t fallback;
- atomic_t ipsec_cnt;
atomic_t tls_pdu_tx;
atomic_t tls_pdu_rx;
atomic_t tls_key;
@@ -394,6 +394,12 @@ struct chcr_stats_debug {
#endif
};
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+struct ch_ipsec_stats_debug {
+ atomic_t ipsec_cnt;
+};
+#endif
+
#define OCQ_WIN_OFFSET(pdev, vres) \
(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
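
Taken together, the cxgb4.h, cxgb4_debugfs.c and cxgb4_uld.h hunks move the inline-IPsec PDU counter out of the shared chcr_stats_debug block into a dedicated ch_ipsec_stats_debug struct that only exists when CONFIG_CHELSIO_IPSEC_INLINE is enabled, and add a separate CXGB4_ULD_IPSEC upper-layer driver that the transmit path (see the sge.c hunk below) dispatches to instead of CXGB4_ULD_CRYPTO. A sketch of how the relocated counter is presumably bumped in the new ch_ipsec driver; the actual increment site lives in chcr_ipsec.c, outside the hunks quoted here:

#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
	/* on each ESP packet handed to the inline-crypto Tx path */
	atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);
#endif
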
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 869431a1eedd..fddd70ee6436 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1416,9 +1416,9 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
pi = netdev_priv(dev);
adap = pi->adapter;
ssi = skb_shinfo(skb);
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
if (xfrm_offload(skb) && !ssi->gso_size)
- return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+ return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */
#ifdef CONFIG_CHELSIO_TLS_DEVICE
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fa3367966f4b..98d01a7497ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4745,9 +4745,11 @@ static void le_intr_handler(struct adapter *adap)
static struct intr_info t6_le_intr_info[] = {
{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
{ TCAMINTPERR_F, "LE parity error", -1, 1 },
{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
{ 0 }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 065c01c654ff..b11a172b5174 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -3017,6 +3017,14 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
+#define HASHTBLMEMCRCERR_S 27
+#define HASHTBLMEMCRCERR_V(x) ((x) << HASHTBLMEMCRCERR_S)
+#define HASHTBLMEMCRCERR_F HASHTBLMEMCRCERR_V(1U)
+
+#define CMDTIDERR_S 22
+#define CMDTIDERR_V(x) ((x) << CMDTIDERR_S)
+#define CMDTIDERR_F CMDTIDERR_V(1U)
+
#define T6_UNKNOWNCMD_S 3
#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
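
The two new t4_regs.h entries follow the cxgb4 register-field convention visible in the surrounding definitions: _S is the bit position, _V(x) shifts a value into place, and _F is the single-bit mask (_V(1U)). The rows added to t6_le_intr_info[] in the t4_hw.c hunk above then use those _F masks to decode the LE interrupt-cause value; roughly (usage sketch, 'cause' standing in for the value read from that register):

	u32 cause;	/* placeholder: value read from the LE interrupt-cause register */

	if (cause & CMDTIDERR_F)		/* bit 22 */
		pr_warn("LE cmd tid error\n");
	if (cause & HASHTBLMEMCRCERR_F)		/* bit 27 */
		pr_warn("LE hash table mem crc error\n");
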
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Kconfig b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
new file mode 100644
index 000000000000..be70b59b6f80
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Chelsio inline crypto configuration
+#
+
+config CHELSIO_INLINE_CRYPTO
+ bool "Chelsio Inline Crypto support"
+ depends on CHELSIO_T4
+ default y
+ help
+ Enable support for inline crypto.
+ Allows enable/disable from list of inline crypto drivers.
+
+if CHELSIO_INLINE_CRYPTO
+
+config CRYPTO_DEV_CHELSIO_TLS
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on TLS_TOE
+ help
+ Support Chelsio Inline TLS with Chelsio crypto accelerator.
+ Enable inline TLS support for Tx and Rx.
+
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
+
+config CHELSIO_IPSEC_INLINE
+ tristate "Chelsio IPSec XFRM Tx crypto offload"
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ help
+ Support Chelsio Inline IPsec with Chelsio crypto accelerator.
+ Enable inline IPsec support for Tx.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ch_ipsec.
+
+endif # CHELSIO_INLINE_CRYPTO
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/Makefile
new file mode 100644
index 000000000000..9a86ee8f1f38
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
+obj-$(CONFIG_CHELSIO_IPSEC_INLINE) += ch_ipsec/
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile
new file mode 100644
index 000000000000..efdcaaebc455
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \
+ -I $(srctree)/drivers/crypto/chelsio
+
+obj-$(CONFIG_CHELSIO_IPSEC_INLINE) += ch_ipsec.o
+ch_ipsec-objs := chcr_ipsec.o
+
+
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
new file mode 100644
index 000000000000..276f8841becc
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -0,0 +1,857 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_ipsec.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE 8
+
+static LIST_HEAD(uld_ctx_list);
+static DEFINE_MUTEX(dev_mutex);
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static void chcr_advance_esn_state(struct xfrm_state *x);
+static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
+static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
+static void update_netdev_features(void);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = chcr_advance_esn_state,
+};
+
+/* Add offload xfrms to Chelsio Interface */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+ struct net_device *netdev = NULL;
+ int i;
+
+ for (i = 0; i < lld->nports; i++) {
+ netdev = lld->ports[i];
+ if (!netdev)
+ continue;
+ netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev_change_features(netdev);
+ }
+}
+
+static struct cxgb4_uld_info ch_ipsec_uld_info = {
+ .name = CHIPSEC_DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ /* Max ntxq will be derived from fw config file*/
+ .rxq_size = 1024,
+ .add = ch_ipsec_uld_add,
+ .state_change = ch_ipsec_uld_state_change,
+ .tx_handler = chcr_ipsec_xmit,
+};
+
+static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
+{
+ struct ipsec_uld_ctx *u_ctx;
+
+ pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
+ CHIPSEC_DRV_VERSION);
+ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+ if (!u_ctx) {
+ u_ctx = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ u_ctx->lldi = *infop;
+out:
+ return u_ctx;
+}
+
+static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+ struct ipsec_uld_ctx *u_ctx = handle;
+
+ pr_info("new_state %u\n", new_state);
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
+ mutex_lock(&dev_mutex);
+ list_add_tail(&u_ctx->entry, &uld_ctx_list);
+ mutex_unlock(&dev_mutex);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ case CXGB4_STATE_DOWN:
+ case CXGB4_STATE_DETACH:
+ pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
+ list_del(&u_ctx->entry);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int hmac_ctrl;
+ int authsize = x->aead->alg_icv_len / 8;
+
+ sa_entry->authsize = authsize;
+
+ switch (authsize) {
+ case ICV_8:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ break;
+ case ICV_12:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ break;
+ case ICV_16:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return hmac_ctrl;
+}
+
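+/* Build the hardware key context from the xfrm AEAD key: split off the
+ * trailing 4-byte nonce/salt, record the AES key and append the GHASH
+ * subkey H = AES_K(0^128) that the hardware expects after the key.
+ */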
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int keylen = (x->aead->alg_key_len + 7) / 8;
+ unsigned char *key = x->aead->alg_key;
+ int ck_size, key_ctx_size = 0;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ struct crypto_aes_ctx aes;
+ int ret = 0;
+
+ if (keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(sa_entry->salt, key + keylen, 4);
+ }
+
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(sa_entry->key, key, keylen);
+ sa_entry->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+
+ sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It will go in key context
+ */
+ ret = aes_expandkey(&aes, key, keylen);
+ if (ret) {
+ sa_entry->enckey_len = 0;
+ goto out;
+ }
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ aes_encrypt(&aes, ghash_h, ghash_h);
+ memzero_explicit(&aes, sizeof(aes));
+
+ memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+ 16), ghash_h, AEAD_H_SIZE);
+ sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+out:
+ return ret;
+}
+
+/*
+ * chcr_xfrm_add_state
+ * returns 0 on success, a negative error code if the xfrm state cannot
+ * be offloaded or resources cannot be allocated
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+ int res = 0;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128 &&
+ x->aead->alg_icv_len != 96) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ if (x->props.flags & XFRM_STATE_ESN)
+ sa_entry->esn = 1;
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+out:
+ return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kfree(sa_entry);
+ module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+ return true;
+}
+
+static void chcr_advance_esn_state(struct xfrm_state *x)
+{
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
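+/* Return the total header length (WR, IPsec request, key context and
+ * CPL, plus the ESN block if applicable) when the whole packet can be
+ * inlined as immediate data within MAX_IMM_TX_PKT_LEN, or 0 when a
+ * gather list is needed instead.
+ */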
+static inline int is_eth_imm(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry)
+{
+ unsigned int kctx_len;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (sa_entry->esn)
+ hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
+ << 4);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+}
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry,
+ bool *immediate)
+{
+ unsigned int kctx_len;
+ unsigned int flits;
+ int aadivlen;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = is_eth_imm(skb, sa_entry);
+ aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ 16) : 0;
+ aadivlen <<= 4;
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+
+ if (hdrlen) {
+ *immediate = true;
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+ }
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+ /* Otherwise we have to construct a scatter/gather list of the skb
+ * body and fragments, plus the flits needed for the work request
+ * header, the IPsec request (ULP_TXPKT, immediate-data sub-command,
+ * CPL_TX_SEC_PDU and key context), the CPL_TX_PKT_XT and, when the
+ * SA uses ESN, the AAD/IV block.
+ */
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ aadivlen) / sizeof(__be64);
+ return flits;
+}
+
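+/* Write the ESN AAD/IV block ahead of the packet: SPI, the full 64-bit
+ * sequence number (low bits from the ESP header, high bits from the
+ * xfrm offload state) and the 8-byte explicit IV.
+ */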
+inline void *copy_esn_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct chcr_ipsec_aadiv *aadiv;
+ struct ulptx_idata *sc_imm;
+ struct ip_esp_hdr *esphdr;
+ struct xfrm_offload *xo;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ __be64 seqno;
+ u32 qidx;
+ u32 seqlo;
+ u8 *iv;
+ int eoq;
+ int len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ if (!eoq)
+ pos = q->q.desc;
+
+ len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
+ memset(pos, 0, len);
+ aadiv = (struct chcr_ipsec_aadiv *)pos;
+ esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ xo = xfrm_offload(skb);
+
+ aadiv->spi = (esphdr->spi);
+ seqlo = ntohl(esphdr->seq_no);
+ seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
+ memcpy(aadiv->seq_no, &seqno, 8);
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ memcpy(aadiv->iv, iv, 8);
+
+ if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
+ sc_imm = (struct ulptx_idata *)(pos +
+ (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ sizeof(__be64)) << 3));
+ sc_imm->cmd_more = FILL_CMD_MORE(0);
+ sc_imm->len = cpu_to_be32(skb->len);
+ }
+ pos += len;
+ return pos;
+}
+
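+/* Fill in the CPL_TX_PKT_XT message (interface, checksum and VLAN
+ * controls, packet length) and append the ESN block when the SA uses
+ * extended sequence numbers.
+ */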
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct cpl_tx_pkt_core *cpl;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ u32 ctrl0, qidx;
+ u64 cntrl = 0;
+ int left;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ left = (void *)q->q.stat - pos;
+ if (!left)
+ pos = q->q.desc;
+
+ cpl = (struct cpl_tx_pkt_core *)pos;
+
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+ if (skb_vlan_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(ctrl0);
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
+ /* Copy ESN info for HW */
+ if (sa_entry->esn)
+ pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
+ return pos;
+}
+
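+/* Copy the key context header, salt and key material into the Tx ring,
+ * wrapping to the start of the ring if the key does not fit before the
+ * end-of-queue marker, then emit the CPL_TX_PKT_XT.
+ */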
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct _key_ctx *key_ctx;
+ int left, eoq, key_len;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ unsigned int qidx;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ key_len = sa_entry->kctx_len;
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ left = eoq;
+ if (!eoq) {
+ pos = q->q.desc;
+ left = 64 * q->q.size;
+ }
+
+ /* Copy the Key context header */
+ key_ctx = (struct _key_ctx *)pos;
+ key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+ memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+ pos += sizeof(struct _key_ctx);
+ left -= sizeof(struct _key_ctx);
+
+ if (likely(key_len <= left)) {
+ memcpy(key_ctx->key, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ /* Copy CPL TX PKT XT */
+ pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
+
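+/* Construct the crypto work request for one ESP packet: FW_ULPTX_WR
+ * header, ULP_TXPKT, immediate-data sub-command and a CPL_TX_SEC_PDU
+ * describing the AAD, IV and cipher offsets, followed by the key
+ * context and CPL_TX_PKT_XT written by copy_key_cpltx_pktxt().
+ */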
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
+ bool immediate = false;
+ u16 immdatalen = 0;
+ unsigned int flits;
+ u32 ivinoffset;
+ u32 aadstart;
+ u32 aadstop;
+ u32 ciphstart;
+ u16 sc_more = 0;
+ u32 ivdrop = 0;
+ u32 esnlen = 0;
+ u32 wr_mid;
+ u16 ndesc;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+ int qid = q->q.cntxt_id;
+
+ atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);
+
+ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
+ ndesc = DIV_ROUND_UP(flits, 2);
+ if (sa_entry->esn)
+ ivdrop = 1;
+
+ if (immediate)
+ immdatalen = skb->len;
+
+ if (sa_entry->esn) {
+ esnlen = sizeof(struct chcr_ipsec_aadiv);
+ if (!skb_is_nonlinear(skb))
+ sc_more = 1;
+ }
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+ if (!q->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+ wr->req.ulptx.len = htonl(ndesc - 1);
+
+ /* Sub-command */
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ esnlen +
+ (esnlen ? 0 : immdatalen));
+
+ /* CPL_SEC_PDU */
+ ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1);
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+ ivinoffset));
+
+ wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
+ aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
+ aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr));
+ ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1;
+ ciphstart += sa_entry->esn ? esnlen : 0;
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ aadstart,
+ aadstop,
+ ciphstart, 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
+ sa_entry->authsize,
+ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, ivdrop, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata) +
+ sizeof(struct cpl_tx_sec_pdu);
+
+ pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/*
+ * chcr_ipsec_xmit - called from the cxgb4 ULD Tx handler for ESP packets
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ unsigned int last_desc, ndesc, flits = 0;
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ struct tx_sw_desc *sgl_sdesc;
+ int qidx, left, credits;
+ bool immediate = false;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sec_path *sp;
+
+ if (!x->xso.offload_handle)
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+
+ sp = skb_sec_path(skb);
+ if (sp->len != 1) {
+out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+ dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+ flits);
+ return NETDEV_TX_BUSY;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ /* Setup IPSec CPL */
+ pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ if (pos == (u64 *)q->q.stat) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ pos = (void *)q->q.desc;
+ }
+
+ sgl = (void *)pos;
+ if (immediate) {
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
+ dev_consume_skb_any(skb);
+ } else {
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+ 0, sgl_sdesc->addr);
+ skb_orphan(skb);
+ sgl_sdesc->skb = skb;
+ }
+ txq_advance(&q->q, ndesc);
+
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
+
+static void update_netdev_features(void)
+{
+ struct ipsec_uld_ctx *u_ctx, *tmp;
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+ if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(&u_ctx->lldi);
+ }
+ mutex_unlock(&dev_mutex);
+}
+
+static int __init chcr_ipsec_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);
+
+ rtnl_lock();
+ update_netdev_features();
+ rtnl_unlock();
+
+ return 0;
+}
+
+static void __exit chcr_ipsec_exit(void)
+{
+ struct ipsec_uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+ adap = pci_get_drvdata(u_ctx->lldi.pdev);
+ atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
+}
+
+module_init(chcr_ipsec_init);
+module_exit(chcr_ipsec_exit);
+
+MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(CHIPSEC_DRV_VERSION);
+
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
new file mode 100644
index 000000000000..1d110d2edd64
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018 Chelsio Communications, Inc. */
+
+#ifndef __CHCR_IPSEC_H__
+#define __CHCR_IPSEC_H__
+
+#include <crypto/algapi.h>
+#include "t4_hw.h"
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "cxgb4_uld.h"
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+#define CHIPSEC_DRV_MODULE_NAME "ch_ipsec"
+#define CHIPSEC_DRV_VERSION "1.0.0.0-ko"
+#define CHIPSEC_DRV_DESC "Chelsio T6 Crypto Ipsec offload Driver"
+
+struct ipsec_uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+};
+
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+#define ESN_IV_INSERT_OFFSET 12
+struct chcr_ipsec_aadiv {
+ __be32 spi;
+ u8 seq_no[8];
+ u8 iv[8];
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ u16 esn;
+ u16 resv;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+#endif /* __CHCR_IPSEC_H__ */
+
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile
new file mode 100644
index 000000000000..bc11495acdb3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \
+ -I $(srctree)/drivers/crypto/chelsio
+
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls.o
+chtls-objs := chtls_main.o chtls_cm.o chtls_io.o chtls_hw.o
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
new file mode 100644
index 000000000000..2d3dfdd2a716
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -0,0 +1,580 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ */
+
+#ifndef __CHTLS_H__
+#define __CHTLS_H__
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <linux/tls.h>
+#include <net/tls.h>
+#include <net/tls_toe.h>
+
+#include "t4fw_api.h"
+#include "t4_msg.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "chcr_algo.h"
+#include "chcr_core.h"
+#include "chcr_crypto.h"
+
+#define CHTLS_DRV_VERSION "1.0.0.0-ko"
+
+#define TLS_KEYCTX_RXFLIT_CNT_S 24
+#define TLS_KEYCTX_RXFLIT_CNT_V(x) ((x) << TLS_KEYCTX_RXFLIT_CNT_S)
+
+#define TLS_KEYCTX_RXPROT_VER_S 20
+#define TLS_KEYCTX_RXPROT_VER_M 0xf
+#define TLS_KEYCTX_RXPROT_VER_V(x) ((x) << TLS_KEYCTX_RXPROT_VER_S)
+
+#define TLS_KEYCTX_RXCIPH_MODE_S 16
+#define TLS_KEYCTX_RXCIPH_MODE_M 0xf
+#define TLS_KEYCTX_RXCIPH_MODE_V(x) ((x) << TLS_KEYCTX_RXCIPH_MODE_S)
+
+#define TLS_KEYCTX_RXAUTH_MODE_S 12
+#define TLS_KEYCTX_RXAUTH_MODE_M 0xf
+#define TLS_KEYCTX_RXAUTH_MODE_V(x) ((x) << TLS_KEYCTX_RXAUTH_MODE_S)
+
+#define TLS_KEYCTX_RXCIAU_CTRL_S 11
+#define TLS_KEYCTX_RXCIAU_CTRL_V(x) ((x) << TLS_KEYCTX_RXCIAU_CTRL_S)
+
+#define TLS_KEYCTX_RX_SEQCTR_S 9
+#define TLS_KEYCTX_RX_SEQCTR_M 0x3
+#define TLS_KEYCTX_RX_SEQCTR_V(x) ((x) << TLS_KEYCTX_RX_SEQCTR_S)
+
+#define TLS_KEYCTX_RX_VALID_S 8
+#define TLS_KEYCTX_RX_VALID_V(x) ((x) << TLS_KEYCTX_RX_VALID_S)
+
+#define TLS_KEYCTX_RXCK_SIZE_S 3
+#define TLS_KEYCTX_RXCK_SIZE_M 0x7
+#define TLS_KEYCTX_RXCK_SIZE_V(x) ((x) << TLS_KEYCTX_RXCK_SIZE_S)
+
+#define TLS_KEYCTX_RXMK_SIZE_S 0
+#define TLS_KEYCTX_RXMK_SIZE_M 0x7
+#define TLS_KEYCTX_RXMK_SIZE_V(x) ((x) << TLS_KEYCTX_RXMK_SIZE_S)
+
+#define KEYCTX_TX_WR_IV_S 55
+#define KEYCTX_TX_WR_IV_M 0x1ffULL
+#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
+#define KEYCTX_TX_WR_IV_G(x) \
+ (((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)
+
+#define KEYCTX_TX_WR_AAD_S 47
+#define KEYCTX_TX_WR_AAD_M 0xffULL
+#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
+#define KEYCTX_TX_WR_AAD_G(x) (((x) >> KEYCTX_TX_WR_AAD_S) & \
+ KEYCTX_TX_WR_AAD_M)
+
+#define KEYCTX_TX_WR_AADST_S 39
+#define KEYCTX_TX_WR_AADST_M 0xffULL
+#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
+#define KEYCTX_TX_WR_AADST_G(x) \
+ (((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)
+
+#define KEYCTX_TX_WR_CIPHER_S 30
+#define KEYCTX_TX_WR_CIPHER_M 0x1ffULL
+#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
+#define KEYCTX_TX_WR_CIPHER_G(x) \
+ (((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)
+
+#define KEYCTX_TX_WR_CIPHERST_S 23
+#define KEYCTX_TX_WR_CIPHERST_M 0x7f
+#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
+#define KEYCTX_TX_WR_CIPHERST_G(x) \
+ (((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)
+
+#define KEYCTX_TX_WR_AUTH_S 14
+#define KEYCTX_TX_WR_AUTH_M 0x1ff
+#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
+#define KEYCTX_TX_WR_AUTH_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)
+
+#define KEYCTX_TX_WR_AUTHST_S 7
+#define KEYCTX_TX_WR_AUTHST_M 0x7f
+#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
+#define KEYCTX_TX_WR_AUTHST_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)
+
+#define KEYCTX_TX_WR_AUTHIN_S 0
+#define KEYCTX_TX_WR_AUTHIN_M 0x7f
+#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
+#define KEYCTX_TX_WR_AUTHIN_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+#define MAX_IVS_PAGE 256
+#define TLS_KEY_CONTEXT_SZ 64
+#define CIPHER_BLOCK_SIZE 16
+#define GCM_TAG_SIZE 16
+#define KEY_ON_MEM_SZ 16
+#define AEAD_EXPLICIT_DATA_SIZE 8
+#define TLS_HEADER_LENGTH 5
+#define SCMD_CIPH_MODE_AES_GCM 2
+/* Maximum TLS record (fragment) size; 16384 is the TLS protocol limit */
+#define TLS_MFS 16384
+
+#define RSS_HDR sizeof(struct rss_header)
+#define TLS_WR_CPL_LEN \
+ (sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo))
+
+enum {
+ CHTLS_KEY_CONTEXT_DSGL,
+ CHTLS_KEY_CONTEXT_IMM,
+ CHTLS_KEY_CONTEXT_DDR,
+};
+
+enum {
+ CHTLS_LISTEN_START,
+ CHTLS_LISTEN_STOP,
+};
+
+/* Flags for return value of CPL message handlers */
+enum {
+ CPL_RET_BUF_DONE = 1, /* buffer processing done */
+ CPL_RET_BAD_MSG = 2, /* bad CPL message */
+ CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
+};
+
+#define LISTEN_INFO_HASH_SIZE 32
+#define RSPQ_HASH_BITS 5
+struct listen_info {
+ struct listen_info *next; /* Link to next entry */
+ struct sock *sk; /* The listening socket */
+ unsigned int stid; /* The server TID */
+};
+
+enum {
+ T4_LISTEN_START_PENDING,
+ T4_LISTEN_STARTED
+};
+
+enum csk_flags {
+ CSK_CALLBACKS_CHKD, /* socket callbacks have been sanitized */
+ CSK_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
+ CSK_TX_MORE_DATA, /* sending ULP data; don't set SHOVE bit */
+ CSK_TX_WAIT_IDLE, /* suspend Tx until in-flight data is ACKed */
+ CSK_ABORT_SHUTDOWN, /* shouldn't send more abort requests */
+ CSK_ABORT_RPL_PENDING, /* expecting an abort reply */
+ CSK_CLOSE_CON_REQUESTED,/* we've sent a close_conn_req */
+ CSK_TX_DATA_SENT, /* sent a TX_DATA WR on this connection */
+ CSK_TX_FAILOVER, /* Tx traffic failing over */
+ CSK_UPDATE_RCV_WND, /* Need to update rcv window */
+ CSK_RST_ABORTED, /* outgoing RST was aborted */
+ CSK_TLS_HANDSHK, /* TLS Handshake */
+ CSK_CONN_INLINE, /* Connection on HW */
+};
+
+enum chtls_cdev_state {
+ CHTLS_CDEV_STATE_UP = 1
+};
+
+struct listen_ctx {
+ struct sock *lsk;
+ struct chtls_dev *cdev;
+ struct sk_buff_head synq;
+ u32 state;
+};
+
+struct key_map {
+ unsigned long *addr;
+ unsigned int start;
+ unsigned int available;
+ unsigned int size;
+ spinlock_t lock; /* lock for key id request from map */
+} __packed;
+
+struct tls_scmd {
+ u32 seqno_numivs;
+ u32 ivgen_hdrlen;
+};
+
+struct chtls_dev {
+ struct tls_toe_device tlsdev;
+ struct list_head list;
+ struct cxgb4_lld_info *lldi;
+ struct pci_dev *pdev;
+ struct listen_info *listen_hash_tab[LISTEN_INFO_HASH_SIZE];
+ spinlock_t listen_lock; /* lock for listen list */
+ struct net_device **ports;
+ struct tid_info *tids;
+ unsigned int pfvf;
+ const unsigned short *mtus;
+
+ struct idr hwtid_idr;
+ struct idr stid_idr;
+
+ spinlock_t idr_lock ____cacheline_aligned_in_smp;
+
+ struct net_device *egr_dev[NCHAN * 2];
+ struct sk_buff *rspq_skb_cache[1 << RSPQ_HASH_BITS];
+ struct sk_buff *askb;
+
+ struct sk_buff_head deferq;
+ struct work_struct deferq_task;
+
+ struct list_head list_node;
+ struct list_head rcu_node;
+ struct list_head na_node;
+ unsigned int send_page_order;
+ int max_host_sndbuf;
+ struct key_map kmap;
+ unsigned int cdev_state;
+};
+
+struct chtls_listen {
+ struct chtls_dev *cdev;
+ struct sock *sk;
+};
+
+struct chtls_hws {
+ struct sk_buff_head sk_recv_queue;
+ u8 txqid;
+ u8 ofld;
+ u16 type;
+ u16 rstate;
+ u16 keyrpl;
+ u16 pldlen;
+ u16 rcvpld;
+ u16 compute;
+ u16 expansion;
+ u16 keylen;
+ u16 pdus;
+ u16 adjustlen;
+ u16 ivsize;
+ u16 txleft;
+ u32 mfs;
+ s32 txkey;
+ s32 rxkey;
+ u32 fcplenmax;
+ u32 copied_seq;
+ u64 tx_seq_no;
+ struct tls_scmd scmd;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ } crypto_info;
+};
+
+struct chtls_sock {
+ struct sock *sk;
+ struct chtls_dev *cdev;
+ struct l2t_entry *l2t_entry; /* pointer to the L2T entry */
+ struct net_device *egress_dev; /* TX_CHAN for act open retry */
+
+ struct sk_buff_head txq;
+ struct sk_buff *wr_skb_head;
+ struct sk_buff *wr_skb_tail;
+ struct sk_buff *ctrl_skb_cache;
+ struct sk_buff *txdata_skb_cache; /* abort path messages */
+ struct kref kref;
+ unsigned long flags;
+ u32 opt2;
+ u32 wr_credits;
+ u32 wr_unacked;
+ u32 wr_max_credits;
+ u32 wr_nondata;
+ u32 hwtid; /* TCP Control Block ID */
+ u32 txq_idx;
+ u32 rss_qid;
+ u32 tid;
+ u32 idr;
+ u32 mss;
+ u32 ulp_mode;
+ u32 tx_chan;
+ u32 rx_chan;
+ u32 sndbuf;
+ u32 txplen_max;
+ u32 mtu_idx; /* MTU table index */
+ u32 smac_idx;
+ u8 port_id;
+ u8 tos;
+ u16 resv2;
+ u32 delack_mode;
+ u32 delack_seq;
+ u32 snd_win;
+ u32 rcv_win;
+
+ void *passive_reap_next; /* placeholder for passive */
+ struct chtls_hws tlshws;
+ struct synq {
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ } synq;
+ struct listen_ctx *listen_ctx;
+};
+
+struct tls_hdr {
+ u8 type;
+ u16 version;
+ u16 length;
+} __packed;
+
+struct tlsrx_cmp_hdr {
+ u8 type;
+ u16 version;
+ u16 length;
+
+ u64 tls_seq;
+ u16 reserved1;
+ u8 res_to_mac_error;
+} __packed;
+
+/* res_to_mac_error fields */
+#define TLSRX_HDR_PKT_INT_ERROR_S 4
+#define TLSRX_HDR_PKT_INT_ERROR_M 0x1
+#define TLSRX_HDR_PKT_INT_ERROR_V(x) \
+ ((x) << TLSRX_HDR_PKT_INT_ERROR_S)
+#define TLSRX_HDR_PKT_INT_ERROR_G(x) \
+ (((x) >> TLSRX_HDR_PKT_INT_ERROR_S) & TLSRX_HDR_PKT_INT_ERROR_M)
+#define TLSRX_HDR_PKT_INT_ERROR_F TLSRX_HDR_PKT_INT_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_SPP_ERROR_S 3
+#define TLSRX_HDR_PKT_SPP_ERROR_M 0x1
+#define TLSRX_HDR_PKT_SPP_ERROR_V(x) ((x) << TLSRX_HDR_PKT_SPP_ERROR_S)
+#define TLSRX_HDR_PKT_SPP_ERROR_G(x) \
+ (((x) >> TLSRX_HDR_PKT_SPP_ERROR_S) & TLSRX_HDR_PKT_SPP_ERROR_M)
+#define TLSRX_HDR_PKT_SPP_ERROR_F TLSRX_HDR_PKT_SPP_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_CCDX_ERROR_S 2
+#define TLSRX_HDR_PKT_CCDX_ERROR_M 0x1
+#define TLSRX_HDR_PKT_CCDX_ERROR_V(x) ((x) << TLSRX_HDR_PKT_CCDX_ERROR_S)
+#define TLSRX_HDR_PKT_CCDX_ERROR_G(x) \
+ (((x) >> TLSRX_HDR_PKT_CCDX_ERROR_S) & TLSRX_HDR_PKT_CCDX_ERROR_M)
+#define TLSRX_HDR_PKT_CCDX_ERROR_F TLSRX_HDR_PKT_CCDX_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_PAD_ERROR_S 1
+#define TLSRX_HDR_PKT_PAD_ERROR_M 0x1
+#define TLSRX_HDR_PKT_PAD_ERROR_V(x) ((x) << TLSRX_HDR_PKT_PAD_ERROR_S)
+#define TLSRX_HDR_PKT_PAD_ERROR_G(x) \
+ (((x) >> TLSRX_HDR_PKT_PAD_ERROR_S) & TLSRX_HDR_PKT_PAD_ERROR_M)
+#define TLSRX_HDR_PKT_PAD_ERROR_F TLSRX_HDR_PKT_PAD_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_MAC_ERROR_S 0
+#define TLSRX_HDR_PKT_MAC_ERROR_M 0x1
+#define TLSRX_HDR_PKT_MAC_ERROR_V(x) ((x) << TLSRX_HDR_PKT_MAC_ERROR_S)
+#define TLSRX_HDR_PKT_MAC_ERROR_G(x) \
+ (((x) >> TLSRX_HDR_PKT_MAC_ERROR_S) & TLSRX_HDR_PKT_MAC_ERROR_M)
+#define TLSRX_HDR_PKT_MAC_ERROR_F TLSRX_HDR_PKT_MAC_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_ERROR_M 0x1F
+#define CONTENT_TYPE_ERROR 0x7F
+
+struct ulp_mem_rw {
+ __be32 cmd;
+ __be32 len16; /* command length */
+ __be32 dlen; /* data length in 32-byte units */
+ __be32 lock_addr;
+};
+
+struct tls_key_wr {
+ __be32 op_to_compl;
+ __be32 flowid_len16;
+ __be32 ftid;
+ u8 reneg_to_write_rx;
+ u8 protocol;
+ __be16 mfs;
+};
+
+struct tls_key_req {
+ struct tls_key_wr wr;
+ struct ulp_mem_rw req;
+ struct ulptx_idata sc_imm;
+};
+
+/*
+ * This lives in skb->cb and is used to chain WRs in a linked list.
+ */
+struct wr_skb_cb {
+ struct l2t_skb_cb l2t; /* reserve space for l2t CB */
+ struct sk_buff *next_wr; /* next write request */
+};
+
+/* Per-skb backlog handler. Run when a socket's backlog is processed. */
+struct blog_skb_cb {
+ void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
+ struct chtls_dev *cdev;
+};
+
+/*
+ * Similar to tcp_skb_cb but with ULP elements added to support TLS,
+ * etc.
+ */
+struct ulp_skb_cb {
+ struct wr_skb_cb wr; /* reserve space for write request */
+ u16 flags; /* TCP-like flags */
+ u8 psh;
+ u8 ulp_mode; /* ULP mode/submode of sk_buff */
+ u32 seq; /* TCP sequence number */
+ union { /* ULP-specific fields */
+ struct {
+ u8 type;
+ u8 ofld;
+ u8 iv;
+ } tls;
+ } ulp;
+};
+
+#define ULP_SKB_CB(skb) ((struct ulp_skb_cb *)&((skb)->cb[0]))
+#define BLOG_SKB_CB(skb) ((struct blog_skb_cb *)(skb)->cb)
+
+/*
+ * Flags for ulp_skb_cb.flags.
+ */
+enum {
+ ULPCB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
+ ULPCB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
+ ULPCB_FLAG_BARRIER = 1 << 2, /* set TX_WAIT_IDLE after sending */
+ ULPCB_FLAG_HOLD = 1 << 3, /* skb not ready for Tx yet */
+ ULPCB_FLAG_COMPL = 1 << 4, /* request WR completion */
+ ULPCB_FLAG_URG = 1 << 5, /* urgent data */
+ ULPCB_FLAG_TLS_HDR = 1 << 6, /* payload with tls hdr */
+ ULPCB_FLAG_NO_HDR = 1 << 7, /* not a ofld wr */
+};
+
+/* The ULP mode/submode of an skbuff */
+#define skb_ulp_mode(skb) (ULP_SKB_CB(skb)->ulp_mode)
+#define TCP_PAGE(sk) (sk->sk_frag.page)
+#define TCP_OFF(sk) (sk->sk_frag.offset)
+
+static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
+{
+ return container_of(tlsdev, struct chtls_dev, tlsdev);
+}
+
+static inline void csk_set_flag(struct chtls_sock *csk,
+ enum csk_flags flag)
+{
+ __set_bit(flag, &csk->flags);
+}
+
+static inline void csk_reset_flag(struct chtls_sock *csk,
+ enum csk_flags flag)
+{
+ __clear_bit(flag, &csk->flags);
+}
+
+static inline bool csk_conn_inline(const struct chtls_sock *csk)
+{
+ return test_bit(CSK_CONN_INLINE, &csk->flags);
+}
+
+static inline int csk_flag(const struct sock *sk, enum csk_flags flag)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+ if (!csk_conn_inline(csk))
+ return 0;
+ return test_bit(flag, &csk->flags);
+}
+
+static inline int csk_flag_nochk(const struct chtls_sock *csk,
+ enum csk_flags flag)
+{
+ return test_bit(flag, &csk->flags);
+}
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+static inline int is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
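+/* Dispatch a CPL handler against a socket: run it directly unless the
+ * socket is owned by user context, in which case the skb is queued on
+ * the socket backlog with the handler recorded in its control block.
+ */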
+static inline void process_cpl_msg(void (*fn)(struct sock *, struct sk_buff *),
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ bh_lock_sock(sk);
+ if (unlikely(sock_owned_by_user(sk))) {
+ BLOG_SKB_CB(skb)->backlog_rcv = fn;
+ __sk_add_backlog(sk, skb);
+ } else {
+ fn(sk, skb);
+ }
+ bh_unlock_sock(sk);
+}
+
+static inline void chtls_sock_free(struct kref *ref)
+{
+ struct chtls_sock *csk = container_of(ref, struct chtls_sock,
+ kref);
+ kfree(csk);
+}
+
+static inline void __chtls_sock_put(const char *fn, struct chtls_sock *csk)
+{
+ kref_put(&csk->kref, chtls_sock_free);
+}
+
+static inline void __chtls_sock_get(const char *fn,
+ struct chtls_sock *csk)
+{
+ kref_get(&csk->kref);
+}
+
+static inline void send_or_defer(struct sock *sk, struct tcp_sock *tp,
+ struct sk_buff *skb, int through_l2t)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+ if (through_l2t) {
+ /* send through L2T */
+ cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
+ } else {
+ /* send directly */
+ cxgb4_ofld_send(csk->egress_dev, skb);
+ }
+}
+
+typedef int (*chtls_handler_func)(struct chtls_dev *, struct sk_buff *);
+extern chtls_handler_func chtls_handlers[NUM_CPL_CMDS];
+void chtls_install_cpl_ops(struct sock *sk);
+int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi);
+void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk);
+int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk);
+void chtls_close(struct sock *sk, long timeout);
+int chtls_disconnect(struct sock *sk, int flags);
+void chtls_shutdown(struct sock *sk, int how);
+void chtls_destroy_sock(struct sock *sk);
+int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
+ size_t len, int nonblock, int flags, int *addr_len);
+int chtls_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+int send_tx_flowc_wr(struct sock *sk, int compl,
+ u32 snd_nxt, u32 rcv_nxt);
+void chtls_tcp_push(struct sock *sk, int flags);
+int chtls_push_frames(struct chtls_sock *csk, int comp);
+int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
+void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
+unsigned int keyid_to_addr(int start_addr, int keyid);
+void free_tls_keyid(struct sock *sk);
+#endif
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
new file mode 100644
index 000000000000..05520dccd906
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -0,0 +1,2327 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sched/signal.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#include <net/ip6_route.h>
+#include <net/inet_common.h>
+#include <net/tcp.h>
+#include <net/dst.h>
+#include <net/tls.h>
+#include <net/addrconf.h>
+#include <net/secure_seq.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+#include "clip_tbl.h"
+
+/*
+ * State transitions and actions for close. Note that if we are in SYN_SENT
+ * we remain in that state as we cannot control a connection while it's in
+ * SYN_SENT; such connections are allowed to establish and are then aborted.
+ */
+static unsigned char new_state[16] = {
+ /* current state: new state: action: */
+ /* (Invalid) */ TCP_CLOSE,
+ /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+ /* TCP_SYN_SENT */ TCP_SYN_SENT,
+ /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+ /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
+ /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
+ /* TCP_TIME_WAIT */ TCP_CLOSE,
+ /* TCP_CLOSE */ TCP_CLOSE,
+ /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
+ /* TCP_LAST_ACK */ TCP_LAST_ACK,
+ /* TCP_LISTEN */ TCP_CLOSE,
+ /* TCP_CLOSING */ TCP_CLOSING,
+};
+
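+/* Allocate a chtls_sock and pre-allocate the skb cached for abort-path
+ * messages; returns NULL if either allocation fails.
+ */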
+static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
+{
+ struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
+
+ if (!csk)
+ return NULL;
+
+ csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
+ if (!csk->txdata_skb_cache) {
+ kfree(csk);
+ return NULL;
+ }
+
+ kref_init(&csk->kref);
+ csk->cdev = cdev;
+ skb_queue_head_init(&csk->txq);
+ csk->wr_skb_head = NULL;
+ csk->wr_skb_tail = NULL;
+ csk->mss = MAX_MSS;
+ csk->tlshws.ofld = 1;
+ csk->tlshws.txkey = -1;
+ csk->tlshws.rxkey = -1;
+ csk->tlshws.mfs = TLS_MFS;
+ skb_queue_head_init(&csk->tlshws.sk_recv_queue);
+ return csk;
+}
+
+static void chtls_sock_release(struct kref *ref)
+{
+ struct chtls_sock *csk =
+ container_of(ref, struct chtls_sock, kref);
+
+ kfree(csk);
+}
+
+static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
+ struct sock *sk)
+{
+ struct net_device *ndev = cdev->ports[0];
+#if IS_ENABLED(CONFIG_IPV6)
+ struct net_device *temp;
+ int addr_type;
+#endif
+
+ switch (sk->sk_family) {
+ case PF_INET:
+ if (likely(!inet_sk(sk)->inet_rcv_saddr))
+ return ndev;
+ ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case PF_INET6:
+ addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+ if (likely(addr_type == IPV6_ADDR_ANY))
+ return ndev;
+
+ for_each_netdev_rcu(&init_net, temp) {
+ if (ipv6_chk_addr(&init_net, (struct in6_addr *)
+ &sk->sk_v6_rcv_saddr, temp, 1)) {
+ ndev = temp;
+ break;
+ }
+ }
+ break;
+#endif
+ default:
+ return NULL;
+ }
+
+ if (!ndev)
+ return NULL;
+
+ if (is_vlan_dev(ndev))
+ return vlan_dev_real_dev(ndev);
+ return ndev;
+}
+
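+/* Decode the TCP option word reported by the hardware for a new
+ * connection and mirror the MSS, timestamp and window-scale settings
+ * into the kernel's tcp_sock.
+ */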
+static void assign_rxopt(struct sock *sk, unsigned int opt)
+{
+ const struct chtls_dev *cdev;
+ struct chtls_sock *csk;
+ struct tcp_sock *tp;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ tp = tcp_sk(sk);
+
+ cdev = csk->cdev;
+ tp->tcp_header_len = sizeof(struct tcphdr);
+ tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
+ tp->mss_cache = tp->rx_opt.mss_clamp;
+ tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
+ tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt);
+ tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
+ SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
+ if (!tp->rx_opt.wscale_ok)
+ tp->rx_opt.rcv_wscale = 0;
+ if (tp->rx_opt.tstamp_ok) {
+ tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
+ tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
+ } else if (csk->opt2 & TSTAMPS_EN_F) {
+ csk->opt2 &= ~TSTAMPS_EN_F;
+ csk->mtu_idx = TCPOPT_MSS_G(opt);
+ }
+}
+
+static void chtls_purge_receive_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+ skb_dst_set(skb, (void *)NULL);
+ kfree_skb(skb);
+ }
+}
+
+static void chtls_purge_write_queue(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&csk->txq))) {
+ sk->sk_wmem_queued -= skb->truesize;
+ __kfree_skb(skb);
+ }
+}
+
+static void chtls_purge_recv_queue(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct chtls_hws *tlsk = &csk->tlshws;
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
+ skb_dst_set(skb, NULL);
+ kfree_skb(skb);
+ }
+}
+
+static void abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct cpl_abort_req *req = cplhdr(skb);
+ struct chtls_dev *cdev;
+
+ cdev = (struct chtls_dev *)handle;
+ req->cmd = CPL_ABORT_NO_RST;
+ cxgb4_ofld_send(cdev->lldi->ports[0], skb);
+}
+
+static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
+{
+ if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
+ __skb_trim(skb, 0);
+ refcount_add(2, &skb->users);
+ } else {
+ skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ }
+ return skb;
+}
+
+static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
+{
+ struct cpl_abort_req *req;
+ struct chtls_sock *csk;
+ struct tcp_sock *tp;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ tp = tcp_sk(sk);
+
+ if (!skb)
+ skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
+
+ req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
+ INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
+ skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+ req->rsvd0 = htonl(tp->snd_nxt);
+ req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
+ req->cmd = mode;
+ t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
+ send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
+}
+
+static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+ if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
+ !csk->cdev)) {
+ if (sk->sk_state == TCP_SYN_RECV)
+ csk_set_flag(csk, CSK_RST_ABORTED);
+ goto out;
+ }
+
+ if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
+ WARN_ONCE(1, "send tx flowc error");
+ csk_set_flag(csk, CSK_TX_DATA_SENT);
+ }
+
+ csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
+ chtls_purge_write_queue(sk);
+
+ csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
+ if (sk->sk_state != TCP_SYN_RECV)
+ chtls_send_abort(sk, mode, skb);
+ else
+ goto out;
+
+ return;
+out:
+ kfree_skb(skb);
+}
+
+static void release_tcp_port(struct sock *sk)
+{
+ if (inet_csk(sk)->icsk_bind_hash)
+ inet_put_port(sk);
+}
+
+static void tcp_uncork(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->nonagle & TCP_NAGLE_CORK) {
+ tp->nonagle &= ~TCP_NAGLE_CORK;
+ chtls_tcp_push(sk, 0);
+ }
+}
+
+static void chtls_close_conn(struct sock *sk)
+{
+ struct cpl_close_con_req *req;
+ struct chtls_sock *csk;
+ struct sk_buff *skb;
+ unsigned int tid;
+ unsigned int len;
+
+ len = roundup(sizeof(struct cpl_close_con_req), 16);
+ csk = rcu_dereference_sk_user_data(sk);
+ tid = csk->tid;
+
+ skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+ req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
+ FW_WR_IMMDLEN_V(sizeof(*req) -
+ sizeof(req->wr)));
+ req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
+ FW_WR_FLOWID_V(tid));
+
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+
+ tcp_uncork(sk);
+ skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
+ if (sk->sk_state != TCP_SYN_SENT)
+ chtls_push_frames(csk, 1);
+}
+
+/*
+ * Perform a state transition during close and return the actions indicated
+ * for the transition. Do not make this function inline, the main reason
+ * it exists at all is to avoid multiple inlining of tcp_set_state.
+ */
+static int make_close_transition(struct sock *sk)
+{
+ int next = (int)new_state[sk->sk_state];
+
+ tcp_set_state(sk, next & TCP_STATE_MASK);
+ return next & TCP_ACTION_FIN;
+}
+
+void chtls_close(struct sock *sk, long timeout)
+{
+ int data_lost, prev_state;
+ struct chtls_sock *csk;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ lock_sock(sk);
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+
+ data_lost = skb_queue_len(&sk->sk_receive_queue);
+ data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
+ chtls_purge_recv_queue(sk);
+ chtls_purge_receive_queue(sk);
+
+ if (sk->sk_state == TCP_CLOSE) {
+ goto wait;
+ } else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
+ chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
+ release_tcp_port(sk);
+ goto unlock;
+ } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+ sk->sk_prot->disconnect(sk, 0);
+ } else if (make_close_transition(sk)) {
+ chtls_close_conn(sk);
+ }
+wait:
+ if (timeout)
+ sk_stream_wait_close(sk, timeout);
+
+unlock:
+ prev_state = sk->sk_state;
+ sock_hold(sk);
+ sock_orphan(sk);
+
+ release_sock(sk);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+ if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
+ goto out;
+
+ if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
+ !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
+ if (skb)
+ chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
+ }
+
+ if (sk->sk_state == TCP_CLOSE)
+ inet_csk_destroy_sock(sk);
+
+out:
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+}
+
+/*
+ * Wait until a socket enters one of the given states.
+ */
+static int wait_for_states(struct sock *sk, unsigned int states)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct socket_wq _sk_wq;
+ long current_timeo;
+ int err = 0;
+
+ current_timeo = 200;
+
+ /*
+ * We want this to work even when there's no associated struct socket.
+ * In that case we provide a temporary wait_queue_head_t.
+ */
+ if (!sk->sk_wq) {
+ init_waitqueue_head(&_sk_wq.wait);
+ _sk_wq.fasync_list = NULL;
+ init_rcu_head_on_stack(&_sk_wq.rcu);
+ RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
+ }
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (!sk_in_state(sk, states)) {
+ if (!current_timeo) {
+ err = -EBUSY;
+ break;
+ }
+ if (signal_pending(current)) {
+ err = sock_intr_errno(current_timeo);
+ break;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ release_sock(sk);
+ if (!sk_in_state(sk, states))
+ current_timeo = schedule_timeout(current_timeo);
+ __set_current_state(TASK_RUNNING);
+ lock_sock(sk);
+ }
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ if (rcu_dereference(sk->sk_wq) == &_sk_wq)
+ sk->sk_wq = NULL;
+ return err;
+}
+
+int chtls_disconnect(struct sock *sk, int flags)
+{
+ struct tcp_sock *tp;
+ int err;
+
+ tp = tcp_sk(sk);
+ chtls_purge_recv_queue(sk);
+ chtls_purge_receive_queue(sk);
+ chtls_purge_write_queue(sk);
+
+ if (sk->sk_state != TCP_CLOSE) {
+ sk->sk_err = ECONNRESET;
+ chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
+ err = wait_for_states(sk, TCPF_CLOSE);
+ if (err)
+ return err;
+ }
+ chtls_purge_recv_queue(sk);
+ chtls_purge_receive_queue(sk);
+ tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
+ return tcp_disconnect(sk, flags);
+}
+
+#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
+ TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
+void chtls_shutdown(struct sock *sk, int how)
+{
+ if ((how & SEND_SHUTDOWN) &&
+ sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
+ make_close_transition(sk))
+ chtls_close_conn(sk);
+}
+
+void chtls_destroy_sock(struct sock *sk)
+{
+ struct chtls_sock *csk;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ chtls_purge_recv_queue(sk);
+ csk->ulp_mode = ULP_MODE_NONE;
+ chtls_purge_write_queue(sk);
+ free_tls_keyid(sk);
+ kref_put(&csk->kref, chtls_sock_release);
+ csk->cdev = NULL;
+ if (sk->sk_family == AF_INET)
+ sk->sk_prot = &tcp_prot;
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ sk->sk_prot = &tcpv6_prot;
+#endif
+ sk->sk_prot->destroy(sk);
+}
+
+static void reset_listen_child(struct sock *child)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
+ struct sk_buff *skb;
+
+ skb = alloc_ctrl_skb(csk->txdata_skb_cache,
+ sizeof(struct cpl_abort_req));
+
+ chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
+ sock_orphan(child);
+ INC_ORPHAN_COUNT(child);
+ if (child->sk_state == TCP_CLOSE)
+ inet_csk_destroy_sock(child);
+}
+
+static void chtls_disconnect_acceptq(struct sock *listen_sk)
+{
+ struct request_sock **pprev;
+
+ pprev = ACCEPT_QUEUE(listen_sk);
+ while (*pprev) {
+ struct request_sock *req = *pprev;
+
+ if (req->rsk_ops == &chtls_rsk_ops ||
+ req->rsk_ops == &chtls_rsk_opsv6) {
+ struct sock *child = req->sk;
+
+ *pprev = req->dl_next;
+ sk_acceptq_removed(listen_sk);
+ reqsk_put(req);
+ sock_hold(child);
+ local_bh_disable();
+ bh_lock_sock(child);
+ release_tcp_port(child);
+ reset_listen_child(child);
+ bh_unlock_sock(child);
+ local_bh_enable();
+ sock_put(child);
+ } else {
+ pprev = &req->dl_next;
+ }
+ }
+}
+
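+/* Hash a listening socket pointer to a bucket index in listen_hash_tab. */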
+static int listen_hashfn(const struct sock *sk)
+{
+ return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
+}
+
+static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
+ struct sock *sk,
+ unsigned int stid)
+{
+ struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);
+
+ if (p) {
+ int key = listen_hashfn(sk);
+
+ p->sk = sk;
+ p->stid = stid;
+ spin_lock(&cdev->listen_lock);
+ p->next = cdev->listen_hash_tab[key];
+ cdev->listen_hash_tab[key] = p;
+ spin_unlock(&cdev->listen_lock);
+ }
+ return p;
+}
+
+static int listen_hash_find(struct chtls_dev *cdev,
+ struct sock *sk)
+{
+ struct listen_info *p;
+ int stid = -1;
+ int key;
+
+ key = listen_hashfn(sk);
+
+ spin_lock(&cdev->listen_lock);
+ for (p = cdev->listen_hash_tab[key]; p; p = p->next)
+ if (p->sk == sk) {
+ stid = p->stid;
+ break;
+ }
+ spin_unlock(&cdev->listen_lock);
+ return stid;
+}
+
+static int listen_hash_del(struct chtls_dev *cdev,
+ struct sock *sk)
+{
+ struct listen_info *p, **prev;
+ int stid = -1;
+ int key;
+
+ key = listen_hashfn(sk);
+ prev = &cdev->listen_hash_tab[key];
+
+ spin_lock(&cdev->listen_lock);
+ for (p = *prev; p; prev = &p->next, p = p->next)
+ if (p->sk == sk) {
+ stid = p->stid;
+ *prev = p->next;
+ kfree(p);
+ break;
+ }
+ spin_unlock(&cdev->listen_lock);
+ return stid;
+}
+
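+/* Detach an embryonic connection from its parent's SYN/accept queues
+ * and free the pending request_sock.
+ */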
+static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
+{
+ struct request_sock *req;
+ struct chtls_sock *csk;
+
+ csk = rcu_dereference_sk_user_data(child);
+ req = csk->passive_reap_next;
+
+ reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
+ __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
+ chtls_reqsk_free(req);
+ csk->passive_reap_next = NULL;
+}
+
+static void chtls_reset_synq(struct listen_ctx *listen_ctx)
+{
+ struct sock *listen_sk = listen_ctx->lsk;
+
+ while (!skb_queue_empty(&listen_ctx->synq)) {
+ struct chtls_sock *csk =
+ container_of((struct synq *)__skb_dequeue
+ (&listen_ctx->synq), struct chtls_sock, synq);
+ struct sock *child = csk->sk;
+
+ cleanup_syn_rcv_conn(child, listen_sk);
+ sock_hold(child);
+ local_bh_disable();
+ bh_lock_sock(child);
+ release_tcp_port(child);
+ reset_listen_child(child);
+ bh_unlock_sock(child);
+ local_bh_enable();
+ sock_put(child);
+ }
+}
+
+int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
+{
+ struct net_device *ndev;
+#if IS_ENABLED(CONFIG_IPV6)
+ bool clip_valid = false;
+#endif
+ struct listen_ctx *ctx;
+ struct adapter *adap;
+ struct port_info *pi;
+ int ret = 0;
+ int stid;
+
+ rcu_read_lock();
+ ndev = chtls_find_netdev(cdev, sk);
+ rcu_read_unlock();
+ if (!ndev)
+ return -EBADF;
+
+ pi = netdev_priv(ndev);
+ adap = pi->adapter;
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
+ return -EBADF;
+
+ if (listen_hash_find(cdev, sk) >= 0) /* already have it */
+ return -EADDRINUSE;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ __module_get(THIS_MODULE);
+ ctx->lsk = sk;
+ ctx->cdev = cdev;
+ ctx->state = T4_LISTEN_START_PENDING;
+ skb_queue_head_init(&ctx->synq);
+
+ stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
+ if (stid < 0)
+ goto free_ctx;
+
+ sock_hold(sk);
+ if (!listen_hash_add(cdev, sk, stid))
+ goto free_stid;
+
+ if (sk->sk_family == PF_INET) {
+ ret = cxgb4_create_server(ndev, stid,
+ inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_sport, 0,
+ cdev->lldi->rxq_ids[0]);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ int addr_type;
+
+ addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+ if (addr_type != IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(ndev, (const u32 *)
+ &sk->sk_v6_rcv_saddr, 1);
+ if (ret)
+ goto del_hash;
+ clip_valid = true;
+ }
+ ret = cxgb4_create_server6(ndev, stid,
+ &sk->sk_v6_rcv_saddr,
+ inet_sk(sk)->inet_sport,
+ cdev->lldi->rxq_ids[0]);
+#endif
+ }
+ if (ret > 0)
+ ret = net_xmit_errno(ret);
+ if (ret)
+ goto del_hash;
+ return 0;
+del_hash:
+#if IS_ENABLED(CONFIG_IPV6)
+ if (clip_valid)
+ cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
+#endif
+ listen_hash_del(cdev, sk);
+free_stid:
+ cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
+ sock_put(sk);
+free_ctx:
+ kfree(ctx);
+ module_put(THIS_MODULE);
+ return -EBADF;
+}
+
+void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
+{
+ struct listen_ctx *listen_ctx;
+ int stid;
+
+ stid = listen_hash_del(cdev, sk);
+ if (stid < 0)
+ return;
+
+ listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
+ chtls_reset_synq(listen_ctx);
+
+ cxgb4_remove_server(cdev->lldi->ports[0], stid,
+ cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == PF_INET6) {
+ struct chtls_sock *csk;
+ int addr_type = 0;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sk->sk_v6_rcv_saddr);
+ if (addr_type != IPV6_ADDR_ANY)
+ cxgb4_clip_release(csk->egress_dev, (const u32 *)
+ &sk->sk_v6_rcv_saddr, 1);
+ }
+#endif
+ chtls_disconnect_acceptq(sk);
+}
+
+static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
+ unsigned int stid = GET_TID(rpl);
+ struct listen_ctx *listen_ctx;
+
+ listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
+ if (!listen_ctx)
+ return CPL_RET_BUF_DONE;
+
+ if (listen_ctx->state == T4_LISTEN_START_PENDING) {
+ listen_ctx->state = T4_LISTEN_STARTED;
+ return CPL_RET_BUF_DONE;
+ }
+
+ if (rpl->status != CPL_ERR_NONE) {
+ pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
+ rpl->status, stid);
+ return CPL_RET_BUF_DONE;
+ }
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
+ struct listen_ctx *listen_ctx;
+ unsigned int stid;
+ void *data;
+
+ stid = GET_TID(rpl);
+ data = lookup_stid(cdev->tids, stid);
+ listen_ctx = (struct listen_ctx *)data;
+
+ if (rpl->status != CPL_ERR_NONE) {
+ pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
+ rpl->status, stid);
+ return CPL_RET_BUF_DONE;
+ }
+
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static void chtls_purge_wr_queue(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = dequeue_wr(sk)) != NULL)
+ kfree_skb(skb);
+}
+
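+/*
+ * Release the HW and driver resources held by an offloaded connection:
+ * the cached TX skb, any unacknowledged work requests, the L2T entry and,
+ * unless the connection is still in SYN_SENT, the hardware TID.
+ */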
+static void chtls_release_resources(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct chtls_dev *cdev = csk->cdev;
+ unsigned int tid = csk->tid;
+ struct tid_info *tids;
+
+ if (!cdev)
+ return;
+
+ tids = cdev->tids;
+ kfree_skb(csk->txdata_skb_cache);
+ csk->txdata_skb_cache = NULL;
+
+ if (csk->wr_credits != csk->wr_max_credits) {
+ chtls_purge_wr_queue(sk);
+ chtls_reset_wr_list(csk);
+ }
+
+ if (csk->l2t_entry) {
+ cxgb4_l2t_release(csk->l2t_entry);
+ csk->l2t_entry = NULL;
+ }
+
+ if (sk->sk_state != TCP_SYN_SENT) {
+ cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
+ sock_put(sk);
+ }
+}
+
+static void chtls_conn_done(struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_DEAD))
+ chtls_purge_receive_queue(sk);
+ sk_wakeup_sleepers(sk, 0);
+ tcp_done(sk);
+}
+
+static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
+{
+ /*
+ * If the server is still open we clean up the child connection,
+	 * otherwise the server already did the cleanup as it was purging
+ * its SYN queue and the skb was just sitting in its backlog.
+ */
+ if (likely(parent->sk_state == TCP_LISTEN)) {
+ cleanup_syn_rcv_conn(child, parent);
+		/* Without the sock_orphan() call below we leak the socket
+		 * under SYN-flood load: inet_csk_destroy_sock() is never
+		 * reached from tcp_done() because the SOCK_DEAD flag is not
+		 * set. The core kernel avoids this case by creating the new
+		 * socket only after the 3-way handshake completes.
+		 */
+ sock_orphan(child);
+ percpu_counter_inc((child)->sk_prot->orphan_count);
+ chtls_release_resources(child);
+ chtls_conn_done(child);
+ } else {
+ if (csk_flag(child, CSK_RST_ABORTED)) {
+ chtls_release_resources(child);
+ chtls_conn_done(child);
+ }
+ }
+}
+
+static void pass_open_abort(struct sock *child, struct sock *parent,
+ struct sk_buff *skb)
+{
+ do_abort_syn_rcv(child, parent);
+ kfree_skb(skb);
+}
+
+static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
+{
+ pass_open_abort(skb->sk, lsk, skb);
+}
+
+static void chtls_pass_open_arp_failure(struct sock *sk,
+ struct sk_buff *skb)
+{
+ const struct request_sock *oreq;
+ struct chtls_sock *csk;
+ struct chtls_dev *cdev;
+ struct sock *parent;
+ void *data;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ cdev = csk->cdev;
+
+ /*
+ * If the connection is being aborted due to the parent listening
+	 * socket going away, there's nothing to do; the ABORT_REQ will close
+ * the connection.
+ */
+ if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ oreq = csk->passive_reap_next;
+ data = lookup_stid(cdev->tids, oreq->ts_recent);
+ parent = ((struct listen_ctx *)data)->lsk;
+
+ bh_lock_sock(parent);
+ if (!sock_owned_by_user(parent)) {
+ pass_open_abort(sk, parent, skb);
+ } else {
+ BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
+ __sk_add_backlog(parent, skb);
+ }
+ bh_unlock_sock(parent);
+}
+
+static void chtls_accept_rpl_arp_failure(void *handle,
+ struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)handle;
+
+ sock_hold(sk);
+ process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
+ sock_put(sk);
+}
+
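+/*
+ * Pick the advertised MSS for a passive-open connection and return the
+ * adapter MTU table index that best matches it, accounting for the IP and
+ * TCP header sizes, a possible timestamp option and the path MTU.
+ */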
+static unsigned int chtls_select_mss(const struct chtls_sock *csk,
+ unsigned int pmtu,
+ struct cpl_pass_accept_req *req)
+{
+ struct chtls_dev *cdev;
+ struct dst_entry *dst;
+ unsigned int tcpoptsz;
+ unsigned int iphdrsz;
+ unsigned int mtu_idx;
+ struct tcp_sock *tp;
+ unsigned int mss;
+ struct sock *sk;
+
+ mss = ntohs(req->tcpopt.mss);
+ sk = csk->sk;
+ dst = __sk_dst_get(sk);
+ cdev = csk->cdev;
+ tp = tcp_sk(sk);
+ tcpoptsz = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+ else
+#endif
+ iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
+ if (req->tcpopt.tstamp)
+ tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
+
+ tp->advmss = dst_metric_advmss(dst);
+ if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
+ tp->advmss = USER_MSS(tp);
+ if (tp->advmss > pmtu - iphdrsz)
+ tp->advmss = pmtu - iphdrsz;
+ if (mss && tp->advmss > mss)
+ tp->advmss = mss;
+
+ tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
+ iphdrsz + tcpoptsz,
+ tp->advmss - tcpoptsz,
+ 8, &mtu_idx);
+ tp->advmss -= iphdrsz;
+
+ inet_csk(sk)->icsk_pmtu_cookie = pmtu;
+ return mtu_idx;
+}
+
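+/*
+ * Choose the smallest receive window scale (at most 14) that lets a 16-bit
+ * window cover @space, after capping @space at MAX_RCV_WND and the
+ * socket's window clamp.
+ */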
+static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
+{
+ int wscale = 0;
+
+ if (space > MAX_RCV_WND)
+ space = MAX_RCV_WND;
+ if (win_clamp && win_clamp < space)
+ space = win_clamp;
+
+ if (wscale_ok) {
+ while (wscale < 14 && (65535 << wscale) < space)
+ wscale++;
+ }
+ return wscale;
+}
+
+static void chtls_pass_accept_rpl(struct sk_buff *skb,
+ struct cpl_pass_accept_req *req,
+ unsigned int tid)
+
+{
+ struct cpl_t5_pass_accept_rpl *rpl5;
+ struct cxgb4_lld_info *lldi;
+ const struct tcphdr *tcph;
+ const struct tcp_sock *tp;
+ struct chtls_sock *csk;
+ unsigned int len;
+ struct sock *sk;
+ u32 opt2, hlen;
+ u64 opt0;
+
+ sk = skb->sk;
+ tp = tcp_sk(sk);
+ csk = sk->sk_user_data;
+ csk->tid = tid;
+ lldi = csk->cdev->lldi;
+ len = roundup(sizeof(*rpl5), 16);
+
+ rpl5 = __skb_put_zero(skb, len);
+ INIT_TP_WR(rpl5, tid);
+
+ OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ csk->tid));
+ csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
+ req);
+ opt0 = TCAM_BYPASS_F |
+ WND_SCALE_V(RCV_WSCALE(tp)) |
+ MSS_IDX_V(csk->mtu_idx) |
+ L2T_IDX_V(csk->l2t_entry->idx) |
+ NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ DSCP_V(csk->tos >> 2) |
+ ULP_MODE_V(ULP_MODE_TLS) |
+ RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));
+
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
+
+ if (!is_t5(lldi->adapter_type))
+ opt2 |= RX_FC_DISABLE_F;
+ if (req->tcpopt.tstamp)
+ opt2 |= TSTAMPS_EN_F;
+ if (req->tcpopt.sack)
+ opt2 |= SACK_EN_F;
+ hlen = ntohl(req->hdr_len);
+
+ tcph = (struct tcphdr *)((u8 *)(req + 1) +
+ T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN_V(1);
+ opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
+ opt2 |= T5_ISS_F;
+ opt2 |= T5_OPT_2_VALID_F;
+ opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
+ rpl5->opt0 = cpu_to_be64(opt0);
+ rpl5->opt2 = cpu_to_be32(opt2);
+ rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+ t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
+ cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
+}
+
+static void inet_inherit_port(struct inet_hashinfo *hash_info,
+ struct sock *lsk, struct sock *newsk)
+{
+ local_bh_disable();
+ __inet_inherit_port(lsk, newsk);
+ local_bh_enable();
+}
+
+static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ if (skb->protocol) {
+ kfree_skb(skb);
+ return 0;
+ }
+ BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
+ return 0;
+}
+
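+/*
+ * Derive the connection's send and receive windows from the port's link
+ * speed: 256KB for links up to 10Gb/s, multiplied by the speed in units
+ * of 10Gb/s for faster links.
+ */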
+static void chtls_set_tcp_window(struct chtls_sock *csk)
+{
+ struct net_device *ndev = csk->egress_dev;
+ struct port_info *pi = netdev_priv(ndev);
+ unsigned int linkspeed;
+ u8 scale;
+
+ linkspeed = pi->link_cfg.speed;
+ scale = linkspeed / SPEED_10000;
+#define CHTLS_10G_RCVWIN (256 * 1024)
+ csk->rcv_win = CHTLS_10G_RCVWIN;
+ if (scale)
+ csk->rcv_win *= scale;
+#define CHTLS_10G_SNDWIN (256 * 1024)
+ csk->snd_win = CHTLS_10G_SNDWIN;
+ if (scale)
+ csk->snd_win *= scale;
+}
+
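+/*
+ * Create and initialise the child socket for a passive open: clone the
+ * listener via tcp_create_openreq_child(), resolve the route and L2T
+ * entry for the peer, allocate the chtls_sock, and fill in addresses,
+ * window parameters and queue assignments from the adapter and listener.
+ */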
+static struct sock *chtls_recv_sock(struct sock *lsk,
+ struct request_sock *oreq,
+ void *network_hdr,
+ const struct cpl_pass_accept_req *req,
+ struct chtls_dev *cdev)
+{
+ struct neighbour *n = NULL;
+ struct inet_sock *newinet;
+ const struct iphdr *iph;
+ struct tls_context *ctx;
+ struct net_device *ndev;
+ struct chtls_sock *csk;
+ struct dst_entry *dst;
+ struct tcp_sock *tp;
+ struct sock *newsk;
+ u16 port_id;
+ int rxq_idx;
+ int step;
+
+ iph = (const struct iphdr *)network_hdr;
+ newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
+ if (!newsk)
+ goto free_oreq;
+
+ if (lsk->sk_family == AF_INET) {
+ dst = inet_csk_route_child_sock(lsk, newsk, oreq);
+ if (!dst)
+ goto free_sk;
+
+ n = dst_neigh_lookup(dst, &iph->saddr);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ const struct ipv6hdr *ip6h;
+ struct flowi6 fl6;
+
+ ip6h = (const struct ipv6hdr *)network_hdr;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = ip6h->daddr;
+ fl6.daddr = ip6h->saddr;
+ fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
+ fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
+ security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
+ dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
+ if (IS_ERR(dst))
+ goto free_sk;
+ n = dst_neigh_lookup(dst, &ip6h->saddr);
+#endif
+ }
+ if (!n)
+ goto free_sk;
+
+ ndev = n->dev;
+ if (!ndev)
+ goto free_dst;
+ port_id = cxgb4_port_idx(ndev);
+
+ csk = chtls_sock_create(cdev);
+ if (!csk)
+ goto free_dst;
+
+ csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
+ if (!csk->l2t_entry)
+ goto free_csk;
+
+ newsk->sk_user_data = csk;
+ newsk->sk_backlog_rcv = chtls_backlog_rcv;
+
+ tp = tcp_sk(newsk);
+ newinet = inet_sk(newsk);
+
+ if (iph->version == 0x4) {
+ newinet->inet_daddr = iph->saddr;
+ newinet->inet_rcv_saddr = iph->daddr;
+ newinet->inet_saddr = iph->daddr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
+ struct inet_request_sock *treq = inet_rsk(oreq);
+ struct ipv6_pinfo *newnp = inet6_sk(newsk);
+ struct ipv6_pinfo *np = inet6_sk(lsk);
+
+ inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+ memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
+ newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
+ inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
+ newnp->ipv6_fl_list = NULL;
+ newnp->pktoptions = NULL;
+ newsk->sk_bound_dev_if = treq->ir_iif;
+ newinet->inet_opt = NULL;
+ newinet->inet_daddr = LOOPBACK4_IPV6;
+ newinet->inet_saddr = LOOPBACK4_IPV6;
+#endif
+ }
+
+ oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+ sk_setup_caps(newsk, dst);
+ ctx = tls_get_ctx(lsk);
+ newsk->sk_destruct = ctx->sk_destruct;
+ csk->sk = newsk;
+ csk->passive_reap_next = oreq;
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ csk->port_id = port_id;
+ csk->egress_dev = ndev;
+ csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ chtls_set_tcp_window(csk);
+ tp->rcv_wnd = csk->rcv_win;
+ csk->sndbuf = csk->snd_win;
+ csk->ulp_mode = ULP_MODE_TLS;
+ step = cdev->lldi->nrxq / cdev->lldi->nchan;
+ csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
+ rxq_idx = port_id * step;
+ csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
+ port_id * step;
+ csk->sndbuf = newsk->sk_sndbuf;
+ csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
+ RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
+ sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling,
+ tp->window_clamp);
+ neigh_release(n);
+ inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ csk_set_flag(csk, CSK_CONN_INLINE);
+ bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
+
+ return newsk;
+free_csk:
+ chtls_sock_release(&csk->kref);
+free_dst:
+ dst_release(dst);
+free_sk:
+ inet_csk_prepare_forced_close(newsk);
+ tcp_done(newsk);
+free_oreq:
+ chtls_reqsk_free(oreq);
+ return NULL;
+}
+
+/*
+ * Populate a TID_RELEASE WR. The skb must already be properly sized.
+ */
+static void mk_tid_release(struct sk_buff *skb,
+ unsigned int chan, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+ unsigned int len;
+
+ len = roundup(sizeof(struct cpl_tid_release), 16);
+ req = (struct cpl_tid_release *)__skb_put(skb, len);
+ memset(req, 0, len);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+ INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+static int chtls_get_module(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (!try_module_get(icsk->icsk_ulp_ops->owner))
+ return -1;
+
+ return 0;
+}
+
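+/*
+ * Process a CPL_PASS_ACCEPT_REQ on the listening socket: parse the L2/L3/L4
+ * headers echoed by the hardware, build a request_sock, create the child
+ * socket, insert it into the TID table and the listener's SYN queue, and
+ * send the CPL_PASS_ACCEPT_RPL. Invalid requests are answered with a
+ * TID_RELEASE.
+ */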
+static void chtls_pass_accept_request(struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct cpl_t5_pass_accept_rpl *rpl;
+ struct cpl_pass_accept_req *req;
+ struct listen_ctx *listen_ctx;
+ struct vlan_ethhdr *vlan_eh;
+ struct request_sock *oreq;
+ struct sk_buff *reply_skb;
+ struct chtls_sock *csk;
+ struct chtls_dev *cdev;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ struct sock *newsk;
+ struct ethhdr *eh;
+ struct iphdr *iph;
+ void *network_hdr;
+ unsigned int stid;
+ unsigned int len;
+ unsigned int tid;
+ bool th_ecn, ect;
+ __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+ u16 eth_hdr_len;
+ bool ecn_ok;
+
+ req = cplhdr(skb) + RSS_HDR;
+ tid = GET_TID(req);
+ cdev = BLOG_SKB_CB(skb)->cdev;
+ newsk = lookup_tid(cdev->tids, tid);
+ stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+ if (newsk) {
+ pr_info("tid (%d) already in use\n", tid);
+ return;
+ }
+
+ len = roundup(sizeof(*rpl), 16);
+ reply_skb = alloc_skb(len, GFP_ATOMIC);
+ if (!reply_skb) {
+ cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
+ kfree_skb(skb);
+ return;
+ }
+
+ if (sk->sk_state != TCP_LISTEN)
+ goto reject;
+
+ if (inet_csk_reqsk_queue_is_full(sk))
+ goto reject;
+
+ if (sk_acceptq_is_full(sk))
+ goto reject;
+
+
+ eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
+ if (eth_hdr_len == ETH_HLEN) {
+ eh = (struct ethhdr *)(req + 1);
+ iph = (struct iphdr *)(eh + 1);
+ ip6h = (struct ipv6hdr *)(eh + 1);
+ network_hdr = (void *)(eh + 1);
+ } else {
+ vlan_eh = (struct vlan_ethhdr *)(req + 1);
+ iph = (struct iphdr *)(vlan_eh + 1);
+ ip6h = (struct ipv6hdr *)(vlan_eh + 1);
+ network_hdr = (void *)(vlan_eh + 1);
+ }
+
+ if (iph->version == 0x4) {
+ tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)req);
+ oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
+ } else {
+ tcph = (struct tcphdr *)(ip6h + 1);
+ skb_set_network_header(skb, (void *)ip6h - (void *)req);
+ oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false);
+ }
+
+ if (!oreq)
+ goto reject;
+
+ oreq->rsk_rcv_wnd = 0;
+ oreq->rsk_window_clamp = 0;
+ oreq->syncookie = 0;
+ oreq->mss = 0;
+ oreq->ts_recent = 0;
+
+ tcp_rsk(oreq)->tfo_listener = false;
+ tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
+ chtls_set_req_port(oreq, tcph->source, tcph->dest);
+ if (iph->version == 0x4) {
+ chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
+ ip_dsfield = ipv4_get_dsfield(iph);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
+#endif
+ }
+ if (req->tcpopt.wsf <= 14 &&
+ sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ inet_rsk(oreq)->wscale_ok = 1;
+ inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
+ }
+ inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
+ th_ecn = tcph->ece && tcph->cwr;
+ if (th_ecn) {
+ ect = !INET_ECN_is_not_ect(ip_dsfield);
+ ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
+ inet_rsk(oreq)->ecn_ok = 1;
+ }
+
+ newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
+ if (!newsk)
+ goto free_oreq;
+
+ if (chtls_get_module(newsk))
+ goto reject;
+ inet_csk_reqsk_queue_added(sk);
+ reply_skb->sk = newsk;
+ chtls_install_cpl_ops(newsk);
+ cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
+ csk = rcu_dereference_sk_user_data(newsk);
+ listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
+ csk->listen_ctx = listen_ctx;
+ __skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
+ chtls_pass_accept_rpl(reply_skb, req, tid);
+ kfree_skb(skb);
+ return;
+
+free_oreq:
+ chtls_reqsk_free(oreq);
+reject:
+ mk_tid_release(reply_skb, 0, tid);
+ cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+ kfree_skb(skb);
+}
+
+/*
+ * Handle a CPL_PASS_ACCEPT_REQ message.
+ */
+static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
+ struct listen_ctx *ctx;
+ unsigned int stid;
+ unsigned int tid;
+ struct sock *lsk;
+ void *data;
+
+ stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+ tid = GET_TID(req);
+
+ data = lookup_stid(cdev->tids, stid);
+ if (!data)
+ return 1;
+
+ ctx = (struct listen_ctx *)data;
+ lsk = ctx->lsk;
+
+ if (unlikely(tid_out_of_range(cdev->tids, tid))) {
+ pr_info("passive open TID %u too large\n", tid);
+ return 1;
+ }
+
+ BLOG_SKB_CB(skb)->cdev = cdev;
+ process_cpl_msg(chtls_pass_accept_request, lsk, skb);
+ return 0;
+}
+
+/*
+ * Completes some final bits of initialization for just established connections
+ * and changes their state to TCP_ESTABLISHED.
+ *
+ * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
+ */
+static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->pushed_seq = snd_isn;
+ tp->write_seq = snd_isn;
+ tp->snd_nxt = snd_isn;
+ tp->snd_una = snd_isn;
+ inet_sk(sk)->inet_id = prandom_u32();
+ assign_rxopt(sk, opt);
+
+ if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
+ tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);
+
+ smp_mb();
+ tcp_set_state(sk, TCP_ESTABLISHED);
+}
+
+static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *abort_skb;
+
+ abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
+ if (abort_skb)
+ chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
+}
+
+static struct sock *reap_list;
+static DEFINE_SPINLOCK(reap_list_lock);
+
+/*
+ * Process the reap list.
+ */
+DECLARE_TASK_FUNC(process_reap_list, task_param)
+{
+ spin_lock_bh(&reap_list_lock);
+ while (reap_list) {
+ struct sock *sk = reap_list;
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+ reap_list = csk->passive_reap_next;
+ csk->passive_reap_next = NULL;
+ spin_unlock(&reap_list_lock);
+ sock_hold(sk);
+
+ bh_lock_sock(sk);
+ chtls_abort_conn(sk, NULL);
+ sock_orphan(sk);
+ if (sk->sk_state == TCP_CLOSE)
+ inet_csk_destroy_sock(sk);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ spin_lock(&reap_list_lock);
+ }
+ spin_unlock_bh(&reap_list_lock);
+}
+
+static DECLARE_WORK(reap_task, process_reap_list);
+
+static void add_to_reap_list(struct sock *sk)
+{
+ struct chtls_sock *csk = sk->sk_user_data;
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ release_tcp_port(sk); /* release the port immediately */
+
+ spin_lock(&reap_list_lock);
+ csk->passive_reap_next = reap_list;
+ reap_list = sk;
+ if (!csk->passive_reap_next)
+ schedule_work(&reap_task);
+ spin_unlock(&reap_list_lock);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+}
+
+static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
+ struct chtls_dev *cdev)
+{
+ struct request_sock *oreq;
+ struct chtls_sock *csk;
+
+ if (lsk->sk_state != TCP_LISTEN)
+ return;
+
+ csk = child->sk_user_data;
+ oreq = csk->passive_reap_next;
+ csk->passive_reap_next = NULL;
+
+ reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
+ __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
+
+ if (sk_acceptq_is_full(lsk)) {
+ chtls_reqsk_free(oreq);
+ add_to_reap_list(child);
+ } else {
+ refcount_set(&oreq->rsk_refcnt, 1);
+ inet_csk_reqsk_queue_add(lsk, oreq, child);
+ lsk->sk_data_ready(lsk);
+ }
+}
+
+static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
+{
+ struct sock *child = skb->sk;
+
+ skb->sk = NULL;
+ add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
+ kfree_skb(skb);
+}
+
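+/*
+ * Handle CPL_PASS_ESTABLISH: the three-way handshake has completed, so
+ * move the child socket to TCP_ESTABLISHED, initialise its work-request
+ * credits, and hand it to the parent listener's accept queue, either
+ * directly or via the listener's backlog if the listener is locked by a
+ * user context.
+ */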
+static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
+ struct chtls_sock *csk;
+ struct sock *lsk, *sk;
+ unsigned int hwtid;
+
+ hwtid = GET_TID(req);
+ sk = lookup_tid(cdev->tids, hwtid);
+ if (!sk)
+ return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);
+
+ bh_lock_sock(sk);
+ if (unlikely(sock_owned_by_user(sk))) {
+ kfree_skb(skb);
+ } else {
+ unsigned int stid;
+ void *data;
+
+ csk = sk->sk_user_data;
+ csk->wr_max_credits = 64;
+ csk->wr_credits = 64;
+ csk->wr_unacked = 0;
+ make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+ stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+ sk->sk_state_change(sk);
+ if (unlikely(sk->sk_socket))
+ sk_wake_async(sk, 0, POLL_OUT);
+
+ data = lookup_stid(cdev->tids, stid);
+ lsk = ((struct listen_ctx *)data)->lsk;
+
+ bh_lock_sock(lsk);
+ if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
+ /* removed from synq */
+ bh_unlock_sock(lsk);
+ kfree_skb(skb);
+ goto unlock;
+ }
+
+ if (likely(!sock_owned_by_user(lsk))) {
+ kfree_skb(skb);
+ add_pass_open_to_parent(sk, lsk, cdev);
+ } else {
+ skb->sk = sk;
+ BLOG_SKB_CB(skb)->cdev = cdev;
+ BLOG_SKB_CB(skb)->backlog_rcv =
+ bl_add_pass_open_to_parent;
+ __sk_add_backlog(lsk, skb);
+ }
+ bh_unlock_sock(lsk);
+ }
+unlock:
+ bh_unlock_sock(sk);
+ return 0;
+}
+
+/*
+ * Handle receipt of an urgent pointer.
+ */
+static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ urg_seq--;
+ if (tp->urg_data && !after(urg_seq, tp->urg_seq))
+ return; /* duplicate pointer */
+
+ sk_send_sigurg(sk);
+ if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
+ !sock_flag(sk, SOCK_URGINLINE) &&
+ tp->copied_seq != tp->rcv_nxt) {
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+
+ tp->copied_seq++;
+ if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
+ chtls_free_skb(sk, skb);
+ }
+
+ tp->urg_data = TCP_URG_NOTYET;
+ tp->urg_seq = urg_seq;
+}
+
+static void check_sk_callbacks(struct chtls_sock *csk)
+{
+ struct sock *sk = csk->sk;
+
+ if (unlikely(sk->sk_user_data &&
+ !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
+ csk_set_flag(csk, CSK_CALLBACKS_CHKD);
+}
+
+/*
+ * Handles Rx data that arrives in a state where the socket isn't accepting
+ * new data.
+ */
+static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
+{
+ if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
+ chtls_abort_conn(sk, skb);
+
+ kfree_skb(skb);
+}
+
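+/*
+ * Deliver plain (non-TLS) RX payload carried in a CPL_RX_DATA message to
+ * the socket's receive queue, updating rcv_nxt, urgent-data state and the
+ * delayed-ACK mode along the way.
+ */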
+static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
+{
+ struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
+ struct chtls_sock *csk;
+ struct tcp_sock *tp;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ tp = tcp_sk(sk);
+
+ if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
+ handle_excess_rx(sk, skb);
+ return;
+ }
+
+ ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
+ ULP_SKB_CB(skb)->psh = hdr->psh;
+ skb_ulp_mode(skb) = ULP_MODE_NONE;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*hdr) + RSS_HDR);
+ if (!skb->data_len)
+ __skb_trim(skb, ntohs(hdr->len));
+
+ if (unlikely(hdr->urg))
+ handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
+ if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
+ tp->urg_seq - tp->rcv_nxt < skb->len))
+ tp->urg_data = TCP_URG_VALID |
+ skb->data[tp->urg_seq - tp->rcv_nxt];
+
+ if (unlikely(hdr->dack_mode != csk->delack_mode)) {
+ csk->delack_mode = hdr->dack_mode;
+ csk->delack_seq = tp->rcv_nxt;
+ }
+
+ tcp_hdr(skb)->fin = 0;
+ tp->rcv_nxt += skb->len;
+
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ check_sk_callbacks(csk);
+ sk->sk_data_ready(sk);
+ }
+}
+
+static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
+ unsigned int hwtid = GET_TID(req);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
+ skb_dst_set(skb, NULL);
+ process_cpl_msg(chtls_recv_data, sk, skb);
+ return 0;
+}
+
+static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
+{
+ struct cpl_tls_data *hdr = cplhdr(skb);
+ struct chtls_sock *csk;
+ struct chtls_hws *tlsk;
+ struct tcp_sock *tp;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ tlsk = &csk->tlshws;
+ tp = tcp_sk(sk);
+
+ if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
+ handle_excess_rx(sk, skb);
+ return;
+ }
+
+ ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
+ ULP_SKB_CB(skb)->flags = 0;
+ skb_ulp_mode(skb) = ULP_MODE_TLS;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*hdr));
+ if (!skb->data_len)
+ __skb_trim(skb,
+ CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));
+
+ if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
+ tp->rcv_nxt < skb->len))
+ tp->urg_data = TCP_URG_VALID |
+ skb->data[tp->urg_seq - tp->rcv_nxt];
+
+ tcp_hdr(skb)->fin = 0;
+ tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
+ __skb_queue_tail(&tlsk->sk_recv_queue, skb);
+}
+
+static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_tls_data *req = cplhdr(skb);
+ unsigned int hwtid = GET_TID(req);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
+ skb_dst_set(skb, NULL);
+ process_cpl_msg(chtls_recv_pdu, sk, skb);
+ return 0;
+}
+
+static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
+{
+ struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);
+
+ skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
+ tls_cmp_hdr->length = ntohs((__force __be16)nlen);
+}
+
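+/*
+ * Handle CPL_RX_TLS_CMP: the hardware has finished processing a TLS
+ * record. Queue the completion header, and the previously received
+ * payload if any, on the socket's receive queue and advance rcv_nxt by
+ * the PDU length.
+ */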
+static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
+{
+ struct tlsrx_cmp_hdr *tls_hdr_pkt;
+ struct cpl_rx_tls_cmp *cmp_cpl;
+ struct sk_buff *skb_rec;
+ struct chtls_sock *csk;
+ struct chtls_hws *tlsk;
+ struct tcp_sock *tp;
+
+ cmp_cpl = cplhdr(skb);
+ csk = rcu_dereference_sk_user_data(sk);
+ tlsk = &csk->tlshws;
+ tp = tcp_sk(sk);
+
+ ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
+ ULP_SKB_CB(skb)->flags = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(*cmp_cpl));
+ tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
+ if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
+ tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
+ if (!skb->data_len)
+ __skb_trim(skb, TLS_HEADER_LENGTH);
+
+ tp->rcv_nxt +=
+ CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));
+
+ ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
+ skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
+ if (!skb_rec) {
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ } else {
+ chtls_set_hdrlen(skb, tlsk->pldlen);
+ tlsk->pldlen = 0;
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ __skb_queue_tail(&sk->sk_receive_queue, skb_rec);
+ }
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ check_sk_callbacks(csk);
+ sk->sk_data_ready(sk);
+ }
+}
+
+static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_rx_tls_cmp *req = cplhdr(skb);
+ unsigned int hwtid = GET_TID(req);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
+ skb_dst_set(skb, NULL);
+ process_cpl_msg(chtls_rx_hdr, sk, skb);
+
+ return 0;
+}
+
+static void chtls_timewait(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->rcv_nxt++;
+ tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
+ tp->srtt_us = 0;
+ tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+}
+
+static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ goto out;
+
+ sk->sk_shutdown |= RCV_SHUTDOWN;
+ sock_set_flag(sk, SOCK_DONE);
+
+ switch (sk->sk_state) {
+ case TCP_SYN_RECV:
+ case TCP_ESTABLISHED:
+ tcp_set_state(sk, TCP_CLOSE_WAIT);
+ break;
+ case TCP_FIN_WAIT1:
+ tcp_set_state(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
+ chtls_release_resources(sk);
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ chtls_conn_done(sk);
+ else
+ chtls_timewait(sk);
+ break;
+ default:
+ pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
+ }
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ sk->sk_state_change(sk);
+ /* Do not send POLL_HUP for half duplex close. */
+
+ if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
+ sk->sk_state == TCP_CLOSE)
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+ else
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ }
+out:
+ kfree_skb(skb);
+}
+
+static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
+{
+ struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
+ struct chtls_sock *csk;
+ struct tcp_sock *tp;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ goto out;
+
+ tp = tcp_sk(sk);
+
+ tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
+
+ switch (sk->sk_state) {
+ case TCP_CLOSING:
+ chtls_release_resources(sk);
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ chtls_conn_done(sk);
+ else
+ chtls_timewait(sk);
+ break;
+ case TCP_LAST_ACK:
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
+ break;
+ case TCP_FIN_WAIT1:
+ tcp_set_state(sk, TCP_FIN_WAIT2);
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_state_change(sk);
+ else if (tcp_sk(sk)->linger2 < 0 &&
+ !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
+ chtls_abort_conn(sk, skb);
+ break;
+ default:
+ pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
+ }
+out:
+ kfree_skb(skb);
+}
+
+static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
+ size_t len, gfp_t gfp)
+{
+ if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
+ WARN_ONCE(skb->len < len, "skb alloc error");
+ __skb_trim(skb, len);
+ skb_get(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ if (skb)
+ __skb_put(skb, len);
+ }
+ return skb;
+}
+
+static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
+ int cmd)
+{
+ struct cpl_abort_rpl *rpl = cplhdr(skb);
+
+ INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
+ rpl->cmd = cmd;
+}
+
+static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct sk_buff *reply_skb;
+
+ reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
+ GFP_KERNEL | __GFP_NOFAIL);
+ __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+ set_abort_rpl_wr(reply_skb, GET_TID(req),
+ (req->status & CPL_ABORT_NO_RST));
+ set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
+ cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+ kfree_skb(skb);
+}
+
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+ defer_handler_t handler)
+{
+ DEFERRED_SKB_CB(skb)->handler = handler;
+ spin_lock_bh(&cdev->deferq.lock);
+ __skb_queue_tail(&cdev->deferq, skb);
+ if (skb_queue_len(&cdev->deferq) == 1)
+ schedule_work(&cdev->deferq_task);
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
+static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
+ struct chtls_dev *cdev, int status, int queue)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct sk_buff *reply_skb;
+ struct chtls_sock *csk;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
+ GFP_KERNEL);
+
+ if (!reply_skb) {
+ req->status = (queue << 1);
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
+ return;
+ }
+
+ set_abort_rpl_wr(reply_skb, GET_TID(req), status);
+ kfree_skb(skb);
+
+ set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
+ if (csk_conn_inline(csk)) {
+ struct l2t_entry *e = csk->l2t_entry;
+
+ if (e && sk->sk_state != TCP_SYN_RECV) {
+ cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
+ return;
+ }
+ }
+ cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+}
+
+static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
+ struct chtls_dev *cdev,
+ int status, int queue)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
+ struct sk_buff *reply_skb;
+ struct chtls_sock *csk;
+ unsigned int tid;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ tid = GET_TID(req);
+
+ reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
+ if (!reply_skb) {
+ req->status = (queue << 1) | status;
+ t4_defer_reply(skb, cdev, send_defer_abort_rpl);
+ return;
+ }
+
+ set_abort_rpl_wr(reply_skb, tid, status);
+ kfree_skb(skb);
+ set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
+ if (csk_conn_inline(csk)) {
+ struct l2t_entry *e = csk->l2t_entry;
+
+ if (e && sk->sk_state != TCP_SYN_RECV) {
+ cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
+ return;
+ }
+ }
+ cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+}
+
+/*
+ * This is run from a listener's backlog to abort a child connection in
+ * SYN_RCV state (i.e., one on the listener's SYN queue).
+ */
+static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
+{
+ struct chtls_sock *csk;
+ struct sock *child;
+ int queue;
+
+ child = skb->sk;
+ csk = rcu_dereference_sk_user_data(child);
+ queue = csk->txq_idx;
+
+ skb->sk = NULL;
+ do_abort_syn_rcv(child, lsk);
+ send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
+}
+
+static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ const struct request_sock *oreq;
+ struct listen_ctx *listen_ctx;
+ struct chtls_sock *csk;
+ struct chtls_dev *cdev;
+ struct sock *psk;
+ void *ctx;
+
+ csk = sk->sk_user_data;
+ oreq = csk->passive_reap_next;
+ cdev = csk->cdev;
+
+ if (!oreq)
+ return -1;
+
+ ctx = lookup_stid(cdev->tids, oreq->ts_recent);
+ if (!ctx)
+ return -1;
+
+ listen_ctx = (struct listen_ctx *)ctx;
+ psk = listen_ctx->lsk;
+
+ bh_lock_sock(psk);
+ if (!sock_owned_by_user(psk)) {
+ int queue = csk->txq_idx;
+
+ do_abort_syn_rcv(sk, psk);
+ send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
+ } else {
+ skb->sk = sk;
+ BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
+ __sk_add_backlog(psk, skb);
+ }
+ bh_unlock_sock(psk);
+ return 0;
+}
+
+static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
+{
+ const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
+ struct chtls_sock *csk = sk->sk_user_data;
+ int rst_status = CPL_ABORT_NO_RST;
+ int queue = csk->txq_idx;
+
+ if (is_neg_adv(req->status)) {
+ if (sk->sk_state == TCP_SYN_RECV)
+ chtls_set_tcb_tflag(sk, 0, 0);
+
+ kfree_skb(skb);
+ return;
+ }
+
+ csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);
+
+ if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
+ !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
+ WARN_ONCE(1, "send_tx_flowc error");
+ csk_set_flag(csk, CSK_TX_DATA_SENT);
+ }
+
+ csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
+
+ if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
+ sk->sk_err = ETIMEDOUT;
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+
+ if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
+ return;
+
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
+ }
+
+ chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
+ rst_status, queue);
+}
+
+static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
+{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
+ struct chtls_sock *csk;
+ struct chtls_dev *cdev;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ cdev = csk->cdev;
+
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
+ csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
+ if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
+ if (sk->sk_state == TCP_SYN_SENT) {
+ cxgb4_remove_tid(cdev->tids,
+ csk->port_id,
+ GET_TID(rpl),
+ sk->sk_family);
+ sock_put(sk);
+ }
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
+ }
+ }
+ kfree_skb(skb);
+}
+
+static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
+ void (*fn)(struct sock *sk, struct sk_buff *skb);
+ unsigned int hwtid = GET_TID(req);
+ struct chtls_sock *csk;
+ struct sock *sk;
+ u8 opcode;
+
+ opcode = ((const struct rss_header *)cplhdr(skb))->opcode;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+ if (!sk)
+ goto rel_skb;
+
+ csk = sk->sk_user_data;
+
+ switch (opcode) {
+ case CPL_PEER_CLOSE:
+ fn = chtls_peer_close;
+ break;
+ case CPL_CLOSE_CON_RPL:
+ fn = chtls_close_con_rpl;
+ break;
+ case CPL_ABORT_REQ_RSS:
+ /*
+ * Save the offload device in the skb, we may process this
+		 * Save the offload device in the skb; we may process this
+ */
+ BLOG_SKB_CB(skb)->cdev = csk->cdev;
+ fn = chtls_abort_req_rss;
+ break;
+ case CPL_ABORT_RPL_RSS:
+ fn = chtls_abort_rpl_rss;
+ break;
+ default:
+ goto rel_skb;
+ }
+
+ process_cpl_msg(fn, sk, skb);
+ return 0;
+
+rel_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
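+/*
+ * Process a CPL_FW4_ACK: return the acknowledged work-request credits,
+ * free completed WR skbs, advance snd_una, and resume transmission if
+ * data is queued waiting for credits.
+ */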
+static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+{
+ struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
+ struct chtls_sock *csk = sk->sk_user_data;
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 credits = hdr->credits;
+ u32 snd_una;
+
+ snd_una = ntohl(hdr->snd_una);
+ csk->wr_credits += credits;
+
+ if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
+ csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;
+
+ while (credits) {
+ struct sk_buff *pskb = csk->wr_skb_head;
+ u32 csum;
+
+ if (unlikely(!pskb)) {
+ if (csk->wr_nondata)
+ csk->wr_nondata -= credits;
+ break;
+ }
+ csum = (__force u32)pskb->csum;
+ if (unlikely(credits < csum)) {
+ pskb->csum = (__force __wsum)(csum - credits);
+ break;
+ }
+ dequeue_wr(sk);
+ credits -= csum;
+ kfree_skb(pskb);
+ }
+ if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
+ if (unlikely(before(snd_una, tp->snd_una))) {
+ kfree_skb(skb);
+ return;
+ }
+
+ if (tp->snd_una != snd_una) {
+ tp->snd_una = snd_una;
+ tp->rcv_tstamp = tcp_time_stamp(tp);
+ if (tp->snd_una == tp->snd_nxt &&
+ !csk_flag_nochk(csk, CSK_TX_FAILOVER))
+ csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+ }
+ }
+
+ if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
+ unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);
+
+ csk->wr_credits -= fclen16;
+ csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+ csk_reset_flag(csk, CSK_TX_FAILOVER);
+ }
+ if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
+ sk->sk_write_space(sk);
+
+ kfree_skb(skb);
+}
+
+static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+ struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
+ unsigned int hwtid = GET_TID(rpl);
+ struct sock *sk;
+
+ sk = lookup_tid(cdev->tids, hwtid);
+ if (unlikely(!sk)) {
+ pr_err("can't find conn. for hwtid %u.\n", hwtid);
+ return -EINVAL;
+ }
+ process_cpl_msg(chtls_rx_ack, sk, skb);
+
+ return 0;
+}
+
+chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
+ [CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = chtls_pass_accept_req,
+ [CPL_PASS_ESTABLISH] = chtls_pass_establish,
+ [CPL_RX_DATA] = chtls_rx_data,
+ [CPL_TLS_DATA] = chtls_rx_pdu,
+ [CPL_RX_TLS_CMP] = chtls_rx_cmp,
+ [CPL_PEER_CLOSE] = chtls_conn_cpl,
+ [CPL_CLOSE_CON_RPL] = chtls_conn_cpl,
+ [CPL_ABORT_REQ_RSS] = chtls_conn_cpl,
+ [CPL_ABORT_RPL_RSS] = chtls_conn_cpl,
+ [CPL_FW4_ACK] = chtls_wr_ack,
+};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
new file mode 100644
index 000000000000..47ba81e42f5d
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ */
+
+#ifndef __CHTLS_CM_H__
+#define __CHTLS_CM_H__
+
+/*
+ * TCB settings
+ */
+/* 3:0 */
+#define TCB_ULP_TYPE_W 0
+#define TCB_ULP_TYPE_S 0
+#define TCB_ULP_TYPE_M 0xfULL
+#define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)
+
+/* 11:4 */
+#define TCB_ULP_RAW_W 0
+#define TCB_ULP_RAW_S 4
+#define TCB_ULP_RAW_M 0xffULL
+#define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)
+
+#define TF_TLS_KEY_SIZE_S 7
+#define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)
+
+#define TF_TLS_CONTROL_S 2
+#define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)
+
+#define TF_TLS_ACTIVE_S 1
+#define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)
+
+#define TF_TLS_ENABLE_S 0
+#define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)
+
+#define TF_RX_QUIESCE_S 15
+#define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
+
+/*
+ * Max receive window supported by HW in bytes. Only a small part of it can
+ * be set through option0; the rest needs to be set through RX_DATA_ACK.
+ */
+#define MAX_RCV_WND ((1U << 27) - 1)
+#define MAX_MSS 65536
+
+/*
+ * Min receive window. We want it to be large enough to accommodate receive
+ * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
+ */
+#define MIN_RCV_WND (24 * 1024U)
+#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+
+/* ulp_mem_io + ulptx_idata + payload + padding */
+#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+
+/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define TX_HEADER_LEN \
+ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define TX_TLSHDR_LEN \
+ (sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
+ sizeof(struct sge_opaque_hdr))
+#define TXDATA_SKB_LEN 128
+
+enum {
+ CPL_TX_TLS_SFO_TYPE_CCS,
+ CPL_TX_TLS_SFO_TYPE_ALERT,
+ CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
+ CPL_TX_TLS_SFO_TYPE_DATA,
+ CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
+};
+
+enum {
+ TLS_HDR_TYPE_CCS = 20,
+ TLS_HDR_TYPE_ALERT,
+ TLS_HDR_TYPE_HANDSHAKE,
+ TLS_HDR_TYPE_RECORD,
+ TLS_HDR_TYPE_HEARTBEAT,
+};
+
+typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
+extern struct request_sock_ops chtls_rsk_ops;
+extern struct request_sock_ops chtls_rsk_opsv6;
+
+struct deferred_skb_cb {
+ defer_handler_t handler;
+ struct chtls_dev *dev;
+};
+
+#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
+#define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
+#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
+#define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)
+
+#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
+#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
+#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
+#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
+#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
+#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
+#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
+#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
+
+/* TLS SKB */
+#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
+#define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)
+
+void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
+ defer_handler_t handler);
+
+/*
+ * Returns true if the socket is in one of the supplied states.
+ */
+static inline unsigned int sk_in_state(const struct sock *sk,
+ unsigned int states)
+{
+ return states & (1 << sk->sk_state);
+}
+
+static void chtls_rsk_destructor(struct request_sock *req)
+{
+ /* do nothing */
+}
+
+static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
+ struct request_sock_ops *chtls_tcp_ops,
+ struct proto *tcp_prot, int family)
+{
+ memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
+ chtls_tcp_ops->family = family;
+ chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
+ chtls_tcp_ops->destructor = chtls_rsk_destructor;
+ chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
+ chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
+}
+
+static inline void chtls_reqsk_free(struct request_sock *req)
+{
+ if (req->rsk_listener)
+ sock_put(req->rsk_listener);
+ kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+#define DECLARE_TASK_FUNC(task, task_param) \
+ static void task(struct work_struct *task_param)
+
+static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
+{
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq)) {
+ if (interruptable)
+ wake_up_interruptible(sk_sleep(sk));
+ else
+ wake_up_all(sk_sleep(sk));
+ }
+ rcu_read_unlock();
+}
+
+static inline void chtls_set_req_port(struct request_sock *oreq,
+ __be16 source, __be16 dest)
+{
+ inet_rsk(oreq)->ir_rmt_port = source;
+ inet_rsk(oreq)->ir_num = ntohs(dest);
+}
+
+static inline void chtls_set_req_addr(struct request_sock *oreq,
+ __be32 local_ip, __be32 peer_ip)
+{
+ inet_rsk(oreq)->ir_loc_addr = local_ip;
+ inet_rsk(oreq)->ir_rmt_addr = peer_ip;
+}
+
+static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+ skb_dst_set(skb, NULL);
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ __kfree_skb(skb);
+}
+
+static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
+{
+ skb_dst_set(skb, NULL);
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
+}
+
+static inline void chtls_reset_wr_list(struct chtls_sock *csk)
+{
+ csk->wr_skb_head = NULL;
+ csk->wr_skb_tail = NULL;
+}
+
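+/*
+ * Append an unacknowledged work request to the per-connection WR list.
+ * A reference is taken on the skb; it is dropped once the WR is acked.
+ */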
+static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
+{
+ WR_SKB_CB(skb)->next_wr = NULL;
+
+ skb_get(skb);
+
+ if (!csk->wr_skb_head)
+ csk->wr_skb_head = skb;
+ else
+ WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
+ csk->wr_skb_tail = skb;
+}
+
+static inline struct sk_buff *dequeue_wr(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb = NULL;
+
+ skb = csk->wr_skb_head;
+
+ if (likely(skb)) {
+ /* Don't bother clearing the tail */
+ csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+ WR_SKB_CB(skb)->next_wr = NULL;
+ }
+ return skb;
+}
+#endif
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
new file mode 100644
index 000000000000..f1820aca0d33
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/tls.h>
+#include <net/tls.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
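+/*
+ * Fill in a CPL_SET_TCB_FIELD request that updates TCB word @word of the
+ * connection, applying @val under @mask, followed by a ULP_TX_SC_NOOP
+ * sub-command.
+ */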
+static void __set_tcb_field_direct(struct chtls_sock *csk,
+ struct cpl_set_tcb_field *req, u16 word,
+ u64 mask, u64 val, u8 cookie, int no_reply)
+{
+ struct ulptx_idata *sc;
+
+ INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
+ req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
+ QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+ sc->len = htonl(0);
+}
+
+static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
+ u64 mask, u64 val, u8 cookie, int no_reply)
+{
+ struct cpl_set_tcb_field *req;
+ struct chtls_sock *csk;
+ struct ulptx_idata *sc;
+ unsigned int wrlen;
+
+ wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
+ csk = rcu_dereference_sk_user_data(sk);
+
+ req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
+ __set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+}
+
+/*
+ * Send a control message to the HW; the message goes as immediate data
+ * and the packet is freed immediately.
+ */
+static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
+{
+ struct cpl_set_tcb_field *req;
+ unsigned int credits_needed;
+ struct chtls_sock *csk;
+ struct ulptx_idata *sc;
+ struct sk_buff *skb;
+ unsigned int wrlen;
+ int ret;
+
+ wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
+
+ skb = alloc_skb(wrlen, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ credits_needed = DIV_ROUND_UP(wrlen, 16);
+ csk = rcu_dereference_sk_user_data(sk);
+
+ __set_tcb_field(sk, skb, word, mask, val, 0, 1);
+ skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+ csk->wr_credits -= credits_needed;
+ csk->wr_unacked += credits_needed;
+ enqueue_wr(csk, skb);
+ ret = cxgb4_ofld_send(csk->egress_dev, skb);
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * Set one of the t_flags bits in the TCB.
+ */
+int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
+{
+ return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
+ (u64)val << bit_pos);
+}
+
+static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
+{
+ return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
+}
+
+static int chtls_set_tcb_seqno(struct sock *sk)
+{
+ return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
+}
+
+static int chtls_set_tcb_quiesce(struct sock *sk, int val)
+{
+ return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
+ TF_RX_QUIESCE_V(val));
+}
+
+/* TLS Key bitmap processing */
+int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
+{
+ unsigned int num_key_ctx, bsize;
+ int ksize;
+
+ num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
+ bsize = BITS_TO_LONGS(num_key_ctx);
+
+ cdev->kmap.size = num_key_ctx;
+ cdev->kmap.available = bsize;
+ ksize = sizeof(*cdev->kmap.addr) * bsize;
+ cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL);
+ if (!cdev->kmap.addr)
+ return -ENOMEM;
+
+ cdev->kmap.start = lldi->vr->key.start;
+ spin_lock_init(&cdev->kmap.lock);
+ return 0;
+}
+
+static int get_new_keyid(struct chtls_sock *csk, u32 optname)
+{
+ struct net_device *dev = csk->egress_dev;
+ struct chtls_dev *cdev = csk->cdev;
+ struct chtls_hws *hws;
+ struct adapter *adap;
+ int keyid;
+
+ adap = netdev2adap(dev);
+ hws = &csk->tlshws;
+
+ spin_lock_bh(&cdev->kmap.lock);
+ keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
+ if (keyid < cdev->kmap.size) {
+ __set_bit(keyid, cdev->kmap.addr);
+ if (optname == TLS_RX)
+ hws->rxkey = keyid;
+ else
+ hws->txkey = keyid;
+ atomic_inc(&adap->chcr_stats.tls_key);
+ } else {
+ keyid = -1;
+ }
+ spin_unlock_bh(&cdev->kmap.lock);
+ return keyid;
+}
+
+void free_tls_keyid(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct net_device *dev = csk->egress_dev;
+ struct chtls_dev *cdev = csk->cdev;
+ struct chtls_hws *hws;
+ struct adapter *adap;
+
+ if (!cdev->kmap.addr)
+ return;
+
+ adap = netdev2adap(dev);
+ hws = &csk->tlshws;
+
+ spin_lock_bh(&cdev->kmap.lock);
+ if (hws->rxkey >= 0) {
+ __clear_bit(hws->rxkey, cdev->kmap.addr);
+ atomic_dec(&adap->chcr_stats.tls_key);
+ hws->rxkey = -1;
+ }
+ if (hws->txkey >= 0) {
+ __clear_bit(hws->txkey, cdev->kmap.addr);
+ atomic_dec(&adap->chcr_stats.tls_key);
+ hws->txkey = -1;
+ }
+ spin_unlock_bh(&cdev->kmap.lock);
+}
+
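+/*
+ * Convert a key-map index to the adapter memory address of its key
+ * context, expressed in 32-byte units.
+ */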
+unsigned int keyid_to_addr(int start_addr, int keyid)
+{
+ return (start_addr + (keyid * TLS_KEY_CONTEXT_SZ)) >> 5;
+}
+
+static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
+{
+ kctx->iv_to_auth = cpu_to_be64(KEYCTX_TX_WR_IV_V(6ULL) |
+ KEYCTX_TX_WR_AAD_V(1ULL) |
+ KEYCTX_TX_WR_AADST_V(5ULL) |
+ KEYCTX_TX_WR_CIPHER_V(14ULL) |
+ KEYCTX_TX_WR_CIPHERST_V(0ULL) |
+ KEYCTX_TX_WR_AUTH_V(14ULL) |
+ KEYCTX_TX_WR_AUTHST_V(16ULL) |
+ KEYCTX_TX_WR_AUTHIN_V(16ULL));
+}
+
+static int chtls_key_info(struct chtls_sock *csk,
+ struct _key_ctx *kctx,
+ u32 keylen, u32 optname,
+ int cipher_type)
+{
+ unsigned char key[AES_MAX_KEY_SIZE];
+ unsigned char *key_p, *salt;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
+ struct crypto_aes_ctx aes;
+ int ret;
+
+ key_ctx_size = sizeof(struct _key_ctx) +
+ roundup(keylen, 16) + AEAD_H_SIZE;
+
+	/* GCM mode of AES supports 128- and 256-bit encryption, so prepare
+	 * the key context based on the GCM cipher type
+ */
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+ (struct tls12_crypto_info_aes_gcm_128 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_128->key, keylen);
+
+ key_p = gcm_ctx_128->key;
+ salt = gcm_ctx_128->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+ (struct tls12_crypto_info_aes_gcm_256 *)
+ &csk->tlshws.crypto_info;
+ memcpy(key, gcm_ctx_256->key, keylen);
+
+ key_p = gcm_ctx_256->key;
+ salt = gcm_ctx_256->salt;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ break;
+ }
+ default:
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ return -EINVAL;
+ }
+
+	/* Calculate H = CIPH(K, 0 repeated 16 times); it is stored in the
+	 * key context
+ */
+ ret = aes_expandkey(&aes, key, keylen);
+ if (ret)
+ return ret;
+
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ aes_encrypt(&aes, ghash_h, ghash_h);
+ memzero_explicit(&aes, sizeof(aes));
+ csk->tlshws.keylen = key_ctx_size;
+
+ /* Copy the Key context */
+ if (optname == TLS_RX) {
+ int key_ctx;
+
+ key_ctx = ((key_ctx_size >> 4) << 3);
+ kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
+ kctx_mackey_size,
+ 0, 0, key_ctx);
+ chtls_rxkey_ivauth(kctx);
+ } else {
+ kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ kctx_mackey_size,
+ 0, 0, key_ctx_size >> 4);
+ }
+
+ memcpy(kctx->salt, salt, salt_size);
+ memcpy(kctx->key, key_p, keylen);
+ memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
+ /* erase key info from driver */
+ memset(key_p, 0, keylen);
+
+ return 0;
+}
+
+static void chtls_set_scmd(struct chtls_sock *csk)
+{
+ struct chtls_hws *hws = &csk->tlshws;
+
+ hws->scmd.seqno_numivs =
+ SCMD_SEQ_NO_CTRL_V(3) |
+ SCMD_PROTO_VERSION_V(0) |
+ SCMD_ENC_DEC_CTRL_V(0) |
+ SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
+ SCMD_CIPH_MODE_V(2) |
+ SCMD_AUTH_MODE_V(4) |
+ SCMD_HMAC_CTRL_V(0) |
+ SCMD_IV_SIZE_V(4) |
+ SCMD_NUM_IVS_V(1);
+
+ hws->scmd.ivgen_hdrlen =
+ SCMD_IV_GEN_CTRL_V(1) |
+ SCMD_KEY_CTX_INLINE_V(0) |
+ SCMD_TLS_FRAG_ENABLE_V(1);
+}
+
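+/*
+ * Program a TLS key into adapter memory with a ULP_TX_MEM_WRITE work
+ * request and, for the RX direction, update the connection's TCB so that
+ * hardware TLS processing starts using the new key.
+ */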
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ u32 optname, int cipher_type)
+{
+ struct tls_key_req *kwr;
+ struct chtls_dev *cdev;
+ struct _key_ctx *kctx;
+ int wrlen, klen, len;
+ struct sk_buff *skb;
+ struct sock *sk;
+ int keyid;
+ int kaddr;
+ int ret;
+
+ cdev = csk->cdev;
+ sk = csk->sk;
+
+ klen = roundup((keylen + AEAD_H_SIZE) + sizeof(*kctx), 32);
+ wrlen = roundup(sizeof(*kwr), 16);
+ len = klen + wrlen;
+
+	/* Flush outstanding data before the new key takes effect */
+ if (optname == TLS_TX) {
+ lock_sock(sk);
+ if (skb_queue_len(&csk->txq))
+ chtls_push_frames(csk, 0);
+ release_sock(sk);
+ }
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ keyid = get_new_keyid(csk, optname);
+ if (keyid < 0) {
+ ret = -ENOSPC;
+ goto out_nokey;
+ }
+
+ kaddr = keyid_to_addr(cdev->kmap.start, keyid);
+ kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
+ kwr->wr.op_to_compl =
+ cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
+ FW_WR_ATOMIC_V(1U));
+ kwr->wr.flowid_len16 =
+ cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16) |
+ FW_WR_FLOWID_V(csk->tid)));
+ kwr->wr.protocol = 0;
+ kwr->wr.mfs = htons(TLS_MFS);
+ kwr->wr.reneg_to_write_rx = optname;
+
+ /* ulptx command */
+ kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ T5_ULP_MEMIO_ORDER_V(1) |
+ T5_ULP_MEMIO_IMM_V(1));
+ kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
+ DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
+ kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
+ kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));
+
+ /* sub command */
+ kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ kwr->sc_imm.len = cpu_to_be32(klen);
+
+ lock_sock(sk);
+ /* key info */
+ kctx = (struct _key_ctx *)(kwr + 1);
+ ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
+ if (ret)
+ goto out_notcb;
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
+ csk->wr_credits -= DIV_ROUND_UP(len, 16);
+ csk->wr_unacked += DIV_ROUND_UP(len, 16);
+ enqueue_wr(csk, skb);
+ cxgb4_ofld_send(csk->egress_dev, skb);
+
+ chtls_set_scmd(csk);
+ /* Clear quiesce for Rx key */
+ if (optname == TLS_RX) {
+ ret = chtls_set_tcb_keyid(sk, keyid);
+ if (ret)
+ goto out_notcb;
+ ret = chtls_set_tcb_field(sk, 0,
+ TCB_ULP_RAW_V(TCB_ULP_RAW_M),
+ TCB_ULP_RAW_V((TF_TLS_KEY_SIZE_V(1) |
+ TF_TLS_CONTROL_V(1) |
+ TF_TLS_ACTIVE_V(1) |
+ TF_TLS_ENABLE_V(1))));
+ if (ret)
+ goto out_notcb;
+ ret = chtls_set_tcb_seqno(sk);
+ if (ret)
+ goto out_notcb;
+ ret = chtls_set_tcb_quiesce(sk, 0);
+ if (ret)
+ goto out_notcb;
+ csk->tlshws.rxkey = keyid;
+ } else {
+ csk->tlshws.tx_seq_no = 0;
+ csk->tlshws.txkey = keyid;
+ }
+
+ release_sock(sk);
+ return ret;
+out_notcb:
+ release_sock(sk);
+ free_tls_keyid(sk);
+out_nokey:
+ kfree_skb(skb);
+ return ret;
+}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
new file mode 100644
index 000000000000..2e9acae1cba3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -0,0 +1,1907 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sched/signal.h>
+#include <net/tcp.h>
+#include <net/busy_poll.h>
+#include <crypto/aes.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
+static bool is_tls_tx(struct chtls_sock *csk)
+{
+ return csk->tlshws.txkey >= 0;
+}
+
+static bool is_tls_rx(struct chtls_sock *csk)
+{
+ return csk->tlshws.rxkey >= 0;
+}
+
+static int data_sgl_len(const struct sk_buff *skb)
+{
+ unsigned int cnt;
+
+ cnt = skb_shinfo(skb)->nr_frags;
+ return sgl_len(cnt) * 8;
+}
+
+static int nos_ivs(struct sock *sk, unsigned int size)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+ return DIV_ROUND_UP(size, csk->tlshws.mfs);
+}
+
+static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
+{
+ int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
+ int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);
+
+ if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
+ MAX_IMM_OFLD_TX_DATA_WR_LEN) {
+ ULP_SKB_CB(skb)->ulp.tls.iv = 1;
+ return 1;
+ }
+ ULP_SKB_CB(skb)->ulp.tls.iv = 0;
+ return 0;
+}
+
+static int max_ivs_size(struct sock *sk, int size)
+{
+ return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
+}
+
+static int ivs_size(struct sock *sk, const struct sk_buff *skb)
+{
+ return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
+ CIPHER_BLOCK_SIZE) : 0;
+}
+
+static int flowc_wr_credits(int nparams, int *flowclenp)
+{
+ int flowclen16, flowclen;
+
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+
+ if (flowclenp)
+ *flowclenp = flowclen;
+
+ return flowclen16;
+}
+
+static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
+ struct fw_flowc_wr *flowc,
+ int flowclen)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(flowclen, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ __skb_put_data(skb, flowc, flowclen);
+ skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+
+ return skb;
+}
+
+static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
+ int flowclen)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ int flowclen16;
+ int ret;
+
+ flowclen16 = flowclen / 16;
+
+ if (csk_flag(sk, CSK_TX_DATA_SENT)) {
+ skb = create_flowc_wr_skb(sk, flowc, flowclen);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_entail(sk, skb,
+ ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
+ return 0;
+ }
+
+ ret = cxgb4_immdata_send(csk->egress_dev,
+ csk->txq_idx,
+ flowc, flowclen);
+ if (!ret)
+ return flowclen16;
+ skb = create_flowc_wr_skb(sk, flowc, flowclen);
+ if (!skb)
+ return -ENOMEM;
+ send_or_defer(sk, tp, skb, 0);
+ return flowclen16;
+}
+
+static u8 tcp_state_to_flowc_state(u8 state)
+{
+ switch (state) {
+ case TCP_ESTABLISHED:
+ return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+ case TCP_CLOSE_WAIT:
+ return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
+ case TCP_FIN_WAIT1:
+ return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
+ case TCP_CLOSING:
+ return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
+ case TCP_LAST_ACK:
+ return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
+ case TCP_FIN_WAIT2:
+ return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
+ }
+
+ return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+}
+
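+/* Send a FLOWC work request carrying the per-connection parameters (queue
+ * ids, send buffer, MSS, TCP state and ULP mode) to the firmware; it is
+ * issued before the first data WR on a connection (see chtls_push_frames()).
+ */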
+int send_tx_flowc_wr(struct sock *sk, int compl,
+ u32 snd_nxt, u32 rcv_nxt)
+{
+ struct flowc_packed {
+ struct fw_flowc_wr fc;
+ struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
+ } __packed sflowc;
+ int nparams, paramidx, flowclen16, flowclen;
+ struct fw_flowc_wr *flowc;
+ struct chtls_sock *csk;
+ struct tcp_sock *tp;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ tp = tcp_sk(sk);
+ memset(&sflowc, 0, sizeof(sflowc));
+ flowc = &sflowc.fc;
+
+#define FLOWC_PARAM(__m, __v) \
+ do { \
+ flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \
+ flowc->mnemval[paramidx].val = cpu_to_be32(__v); \
+ paramidx++; \
+ } while (0)
+
+ paramidx = 0;
+
+ FLOWC_PARAM(PFNVFN, FW_PFVF_CMD_PFN_V(csk->cdev->lldi->pf));
+ FLOWC_PARAM(CH, csk->tx_chan);
+ FLOWC_PARAM(PORT, csk->tx_chan);
+ FLOWC_PARAM(IQID, csk->rss_qid);
+ FLOWC_PARAM(SNDNXT, tp->snd_nxt);
+ FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
+ FLOWC_PARAM(SNDBUF, csk->sndbuf);
+ FLOWC_PARAM(MSS, tp->mss_cache);
+ FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state));
+
+ if (SND_WSCALE(tp))
+ FLOWC_PARAM(RCV_SCALE, SND_WSCALE(tp));
+
+ if (csk->ulp_mode == ULP_MODE_TLS)
+ FLOWC_PARAM(ULD_MODE, ULP_MODE_TLS);
+
+ if (csk->tlshws.fcplenmax)
+ FLOWC_PARAM(TXDATAPLEN_MAX, csk->tlshws.fcplenmax);
+
+ nparams = paramidx;
+#undef FLOWC_PARAM
+
+ flowclen16 = flowc_wr_credits(nparams, &flowclen);
+ flowc->op_to_nparams =
+ cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_WR_COMPL_V(compl) |
+ FW_FLOWC_WR_NPARAMS_V(nparams));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(csk->tid));
+
+ return send_flowc_wr(sk, flowc, flowclen);
+}
+
+/* Copy IVs to WR */
+static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
+{
+ struct chtls_sock *csk;
+ unsigned char *iv_loc;
+ struct chtls_hws *hws;
+ unsigned char *ivs;
+ u16 number_of_ivs;
+ struct page *page;
+ int err = 0;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ hws = &csk->tlshws;
+ number_of_ivs = nos_ivs(sk, skb->len);
+
+ if (number_of_ivs > MAX_IVS_PAGE) {
+ pr_warn("MAX IVs in PAGE exceeded %d\n", number_of_ivs);
+ return -ENOMEM;
+ }
+
+ /* generate the IVs */
+ ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC);
+ if (!ivs)
+ return -ENOMEM;
+ get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE);
+
+ if (skb_ulp_tls_iv_imm(skb)) {
+ /* send the IVs as immediate data in the WR */
+ iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs *
+ CIPHER_BLOCK_SIZE);
+ if (iv_loc)
+ memcpy(iv_loc, ivs, number_of_ivs * CIPHER_BLOCK_SIZE);
+
+ hws->ivsize = number_of_ivs * CIPHER_BLOCK_SIZE;
+ } else {
+ /* Send the IVs as SGLs; the IV DSGL was already accounted for credits */
+ skb_shinfo(skb)->nr_frags--;
+ page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
+ if (!page) {
+ pr_info("%s : Page allocation for IVs failed\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+ memcpy(page_address(page), ivs, number_of_ivs *
+ CIPHER_BLOCK_SIZE);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
+ number_of_ivs * CIPHER_BLOCK_SIZE);
+ hws->ivsize = 0;
+ }
+out:
+ kfree(ivs);
+ return err;
+}
+
+/* Reference the Tx key in the WR: the key stays in on-chip key memory and
+ * the WR carries a ULP_TX_SC_MEMRD pointing at its address.
+ */
+static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
+{
+ struct ulptx_sc_memrd *sc_memrd;
+ struct chtls_sock *csk;
+ struct chtls_dev *cdev;
+ struct ulptx_idata *sc;
+ struct chtls_hws *hws;
+ u32 immdlen;
+ int kaddr;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ hws = &csk->tlshws;
+ cdev = csk->cdev;
+
+ immdlen = sizeof(*sc) + sizeof(*sc_memrd);
+ kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey);
+ sc = (struct ulptx_idata *)__skb_push(skb, immdlen);
+ if (sc) {
+ sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+ sc->len = htonl(0);
+ sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
+ sc_memrd->cmd_to_len =
+ htonl(ULPTX_CMD_V(ULP_TX_SC_MEMRD) |
+ ULP_TX_SC_MORE_V(1) |
+ ULPTX_LEN16_V(hws->keylen >> 4));
+ sc_memrd->addr = htonl(kaddr);
+ }
+}
+
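+/* Return the current Tx record sequence number and advance it for the next
+ * record.
+ */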
+static u64 tlstx_incr_seqnum(struct chtls_hws *hws)
+{
+ return hws->tx_seq_no++;
+}
+
+static bool is_sg_request(const struct sk_buff *skb)
+{
+ return skb->peeked ||
+ (skb->len > MAX_IMM_ULPTX_WR_LEN);
+}
+
+/*
+ * Returns true if an sk_buff carries urgent data.
+ */
+static bool skb_urgent(struct sk_buff *skb)
+{
+ return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG;
+}
+
+/* TLS content type for CPL SFO */
+static unsigned char tls_content_type(unsigned char content_type)
+{
+ switch (content_type) {
+ case TLS_HDR_TYPE_CCS:
+ return CPL_TX_TLS_SFO_TYPE_CCS;
+ case TLS_HDR_TYPE_ALERT:
+ return CPL_TX_TLS_SFO_TYPE_ALERT;
+ case TLS_HDR_TYPE_HANDSHAKE:
+ return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
+ case TLS_HDR_TYPE_HEARTBEAT:
+ return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
+ }
+ return CPL_TX_TLS_SFO_TYPE_DATA;
+}
+
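+/* Build the FW_TLSTX_DATA_WR and the CPL_TX_TLS_SFO that precede the record
+ * payload, filling in the payload/expansion lengths, key location and the
+ * per-record sequence number.
+ */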
+static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
+ int dlen, int tls_immd, u32 credits,
+ int expn, int pdus)
+{
+ struct fw_tlstx_data_wr *req_wr;
+ struct cpl_tx_tls_sfo *req_cpl;
+ unsigned int wr_ulp_mode_force;
+ struct tls_scmd *updated_scmd;
+ unsigned char data_type;
+ struct chtls_sock *csk;
+ struct net_device *dev;
+ struct chtls_hws *hws;
+ struct tls_scmd *scmd;
+ struct adapter *adap;
+ unsigned char *req;
+ int immd_len;
+ int iv_imm;
+ int len;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ iv_imm = skb_ulp_tls_iv_imm(skb);
+ dev = csk->egress_dev;
+ adap = netdev2adap(dev);
+ hws = &csk->tlshws;
+ scmd = &hws->scmd;
+ len = dlen + expn;
+
+ dlen = (dlen < hws->mfs) ? dlen : hws->mfs;
+ atomic_inc(&adap->chcr_stats.tls_pdu_tx);
+
+ updated_scmd = scmd;
+ updated_scmd->seqno_numivs &= 0xffffff80;
+ updated_scmd->seqno_numivs |= SCMD_NUM_IVS_V(pdus);
+ hws->scmd = *updated_scmd;
+
+ req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
+ req_cpl = (struct cpl_tx_tls_sfo *)req;
+ req = (unsigned char *)__skb_push(skb, (sizeof(struct
+ fw_tlstx_data_wr)));
+
+ req_wr = (struct fw_tlstx_data_wr *)req;
+ immd_len = (tls_immd ? dlen : 0);
+ req_wr->op_to_immdlen =
+ htonl(FW_WR_OP_V(FW_TLSTX_DATA_WR) |
+ FW_TLSTX_DATA_WR_COMPL_V(1) |
+ FW_TLSTX_DATA_WR_IMMDLEN_V(immd_len));
+ req_wr->flowid_len16 = htonl(FW_TLSTX_DATA_WR_FLOWID_V(csk->tid) |
+ FW_TLSTX_DATA_WR_LEN16_V(credits));
+ wr_ulp_mode_force = TX_ULP_MODE_V(ULP_MODE_TLS);
+
+ if (is_sg_request(skb))
+ wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
+ ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
+
+ req_wr->lsodisable_to_flags =
+ htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
+ TX_URG_V(skb_urgent(skb)) |
+ T6_TX_FORCE_F | wr_ulp_mode_force |
+ TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+ skb_queue_empty(&csk->txq)));
+
+ req_wr->ctxloc_to_exp =
+ htonl(FW_TLSTX_DATA_WR_NUMIVS_V(pdus) |
+ FW_TLSTX_DATA_WR_EXP_V(expn) |
+ FW_TLSTX_DATA_WR_CTXLOC_V(CHTLS_KEY_CONTEXT_DDR) |
+ FW_TLSTX_DATA_WR_IVDSGL_V(!iv_imm) |
+ FW_TLSTX_DATA_WR_KEYSIZE_V(hws->keylen >> 4));
+
+ /* Fill in the length */
+ req_wr->plen = htonl(len);
+ req_wr->mfs = htons(hws->mfs);
+ req_wr->adjustedplen_pkd =
+ htons(FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(hws->adjustlen));
+ req_wr->expinplenmax_pkd =
+ htons(FW_TLSTX_DATA_WR_EXPINPLENMAX_V(hws->expansion));
+ req_wr->pdusinplenmax_pkd =
+ FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(hws->pdus);
+ req_wr->r10 = 0;
+
+ data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);
+ req_cpl->op_to_seg_len = htonl(CPL_TX_TLS_SFO_OPCODE_V(CPL_TX_TLS_SFO) |
+ CPL_TX_TLS_SFO_DATA_TYPE_V(data_type) |
+ CPL_TX_TLS_SFO_CPL_LEN_V(2) |
+ CPL_TX_TLS_SFO_SEG_LEN_V(dlen));
+ req_cpl->pld_len = htonl(len - expn);
+
+ req_cpl->type_protover = htonl(CPL_TX_TLS_SFO_TYPE_V
+ ((data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT) ?
+ TLS_HDR_TYPE_HEARTBEAT : 0) |
+ CPL_TX_TLS_SFO_PROTOVER_V(0));
+
+ /* create the s-command */
+ req_cpl->r1_lo = 0;
+ req_cpl->seqno_numivs = cpu_to_be32(hws->scmd.seqno_numivs);
+ req_cpl->ivgen_hdrlen = cpu_to_be32(hws->scmd.ivgen_hdrlen);
+ req_cpl->scmd1 = cpu_to_be64(tlstx_incr_seqnum(hws));
+}
+
+/*
+ * Calculate the TLS data expansion size
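+ * (per-record AES-GCM overhead: TLS header, explicit nonce and GCM tag,
+ * multiplied by the number of records needed for the payload)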
+ */
+static int chtls_expansion_size(struct sock *sk, int data_len,
+ int fullpdu,
+ unsigned short *pducnt)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct chtls_hws *hws = &csk->tlshws;
+ struct tls_scmd *scmd = &hws->scmd;
+ int fragsize = hws->mfs;
+ int expnsize = 0;
+ int fragleft;
+ int fragcnt;
+ int expppdu;
+
+ if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) ==
+ SCMD_CIPH_MODE_AES_GCM) {
+ expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
+ TLS_HEADER_LENGTH;
+
+ if (fullpdu) {
+ *pducnt = data_len / (expppdu + fragsize);
+ if (*pducnt > 32)
+ *pducnt = 32;
+ else if (!*pducnt)
+ *pducnt = 1;
+ expnsize = (*pducnt) * expppdu;
+ return expnsize;
+ }
+ fragcnt = (data_len / fragsize);
+ expnsize = fragcnt * expppdu;
+ fragleft = data_len % fragsize;
+ if (fragleft > 0)
+ expnsize += expppdu;
+ }
+ return expnsize;
+}
+
+/* WR with IV, KEY and CPL SFO added */
+static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
+ int tls_tx_imm, int tls_len, u32 credits)
+{
+ unsigned short pdus_per_ulp = 0;
+ struct chtls_sock *csk;
+ struct chtls_hws *hws;
+ int expn_sz;
+ int pdus;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ hws = &csk->tlshws;
+ pdus = DIV_ROUND_UP(tls_len, hws->mfs);
+ expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL);
+ if (!hws->compute) {
+ hws->expansion = chtls_expansion_size(sk,
+ hws->fcplenmax,
+ 1, &pdus_per_ulp);
+ hws->pdus = pdus_per_ulp;
+ hws->adjustlen = hws->pdus *
+ ((hws->expansion / hws->pdus) + hws->mfs);
+ hws->compute = 1;
+ }
+ if (tls_copy_ivs(sk, skb))
+ return;
+ tls_copy_tx_key(sk, skb);
+ tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
+ hws->tx_seq_no += (pdus - 1);
+}
+
+static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
+ unsigned int immdlen, int len,
+ u32 credits, u32 compl)
+{
+ struct fw_ofld_tx_data_wr *req;
+ unsigned int wr_ulp_mode_force;
+ struct chtls_sock *csk;
+ unsigned int opcode;
+
+ csk = rcu_dereference_sk_user_data(sk);
+ opcode = FW_OFLD_TX_DATA_WR;
+
+ req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
+ req->op_to_immdlen = htonl(WR_OP_V(opcode) |
+ FW_WR_COMPL_V(compl) |
+ FW_WR_IMMDLEN_V(immdlen));
+ req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
+
+ wr_ulp_mode_force = TX_ULP_MODE_V(csk->ulp_mode);
+ if (is_sg_request(skb))
+ wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
+ ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
+
+ req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
+ TX_URG_V(skb_urgent(skb)) |
+ TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+ skb_queue_empty(&csk->txq)));
+ req->plen = htonl(len);
+}
+
+static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb,
+ bool size)
+{
+ int wr_size;
+
+ wr_size = TLS_WR_CPL_LEN;
+ wr_size += KEY_ON_MEM_SZ;
+ wr_size += ivs_size(csk->sk, skb);
+
+ if (size)
+ return wr_size;
+
+ /* account an extra frag for the IV DSGL */
+ if (!skb_ulp_tls_iv_imm(skb))
+ skb_shinfo(skb)->nr_frags++;
+
+ return wr_size;
+}
+
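+/* Decide whether a Tx skb is small enough to be carried as immediate data
+ * inside the work request instead of being DMAed through a gather list.
+ */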
+static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb)
+{
+ int length = skb->len;
+
+ if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN)
+ return false;
+
+ if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
+ /* Check TLS header len for Immediate */
+ if (csk->ulp_mode == ULP_MODE_TLS &&
+ skb_ulp_tls_inline(skb))
+ length += chtls_wr_size(csk, skb, true);
+ else
+ length += sizeof(struct fw_ofld_tx_data_wr);
+
+ return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+ }
+ return true;
+}
+
+static unsigned int calc_tx_flits(const struct sk_buff *skb,
+ unsigned int immdlen)
+{
+ unsigned int flits, cnt;
+
+ flits = immdlen / 8; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb_tail_pointer(skb) != skb_transport_header(skb))
+ cnt++;
+ return flits + sgl_len(cnt);
+}
+
+static void arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
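+/* Drain the connection Tx queue while work-request credits are available:
+ * send a FLOWC WR first if none has been sent yet, then wrap each skb in
+ * either a TLS Tx WR or a plain offload data WR and hand it to the L2T
+ * transmit path.
+ */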
+int chtls_push_frames(struct chtls_sock *csk, int comp)
+{
+ struct chtls_hws *hws = &csk->tlshws;
+ struct tcp_sock *tp;
+ struct sk_buff *skb;
+ int total_size = 0;
+ struct sock *sk;
+ int wr_size;
+
+ wr_size = sizeof(struct fw_ofld_tx_data_wr);
+ sk = csk->sk;
+ tp = tcp_sk(sk);
+
+ if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
+ return 0;
+
+ if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
+ return 0;
+
+ while (csk->wr_credits && (skb = skb_peek(&csk->txq)) &&
+ (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) ||
+ skb_queue_len(&csk->txq) > 1)) {
+ unsigned int credit_len = skb->len;
+ unsigned int credits_needed;
+ unsigned int completion = 0;
+ int tls_len = skb->len; /* TLS data len before IV/key */
+ unsigned int immdlen;
+ int len = skb->len; /* length [ulp bytes] inserted by hw */
+ int flowclen16 = 0;
+ int tls_tx_imm = 0;
+
+ immdlen = skb->len;
+ if (!is_ofld_imm(csk, skb)) {
+ immdlen = skb_transport_offset(skb);
+ if (skb_ulp_tls_inline(skb))
+ wr_size = chtls_wr_size(csk, skb, false);
+ credit_len = 8 * calc_tx_flits(skb, immdlen);
+ } else {
+ if (skb_ulp_tls_inline(skb)) {
+ wr_size = chtls_wr_size(csk, skb, false);
+ tls_tx_imm = 1;
+ }
+ }
+ if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR))
+ credit_len += wr_size;
+ credits_needed = DIV_ROUND_UP(credit_len, 16);
+ if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
+ flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt,
+ tp->rcv_nxt);
+ if (flowclen16 <= 0)
+ break;
+ csk->wr_credits -= flowclen16;
+ csk->wr_unacked += flowclen16;
+ csk->wr_nondata += flowclen16;
+ csk_set_flag(csk, CSK_TX_DATA_SENT);
+ }
+
+ if (csk->wr_credits < credits_needed) {
+ if (skb_ulp_tls_inline(skb) &&
+ !skb_ulp_tls_iv_imm(skb))
+ skb_shinfo(skb)->nr_frags--;
+ break;
+ }
+
+ __skb_unlink(skb, &csk->txq);
+ skb_set_queue_mapping(skb, (csk->txq_idx << 1) |
+ CPL_PRIORITY_DATA);
+ if (hws->ofld)
+ hws->txqid = (skb->queue_mapping >> 1);
+ skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata);
+ csk->wr_credits -= credits_needed;
+ csk->wr_unacked += credits_needed;
+ csk->wr_nondata = 0;
+ enqueue_wr(csk, skb);
+
+ if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
+ if ((comp && csk->wr_unacked == credits_needed) ||
+ (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) ||
+ csk->wr_unacked >= csk->wr_max_credits / 2) {
+ completion = 1;
+ csk->wr_unacked = 0;
+ }
+ if (skb_ulp_tls_inline(skb))
+ make_tlstx_data_wr(sk, skb, tls_tx_imm,
+ tls_len, credits_needed);
+ else
+ make_tx_data_wr(sk, skb, immdlen, len,
+ credits_needed, completion);
+ tp->snd_nxt += len;
+ tp->lsndtime = tcp_jiffies32;
+ if (completion)
+ ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
+ } else {
+ struct cpl_close_con_req *req = cplhdr(skb);
+ unsigned int cmd = CPL_OPCODE_G(ntohl
+ (OPCODE_TID(req)));
+
+ if (cmd == CPL_CLOSE_CON_REQ)
+ csk_set_flag(csk,
+ CSK_CLOSE_CON_REQUESTED);
+
+ if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) &&
+ (csk->wr_unacked >= csk->wr_max_credits / 2)) {
+ req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
+ csk->wr_unacked = 0;
+ }
+ }
+ total_size += skb->truesize;
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER)
+ csk_set_flag(csk, CSK_TX_WAIT_IDLE);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
+ }
+ sk->sk_wmem_queued -= total_size;
+ return total_size;
+}
+
+static void mark_urg(struct tcp_sock *tp, int flags,
+ struct sk_buff *skb)
+{
+ if (unlikely(flags & MSG_OOB)) {
+ tp->snd_up = tp->write_seq;
+ ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
+ ULPCB_FLAG_BARRIER |
+ ULPCB_FLAG_NO_APPEND |
+ ULPCB_FLAG_NEED_HDR;
+ }
+}
+
+/*
+ * Returns true if a connection should send more data to the TCP engine
+ */
+static bool should_push(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct chtls_dev *cdev = csk->cdev;
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /*
+ * If we've released our offload resources there's nothing to do ...
+ */
+ if (!cdev)
+ return false;
+
+ /*
+ * If there aren't any work requests in flight, or there isn't enough
+ * data in flight, or Nagle is off then send the current TX_DATA
+ * otherwise hold it and wait to accumulate more data.
+ */
+ return csk->wr_credits == csk->wr_max_credits ||
+ (tp->nonagle & TCP_NAGLE_OFF);
+}
+
+/*
+ * Returns true if a TCP socket is corked.
+ */
+static bool corked(const struct tcp_sock *tp, int flags)
+{
+ return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK);
+}
+
+/*
+ * Returns true if a send should try to push new data.
+ */
+static bool send_should_push(struct sock *sk, int flags)
+{
+ return should_push(sk) && !corked(tcp_sk(sk), flags);
+}
+
+void chtls_tcp_push(struct sock *sk, int flags)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ int qlen = skb_queue_len(&csk->txq);
+
+ if (likely(qlen)) {
+ struct sk_buff *skb = skb_peek_tail(&csk->txq);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ mark_urg(tp, flags, skb);
+
+ if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) &&
+ corked(tp, flags)) {
+ ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD;
+ return;
+ }
+
+ ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD;
+ if (qlen == 1 &&
+ ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
+ should_push(sk)))
+ chtls_push_frames(csk, 1);
+ }
+}
+
+/*
+ * Calculate the size for a new send sk_buff. We use the maximum size so we
+ * can pack lots of data into it, unless we plan to send it immediately, in
+ * which case we size it more tightly.
+ *
+ * Note: we don't bother compensating for MSS < PAGE_SIZE because it doesn't
+ * arise in normal cases and when it does we are just wasting memory.
+ */
+static int select_size(struct sock *sk, int io_len, int flags, int len)
+{
+ const int pgbreak = SKB_MAX_HEAD(len);
+
+ /*
+ * If the data wouldn't fit in the main body anyway, put only the
+ * header in the main body so it can use immediate data and place all
+ * the payload in page fragments.
+ */
+ if (io_len > pgbreak)
+ return 0;
+
+ /*
+ * If we will be accumulating payload get a large main body.
+ */
+ if (!send_should_push(sk, flags))
+ return pgbreak;
+
+ return io_len;
+}
+
+void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ ULP_SKB_CB(skb)->seq = tp->write_seq;
+ ULP_SKB_CB(skb)->flags = flags;
+ __skb_queue_tail(&csk->txq, skb);
+ sk->sk_wmem_queued += skb->truesize;
+
+ if (TCP_PAGE(sk) && TCP_OFF(sk)) {
+ put_page(TCP_PAGE(sk));
+ TCP_PAGE(sk) = NULL;
+ TCP_OFF(sk) = 0;
+ }
+}
+
+static struct sk_buff *get_tx_skb(struct sock *sk, int size)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
+ if (likely(skb)) {
+ skb_reserve(skb, TX_HEADER_LEN);
+ skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
+ skb_reset_transport_header(skb);
+ }
+ return skb;
+}
+
+static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN +
+ KEY_ON_MEM_SZ + max_ivs_size(sk, size)),
+ sk->sk_allocation);
+ if (likely(skb)) {
+ skb_reserve(skb, (TX_TLSHDR_LEN +
+ KEY_ON_MEM_SZ + max_ivs_size(sk, size)));
+ skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
+ skb_reset_transport_header(skb);
+ ULP_SKB_CB(skb)->ulp.tls.ofld = 1;
+ ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;
+ }
+ return skb;
+}
+
+static void tx_skb_finalize(struct sk_buff *skb)
+{
+ struct ulp_skb_cb *cb = ULP_SKB_CB(skb);
+
+ if (!(cb->flags & ULPCB_FLAG_NO_HDR))
+ cb->flags = ULPCB_FLAG_NEED_HDR;
+ cb->flags |= ULPCB_FLAG_NO_APPEND;
+}
+
+static void push_frames_if_head(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+ if (skb_queue_len(&csk->txq) == 1)
+ chtls_push_frames(csk, 1);
+}
+
+static int chtls_skb_copy_to_page_nocache(struct sock *sk,
+ struct iov_iter *from,
+ struct sk_buff *skb,
+ struct page *page,
+ int off, int copy)
+{
+ int err;
+
+ err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
+ off, copy, skb->len);
+ if (err)
+ return err;
+
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+ sk->sk_wmem_queued += copy;
+ return 0;
+}
+
+static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+{
+ return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
+}
+
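+/* Wait for send-buffer space to become available, similar to
+ * sk_stream_wait_memory() but bounded by the chtls device's max_host_sndbuf.
+ */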
+static int csk_wait_memory(struct chtls_dev *cdev,
+ struct sock *sk, long *timeo_p)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int err = 0;
+ long current_timeo;
+ long vm_wait = 0;
+ bool noblock;
+
+ current_timeo = *timeo_p;
+ noblock = (*timeo_p ? false : true);
+ if (csk_mem_free(cdev, sk)) {
+ current_timeo = (prandom_u32() % (HZ / 5)) + 2;
+ vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ }
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (1) {
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ goto do_error;
+ if (!*timeo_p) {
+ if (noblock)
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ goto do_nonblock;
+ }
+ if (signal_pending(current))
+ goto do_interrupted;
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ if (csk_mem_free(cdev, sk) && !vm_wait)
+ break;
+
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ sk->sk_write_pending++;
+ sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (csk_mem_free(cdev, sk) && !vm_wait), &wait);
+ sk->sk_write_pending--;
+
+ if (vm_wait) {
+ vm_wait -= current_timeo;
+ current_timeo = *timeo_p;
+ if (current_timeo != MAX_SCHEDULE_TIMEOUT) {
+ current_timeo -= vm_wait;
+ if (current_timeo < 0)
+ current_timeo = 0;
+ }
+ vm_wait = 0;
+ }
+ *timeo_p = current_timeo;
+ }
+do_rm_wq:
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+do_error:
+ err = -EPIPE;
+ goto do_rm_wq;
+do_nonblock:
+ err = -EAGAIN;
+ goto do_rm_wq;
+do_interrupted:
+ err = sock_intr_errno(*timeo_p);
+ goto do_rm_wq;
+}
+
+static int chtls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type)
+{
+ struct cmsghdr *cmsg;
+ int rc = -EINVAL;
+
+ for_each_cmsghdr(cmsg, msg) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+ if (cmsg->cmsg_level != SOL_TLS)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case TLS_SET_RECORD_TYPE:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
+ return -EINVAL;
+
+ if (msg->msg_flags & MSG_MORE)
+ return -EINVAL;
+
+ *record_type = *(unsigned char *)CMSG_DATA(cmsg);
+ rc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
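+/* sendmsg() for offloaded connections: copy user data into skbs queued on
+ * csk->txq, taking the TLS record type from the TLS_SET_RECORD_TYPE cmsg
+ * when present, and push completed frames to the hardware.
+ */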
+int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct chtls_dev *cdev = csk->cdev;
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ int mss, flags, err;
+ int recordsz = 0;
+ int copied = 0;
+ long timeo;
+
+ lock_sock(sk);
+ flags = msg->msg_flags;
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+ if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+ err = sk_stream_wait_connect(sk, &timeo);
+ if (err)
+ goto out_err;
+ }
+
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ err = -EPIPE;
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ goto out_err;
+
+ mss = csk->mss;
+ csk_set_flag(csk, CSK_TX_MORE_DATA);
+
+ while (msg_data_left(msg)) {
+ int copy = 0;
+
+ skb = skb_peek_tail(&csk->txq);
+ if (skb) {
+ copy = mss - skb->len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ if (!csk_mem_free(cdev, sk))
+ goto wait_for_sndbuf;
+
+ if (is_tls_tx(csk) && !csk->tlshws.txleft) {
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
+
+ if (unlikely(msg->msg_controllen)) {
+ err = chtls_proccess_cmsg(sk, msg,
+ &record_type);
+ if (err)
+ goto out_err;
+
+ /* Avoid appending TLS handshake or alert records to TLS data */
+ if (skb)
+ tx_skb_finalize(skb);
+ }
+
+ recordsz = size;
+ csk->tlshws.txleft = recordsz;
+ csk->tlshws.type = record_type;
+ }
+
+ if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
+ copy <= 0) {
+new_buf:
+ if (skb) {
+ tx_skb_finalize(skb);
+ push_frames_if_head(sk);
+ }
+
+ if (is_tls_tx(csk)) {
+ skb = get_record_skb(sk,
+ select_size(sk,
+ recordsz,
+ flags,
+ TX_TLSHDR_LEN),
+ false);
+ } else {
+ skb = get_tx_skb(sk,
+ select_size(sk, size, flags,
+ TX_HEADER_LEN));
+ }
+ if (unlikely(!skb))
+ goto wait_for_memory;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ copy = mss;
+ }
+ if (copy > size)
+ copy = size;
+
+ if (skb_tailroom(skb) > 0) {
+ copy = min(copy, skb_tailroom(skb));
+ if (is_tls_tx(csk))
+ copy = min_t(int, copy, csk->tlshws.txleft);
+ err = skb_add_data_nocache(sk, skb,
+ &msg->msg_iter, copy);
+ if (err)
+ goto do_fault;
+ } else {
+ int i = skb_shinfo(skb)->nr_frags;
+ struct page *page = TCP_PAGE(sk);
+ int pg_size = PAGE_SIZE;
+ int off = TCP_OFF(sk);
+ bool merge;
+
+ if (page)
+ pg_size = page_size(page);
+ if (off < pg_size &&
+ skb_can_coalesce(skb, i, page, off)) {
+ merge = true;
+ goto copy;
+ }
+ merge = false;
+ if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
+ MAX_SKB_FRAGS))
+ goto new_buf;
+
+ if (page && off == pg_size) {
+ put_page(page);
+ TCP_PAGE(sk) = page = NULL;
+ pg_size = PAGE_SIZE;
+ }
+
+ if (!page) {
+ gfp_t gfp = sk->sk_allocation;
+ int order = cdev->send_page_order;
+
+ if (order) {
+ page = alloc_pages(gfp | __GFP_COMP |
+ __GFP_NOWARN |
+ __GFP_NORETRY,
+ order);
+ if (page)
+ pg_size <<= order;
+ }
+ if (!page) {
+ page = alloc_page(gfp);
+ pg_size = PAGE_SIZE;
+ }
+ if (!page)
+ goto wait_for_memory;
+ off = 0;
+ }
+copy:
+ if (copy > pg_size - off)
+ copy = pg_size - off;
+ if (is_tls_tx(csk))
+ copy = min_t(int, copy, csk->tlshws.txleft);
+
+ err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter,
+ skb, page,
+ off, copy);
+ if (unlikely(err)) {
+ if (!TCP_PAGE(sk)) {
+ TCP_PAGE(sk) = page;
+ TCP_OFF(sk) = 0;
+ }
+ goto do_fault;
+ }
+ /* Update the skb. */
+ if (merge) {
+ skb_frag_size_add(
+ &skb_shinfo(skb)->frags[i - 1],
+ copy);
+ } else {
+ skb_fill_page_desc(skb, i, page, off, copy);
+ if (off + copy < pg_size) {
+ /* space left, keep the page */
+ get_page(page);
+ TCP_PAGE(sk) = page;
+ } else {
+ TCP_PAGE(sk) = NULL;
+ }
+ }
+ TCP_OFF(sk) = off + copy;
+ }
+ if (unlikely(skb->len == mss))
+ tx_skb_finalize(skb);
+ tp->write_seq += copy;
+ copied += copy;
+ size -= copy;
+
+ if (is_tls_tx(csk))
+ csk->tlshws.txleft -= copy;
+
+ if (corked(tp, flags) &&
+ (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
+ ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
+
+ if (size == 0)
+ goto out;
+
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
+ push_frames_if_head(sk);
+ continue;
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ err = csk_wait_memory(cdev, sk, &timeo);
+ if (err)
+ goto do_error;
+ }
+out:
+ csk_reset_flag(csk, CSK_TX_MORE_DATA);
+ if (copied)
+ chtls_tcp_push(sk, flags);
+done:
+ release_sock(sk);
+ return copied;
+do_fault:
+ if (!skb->len) {
+ __skb_unlink(skb, &csk->txq);
+ sk->sk_wmem_queued -= skb->truesize;
+ __kfree_skb(skb);
+ }
+do_error:
+ if (copied)
+ goto out;
+out_err:
+ if (csk_conn_inline(csk))
+ csk_reset_flag(csk, CSK_TX_MORE_DATA);
+ copied = sk_stream_error(sk, flags, err);
+ goto done;
+}
+
+int chtls_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct chtls_sock *csk;
+ struct chtls_dev *cdev;
+ int mss, err, copied;
+ struct tcp_sock *tp;
+ long timeo;
+
+ tp = tcp_sk(sk);
+ copied = 0;
+ csk = rcu_dereference_sk_user_data(sk);
+ cdev = csk->cdev;
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+ err = sk_stream_wait_connect(sk, &timeo);
+ if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
+ err != 0)
+ goto out_err;
+
+ mss = csk->mss;
+ csk_set_flag(csk, CSK_TX_MORE_DATA);
+
+ while (size > 0) {
+ struct sk_buff *skb = skb_peek_tail(&csk->txq);
+ int copy, i;
+
+ if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
+ (copy = mss - skb->len) <= 0) {
+new_buf:
+ if (!csk_mem_free(cdev, sk))
+ goto wait_for_sndbuf;
+
+ if (is_tls_tx(csk)) {
+ skb = get_record_skb(sk,
+ select_size(sk, size,
+ flags,
+ TX_TLSHDR_LEN),
+ true);
+ } else {
+ skb = get_tx_skb(sk, 0);
+ }
+ if (!skb)
+ goto wait_for_memory;
+ copy = mss;
+ }
+ if (copy > size)
+ copy = size;
+
+ i = skb_shinfo(skb)->nr_frags;
+ if (skb_can_coalesce(skb, i, page, offset)) {
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
+ } else if (i < MAX_SKB_FRAGS) {
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, copy);
+ } else {
+ tx_skb_finalize(skb);
+ push_frames_if_head(sk);
+ goto new_buf;
+ }
+
+ skb->len += copy;
+ if (skb->len == mss)
+ tx_skb_finalize(skb);
+ skb->data_len += copy;
+ skb->truesize += copy;
+ sk->sk_wmem_queued += copy;
+ tp->write_seq += copy;
+ copied += copy;
+ offset += copy;
+ size -= copy;
+
+ if (corked(tp, flags) &&
+ (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
+ ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
+
+ if (!size)
+ break;
+
+ if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
+ push_frames_if_head(sk);
+ continue;
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ err = csk_wait_memory(cdev, sk, &timeo);
+ if (err)
+ goto do_error;
+ }
+out:
+ csk_reset_flag(csk, CSK_TX_MORE_DATA);
+ if (copied)
+ chtls_tcp_push(sk, flags);
+done:
+ release_sock(sk);
+ return copied;
+
+do_error:
+ if (copied)
+ goto out;
+
+out_err:
+ if (csk_conn_inline(csk))
+ csk_reset_flag(csk, CSK_TX_MORE_DATA);
+ copied = sk_stream_error(sk, flags, err);
+ goto done;
+}
+
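+/* Compute the receive window to advertise, growing it with the socket
+ * receive buffer but keeping it between MIN_RCV_WND and MAX_RCV_WND.
+ */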
+static void chtls_select_window(struct sock *sk)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned int wnd = tp->rcv_wnd;
+
+ wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
+ wnd = max_t(unsigned int, MIN_RCV_WND, wnd);
+
+ if (wnd > MAX_RCV_WND)
+ wnd = MAX_RCV_WND;
+
+/*
+ * Check if we need to grow the receive window in response to an increase in
+ * the socket's receive buffer size. Some applications increase the buffer
+ * size dynamically and rely on the window to grow accordingly.
+ */
+
+ if (wnd > tp->rcv_wnd) {
+ tp->rcv_wup -= wnd - tp->rcv_wnd;
+ tp->rcv_wnd = wnd;
+ /* Mark the receive window as updated */
+ csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
+ }
+}
+
+/*
+ * Send RX credits through an RX_DATA_ACK CPL message. We are permitted
+ * to return without sending the message in case we cannot allocate
+ * an sk_buff. Returns the number of credits sent.
+ */
+static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
+{
+ struct cpl_rx_data_ack *req;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb)
+ return 0;
+ __skb_put(skb, sizeof(*req));
+ req = (struct cpl_rx_data_ack *)skb->head;
+
+ set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+ csk->tid));
+ req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
+ RX_FORCE_ACK_F);
+ cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+ return credits;
+}
+
+#define CREDIT_RETURN_STATE (TCPF_ESTABLISHED | \
+ TCPF_FIN_WAIT1 | \
+ TCPF_FIN_WAIT2)
+
+/*
+ * Called after some received data has been read. It returns RX credits
+ * to the HW for the amount of data processed.
+ */
+static void chtls_cleanup_rbuf(struct sock *sk, int copied)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct tcp_sock *tp;
+ int must_send;
+ u32 credits;
+ u32 thres;
+
+ thres = 15 * 1024;
+
+ if (!sk_in_state(sk, CREDIT_RETURN_STATE))
+ return;
+
+ chtls_select_window(sk);
+ tp = tcp_sk(sk);
+ credits = tp->copied_seq - tp->rcv_wup;
+ if (unlikely(!credits))
+ return;
+
+/*
+ * For coalescing to work effectively ensure the receive window has
+ * at least 16KB left.
+ */
+ must_send = credits + 16384 >= tp->rcv_wnd;
+
+ if (must_send || credits >= thres)
+ tp->rcv_wup += send_rx_credits(csk, credits);
+}
+
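+/* recvmsg() for connections with inline TLS Rx: records arrive already
+ * decrypted, so copy the payload (and the record type via cmsg) to user
+ * space and return Rx credits as receive buffers are freed.
+ */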
+static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len)
+{
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ struct chtls_hws *hws = &csk->tlshws;
+ struct net_device *dev = csk->egress_dev;
+ struct adapter *adap = netdev2adap(dev);
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long avail;
+ int buffers_freed;
+ int copied = 0;
+ int target;
+ long timeo;
+
+ buffers_freed = 0;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+
+ if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+ chtls_cleanup_rbuf(sk, copied);
+
+ do {
+ struct sk_buff *skb;
+ u32 offset = 0;
+
+ if (unlikely(tp->urg_data &&
+ tp->urg_seq == tp->copied_seq)) {
+ if (copied)
+ break;
+ if (signal_pending(current)) {
+ copied = timeo ? sock_intr_errno(timeo) :
+ -EAGAIN;
+ break;
+ }
+ }
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ goto found_ok_skb;
+ if (csk->wr_credits &&
+ skb_queue_len(&csk->txq) &&
+ chtls_push_frames(csk, csk->wr_credits ==
+ csk->wr_max_credits))
+ sk->sk_write_space(sk);
+
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+ if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ signal_pending(current))
+ break;
+
+ if (!timeo)
+ break;
+ } else {
+ if (sock_flag(sk, SOCK_DONE))
+ break;
+ if (sk->sk_err) {
+ copied = sock_error(sk);
+ break;
+ }
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+ if (sk->sk_state == TCP_CLOSE) {
+ copied = -ENOTCONN;
+ break;
+ }
+ if (!timeo) {
+ copied = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ copied = sock_intr_errno(timeo);
+ break;
+ }
+ }
+ if (READ_ONCE(sk->sk_backlog.tail)) {
+ release_sock(sk);
+ lock_sock(sk);
+ chtls_cleanup_rbuf(sk, copied);
+ continue;
+ }
+
+ if (copied >= target)
+ break;
+ chtls_cleanup_rbuf(sk, copied);
+ sk_wait_data(sk, &timeo, NULL);
+ continue;
+found_ok_skb:
+ if (!skb->len) {
+ skb_dst_set(skb, NULL);
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
+
+ if (!copied && !timeo) {
+ copied = -EAGAIN;
+ break;
+ }
+
+ if (copied < target) {
+ release_sock(sk);
+ lock_sock(sk);
+ continue;
+ }
+ break;
+ }
+ offset = hws->copied_seq;
+ avail = skb->len - offset;
+ if (len < avail)
+ avail = len;
+
+ if (unlikely(tp->urg_data)) {
+ u32 urg_offset = tp->urg_seq - tp->copied_seq;
+
+ if (urg_offset < avail) {
+ if (urg_offset) {
+ avail = urg_offset;
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ /* First byte is urgent, skip */
+ tp->copied_seq++;
+ offset++;
+ avail--;
+ if (!avail)
+ goto skip_copy;
+ }
+ }
+ }
+ /* Set record type if not already done. For a non-data record,
+ * do not proceed if record type could not be copied.
+ */
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+ struct tls_hdr *thdr = (struct tls_hdr *)skb->data;
+ int cerr = 0;
+
+ cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+ sizeof(thdr->type), &thdr->type);
+
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
+ return -EIO;
+ /* don't send tls header, skip copy */
+ goto skip_copy;
+ }
+
+ if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
+ if (!copied) {
+ copied = -EFAULT;
+ break;
+ }
+ }
+
+ copied += avail;
+ len -= avail;
+ hws->copied_seq += avail;
+skip_copy:
+ if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
+ tp->urg_data = 0;
+
+ if ((avail + offset) >= skb->len) {
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+ tp->copied_seq += skb->len;
+ hws->rcvpld = skb->hdr_len;
+ } else {
+ atomic_inc(&adap->chcr_stats.tls_pdu_rx);
+ tp->copied_seq += hws->rcvpld;
+ }
+ chtls_free_skb(sk, skb);
+ buffers_freed++;
+ hws->copied_seq = 0;
+ if (copied >= target &&
+ !skb_peek(&sk->sk_receive_queue))
+ break;
+ }
+ } while (len > 0);
+
+ if (buffers_freed)
+ chtls_cleanup_rbuf(sk, copied);
+ release_sock(sk);
+ return copied;
+}
+
+/*
+ * Peek at data in a socket's receive buffer.
+ */
+static int peekmsg(struct sock *sk, struct msghdr *msg,
+ size_t len, int nonblock, int flags)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 peek_seq, offset;
+ struct sk_buff *skb;
+ int copied = 0;
+ size_t avail; /* amount of available data in current skb */
+ long timeo;
+
+ lock_sock(sk);
+ timeo = sock_rcvtimeo(sk, nonblock);
+ peek_seq = tp->copied_seq;
+
+ do {
+ if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
+ if (copied)
+ break;
+ if (signal_pending(current)) {
+ copied = timeo ? sock_intr_errno(timeo) :
+ -EAGAIN;
+ break;
+ }
+ }
+
+ skb_queue_walk(&sk->sk_receive_queue, skb) {
+ offset = peek_seq - ULP_SKB_CB(skb)->seq;
+ if (offset < skb->len)
+ goto found_ok_skb;
+ }
+
+ /* empty receive queue */
+ if (copied)
+ break;
+ if (sock_flag(sk, SOCK_DONE))
+ break;
+ if (sk->sk_err) {
+ copied = sock_error(sk);
+ break;
+ }
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+ if (sk->sk_state == TCP_CLOSE) {
+ copied = -ENOTCONN;
+ break;
+ }
+ if (!timeo) {
+ copied = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ copied = sock_intr_errno(timeo);
+ break;
+ }
+
+ if (READ_ONCE(sk->sk_backlog.tail)) {
+ /* Do not sleep, just process backlog. */
+ release_sock(sk);
+ lock_sock(sk);
+ } else {
+ sk_wait_data(sk, &timeo, NULL);
+ }
+
+ if (unlikely(peek_seq != tp->copied_seq)) {
+ if (net_ratelimit())
+ pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
+ current->comm, current->pid);
+ peek_seq = tp->copied_seq;
+ }
+ continue;
+
+found_ok_skb:
+ avail = skb->len - offset;
+ if (len < avail)
+ avail = len;
+ /*
+ * Do we have urgent data here? We need to skip over the
+ * urgent byte.
+ */
+ if (unlikely(tp->urg_data)) {
+ u32 urg_offset = tp->urg_seq - peek_seq;
+
+ if (urg_offset < avail) {
+ /*
+ * The amount of data we are preparing to copy
+ * contains urgent data.
+ */
+ if (!urg_offset) { /* First byte is urgent */
+ if (!sock_flag(sk, SOCK_URGINLINE)) {
+ peek_seq++;
+ offset++;
+ avail--;
+ }
+ if (!avail)
+ continue;
+ } else {
+ /* stop short of the urgent data */
+ avail = urg_offset;
+ }
+ }
+ }
+
+ /*
+ * If MSG_TRUNC is specified the data is discarded.
+ */
+ if (likely(!(flags & MSG_TRUNC)))
+ if (skb_copy_datagram_msg(skb, offset, msg, len)) {
+ if (!copied) {
+ copied = -EFAULT;
+ break;
+ }
+ }
+ peek_seq += avail;
+ copied += avail;
+ len -= avail;
+ } while (len > 0);
+
+ release_sock(sk);
+ return copied;
+}
+
+int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct chtls_sock *csk;
+ unsigned long avail; /* amount of available data in current skb */
+ int buffers_freed;
+ int copied = 0;
+ long timeo;
+ int target; /* Read at least this many bytes */
+
+ buffers_freed = 0;
+
+ if (unlikely(flags & MSG_OOB))
+ return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
+ addr_len);
+
+ if (unlikely(flags & MSG_PEEK))
+ return peekmsg(sk, msg, len, nonblock, flags);
+
+ if (sk_can_busy_loop(sk) &&
+ skb_queue_empty_lockless(&sk->sk_receive_queue) &&
+ sk->sk_state == TCP_ESTABLISHED)
+ sk_busy_loop(sk, nonblock);
+
+ lock_sock(sk);
+ csk = rcu_dereference_sk_user_data(sk);
+
+ if (is_tls_rx(csk))
+ return chtls_pt_recvmsg(sk, msg, len, nonblock,
+ flags, addr_len);
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+
+ if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+ chtls_cleanup_rbuf(sk, copied);
+
+ do {
+ struct sk_buff *skb;
+ u32 offset;
+
+ if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
+ if (copied)
+ break;
+ if (signal_pending(current)) {
+ copied = timeo ? sock_intr_errno(timeo) :
+ -EAGAIN;
+ break;
+ }
+ }
+
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ goto found_ok_skb;
+
+ if (csk->wr_credits &&
+ skb_queue_len(&csk->txq) &&
+ chtls_push_frames(csk, csk->wr_credits ==
+ csk->wr_max_credits))
+ sk->sk_write_space(sk);
+
+ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+ if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ signal_pending(current))
+ break;
+ } else {
+ if (sock_flag(sk, SOCK_DONE))
+ break;
+ if (sk->sk_err) {
+ copied = sock_error(sk);
+ break;
+ }
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+ if (sk->sk_state == TCP_CLOSE) {
+ copied = -ENOTCONN;
+ break;
+ }
+ if (!timeo) {
+ copied = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ copied = sock_intr_errno(timeo);
+ break;
+ }
+ }
+
+ if (READ_ONCE(sk->sk_backlog.tail)) {
+ release_sock(sk);
+ lock_sock(sk);
+ chtls_cleanup_rbuf(sk, copied);
+ continue;
+ }
+
+ if (copied >= target)
+ break;
+ chtls_cleanup_rbuf(sk, copied);
+ sk_wait_data(sk, &timeo, NULL);
+ continue;
+
+found_ok_skb:
+ if (!skb->len) {
+ chtls_kfree_skb(sk, skb);
+ if (!copied && !timeo) {
+ copied = -EAGAIN;
+ break;
+ }
+
+ if (copied < target)
+ continue;
+
+ break;
+ }
+
+ offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
+ avail = skb->len - offset;
+ if (len < avail)
+ avail = len;
+
+ if (unlikely(tp->urg_data)) {
+ u32 urg_offset = tp->urg_seq - tp->copied_seq;
+
+ if (urg_offset < avail) {
+ if (urg_offset) {
+ avail = urg_offset;
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ tp->copied_seq++;
+ offset++;
+ avail--;
+ if (!avail)
+ goto skip_copy;
+ }
+ }
+ }
+
+ if (likely(!(flags & MSG_TRUNC))) {
+ if (skb_copy_datagram_msg(skb, offset,
+ msg, avail)) {
+ if (!copied) {
+ copied = -EFAULT;
+ break;
+ }
+ }
+ }
+
+ tp->copied_seq += avail;
+ copied += avail;
+ len -= avail;
+
+skip_copy:
+ if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
+ tp->urg_data = 0;
+
+ if (avail + offset >= skb->len) {
+ chtls_free_skb(sk, skb);
+ buffers_freed++;
+
+ if (copied >= target &&
+ !skb_peek(&sk->sk_receive_queue))
+ break;
+ }
+ } while (len > 0);
+
+ if (buffers_freed)
+ chtls_cleanup_rbuf(sk, copied);
+
+ release_sock(sk);
+ return copied;
+}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
new file mode 100644
index 000000000000..9098b3eed4da
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#include <net/tcp.h>
+#include <net/tls.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
+#define DRV_NAME "chtls"
+
+/*
+ * chtls device management
+ * maintains a list of the chtls devices
+ */
+static LIST_HEAD(cdev_list);
+static DEFINE_MUTEX(cdev_mutex);
+
+static DEFINE_MUTEX(notify_mutex);
+static RAW_NOTIFIER_HEAD(listen_notify_list);
+static struct proto chtls_cpl_prot, chtls_cpl_protv6;
+struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6;
+static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;
+
+static void register_listen_notifier(struct notifier_block *nb)
+{
+ mutex_lock(&notify_mutex);
+ raw_notifier_chain_register(&listen_notify_list, nb);
+ mutex_unlock(&notify_mutex);
+}
+
+static void unregister_listen_notifier(struct notifier_block *nb)
+{
+ mutex_lock(&notify_mutex);
+ raw_notifier_chain_unregister(&listen_notify_list, nb);
+ mutex_unlock(&notify_mutex);
+}
+
+static int listen_notify_handler(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct chtls_listen *clisten;
+ int ret = NOTIFY_DONE;
+
+ clisten = (struct chtls_listen *)data;
+
+ switch (event) {
+ case CHTLS_LISTEN_START:
+ ret = chtls_listen_start(clisten->cdev, clisten->sk);
+ kfree(clisten);
+ break;
+ case CHTLS_LISTEN_STOP:
+ chtls_listen_stop(clisten->cdev, clisten->sk);
+ kfree(clisten);
+ break;
+ }
+ return ret;
+}
+
+static struct notifier_block listen_notifier = {
+ .notifier_call = listen_notify_handler
+};
+
+static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ if (likely(skb_transport_header(skb) != skb_network_header(skb)))
+ return tcp_v4_do_rcv(sk, skb);
+ BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
+ return 0;
+}
+
+static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
+{
+ struct chtls_listen *clisten;
+
+ if (sk->sk_protocol != IPPROTO_TCP)
+ return -EPROTONOSUPPORT;
+
+ if (sk->sk_family == PF_INET &&
+ LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
+ return -EADDRNOTAVAIL;
+
+ sk->sk_backlog_rcv = listen_backlog_rcv;
+ clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+ if (!clisten)
+ return -ENOMEM;
+ clisten->cdev = cdev;
+ clisten->sk = sk;
+ mutex_lock(&notify_mutex);
+ raw_notifier_call_chain(&listen_notify_list,
+ CHTLS_LISTEN_START, clisten);
+ mutex_unlock(&notify_mutex);
+ return 0;
+}
+
+static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
+{
+ struct chtls_listen *clisten;
+
+ if (sk->sk_protocol != IPPROTO_TCP)
+ return;
+
+ clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+ if (!clisten)
+ return;
+ clisten->cdev = cdev;
+ clisten->sk = sk;
+ mutex_lock(&notify_mutex);
+ raw_notifier_call_chain(&listen_notify_list,
+ CHTLS_LISTEN_STOP, clisten);
+ mutex_unlock(&notify_mutex);
+}
+
+static int chtls_inline_feature(struct tls_toe_device *dev)
+{
+ struct net_device *netdev;
+ struct chtls_dev *cdev;
+ int i;
+
+ cdev = to_chtls_dev(dev);
+
+ for (i = 0; i < cdev->lldi->nports; i++) {
+ netdev = cdev->ports[i];
+ if (netdev->features & NETIF_F_HW_TLS_RECORD)
+ return 1;
+ }
+ return 0;
+}
+
+static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
+{
+ struct chtls_dev *cdev = to_chtls_dev(dev);
+
+ if (sk->sk_state == TCP_LISTEN)
+ return chtls_start_listen(cdev, sk);
+ return 0;
+}
+
+static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
+{
+ struct chtls_dev *cdev = to_chtls_dev(dev);
+
+ if (sk->sk_state == TCP_LISTEN)
+ chtls_stop_listen(cdev, sk);
+}
+
+static void chtls_free_uld(struct chtls_dev *cdev)
+{
+ int i;
+
+ tls_toe_unregister_device(&cdev->tlsdev);
+ kvfree(cdev->kmap.addr);
+ idr_destroy(&cdev->hwtid_idr);
+ for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
+ kfree_skb(cdev->rspq_skb_cache[i]);
+ kfree(cdev->lldi);
+ kfree_skb(cdev->askb);
+ kfree(cdev);
+}
+
+static inline void chtls_dev_release(struct kref *kref)
+{
+ struct tls_toe_device *dev;
+ struct chtls_dev *cdev;
+ struct adapter *adap;
+
+ dev = container_of(kref, struct tls_toe_device, kref);
+ cdev = to_chtls_dev(dev);
+
+ /* Reset tls rx/tx stats */
+ adap = pci_get_drvdata(cdev->pdev);
+ atomic_set(&adap->chcr_stats.tls_pdu_tx, 0);
+ atomic_set(&adap->chcr_stats.tls_pdu_rx, 0);
+
+ chtls_free_uld(cdev);
+}
+
+static void chtls_register_dev(struct chtls_dev *cdev)
+{
+ struct tls_toe_device *tlsdev = &cdev->tlsdev;
+
+ strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
+ strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
+ TLS_TOE_DEVICE_NAME_MAX);
+ tlsdev->feature = chtls_inline_feature;
+ tlsdev->hash = chtls_create_hash;
+ tlsdev->unhash = chtls_destroy_hash;
+ tlsdev->release = chtls_dev_release;
+ kref_init(&tlsdev->kref);
+ tls_toe_register_device(tlsdev);
+ cdev->cdev_state = CHTLS_CDEV_STATE_UP;
+}
+
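+/* Work-queue handler that replays CPL messages deferred from atomic
+ * context; each skb's deferred handler runs with the queue lock dropped.
+ */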
+static void process_deferq(struct work_struct *task_param)
+{
+ struct chtls_dev *cdev = container_of(task_param,
+ struct chtls_dev, deferq_task);
+ struct sk_buff *skb;
+
+ spin_lock_bh(&cdev->deferq.lock);
+ while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
+ spin_unlock_bh(&cdev->deferq.lock);
+ DEFERRED_SKB_CB(skb)->handler(cdev, skb);
+ spin_lock_bh(&cdev->deferq.lock);
+ }
+ spin_unlock_bh(&cdev->deferq.lock);
+}
+
+static int chtls_get_skb(struct chtls_dev *cdev)
+{
+ cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
+ if (!cdev->askb)
+ return -ENOMEM;
+
+ skb_put(cdev->askb, sizeof(struct tcphdr));
+ skb_reset_transport_header(cdev->askb);
+ memset(cdev->askb->data, 0, cdev->askb->len);
+ return 0;
+}
+
+static void *chtls_uld_add(const struct cxgb4_lld_info *info)
+{
+ struct cxgb4_lld_info *lldi;
+ struct chtls_dev *cdev;
+ int i, j;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ goto out;
+
+ lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
+ if (!lldi)
+ goto out_lldi;
+
+ if (chtls_get_skb(cdev))
+ goto out_skb;
+
+ *lldi = *info;
+ cdev->lldi = lldi;
+ cdev->pdev = lldi->pdev;
+ cdev->tids = lldi->tids;
+ cdev->ports = lldi->ports;
+ cdev->mtus = lldi->mtus;
+ cdev->tids = lldi->tids;
+ cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
+ << FW_VIID_PFN_S;
+
+ for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
+ unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;
+
+ cdev->rspq_skb_cache[i] = __alloc_skb(size,
+ gfp_any(), 0,
+ lldi->nodeid);
+ if (unlikely(!cdev->rspq_skb_cache[i]))
+ goto out_rspq_skb;
+ }
+
+ idr_init(&cdev->hwtid_idr);
+ INIT_WORK(&cdev->deferq_task, process_deferq);
+ spin_lock_init(&cdev->listen_lock);
+ spin_lock_init(&cdev->idr_lock);
+ cdev->send_page_order = min_t(uint, get_order(32768),
+ send_page_order);
+ cdev->max_host_sndbuf = 48 * 1024;
+
+ if (lldi->vr->key.size)
+ if (chtls_init_kmap(cdev, lldi))
+ goto out_rspq_skb;
+
+ mutex_lock(&cdev_mutex);
+ list_add_tail(&cdev->list, &cdev_list);
+ mutex_unlock(&cdev_mutex);
+
+ return cdev;
+out_rspq_skb:
+ for (j = 0; j < i; j++)
+ kfree_skb(cdev->rspq_skb_cache[j]);
+ kfree_skb(cdev->askb);
+out_skb:
+ kfree(lldi);
+out_lldi:
+ kfree(cdev);
+out:
+ return NULL;
+}
+
+static void chtls_free_all_uld(void)
+{
+ struct chtls_dev *cdev, *tmp;
+
+ mutex_lock(&cdev_mutex);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+ if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
+ list_del(&cdev->list);
+ kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
+ }
+ }
+ mutex_unlock(&cdev_mutex);
+}
+
+static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+ struct chtls_dev *cdev = handle;
+
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ chtls_register_dev(cdev);
+ break;
+ case CXGB4_STATE_DOWN:
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ break;
+ case CXGB4_STATE_DETACH:
+ mutex_lock(&cdev_mutex);
+ list_del(&cdev->list);
+ mutex_unlock(&cdev_mutex);
+ kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+ const __be64 *rsp,
+ u32 pktshift)
+{
+ struct sk_buff *skb;
+
+ /* Allocate space for the cpl_pass_accept_req which will be synthesized by
+ * the driver. Once the driver synthesizes the cpl_pass_accept_req, the skb
+ * will go through the regular cpl_pass_accept_req processing in TOM.
+ */
+ skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
+ - pktshift, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+ __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
+ - pktshift);
+ /* For now we copy the cpl_rx_pkt into the skb */
+ skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
+ skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req)
+ , gl->va + pktshift,
+ gl->tot_len - pktshift);
+
+ return skb;
+}
+
+static int chtls_recv_packet(struct chtls_dev *cdev,
+ const struct pkt_gl *gl, const __be64 *rsp)
+{
+ unsigned int opcode = *(u8 *)rsp;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = chtls_handlers[opcode](cdev, skb);
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+
+ return 0;
+}
+
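+/* Handle a CPL response that arrived without a packet gather list by copying
+ * it into a (possibly recycled) per-bin skb from rspq_skb_cache.
+ */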
+static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
+{
+ unsigned long rspq_bin;
+ unsigned int opcode;
+ struct sk_buff *skb;
+ unsigned int len;
+ int ret;
+
+ len = 64 - sizeof(struct rsp_ctrl) - 8;
+ opcode = *(u8 *)rsp;
+
+ rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
+ skb = cdev->rspq_skb_cache[rspq_bin];
+ if (skb && !skb_is_nonlinear(skb) &&
+ !skb_shared(skb) && !skb_cloned(skb)) {
+ refcount_inc(&skb->users);
+ if (refcount_read(&skb->users) == 2) {
+ __skb_trim(skb, 0);
+ if (skb_tailroom(skb) >= len)
+ goto copy_out;
+ }
+ refcount_dec(&skb->users);
+ }
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+copy_out:
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, rsp, len);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ ret = chtls_handlers[opcode](cdev, skb);
+
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+ return 0;
+}
+
+static void chtls_recv(struct chtls_dev *cdev,
+ struct sk_buff **skbs, const __be64 *rsp)
+{
+ struct sk_buff *skb = *skbs;
+ unsigned int opcode;
+ int ret;
+
+ opcode = *(u8 *)rsp;
+
+ __skb_push(skb, sizeof(struct rss_header));
+ skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));
+
+ ret = chtls_handlers[opcode](cdev, skb);
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+}
+
+static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct chtls_dev *cdev = handle;
+ unsigned int opcode;
+ struct sk_buff *skb;
+
+ opcode = *(u8 *)rsp;
+
+ if (unlikely(opcode == CPL_RX_PKT)) {
+ if (chtls_recv_packet(cdev, gl, rsp) < 0)
+ goto nomem;
+ return 0;
+ }
+
+ if (!gl)
+ return chtls_recv_rsp(cdev, rsp);
+
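+ /* Copy up to RX_PULL_LEN bytes of the frame into the skb linear area; the remainder stays in page fragments */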
+#define RX_PULL_LEN 128
+ skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb))
+ goto nomem;
+ chtls_recv(cdev, &skb, rsp);
+ return 0;
+
+nomem:
+ return -ENOMEM;
+}
+
+static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
+ int __user *optlen)
+{
+ struct tls_crypto_info crypto_info = { 0 };
+
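+ /* Only the TLS version is reported back to user space; key material is not exposed */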
+ crypto_info.version = TLS_1_2_VERSION;
+ if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int chtls_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (level != SOL_TLS)
+ return ctx->sk_proto->getsockopt(sk, level,
+ optname, optval, optlen);
+
+ return do_chtls_getsockopt(sk, optval, optlen);
+}
+
+static int do_chtls_setsockopt(struct sock *sk, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct tls_crypto_info *crypto_info, tmp_crypto_info;
+ struct chtls_sock *csk;
+ int keylen;
+ int cipher_type;
+ int rc = 0;
+
+ csk = rcu_dereference_sk_user_data(sk);
+
+ if (sockptr_is_null(optval) || optlen < sizeof(*crypto_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = copy_from_sockptr(&tmp_crypto_info, optval, sizeof(*crypto_info));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* check version */
+ if (tmp_crypto_info.version != TLS_1_2_VERSION) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
+
+ /* AES-GCM supports 128-bit and 256-bit keys, so copy the key
+ * material from user space based on the GCM cipher type.
+ */
+ switch (tmp_crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ /* Obtain version and type from previous copy */
+ crypto_info[0] = tmp_crypto_info;
+ /* Now copy the following data */
+ rc = copy_from_sockptr_offset((char *)crypto_info +
+ sizeof(*crypto_info),
+ optval, sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_128)
+ - sizeof(*crypto_info));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+ cipher_type = TLS_CIPHER_AES_GCM_128;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ crypto_info[0] = tmp_crypto_info;
+ rc = copy_from_sockptr_offset((char *)crypto_info +
+ sizeof(*crypto_info),
+ optval, sizeof(*crypto_info),
+ sizeof(struct tls12_crypto_info_aes_gcm_256)
+ - sizeof(*crypto_info));
+
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ cipher_type = TLS_CIPHER_AES_GCM_256;
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = chtls_setkey(csk, keylen, optname, cipher_type);
+out:
+ return rc;
+}
+
+static int chtls_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (level != SOL_TLS)
+ return ctx->sk_proto->setsockopt(sk, level,
+ optname, optval, optlen);
+
+ return do_chtls_setsockopt(sk, optname, optval, optlen);
+}
+
+static struct cxgb4_uld_info chtls_uld_info = {
+ .name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .add = chtls_uld_add,
+ .state_change = chtls_uld_state_change,
+ .rx_handler = chtls_uld_rx_handler,
+};
+
+void chtls_install_cpl_ops(struct sock *sk)
+{
+ if (sk->sk_family == AF_INET)
+ sk->sk_prot = &chtls_cpl_prot;
+ else
+ sk->sk_prot = &chtls_cpl_protv6;
+}
+
+static void __init chtls_init_ulp_ops(void)
+{
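+ /* Start from the stock tcp_prot and override only the hooks chtls implements */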
+ chtls_cpl_prot = tcp_prot;
+ chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
+ &tcp_prot, PF_INET);
+ chtls_cpl_prot.close = chtls_close;
+ chtls_cpl_prot.disconnect = chtls_disconnect;
+ chtls_cpl_prot.destroy = chtls_destroy_sock;
+ chtls_cpl_prot.shutdown = chtls_shutdown;
+ chtls_cpl_prot.sendmsg = chtls_sendmsg;
+ chtls_cpl_prot.sendpage = chtls_sendpage;
+ chtls_cpl_prot.recvmsg = chtls_recvmsg;
+ chtls_cpl_prot.setsockopt = chtls_setsockopt;
+ chtls_cpl_prot.getsockopt = chtls_getsockopt;
+#if IS_ENABLED(CONFIG_IPV6)
+ chtls_cpl_protv6 = chtls_cpl_prot;
+ chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
+ &tcpv6_prot, PF_INET6);
+#endif
+}
+
+static int __init chtls_register(void)
+{
+ chtls_init_ulp_ops();
+ register_listen_notifier(&listen_notifier);
+ cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
+ return 0;
+}
+
+static void __exit chtls_unregister(void)
+{
+ unregister_listen_notifier(&listen_notifier);
+ chtls_free_all_uld();
+ cxgb4_unregister_uld(CXGB4_ULD_TLS);
+}
+
+module_init(chtls_register);
+module_exit(chtls_unregister);
+
+MODULE_DESCRIPTION("Chelsio TLS Inline driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(CHTLS_DRV_VERSION);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.h b/drivers/net/ethernet/cirrus/cs89x0.h
index 91423b70bb45..210f9ec9af4b 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.h
+++ b/drivers/net/ethernet/cirrus/cs89x0.h
@@ -459,7 +459,3 @@
#define PNP_CNF_INT 0x70
#define PNP_CNF_DMA 0x74
#define PNP_CNF_MEM 0x48
-
-#define BIT0 1
-#define BIT15 0x8000
-
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index be6d8a9ada27..e8e563d6e86b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -7,7 +7,6 @@
*/
-#define DRV_NAME "DL2000/TC902x-based linux driver"
#include "dl2k.h"
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
index 83dee575c2fa..84de0644168d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
@@ -17,12 +17,12 @@ static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
return 0;
}
-static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
+static inline bool dpaa2_eth_is_prio_enabled(u8 pfc_en, u8 tc)
{
return !!(pfc_en & (1 << tc));
}
-static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
+static int dpaa2_eth_set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
{
struct dpni_congestion_notification_cfg cfg = {0};
int i, err;
@@ -33,7 +33,7 @@ static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
cfg.message_ctx = 0ULL;
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
- if (is_prio_enabled(pfc_en, i)) {
+ if (dpaa2_eth_is_prio_enabled(pfc_en, i)) {
cfg.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
cfg.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
} else {
@@ -93,7 +93,7 @@ static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
}
/* Configure congestion notifications for the enabled priorities */
- err = set_pfc_cn(priv, pfc->pfc_en);
+ err = dpaa2_eth_set_pfc_cn(priv, pfc->pfc_en);
if (err)
return err;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cf5383bb8331..ceaf76158e23 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -40,9 +40,9 @@ static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
return phys_to_virt(phys_addr);
}
-static void validate_rx_csum(struct dpaa2_eth_priv *priv,
- u32 fd_status,
- struct sk_buff *skb)
+static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -62,9 +62,9 @@ static void validate_rx_csum(struct dpaa2_eth_priv *priv,
/* Free a received FD.
* Not to be used for Tx conf FDs or on any other paths.
*/
-static void free_rx_fd(struct dpaa2_eth_priv *priv,
- const struct dpaa2_fd *fd,
- void *vaddr)
+static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t addr = dpaa2_fd_get_addr(fd);
@@ -100,9 +100,9 @@ free_buf:
}
/* Build a linear skb based on a single-buffer frame descriptor */
-static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- void *fd_vaddr)
+static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
{
struct sk_buff *skb = NULL;
u16 fd_offset = dpaa2_fd_get_offset(fd);
@@ -121,9 +121,9 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
}
/* Build a non linear (fragmented) skb based on a S/G table */
-static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_sg_entry *sgt)
+static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
{
struct sk_buff *skb = NULL;
struct device *dev = priv->net_dev->dev.parent;
@@ -204,7 +204,8 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Free buffers acquired from the buffer pool or which were meant to
* be released in the pool
*/
-static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
+ int count)
{
struct device *dev = priv->net_dev->dev.parent;
void *vaddr;
@@ -218,9 +219,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
}
}
-static void xdp_release_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
@@ -238,7 +239,7 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
}
if (err) {
- free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+ dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
ch->buf_count -= ch->xdp.drop_cnt;
}
@@ -274,9 +275,9 @@ static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
return total_enqueued;
}
-static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq *fq)
+static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_fd *fds;
@@ -295,17 +296,17 @@ static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
ch->stats.xdp_tx++;
}
for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
- xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
percpu_stats->tx_errors++;
ch->stats.xdp_tx_err++;
}
fq->xdp_tx_fds.num = 0;
}
-static void xdp_enqueue(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
struct dpaa2_faead *faead;
struct dpaa2_fd *dest_fd;
@@ -333,13 +334,13 @@ static void xdp_enqueue(struct dpaa2_eth_priv *priv,
if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
return;
- xdp_tx_flush(priv, ch, fq);
+ dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}
-static u32 run_xdp(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq *rx_fq,
- struct dpaa2_fd *fd, void *vaddr)
+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
struct bpf_prog *xdp_prog;
@@ -372,7 +373,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
case XDP_PASS:
break;
case XDP_TX:
- xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
bpf_warn_invalid_xdp_action(xdp_act);
@@ -381,7 +382,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_DROP:
- xdp_release_buf(priv, ch, addr);
+ dpaa2_eth_xdp_release_buf(priv, ch, addr);
ch->stats.xdp_drop++;
break;
case XDP_REDIRECT:
@@ -441,7 +442,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
- xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
if (xdp_act != XDP_PASS) {
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
@@ -450,13 +451,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- skb = build_linear_skb(ch, fd, vaddr);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- skb = build_frag_skb(priv, ch, buf_data);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0);
percpu_extras->rx_sg_frames++;
percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
@@ -485,7 +486,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
/* Check if we need to validate the L4 csum */
if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
status = le32_to_cpu(fas->status);
- validate_rx_csum(priv, status, skb);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
@@ -499,7 +500,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
err_build_skb:
- free_rx_fd(priv, fd, vaddr);
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
percpu_stats->rx_dropped++;
}
@@ -510,8 +511,8 @@ err_frame_format:
*
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
-static int consume_frames(struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq **src)
+static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
struct dpaa2_eth_fq *fq = NULL;
@@ -560,7 +561,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
}
/* Configure the egress frame annotation for timestamp update */
-static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
{
struct dpaa2_faead *faead;
u32 ctrl, frc;
@@ -582,9 +583,9 @@ static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
}
/* Create a frame descriptor based on a fragmented skb */
-static int build_sg_fd(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
{
struct device *dev = priv->net_dev->dev.parent;
void *sgt_buf = NULL;
@@ -673,7 +674,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, sgt_buf);
+ dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
return 0;
@@ -692,9 +693,9 @@ dma_map_sg_failed:
* enough for the HW requirements, thus instead of realloc-ing the skb we
* create a SG frame descriptor with only one entry.
*/
-static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_sgt_cache *sgt_cache;
@@ -751,7 +752,7 @@ static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, sgt_buf);
+ dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
return 0;
@@ -767,9 +768,9 @@ data_map_failed:
}
/* Create a frame descriptor based on a linear skb */
-static int build_single_fd(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
{
struct device *dev = priv->net_dev->dev.parent;
u8 *buffer_start, *aligned_start;
@@ -807,7 +808,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, buffer_start);
+ dpaa2_eth_enable_tx_tstamp(fd, buffer_start);
return 0;
}
@@ -819,9 +820,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
*/
-static void free_tx_fd(const struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- const struct dpaa2_fd *fd, bool in_napi)
+static void dpaa2_eth_free_tx_fd(const struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr, sg_addr;
@@ -954,17 +955,17 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
memset(&fd, 0, sizeof(fd));
if (skb_is_nonlinear(skb)) {
- err = build_sg_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd(priv, skb, &fd);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
} else if (skb_headroom(skb) < needed_headroom) {
- err = build_sg_fd_single_buf(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
} else {
- err = build_single_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_single_fd(priv, skb, &fd);
}
if (unlikely(err)) {
@@ -1010,7 +1011,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
percpu_stats->tx_packets++;
@@ -1045,7 +1046,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- free_tx_fd(priv, fq, fd, true);
+ dpaa2_eth_free_tx_fd(priv, fq, fd, true);
if (likely(!fd_errors))
return;
@@ -1059,7 +1060,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
-static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1082,7 +1083,7 @@ static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
-static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1106,8 +1107,8 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
-static int add_bufs(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch, u16 bpid)
+static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch, u16 bpid)
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
@@ -1155,7 +1156,7 @@ release_bufs:
* not much else we can do about it
*/
if (err) {
- free_bufs(priv, buf_array, i);
+ dpaa2_eth_free_bufs(priv, buf_array, i);
return 0;
}
@@ -1173,7 +1174,7 @@ err_alloc:
return 0;
}
-static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
int i, j;
int new_count;
@@ -1181,7 +1182,7 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
for (j = 0; j < priv->num_channels; j++) {
for (i = 0; i < DPAA2_ETH_NUM_BUFS;
i += DPAA2_ETH_BUFS_PER_CMD) {
- new_count = add_bufs(priv, priv->channel[j], bpid);
+ new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
priv->channel[j]->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -1197,7 +1198,7 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
* Drain the specified number of buffers from the DPNI's private buffer pool.
* @count must not exceed DPAA2_ETH_BUFS_PER_CMD
*/
-static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
int retries = 0;
@@ -1213,17 +1214,17 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- free_bufs(priv, buf_array, ret);
+ dpaa2_eth_free_bufs(priv, buf_array, ret);
retries = 0;
} while (ret);
}
-static void drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
int i;
- drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
- drain_bufs(priv, 1);
+ dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, 1);
for (i = 0; i < priv->num_channels; i++)
priv->channel[i]->buf_count = 0;
@@ -1232,9 +1233,9 @@ static void drain_pool(struct dpaa2_eth_priv *priv)
/* Function is called from softirq context only, so we don't need to guard
* the access to percpu count
*/
-static int refill_pool(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- u16 bpid)
+static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ u16 bpid)
{
int new_count;
@@ -1242,7 +1243,7 @@ static int refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
- new_count = add_bufs(priv, ch, bpid);
+ new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -1272,7 +1273,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
}
}
-static int pull_channel(struct dpaa2_eth_channel *ch)
+static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
int err;
int dequeues = -1;
@@ -1319,14 +1320,14 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
ch->rx_list = &rx_list;
do {
- err = pull_channel(ch);
+ err = dpaa2_eth_pull_channel(ch);
if (unlikely(err))
break;
/* Refill pool if appropriate */
- refill_pool(priv, ch, priv->bpid);
+ dpaa2_eth_refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &fq);
+ store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
if (store_cleaned <= 0)
break;
if (fq->type == DPAA2_RX_FQ) {
@@ -1375,12 +1376,12 @@ out:
if (ch->xdp.res & XDP_REDIRECT)
xdp_do_flush_map();
else if (rx_cleaned && ch->xdp.res & XDP_TX)
- xdp_tx_flush(priv, ch, &priv->fq[flowid]);
+ dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
}
-static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *ch;
int i;
@@ -1391,7 +1392,7 @@ static void enable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *ch;
int i;
@@ -1465,7 +1466,7 @@ set_cgtd:
priv->rx_cgtd_enabled = td.enable;
}
-static int link_state_update(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
bool tx_pause;
@@ -1517,7 +1518,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
- err = seed_pool(priv, priv->bpid);
+ err = dpaa2_eth_seed_pool(priv, priv->bpid);
if (err) {
/* Not much to do; the buffer pool, though not filled up,
* may still contain some buffers which would enable us
@@ -1541,7 +1542,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
*/
netif_carrier_off(net_dev);
}
- enable_ch_napi(priv);
+ dpaa2_eth_enable_ch_napi(priv);
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
@@ -1553,7 +1554,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
/* If the DPMAC object has already processed the link up
* interrupt, we have to learn the link state ourselves.
*/
- err = link_state_update(priv);
+ err = dpaa2_eth_link_state_update(priv);
if (err < 0) {
netdev_err(net_dev, "Can't update link state\n");
goto link_state_err;
@@ -1566,13 +1567,13 @@ static int dpaa2_eth_open(struct net_device *net_dev)
link_state_err:
enable_err:
- disable_ch_napi(priv);
- drain_pool(priv);
+ dpaa2_eth_disable_ch_napi(priv);
+ dpaa2_eth_drain_pool(priv);
return err;
}
/* Total number of in-flight frames on ingress queues */
-static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
+static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_fq *fq;
u32 fcnt = 0, bcnt = 0, total = 0;
@@ -1591,13 +1592,13 @@ static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
return total;
}
-static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
int retries = 10;
u32 pending;
do {
- pending = ingress_fq_count(priv);
+ pending = dpaa2_eth_ingress_fq_count(priv);
if (pending)
msleep(100);
} while (pending && --retries);
@@ -1605,7 +1606,7 @@ static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
#define DPNI_TX_PENDING_VER_MAJOR 7
#define DPNI_TX_PENDING_VER_MINOR 13
-static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
union dpni_statistics stats;
int retries = 10;
@@ -1651,7 +1652,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
* on WRIOP. After it finishes, wait until all remaining frames on Rx
* and Tx conf queues are consumed on NAPI poll.
*/
- wait_for_egress_fq_empty(priv);
+ dpaa2_eth_wait_for_egress_fq_empty(priv);
do {
dpni_disable(priv->mc_io, 0, priv->mc_token);
@@ -1667,11 +1668,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
*/
}
- wait_for_ingress_fq_empty(priv);
- disable_ch_napi(priv);
+ dpaa2_eth_wait_for_ingress_fq_empty(priv);
+ dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */
- drain_pool(priv);
+ dpaa2_eth_drain_pool(priv);
/* Empty the Scatter-Gather Buffer cache */
dpaa2_eth_sgt_cache_drain(priv);
@@ -1725,8 +1726,8 @@ static void dpaa2_eth_get_stats(struct net_device *net_dev,
/* Copy mac unicast addresses from @net_dev to @priv.
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
-static void add_uc_hw_addr(const struct net_device *net_dev,
- struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
{
struct netdev_hw_addr *ha;
int err;
@@ -1744,8 +1745,8 @@ static void add_uc_hw_addr(const struct net_device *net_dev,
/* Copy mac multicast addresses from @net_dev to @priv
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
-static void add_mc_hw_addr(const struct net_device *net_dev,
- struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
{
struct netdev_hw_addr *ha;
int err;
@@ -1810,7 +1811,7 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
if (err)
netdev_warn(net_dev, "Can't clear uc filters\n");
- add_uc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Finally, clear uc promisc and set mc promisc as requested. */
err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
@@ -1833,8 +1834,8 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
if (err)
netdev_warn(net_dev, "Can't clear mac filters\n");
- add_mc_hw_addr(net_dev, priv);
- add_uc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_mc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Now we can clear both ucast and mcast promisc, without risking
* to drop legitimate frames anymore.
@@ -1868,14 +1869,14 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
- err = set_rx_csum(priv, enable);
+ err = dpaa2_eth_set_rx_csum(priv, enable);
if (err)
return err;
}
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
- err = set_tx_csum(priv, enable);
+ err = dpaa2_eth_set_tx_csum(priv, enable);
if (err)
return err;
}
@@ -1944,7 +1945,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
return true;
}
-static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
int mfl, err;
@@ -1978,7 +1979,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
if (!xdp_mtu_valid(priv, new_mtu))
return -EINVAL;
- err = set_rx_mfl(priv, new_mtu, true);
+ err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
if (err)
return err;
@@ -1987,7 +1988,7 @@ out:
return 0;
}
-static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
struct dpni_buffer_layout buf_layout = {0};
int err;
@@ -2013,7 +2014,7 @@ static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
return 0;
}
-static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
struct dpaa2_eth_channel *ch;
@@ -2039,10 +2040,10 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
* so we are sure no old format buffers will be used from now on.
*/
if (need_update) {
- err = set_rx_mfl(priv, dev->mtu, !!prog);
+ err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
if (err)
goto out_err;
- err = update_rx_buffer_headroom(priv, !!prog);
+ err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
if (err)
goto out_err;
}
@@ -2079,7 +2080,7 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return setup_xdp(dev, xdp->prog);
+ return dpaa2_eth_setup_xdp(dev, xdp->prog);
default:
return -EINVAL;
}
@@ -2316,7 +2317,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_setup_tc = dpaa2_eth_setup_tc,
};
-static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
struct dpaa2_eth_channel *ch;
@@ -2329,7 +2330,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
}
/* Allocate and configure a DPCON object */
-static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpcon;
struct device *dev = priv->net_dev->dev.parent;
@@ -2373,16 +2374,15 @@ free:
return ERR_PTR(err);
}
-static void free_dpcon(struct dpaa2_eth_priv *priv,
- struct fsl_mc_device *dpcon)
+static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
+ struct fsl_mc_device *dpcon)
{
dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
fsl_mc_object_free(dpcon);
}
-static struct dpaa2_eth_channel *
-alloc_channel(struct dpaa2_eth_priv *priv)
+static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *channel;
struct dpcon_attr attr;
@@ -2393,7 +2393,7 @@ alloc_channel(struct dpaa2_eth_priv *priv)
if (!channel)
return NULL;
- channel->dpcon = setup_dpcon(priv);
+ channel->dpcon = dpaa2_eth_setup_dpcon(priv);
if (IS_ERR(channel->dpcon)) {
err = PTR_ERR(channel->dpcon);
goto err_setup;
@@ -2413,23 +2413,23 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return channel;
err_get_attr:
- free_dpcon(priv, channel->dpcon);
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
err_setup:
kfree(channel);
return ERR_PTR(err);
}
-static void free_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *channel)
+static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *channel)
{
- free_dpcon(priv, channel->dpcon);
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
kfree(channel);
}
/* DPIO setup: allocate and configure QBMan channels, setup core affinity
* and register data availability notifications
*/
-static int setup_dpio(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
{
struct dpaa2_io_notification_ctx *nctx;
struct dpaa2_eth_channel *channel;
@@ -2449,7 +2449,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
cpumask_clear(&priv->dpio_cpumask);
for_each_online_cpu(i) {
/* Try to allocate a channel */
- channel = alloc_channel(priv);
+ channel = dpaa2_eth_alloc_channel(priv);
if (IS_ERR_OR_NULL(channel)) {
err = PTR_ERR_OR_ZERO(channel);
if (err != -EPROBE_DEFER)
@@ -2462,7 +2462,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
nctx = &channel->nctx;
nctx->is_cdan = 1;
- nctx->cb = cdan_cb;
+ nctx->cb = dpaa2_eth_cdan_cb;
nctx->id = channel->ch_id;
nctx->desired_cpu = i;
@@ -2510,14 +2510,14 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
err_set_cdan:
dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
- free_channel(priv, channel);
+ dpaa2_eth_free_channel(priv, channel);
err_alloc_ch:
if (err == -EPROBE_DEFER) {
for (i = 0; i < priv->num_channels; i++) {
channel = priv->channel[i];
nctx = &channel->nctx;
dpaa2_io_service_deregister(channel->dpio, nctx, dev);
- free_channel(priv, channel);
+ dpaa2_eth_free_channel(priv, channel);
}
priv->num_channels = 0;
return err;
@@ -2534,7 +2534,7 @@ err_alloc_ch:
return 0;
}
-static void free_dpio(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_channel *ch;
@@ -2544,12 +2544,12 @@ static void free_dpio(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
- free_channel(priv, ch);
+ dpaa2_eth_free_channel(priv, ch);
}
}
-static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
- int cpu)
+static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
+ int cpu)
{
struct device *dev = priv->net_dev->dev.parent;
int i;
@@ -2566,7 +2566,7 @@ static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
return priv->channel[0];
}
-static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_fq *fq;
@@ -2597,13 +2597,13 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
default:
dev_err(dev, "Unknown FQ type: %d\n", fq->type);
}
- fq->channel = get_affine_channel(priv, fq->target_cpu);
+ fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
}
update_xps(priv);
}
-static void setup_fqs(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
{
int i, j;
@@ -2627,11 +2627,11 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
}
/* For each FQ, decide on which core to process incoming frames */
- set_fq_affinity(priv);
+ dpaa2_eth_set_fq_affinity(priv);
}
/* Allocate and configure one buffer pool for each interface */
-static int setup_dpbp(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
{
int err;
struct fsl_mc_device *dpbp_dev;
@@ -2690,15 +2690,15 @@ err_open:
return err;
}
-static void free_dpbp(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
{
- drain_pool(priv);
+ dpaa2_eth_drain_pool(priv);
dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
fsl_mc_object_free(priv->dpbp_dev);
}
-static int set_buffer_layout(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_buffer_layout buf_layout = {0};
@@ -2815,7 +2815,7 @@ static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
return 0;
}
-static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
{
if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
@@ -2824,7 +2824,7 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}
-static int set_pause(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_link_cfg link_cfg = {0};
@@ -2851,7 +2851,7 @@ static int set_pause(struct dpaa2_eth_priv *priv)
return 0;
}
-static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
{
struct dpni_queue_id qid = {0};
struct dpaa2_eth_fq *fq;
@@ -2893,7 +2893,7 @@ out_err:
}
/* Configure ingress classification based on VLAN PCP */
-static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpkg_profile_cfg kg_cfg = {0};
@@ -3005,7 +3005,7 @@ out_free_tbl:
}
/* Configure the DPNI object this interface is associated with */
-static int setup_dpni(struct fsl_mc_device *ls_dev)
+static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
{
struct device *dev = &ls_dev->dev;
struct dpaa2_eth_priv *priv;
@@ -3053,20 +3053,20 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
goto close;
}
- err = set_buffer_layout(priv);
+ err = dpaa2_eth_set_buffer_layout(priv);
if (err)
goto close;
- set_enqueue_mode(priv);
+ dpaa2_eth_set_enqueue_mode(priv);
/* Enable pause frame support */
if (dpaa2_eth_has_pause_support(priv)) {
- err = set_pause(priv);
+ err = dpaa2_eth_set_pause(priv);
if (err)
goto close;
}
- err = set_vlan_qos(priv);
+ err = dpaa2_eth_set_vlan_qos(priv);
if (err && err != -EOPNOTSUPP)
goto close;
@@ -3086,7 +3086,7 @@ close:
return err;
}
-static void free_dpni(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
{
int err;
@@ -3098,8 +3098,8 @@ static void free_dpni(struct dpaa2_eth_priv *priv)
dpni_close(priv->mc_io, 0, priv->mc_token);
}
-static int setup_rx_flow(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq)
+static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
@@ -3150,8 +3150,8 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
return 0;
}
-static int setup_tx_flow(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq)
+static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
@@ -3266,7 +3266,7 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
};
/* Configure the Rx hash key using the legacy API */
-static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_tc_dist_cfg dist_cfg;
@@ -3291,7 +3291,7 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Configure the Rx hash key using the new API */
-static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
@@ -3317,7 +3317,7 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Configure the Rx flow classification key */
-static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
@@ -3452,11 +3452,11 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
if (type == DPAA2_ETH_RX_DIST_HASH) {
if (dpaa2_eth_has_legacy_dist(priv))
- err = config_legacy_hash_key(priv, key_iova);
+ err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
else
- err = config_hash_key(priv, key_iova);
+ err = dpaa2_eth_config_hash_key(priv, key_iova);
} else {
- err = config_cls_key(priv, key_iova);
+ err = dpaa2_eth_config_cls_key(priv, key_iova);
}
dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
@@ -3531,7 +3531,7 @@ out:
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
* frame queues and channels
*/
-static int bind_dpni(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3579,10 +3579,10 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_fqs; i++) {
switch (priv->fq[i].type) {
case DPAA2_RX_FQ:
- err = setup_rx_flow(priv, &priv->fq[i]);
+ err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
break;
case DPAA2_TX_CONF_FQ:
- err = setup_tx_flow(priv, &priv->fq[i]);
+ err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
break;
default:
dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
@@ -3603,7 +3603,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
}
/* Allocate rings for storing incoming frame descriptors */
-static int alloc_rings(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3630,7 +3630,7 @@ err_ring:
return -ENOMEM;
}
-static void free_rings(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{
int i;
@@ -3638,7 +3638,7 @@ static void free_rings(struct dpaa2_eth_priv *priv)
dpaa2_io_store_destroy(priv->channel[i]->store);
}
-static int set_mac_addr(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3703,7 +3703,7 @@ static int set_mac_addr(struct dpaa2_eth_priv *priv)
return 0;
}
-static int netdev_init(struct net_device *net_dev)
+static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -3716,7 +3716,7 @@ static int netdev_init(struct net_device *net_dev)
net_dev->netdev_ops = &dpaa2_eth_ops;
net_dev->ethtool_ops = &dpaa2_ethtool_ops;
- err = set_mac_addr(priv);
+ err = dpaa2_eth_set_mac_addr(priv);
if (err)
return err;
@@ -3771,13 +3771,13 @@ static int netdev_init(struct net_device *net_dev)
return 0;
}
-static int poll_link_state(void *arg)
+static int dpaa2_eth_poll_link_state(void *arg)
{
struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
int err;
while (!kthread_should_stop()) {
- err = link_state_update(priv);
+ err = dpaa2_eth_link_state_update(priv);
if (unlikely(err))
return err;
@@ -3847,11 +3847,11 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
}
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
- link_state_update(netdev_priv(net_dev));
+ dpaa2_eth_link_state_update(netdev_priv(net_dev));
if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
- set_mac_addr(netdev_priv(net_dev));
- update_tx_fqids(priv);
+ dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
+ dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
if (priv->mac)
@@ -3864,7 +3864,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
return IRQ_HANDLED;
}
-static int setup_irqs(struct fsl_mc_device *ls_dev)
+static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
int err = 0;
struct fsl_mc_device_irq *irq;
@@ -3910,7 +3910,7 @@ free_mc_irq:
return err;
}
-static void add_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
int i;
struct dpaa2_eth_channel *ch;
@@ -3923,7 +3923,7 @@ static void add_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void del_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
int i;
struct dpaa2_eth_channel *ch;
@@ -3970,26 +3970,26 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
/* MC objects initialization and configuration */
- err = setup_dpni(dpni_dev);
+ err = dpaa2_eth_setup_dpni(dpni_dev);
if (err)
goto err_dpni_setup;
- err = setup_dpio(priv);
+ err = dpaa2_eth_setup_dpio(priv);
if (err)
goto err_dpio_setup;
- setup_fqs(priv);
+ dpaa2_eth_setup_fqs(priv);
- err = setup_dpbp(priv);
+ err = dpaa2_eth_setup_dpbp(priv);
if (err)
goto err_dpbp_setup;
- err = bind_dpni(priv);
+ err = dpaa2_eth_bind_dpni(priv);
if (err)
goto err_bind;
/* Add a NAPI context for each channel */
- add_ch_napi(priv);
+ dpaa2_eth_add_ch_napi(priv);
/* Percpu statistics */
priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
@@ -4012,21 +4012,21 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
- err = netdev_init(net_dev);
+ err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
/* Configure checksum offload based on current interface flags */
- err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+ err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
if (err)
goto err_csum;
- err = set_tx_csum(priv, !!(net_dev->features &
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+ err = dpaa2_eth_set_tx_csum(priv,
+ !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
if (err)
goto err_csum;
- err = alloc_rings(priv);
+ err = dpaa2_eth_alloc_rings(priv);
if (err)
goto err_alloc_rings;
@@ -4039,10 +4039,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
#endif
- err = setup_irqs(dpni_dev);
+ err = dpaa2_eth_setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
- priv->poll_thread = kthread_run(poll_link_state, priv,
+ priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
"%s_poll_link", net_dev->name);
if (IS_ERR(priv->poll_thread)) {
dev_err(dev, "Error starting polling thread\n");
@@ -4076,7 +4076,7 @@ err_connect_mac:
else
fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
- free_rings(priv);
+ dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
@@ -4086,13 +4086,13 @@ err_alloc_sgt_cache:
err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
- del_ch_napi(priv);
+ dpaa2_eth_del_ch_napi(priv);
err_bind:
- free_dpbp(priv);
+ dpaa2_eth_free_dpbp(priv);
err_dpbp_setup:
- free_dpio(priv);
+ dpaa2_eth_free_dpio(priv);
err_dpio_setup:
- free_dpni(priv);
+ dpaa2_eth_free_dpni(priv);
err_dpni_setup:
fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
@@ -4126,15 +4126,15 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
else
fsl_mc_free_irqs(ls_dev);
- free_rings(priv);
+ dpaa2_eth_free_rings(priv);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
- del_ch_napi(priv);
- free_dpbp(priv);
- free_dpio(priv);
- free_dpni(priv);
+ dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpio(priv);
+ dpaa2_eth_free_dpni(priv);
fsl_mc_portal_free(priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 8356f1fbbee1..26bd99b76765 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -316,8 +316,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
-static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -345,9 +345,9 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
return 0;
}
-static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
- struct ethtool_usrip4_spec *uip_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+ struct ethtool_usrip4_spec *uip_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
u32 tmp_value, tmp_mask;
@@ -400,9 +400,9 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
return 0;
}
-static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
- struct ethtool_tcpip4_spec *l4_mask,
- void *key, void *mask, u8 l4_proto, u64 *fields)
+static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+ struct ethtool_tcpip4_spec *l4_mask,
+ void *key, void *mask, u8 l4_proto, u64 *fields)
{
int off;
@@ -451,9 +451,9 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
return 0;
}
-static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
- struct ethtool_flow_ext *ext_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -470,9 +470,9 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
return 0;
}
-static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
- struct ethtool_flow_ext *ext_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -486,32 +486,32 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
return 0;
}
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
- u64 *fields)
+static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
+ void *mask, u64 *fields)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
- err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
- key, mask, fields);
+ err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+ key, mask, fields);
break;
case IP_USER_FLOW:
- err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
- &fs->m_u.usr_ip4_spec, key, mask, fields);
+ err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
break;
case TCP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
- key, mask, IPPROTO_TCP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+ key, mask, IPPROTO_TCP, fields);
break;
case UDP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
- key, mask, IPPROTO_UDP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+ key, mask, IPPROTO_UDP, fields);
break;
case SCTP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
- &fs->m_u.sctp_ip4_spec, key, mask,
- IPPROTO_SCTP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+ &fs->m_u.sctp_ip4_spec, key, mask,
+ IPPROTO_SCTP, fields);
break;
default:
return -EOPNOTSUPP;
@@ -521,14 +521,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
return err;
if (fs->flow_type & FLOW_EXT) {
- err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
+ err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
- err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
- fields);
+ err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
+ mask, fields);
if (err)
return err;
}
@@ -536,9 +536,9 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
return 0;
}
-static int do_cls_rule(struct net_device *net_dev,
- struct ethtool_rx_flow_spec *fs,
- bool add)
+static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *fs,
+ bool add)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
@@ -561,7 +561,7 @@ static int do_cls_rule(struct net_device *net_dev,
return -ENOMEM;
/* Fill the key and mask memory areas */
- err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
+ err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
if (err)
goto free_mem;
@@ -629,7 +629,7 @@ free_mem:
return err;
}
-static int num_rules(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
int i, rules = 0;
@@ -640,9 +640,9 @@ static int num_rules(struct dpaa2_eth_priv *priv)
return rules;
}
-static int update_cls_rule(struct net_device *net_dev,
- struct ethtool_rx_flow_spec *new_fs,
- unsigned int location)
+static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *new_fs,
+ unsigned int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;
@@ -658,13 +658,14 @@ static int update_cls_rule(struct net_device *net_dev,
/* If a rule is present at the specified location, delete it. */
if (rule->in_use) {
- err = do_cls_rule(net_dev, &rule->fs, false);
+ err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
if (err)
return err;
rule->in_use = 0;
- if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+ if (!dpaa2_eth_fs_mask_enabled(priv) &&
+ !dpaa2_eth_num_cls_rules(priv))
priv->rx_cls_fields = 0;
}
@@ -672,7 +673,7 @@ static int update_cls_rule(struct net_device *net_dev,
if (!new_fs)
return err;
- err = do_cls_rule(net_dev, new_fs, true);
+ err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
if (err)
return err;
@@ -702,7 +703,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
- rxnfc->rule_cnt = num_rules(priv);
+ rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
@@ -744,10 +745,10 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
break;
case ETHTOOL_SRXCLSRLINS:
- err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+ err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
case ETHTOOL_SRXCLSRLDEL:
- err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+ err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
break;
default:
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index bf846b42bc74..78e008b81374 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -562,10 +562,13 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
- if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
- bdp++, curidx++;
- else
- bdp = fep->tx_bd_base, curidx = 0;
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) {
+ bdp++;
+ curidx++;
+ } else {
+ bdp = fep->tx_bd_base;
+ curidx = 0;
+ }
len = skb_frag_size(frag);
CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b43dec0560a8..b98244f75ab9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -13,8 +13,6 @@
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"
-#define AE_NAME_PORT_ID_IDX 6
-
static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
{
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 22522f8a5299..b13f3a5cdf59 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -557,10 +557,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a4f1d515e5e0..47ab2a5c7391 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3093,10 +3093,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(ring->va);
-#if L1_CACHE_BYTES < 128
- prefetch(ring->va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(ring->va);
if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index 67b59d0ba769..2f89119c9b69 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_HINIC) += hinic.o
hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o hinic_sriov.o
+ hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o \
+ hinic_sriov.o hinic_debugfs.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
new file mode 100644
index 000000000000..19eb839177ec
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hinic_debugfs.h"
+
+static struct dentry *hinic_dbgfs_root;
+
+enum sq_dbg_info {
+ GLB_SQ_ID,
+ SQ_PI,
+ SQ_CI,
+ SQ_FI,
+ SQ_MSIX_ENTRY,
+};
+
+static char *sq_fields[] = {"glb_sq_id", "sq_pi", "sq_ci", "sq_fi", "sq_msix_entry"};
+
+static u64 hinic_dbg_get_sq_info(struct hinic_dev *nic_dev, struct hinic_sq *sq, int idx)
+{
+ struct hinic_wq *wq = sq->wq;
+
+ switch (idx) {
+ case GLB_SQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + sq->qid;
+ case SQ_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case SQ_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case SQ_FI:
+ return be16_to_cpu(*(__be16 *)(sq->hw_ci_addr)) & wq->mask;
+ case SQ_MSIX_ENTRY:
+ return sq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum rq_dbg_info {
+ GLB_RQ_ID,
+ RQ_HW_PI,
+ RQ_SW_CI,
+ RQ_SW_PI,
+ RQ_MSIX_ENTRY,
+};
+
+static char *rq_fields[] = {"glb_rq_id", "rq_hw_pi", "rq_sw_ci", "rq_sw_pi", "rq_msix_entry"};
+
+static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx)
+{
+ struct hinic_wq *wq = rq->wq;
+
+ switch (idx) {
+ case GLB_RQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + rq->qid;
+ case RQ_HW_PI:
+ return be16_to_cpu(*(__be16 *)(rq->pi_virt_addr)) & wq->mask;
+ case RQ_SW_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case RQ_SW_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case RQ_MSIX_ENTRY:
+ return rq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum func_tbl_info {
+ VALID,
+ RX_MODE,
+ MTU,
+ RQ_DEPTH,
+ QUEUE_NUM,
+};
+
+static char *func_table_fields[] = {"valid", "rx_mode", "mtu", "rq_depth", "cfg_q_num"};
+
+static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
+{
+ struct tag_sml_funcfg_tbl *funcfg_table_elem;
+ struct hinic_cmd_lt_rd *read_data;
+ u16 out_size = sizeof(*read_data);
+ int ret = ~0;
+ int err;
+
+ read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
+ if (!read_data)
+ return ~0;
+
+ read_data->node = TBL_ID_FUNC_CFG_SM_NODE;
+ read_data->inst = TBL_ID_FUNC_CFG_SM_INST;
+ read_data->entry_size = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+ read_data->lt_index = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ read_data->len = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+
+ err = hinic_port_msg_cmd(nic_dev->hwdev, HINIC_PORT_CMD_RD_LINE_TBL, read_data,
+ sizeof(*read_data), read_data, &out_size);
+ if (err || out_size != sizeof(*read_data) || read_data->status) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to get func table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, read_data->status, out_size);
+ kfree(read_data);
+ return ~0;
+ }
+
+ funcfg_table_elem = (struct tag_sml_funcfg_tbl *)read_data->data;
+
+ switch (idx) {
+ case VALID:
+ ret = funcfg_table_elem->dw0.bs.valid;
+ break;
+ case RX_MODE:
+ ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+ break;
+ case MTU:
+ ret = funcfg_table_elem->dw1.bs.mtu;
+ break;
+ case RQ_DEPTH:
+ ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ break;
+ case QUEUE_NUM:
+ ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+ break;
+ }
+
+ kfree(read_data);
+
+ return ret;
+}
+
+static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct hinic_debug_priv *dbg;
+ char ret_buf[20];
+ int *desc;
+ u64 out;
+ int ret;
+
+ desc = filp->private_data;
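+ /* private_data points at one slot of field_id[]; since field_id[i] == i,
+  * container_of() below recovers the owning hinic_debug_priv.
+  */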
+ dbg = container_of(desc, struct hinic_debug_priv, field_id[*desc]);
+
+ switch (dbg->type) {
+ case HINIC_DBG_SQ_INFO:
+ out = hinic_dbg_get_sq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_RQ_INFO:
+ out = hinic_dbg_get_rq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_FUNC_TABLE:
+ out = hinic_dbg_get_func_table(dbg->dev, *desc);
+ break;
+
+ default:
+ netif_warn(dbg->dev, drv, dbg->dev->netdev, "Invalid hinic debug cmd: %d\n",
+ dbg->type);
+ return -EINVAL;
+ }
+
+ ret = snprintf(ret_buf, sizeof(ret_buf), "0x%llx\n", out);
+
+ return simple_read_from_buffer(buffer, count, ppos, ret_buf, ret);
+}
+
+static const struct file_operations hinic_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hinic_dbg_cmd_read,
+};
+
+static int create_dbg_files(struct hinic_dev *dev, enum hinic_dbg_type type, void *data,
+ struct dentry *root, struct hinic_debug_priv **dbg, char **field,
+ int nfile)
+{
+ struct hinic_debug_priv *tmp;
+ int i;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->dev = dev;
+ tmp->object = data;
+ tmp->type = type;
+ tmp->root = root;
+
+ for (i = 0; i < nfile; i++) {
+ tmp->field_id[i] = i;
+ debugfs_create_file(field[i], 0400, root, &tmp->field_id[i], &hinic_dbg_cmd_fops);
+ }
+
+ *dbg = tmp;
+
+ return 0;
+}
+
+static void rem_dbg_files(struct hinic_debug_priv *dbg)
+{
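+ /* The per-queue SQ/RQ directories are owned (and removed) here; the
+  * func_table directory is torn down separately by hinic_func_tbl_dbgfs_uninit().
+  */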
+ if (dbg->type != HINIC_DBG_FUNC_TABLE)
+ debugfs_remove_recursive(dbg->root);
+
+ kfree(dbg);
+}
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id)
+{
+ struct hinic_sq *sq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ sq = dev->txqs[sq_id].sq;
+
+ sprintf(sub_dir, "0x%x", sq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->sq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_SQ_INFO, sq, root, &sq->dbg, sq_fields,
+ ARRAY_SIZE(sq_fields));
+}
+
+void hinic_sq_debug_rem(struct hinic_sq *sq)
+{
+ if (sq->dbg)
+ rem_dbg_files(sq->dbg);
+}
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id)
+{
+ struct hinic_rq *rq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ rq = dev->rxqs[rq_id].rq;
+
+ sprintf(sub_dir, "0x%x", rq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->rq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_RQ_INFO, rq, root, &rq->dbg, rq_fields,
+ ARRAY_SIZE(rq_fields));
+}
+
+void hinic_rq_debug_rem(struct hinic_rq *rq)
+{
+ if (rq->dbg)
+ rem_dbg_files(rq->dbg);
+}
+
+int hinic_func_table_debug_add(struct hinic_dev *dev)
+{
+ if (HINIC_IS_VF(dev->hwdev->hwif))
+ return 0;
+
+ return create_dbg_files(dev, HINIC_DBG_FUNC_TABLE, dev, dev->func_tbl_dbgfs, &dev->dbg,
+ func_table_fields, ARRAY_SIZE(func_table_fields));
+}
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev)
+{
+ if (!HINIC_IS_VF(dev->hwdev->hwif) && dev->dbg)
+ rem_dbg_files(dev->dbg);
+}
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->sq_dbgfs = debugfs_create_dir("SQs", nic_dev->dbgfs_root);
+}
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->sq_dbgfs);
+}
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->rq_dbgfs = debugfs_create_dir("RQs", nic_dev->dbgfs_root);
+}
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->rq_dbgfs);
+}
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ nic_dev->func_tbl_dbgfs = debugfs_create_dir("func_table", nic_dev->dbgfs_root);
+}
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ debugfs_remove_recursive(nic_dev->func_tbl_dbgfs);
+}
+
+void hinic_dbg_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->dbgfs_root = debugfs_create_dir(pci_name(nic_dev->hwdev->hwif->pdev),
+ hinic_dbgfs_root);
+}
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->dbgfs_root);
+ nic_dev->dbgfs_root = NULL;
+}
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hinic_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+}
+
+void hinic_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hinic_dbgfs_root);
+ hinic_dbgfs_root = NULL;
+}
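With the hooks above, each queue field appears under debugfs (typically mounted at /sys/kernel/debug) as <driver dir>/<PCI address>/SQs/0x<qid>/<field>, with matching RQs and func_table directories. Assuming the driver directory is "hinic" and using an illustrative PCI address and queue id, a minimal userspace sketch for reading one field could look like:

/* Hypothetical reader for one SQ debugfs field; the path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/hinic/0000:01:00.0/SQs/0x0/sq_pi", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("sq_pi = %s", buf);	/* the driver formats the value as 0x%llx */
	}
	close(fd);
	return 0;
}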
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
new file mode 100644
index 000000000000..e9e00cfa1329
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_DEBUGFS_H
+#define HINIC_DEBUGFS_H
+
+#include "hinic_dev.h"
+
+#define TBL_ID_FUNC_CFG_SM_NODE 11
+#define TBL_ID_FUNC_CFG_SM_INST 1
+
+#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
+#define HINIC_FUNCTION_CONFIGURE_TABLE 1
+
+struct hinic_cmd_lt_rd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ unsigned char node;
+ unsigned char inst;
+ unsigned char entry_size;
+ unsigned char rsvd;
+ unsigned int lt_index;
+ unsigned int offset;
+ unsigned int len;
+ unsigned char data[100];
+};
+
+struct tag_sml_funcfg_tbl {
+ union {
+ struct {
+ u32 rsvd0 :8;
+ u32 nic_rx_mode :5;
+ u32 rsvd1 :18;
+ u32 valid :1;
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+ u32 vlan_id :12;
+ u32 vlan_mode :3;
+ u32 fast_recycled_mode :1;
+ u32 mtu :16;
+ } bs;
+
+ u32 value;
+ } dw1;
+
+ u32 dw2;
+ u32 dw3;
+ u32 dw4;
+ u32 dw5;
+ u32 dw6;
+ u32 dw7;
+ u32 dw8;
+ u32 dw9;
+ u32 dw10;
+ u32 dw11;
+ u32 dw12;
+
+ union {
+ struct {
+ u32 rsvd2 :15;
+ u32 cfg_q_num :9;
+ u32 cfg_rq_depth :6;
+ u32 vhd_type :2;
+ } bs;
+
+ u32 value;
+ } dw13;
+
+ u32 dw14;
+ u32 dw15;
+};
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id);
+
+void hinic_sq_debug_rem(struct hinic_sq *sq);
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id);
+
+void hinic_rq_debug_rem(struct hinic_rq *rq);
+
+int hinic_func_table_debug_add(struct hinic_dev *dev);
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev);
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_init(struct hinic_dev *nic_dev);
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name);
+
+void hinic_dbg_unregister_debugfs(void);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index 0a1e20edf7cf..fb3e89141a0d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -58,6 +58,20 @@ struct hinic_intr_coal_info {
u8 resend_timer_cfg;
};
+enum hinic_dbg_type {
+ HINIC_DBG_SQ_INFO,
+ HINIC_DBG_RQ_INFO,
+ HINIC_DBG_FUNC_TABLE,
+};
+
+struct hinic_debug_priv {
+ struct hinic_dev *dev;
+ void *object;
+ enum hinic_dbg_type type;
+ struct dentry *root;
+ int field_id[64];
+};
+
struct hinic_dev {
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -97,6 +111,12 @@ struct hinic_dev {
int lb_test_rx_idx;
int lb_pkt_len;
u8 *lb_test_rx_buf;
+
+ struct dentry *dbgfs_root;
+ struct dentry *sq_dbgfs;
+ struct dentry *rq_dbgfs;
+ struct dentry *func_tbl_dbgfs;
+ struct hinic_debug_priv *dbg;
struct devlink *devlink;
bool cable_unplugged;
bool module_unrecognized;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 0c737765d113..239685152f6e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -465,6 +465,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
func_to_io->hwdev = hwdev;
func_to_io->sq_depth = sq_depth;
func_to_io->rq_depth = rq_depth;
+ func_to_io->global_qpn = base_qpn;
err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
ceq_msix_entries);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 701eb81e09a7..416492e48274 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -96,6 +96,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_RSS_TEMP_MGR = 49,
+ HINIC_PORT_CMD_RD_LINE_TBL = 57,
+
HINIC_PORT_CMD_RSS_CFG = 66,
HINIC_PORT_CMD_FWCTXT_INIT = 69,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 3e3fa742e476..6772d8978722 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -305,6 +305,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
func_to_io->sq_db[q_id] = db_base;
+ qp->sq.qid = q_id;
err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
sq_msix_entry,
CI_ADDR(func_to_io->ci_addr_base, q_id),
@@ -314,6 +315,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
goto err_sq_init;
}
+ qp->rq.qid = q_id;
err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
rq_msix_entry);
if (err) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
index ee6d60762d84..52159a90278a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -59,6 +59,7 @@ struct hinic_nic_cfg {
struct hinic_func_to_io {
struct hinic_hwif *hwif;
struct hinic_hwdev *hwdev;
+ u16 global_qpn;
struct hinic_ceqs ceqs;
struct hinic_wqs wqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index ca3e2d060284..0dfa51ad5855 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -81,6 +81,8 @@ struct hinic_sq {
struct hinic_wq *wq;
+ u16 qid;
+
u32 irq;
u16 msix_entry;
@@ -90,6 +92,7 @@ struct hinic_sq {
void __iomem *db_base;
struct sk_buff **saved_skb;
+ struct hinic_debug_priv *dbg;
};
struct hinic_rq {
@@ -97,6 +100,8 @@ struct hinic_rq {
struct hinic_wq *wq;
+ u16 qid;
+
struct cpumask affinity_mask;
u32 irq;
u16 msix_entry;
@@ -110,6 +115,7 @@ struct hinic_rq {
u16 *pi_virt_addr;
dma_addr_t pi_dma_addr;
+ struct hinic_debug_priv *dbg;
};
struct hinic_qp {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 501056fd32ee..797c55a1d9c6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/err.h>
+#include "hinic_debugfs.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_devlink.h"
@@ -153,6 +154,8 @@ static int create_txqs(struct hinic_dev *nic_dev)
if (!nic_dev->txqs)
return -ENOMEM;
+ hinic_sq_dbgfs_init(nic_dev);
+
for (i = 0; i < num_txqs; i++) {
struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
@@ -162,13 +165,27 @@ static int create_txqs(struct hinic_dev *nic_dev)
"Failed to init Txq\n");
goto err_init_txq;
}
+
+ err = hinic_sq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add SQ%d debug\n", i);
+ goto err_add_sq_dbg;
+ }
+
}
return 0;
+err_add_sq_dbg:
+ hinic_clean_txq(&nic_dev->txqs[i]);
err_init_txq:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
+ hinic_sq_debug_rem(nic_dev->txqs[j].sq);
hinic_clean_txq(&nic_dev->txqs[j]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
return err;
@@ -186,8 +203,12 @@ static void free_txqs(struct hinic_dev *nic_dev)
if (!nic_dev->txqs)
return;
- for (i = 0; i < num_txqs; i++)
+ for (i = 0; i < num_txqs; i++) {
+ hinic_sq_debug_rem(nic_dev->txqs[i].sq);
hinic_clean_txq(&nic_dev->txqs[i]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
nic_dev->txqs = NULL;
@@ -213,6 +234,8 @@ static int create_rxqs(struct hinic_dev *nic_dev)
if (!nic_dev->rxqs)
return -ENOMEM;
+ hinic_rq_dbgfs_init(nic_dev);
+
for (i = 0; i < num_rxqs; i++) {
struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
@@ -222,13 +245,26 @@ static int create_rxqs(struct hinic_dev *nic_dev)
"Failed to init rxq\n");
goto err_init_rxq;
}
+
+ err = hinic_rq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add RQ%d debug\n", i);
+ goto err_add_rq_dbg;
+ }
}
return 0;
+err_add_rq_dbg:
+ hinic_clean_rxq(&nic_dev->rxqs[i]);
err_init_rxq:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[j].rq);
hinic_clean_rxq(&nic_dev->rxqs[j]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
return err;
@@ -246,8 +282,12 @@ static void free_rxqs(struct hinic_dev *nic_dev)
if (!nic_dev->rxqs)
return;
- for (i = 0; i < num_rxqs; i++)
+ for (i = 0; i < num_rxqs; i++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[i].rq);
hinic_clean_rxq(&nic_dev->rxqs[i]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
nic_dev->rxqs = NULL;
@@ -1260,6 +1300,16 @@ static int nic_dev_init(struct pci_dev *pdev)
goto err_init_intr;
}
+ hinic_dbg_init(nic_dev);
+
+ hinic_func_tbl_dbgfs_init(nic_dev);
+
+ err = hinic_func_table_debug_add(nic_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add func_table debug\n");
+ goto err_add_func_table_dbg;
+ }
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -1269,6 +1319,10 @@ static int nic_dev_init(struct pci_dev *pdev)
return 0;
err_reg_netdev:
+ hinic_func_table_debug_rem(nic_dev);
+err_add_func_table_dbg:
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+ hinic_dbg_uninit(nic_dev);
hinic_free_intr_coalesce(nic_dev);
err_init_intr:
err_set_pfc:
@@ -1391,6 +1445,12 @@ static void hinic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ hinic_func_table_debug_rem(nic_dev);
+
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+
+ hinic_dbg_uninit(nic_dev);
+
hinic_free_intr_coalesce(nic_dev);
hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
@@ -1445,4 +1505,17 @@ static struct pci_driver hinic_driver = {
.sriov_configure = hinic_pci_sriov_configure,
};
-module_pci_driver(hinic_driver);
+static int __init hinic_module_init(void)
+{
+ hinic_dbg_register_debugfs(HINIC_DRV_NAME);
+ return pci_register_driver(&hinic_driver);
+}
+
+static void __exit hinic_module_exit(void)
+{
+ pci_unregister_driver(&hinic_driver);
+ hinic_dbg_unregister_debugfs();
+}
+
+module_init(hinic_module_init);
+module_exit(hinic_module_exit);
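The explicit module_init()/module_exit() pair introduced here exists only so the shared debugfs root can be created before the first probe and removed after the last remove; module_pci_driver() is shorthand for the same registration pair without those hooks, roughly (a sketch of the macro's effect, not its literal definition):

/* What module_pci_driver(hinic_driver) amounts to (sketch): */
static int __init hinic_driver_init(void)
{
	return pci_register_driver(&hinic_driver);
}
module_init(hinic_driver_init);

static void __exit hinic_driver_exit(void)
{
	pci_unregister_driver(&hinic_driver);
}
module_exit(hinic_driver_exit);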
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d3a774331afc..6b619c190239 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -104,8 +104,7 @@ static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
-static int ibmvnic_init(struct ibmvnic_adapter *);
-static int ibmvnic_reset_init(struct ibmvnic_adapter *);
+static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
@@ -297,8 +296,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
adapter->rx_pool[i].active = 0;
}
@@ -306,6 +304,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
int count = pool->size - atomic_read(&pool->available);
+ u64 handle = adapter->rx_scrq[pool->index]->handle;
struct device *dev = &adapter->vdev->dev;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -314,7 +313,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
unsigned int offset;
dma_addr_t dma_addr;
unsigned char *dst;
- u64 *handle_array;
int shift = 0;
int index;
int i;
@@ -322,10 +320,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
if (!pool->active)
return;
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_rxadd_subcrqs));
-
for (i = 0; i < count; ++i) {
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
if (!skb) {
@@ -369,8 +363,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
#endif
sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
- lpar_rc = send_subcrq(adapter, handle_array[pool->index],
- &sub_crq);
+ lpar_rc = send_subcrq(adapter, handle, &sub_crq);
if (lpar_rc != H_SUCCESS)
goto failure;
@@ -407,8 +400,7 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
int i;
adapter->replenish_task_cycles++;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
if (adapter->rx_pool[i].active)
replenish_rx_pool(adapter, &adapter->rx_pool[i]);
}
@@ -475,25 +467,23 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
+ u64 buff_size;
int rx_scrqs;
int i, j, rc;
- u64 *size_array;
if (!adapter->rx_pool)
return -1;
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
-
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ buff_size = adapter->cur_rx_buf_sz;
+ rx_scrqs = adapter->num_active_rx_pools;
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ if (rx_pool->buff_size != buff_size) {
free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rc = alloc_long_term_buff(adapter,
&rx_pool->long_term_buff,
rx_pool->size *
@@ -561,13 +551,11 @@ static int init_rx_pools(struct net_device *netdev)
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
int rxadd_subcrqs;
- u64 *size_array;
+ u64 buff_size;
int i, j;
- rxadd_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ buff_size = adapter->cur_rx_buf_sz;
adapter->rx_pool = kcalloc(rxadd_subcrqs,
sizeof(struct ibmvnic_rx_pool),
@@ -585,11 +573,11 @@ static int init_rx_pools(struct net_device *netdev)
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
i, adapter->req_rx_add_entries_per_subcrq,
- be64_to_cpu(size_array[i]));
+ buff_size);
rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
rx_pool->index = i;
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
@@ -655,7 +643,7 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool)
return -1;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
if (rc)
@@ -744,7 +732,7 @@ static int init_tx_pools(struct net_device *netdev)
int tx_subcrqs;
int i, rc;
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_subcrqs = adapter->num_active_tx_scrqs;
adapter->tx_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
@@ -980,7 +968,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
return -1;
}
- if (adapter->init_done_rc == 1) {
+ if (adapter->init_done_rc == PARTIALSUCCESS) {
/* Partial success, delay and re-send */
mdelay(1000);
resend = true;
@@ -1530,9 +1518,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- u64 *handle_array;
int index = 0;
u8 proto = 0;
+ u64 handle;
netdev_tx_t ret = NETDEV_TX_OK;
if (test_bit(0, &adapter->resetting)) {
@@ -1559,8 +1547,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ handle = tx_scrq->handle;
index = tx_pool->free_map[tx_pool->consumer_index];
@@ -1672,14 +1659,14 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
goto tx_err_out;
}
- lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq_indirect(adapter, handle,
(u64)tx_buff->indir_dma,
(u64)num_entries);
dma_unmap_single(dev, tx_buff->indir_dma,
sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
} else {
tx_buff->num_entries = num_entries;
- lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq(adapter, handle,
&tx_crq);
}
if (lpar_rc != H_SUCCESS) {
@@ -1874,7 +1861,7 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
return rc;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc)
return IBMVNIC_INIT_FAILED;
@@ -1992,7 +1979,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc) {
rc = IBMVNIC_INIT_FAILED;
goto out;
@@ -2106,7 +2093,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
return rc;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc)
return rc;
@@ -3581,8 +3568,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
if (rc) {
if (rc == H_CLOSED) {
dev_warn(dev, "CRQ Queue closed\n");
- if (test_bit(0, &adapter->resetting))
- ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ /* do not reset, report the failure, wait for passive init from server */
}
dev_warn(dev, "Send error (rc=%d)\n", rc);
@@ -3593,14 +3579,31 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
+ struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
+ int retries = 100;
+ int rc;
memset(&crq, 0, sizeof(crq));
crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
crq.generic.cmd = IBMVNIC_CRQ_INIT;
netdev_dbg(adapter->netdev, "Sending CRQ init\n");
- return ibmvnic_send_crq(adapter, &crq);
+ do {
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc != H_CLOSED)
+ break;
+ retries--;
+ msleep(50);
+ } while (retries > 0);
+
+ if (rc) {
+ dev_err(dev, "Failed to send init request, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
@@ -4305,6 +4308,11 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct net_device *netdev = adapter->netdev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
+ u64 *tx_handle_array;
+ u64 *rx_handle_array;
+ int num_tx_pools;
+ int num_rx_pools;
+ u64 *size_array;
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@ -4339,6 +4347,30 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
ibmvnic_remove(adapter->vdev);
return -EIO;
}
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ /* variable buffer sizes are not supported, so just read the
+ * first entry.
+ */
+ adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
+
+ num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+
+ tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
+
+ for (i = 0; i < num_tx_pools; i++)
+ adapter->tx_scrq[i]->handle = tx_handle_array[i];
+
+ for (i = 0; i < num_rx_pools; i++)
+ adapter->rx_scrq[i]->handle = rx_handle_array[i];
+
+ adapter->num_active_tx_scrqs = num_tx_pools;
+ adapter->num_active_rx_scrqs = num_rx_pools;
+ release_login_rsp_buffer(adapter);
release_login_buffer(adapter);
complete(&adapter->init_done);
@@ -4984,7 +5016,7 @@ map_failed:
return retrc;
}
-static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
@@ -4993,12 +5025,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- old_num_rx_queues = adapter->req_rx_queues;
- old_num_tx_queues = adapter->req_tx_queues;
+ if (reset) {
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+ reinit_completion(&adapter->init_done);
+ }
- reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc) {
+ dev_err(dev, "Send crq init failed with error %d\n", rc);
+ return rc;
+ }
+
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
return -1;
@@ -5009,13 +5048,8 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return adapter->init_done_rc;
}
- if (adapter->from_passive_init) {
- adapter->state = VNIC_OPEN;
- adapter->from_passive_init = false;
- return -1;
- }
-
- if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
+ if (reset &&
+ test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
@@ -5043,48 +5077,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return rc;
}
-static int ibmvnic_init(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- unsigned long timeout = msecs_to_jiffies(30000);
- int rc;
-
- adapter->from_passive_init = false;
-
- adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- dev_err(dev, "Initialization sequence timed out\n");
- return -1;
- }
-
- if (adapter->init_done_rc) {
- release_crq_queue(adapter);
- return adapter->init_done_rc;
- }
-
- if (adapter->from_passive_init) {
- adapter->state = VNIC_OPEN;
- adapter->from_passive_init = false;
- return -1;
- }
-
- rc = init_sub_crqs(adapter);
- if (rc) {
- dev_err(dev, "Initialization of sub crqs failed\n");
- release_crq_queue(adapter);
- return rc;
- }
-
- rc = init_sub_crq_irqs(adapter);
- if (rc) {
- dev_err(dev, "Failed to initialize sub crq irqs\n");
- release_crq_queue(adapter);
- }
-
- return rc;
-}
-
static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -5147,7 +5139,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_init_fail;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
} while (rc == EAGAIN);
@@ -5297,8 +5289,7 @@ static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
ret += 4 * PAGE_SIZE; /* the scrq message queue */
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
ret += adapter->rx_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f8416e1d4cf0..8da98794eda9 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -875,6 +875,7 @@ struct ibmvnic_sub_crq_queue {
struct ibmvnic_adapter *adapter;
atomic_t used;
char name[32];
+ u64 handle;
};
struct ibmvnic_long_term_buff {
@@ -1075,6 +1076,7 @@ struct ibmvnic_adapter {
u32 num_active_rx_napi;
u32 num_active_tx_scrqs;
u32 num_active_tx_pools;
+ u32 cur_rx_buf_sz;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index d88dd41a9442..99b8252eb969 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -310,10 +310,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 825c104ecba1..dc1577156bb6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1967,7 +1967,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
- /* If there is a AF_XDP UMEM attached to any of Rx rings,
+ /* If there is a AF_XDP page pool attached to any of Rx rings,
* disallow changing the number of descriptors -- regardless
* if the netdev is running or not.
*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e433fdbf2c3..05c6d3ea11e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3122,12 +3122,12 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
- * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
* @ring: The Tx or Rx ring
*
- * Returns the UMEM or NULL.
+ * Returns the AF_XDP buffer pool or NULL.
**/
-static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
{
bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
int qid = ring->queue_index;
@@ -3138,7 +3138,7 @@ static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}
/**
@@ -3157,7 +3157,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
if (ring_is_xdp(ring))
- ring->xsk_umem = i40e_xsk_umem(ring);
+ ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
@@ -3280,12 +3280,13 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
kfree(ring->rx_bi);
- ring->xsk_umem = i40e_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
ret = i40e_alloc_rx_bi_zc(ring);
if (ret)
return ret;
- ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -3368,8 +3369,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ if (ring->xsk_pool) {
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
} else {
ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
@@ -3380,7 +3381,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
*/
dev_info(&vsi->back->pdev->dev,
"Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
+ ring->xsk_pool ? "AF_XDP ZC enabled " : "",
ring->queue_index, pf_q);
}
@@ -12644,7 +12645,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
*/
if (need_reset && prog)
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->xdp_rings[i]->xsk_umem)
+ if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
@@ -12923,8 +12924,8 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3e5c566ceb01..91ab824926b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,7 +636,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
unsigned long bi_size;
u16 i;
- if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
i40e_xsk_clean_tx_ring(tx_ring);
} else {
/* ring already cleared, nothing to do */
@@ -1335,7 +1335,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
rx_ring->skb = NULL;
}
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -1369,7 +1369,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
}
skip_free:
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
i40e_clear_rx_bi_zc(rx_ring);
else
i40e_clear_rx_bi(rx_ring);
@@ -1992,10 +1992,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2078,10 +2076,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
+
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -2579,7 +2575,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
i40e_clean_xdp_tx_irq(vsi, ring) :
i40e_clean_tx_irq(vsi, ring, budget);
@@ -2607,7 +2603,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
i40e_clean_rx_irq_zc(ring, budget_per_ring) :
i40e_clean_rx_irq(ring, budget_per_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4036893d6825..703b644fd71f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -388,7 +388,7 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 8ce57b507a21..2a1153d8957b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -29,14 +29,16 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
- * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
* @vsi: Current VSI
- * @umem: UMEM
- * @qid: Rx ring to associate UMEM to
+ * @pool: buffer pool
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
@@ -53,7 +55,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;
@@ -80,21 +82,22 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
}
/**
- * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
+ * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
+ * certain ring/qid
* @vsi: Current VSI
- * @qid: Rx ring to associate UMEM to
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
struct net_device *netdev = vsi->netdev;
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
int err;
- umem = xdp_get_umem_from_qid(netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -106,7 +109,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
clear_bit(qid, vsi->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -118,20 +121,21 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
/**
- * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
+ * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
+ * a ring/qid
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, or NULL to disable
- * @qid: Rx ring to (dis)associate UMEM (from)to
+ * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to (dis)associate buffer pool (from)to
*
- * This function enables or disables a UMEM to a certain ring.
+ * This function enables or disables an AF_XDP buffer pool for a certain ring.
*
* Returns 0 on success, <0 on failure
**/
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
- i40e_xsk_umem_disable(vsi, qid);
+ return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
+ i40e_xsk_pool_disable(vsi, qid);
}
/**
@@ -191,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = i40e_rx_bi(rx_ring, ntu);
do {
- xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp) {
ok = false;
goto no_buffers;
@@ -310,7 +314,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
(*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi);
+ xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
@@ -358,11 +362,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -385,11 +389,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -416,7 +420,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
I40E_TXD_QW1_CMD_SHIFT);
i40e_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
}
@@ -448,7 +452,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
**/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
u32 i, completed_frames, xsk_frames = 0;
u32 head_idx = i40e_get_head(tx_ring);
struct i40e_tx_buffer *tx_bi;
@@ -488,13 +492,13 @@ skip:
tx_ring->next_to_clean -= tx_ring->count;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
i40e_arm_wb(tx_ring, vsi, completed_frames);
out_xmit:
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
@@ -526,7 +530,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
if (queue_id >= vsi->num_queue_pairs)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -565,7 +569,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
struct i40e_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -585,14 +589,15 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
}
/**
- * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
+ * buffer pool attached
* @vsi: vsi
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
@@ -600,7 +605,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (xdp_get_umem_from_qid(netdev, i))
+ if (xsk_get_pool_from_qid(netdev, i))
return true;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index c524c142127f..7adfd8539247 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -5,12 +5,12 @@
#define _I40E_XSK_H_
struct i40e_vsi;
-struct xdp_umem;
+struct xsk_buff_pool;
struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index ca041b39ffda..256fa07d54d5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1309,10 +1309,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -1376,10 +1373,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
+
/* build an skb around the page buffer */
skb = build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fe140ff38f74..65583f0a1797 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -321,9 +321,9 @@ struct ice_vsi {
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
+ struct xsk_buff_pool **xsk_pools;
+ u16 num_xsk_pools_used;
+ u16 num_xsk_pools;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -507,25 +507,25 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
}
/**
- * ice_xsk_umem - get XDP UMEM bound to a ring
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring - ring to use
*
- * Returns a pointer to xdp_umem structure if there is an UMEM present,
+ * Returns a pointer to the xsk_buff_pool structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
- struct xdp_umem **umems = ring->vsi->xsk_umems;
+ struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
- return umems[qid];
+ return pools[qid];
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 87008476d8fe..fe4320e2d1f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -308,12 +308,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
- ring->xsk_umem = ice_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
- xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
NULL);
if (err)
return err;
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -417,9 +417,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
- dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ if (ring->xsk_pool) {
+ if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
+ dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index);
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
@@ -428,7 +428,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
err = ice_alloc_rx_bufs_zc(ring, num_bufs);
if (err)
- dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index f2682776f8c8..feeb5cdccdc5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1743,7 +1743,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
return ret;
for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
+ vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4634b48949bb..2297ee7dba26 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2273,7 +2273,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
return 0;
@@ -2517,13 +2517,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running)
ret = ice_up(vsi);
- if (!ret && prog && vsi->xsk_umems) {
+ if (!ret && prog && vsi->xsk_pools) {
int i;
ice_for_each_rxq(vsi, i) {
struct ice_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
}
}
@@ -2549,8 +2549,8 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_SETUP_XSK_UMEM:
- return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9d0d6b0025cf..eae75260fe20 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -145,7 +145,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
- if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
ice_xsk_clean_xdp_ring(tx_ring);
goto tx_skip_free;
}
@@ -375,7 +375,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
@@ -919,10 +919,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -964,10 +961,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif /* L1_CACHE_BYTES */
+ net_prefetch(xdp->data);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
@@ -1616,7 +1610,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ice_clean_tx_irq_zc(ring, budget) :
ice_clean_tx_irq(ring, budget);
@@ -1646,7 +1640,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_umem ?
+ cleaned = ring->xsk_pool ?
ice_clean_rx_irq_zc(ring, budget_per_ring) :
ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
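The open-coded prefetch pairs being replaced here (and in the igb/igc/ixgbe/ixgbevf hunks below) recur across these Intel drivers; net_prefetch() is the shared helper that collapses them. A minimal sketch of that helper, assuming it keeps the exact semantics of the code it replaces (prefetch the first cache line, plus the following one on CPUs whose cache lines are shorter than 128 bytes):

	static inline void net_prefetch(void *p)
	{
		prefetch(p);
	#if L1_CACHE_BYTES < 128
		prefetch((u8 *)p + L1_CACHE_BYTES);
	#endif
	}

The behaviour is unchanged; the conversion only removes the duplicated #if L1_CACHE_BYTES < 128 blocks from each Rx path.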
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 51b4df7a59d2..e9f60d550fcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -295,7 +295,7 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 20ac5fca68c6..797886524054 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
err = ice_setup_rx_ctx(rx_ring);
@@ -260,21 +260,21 @@ free_buf:
}
/**
- * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
- * @vsi: VSI to allocate the UMEM on
+ * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
+ * @vsi: VSI to allocate the buffer pool on
*
* Returns 0 on success, negative on error
*/
-static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
- if (vsi->xsk_umems)
+ if (vsi->xsk_pools)
return 0;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
+ if (!vsi->xsk_pools) {
+ vsi->num_xsk_pools = 0;
return -ENOMEM;
}
@@ -282,73 +282,73 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
+ * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
 * @vsi: VSI from which the buffer pool will be removed
- * @qid: Ring/qid associated with the UMEM
+ * @qid: Ring/qid associated with the buffer pool
*/
-static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
+ vsi->xsk_pools[qid] = NULL;
+ vsi->num_xsk_pools_used--;
- if (vsi->num_xsk_umems_used == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
+ if (vsi->num_xsk_pools_used == 0) {
+ kfree(vsi->xsk_pools);
+ vsi->xsk_pools = NULL;
+ vsi->num_xsk_pools = 0;
}
}
/**
- * ice_xsk_umem_disable - disable a UMEM region
+ * ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
+ !vsi->xsk_pools[qid])
return -EINVAL;
- xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
- ice_xsk_remove_umem(vsi, qid);
+ xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
+ ice_xsk_remove_pool(vsi, qid);
return 0;
}
/**
- * ice_xsk_umem_enable - enable a UMEM region
+ * ice_xsk_pool_enable - enable a buffer pool region
* @vsi: Current VSI
- * @umem: pointer to a requested UMEM region
+ * @pool: pointer to a requested buffer pool region
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
static int
-ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- if (!vsi->num_xsk_umems)
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
- if (qid >= vsi->num_xsk_umems)
+ if (!vsi->num_xsk_pools)
+ vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_pools)
return -EINVAL;
- err = ice_xsk_alloc_umems(vsi);
+ err = ice_xsk_alloc_pools(vsi);
if (err)
return err;
- if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ if (vsi->xsk_pools && vsi->xsk_pools[qid])
return -EBUSY;
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
+ vsi->xsk_pools[qid] = pool;
+ vsi->num_xsk_pools_used++;
- err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -357,17 +357,17 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
}
/**
- * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @pool: buffer pool to enable/associate to a ring, NULL to disable
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
- bool if_running, umem_present = !!umem;
- int ret = 0, umem_failure = 0;
+ bool if_running, pool_present = !!pool;
+ int ret = 0, pool_failure = 0;
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
@@ -375,26 +375,26 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
- goto xsk_umem_if_up;
+ goto xsk_pool_if_up;
}
}
- umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
- ice_xsk_umem_disable(vsi, qid);
+ pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
+ ice_xsk_pool_disable(vsi, qid);
-xsk_umem_if_up:
+xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
- if (!ret && umem_present)
+ if (!ret && pool_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
- if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
- umem_present ? "en" : "dis", umem_failure);
- return umem_failure;
+ if (pool_failure) {
+ netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
+ pool_present ? "en" : "dis", pool_failure);
+ return pool_failure;
}
return ret;
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
rx_buf = &rx_ring->rx_buf[ntu];
do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) {
ret = true;
break;
@@ -595,7 +595,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
}
return budget > 0 && work_done;
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
xdp_ring->next_to_clean = ntc;
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -814,7 +814,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (queue_id >= vsi->num_txq)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -833,20 +833,20 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
}
/**
- * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
* @vsi: VSI to be checked
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buff pool attached
*/
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
int i;
- if (!vsi->xsk_umems)
+ if (!vsi->xsk_pools)
return false;
- for (i = 0; i < vsi->num_xsk_umems; i++) {
- if (vsi->xsk_umems[i])
+ for (i = 0; i < vsi->num_xsk_pools; i++) {
+ if (vsi->xsk_pools[i])
return true;
}
@@ -854,7 +854,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
}
/**
- * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
@@ -872,7 +872,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
}
/**
- * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index fc1a06b4df36..fad783690134 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -9,7 +9,8 @@
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
@@ -19,8 +20,8 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
static inline int
-ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
- struct xdp_umem __always_unused *umem,
+ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
+ struct xsk_buff_pool __always_unused *pool,
u16 __always_unused qid)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d9c3a6b169f9..e1e37d0b7703 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8046,10 +8046,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
@@ -8103,10 +8100,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* build an skb around the page buffer */
skb = build_skb(va - IGB_SKB_PAD, truesize);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9593aa4eea36..c6968fdb6caa 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1550,10 +1550,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* build an skb around the page buffer */
skb = build_skb(va - IGC_SKB_PAD, truesize);
@@ -1589,10 +1586,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1e8a809233a0..de0fc6ecf491 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -350,7 +350,7 @@ struct ixgbe_ring {
struct ixgbe_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2f8a4cfc5fa1..0b675c34ce49 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2095,10 +2095,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2161,10 +2159,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
 /* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -3156,7 +3151,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
ixgbe_clean_tx_irq(q_vector, ring, budget);
@@ -3176,7 +3171,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
ixgbe_clean_rx_irq_zc(q_vector, ring,
per_ring_budget) :
ixgbe_clean_rx_irq(q_vector, ring,
@@ -3471,9 +3466,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
- ring->xsk_umem = NULL;
+ ring->xsk_pool = NULL;
if (ring_is_xdp(ring))
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
@@ -3713,8 +3708,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (rx_ring->xsk_umem) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
+ if (rx_ring->xsk_pool) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4059,12 +4054,12 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u8 reg_idx = ring->reg_idx;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL));
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4119,8 +4114,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
- if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
@@ -4142,7 +4137,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- if (ring->xsk_umem)
+ if (ring->xsk_pool)
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
else
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
@@ -5292,7 +5287,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ixgbe_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -5984,7 +5979,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (tx_ring->xsk_umem) {
+ if (tx_ring->xsk_pool) {
ixgbe_xsk_clean_tx_ring(tx_ring);
goto out;
}
@@ -10146,7 +10141,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
*/
if (need_reset && prog)
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->xdp_ring[i]->xsk_umem)
+ if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
@@ -10160,8 +10155,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 7887ae4aaf4f..2aeec78029bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -28,9 +28,10 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid);
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ec7121f352e2..3771857cf887 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -8,8 +8,8 @@
#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
bool xdp_on = READ_ONCE(adapter->xdp_prog);
int qid = ring->ring_idx;
@@ -17,11 +17,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(adapter->netdev, qid);
+ return xsk_get_pool_from_qid(adapter->netdev, qid);
}
-static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem,
+static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = adapter->netdev;
@@ -35,7 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err)
return err;
@@ -59,13 +59,13 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
- umem = xdp_get_umem_from_qid(adapter->netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(adapter->netdev) &&
@@ -75,7 +75,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid);
clear_bit(qid, adapter->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -83,11 +83,12 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
return 0;
}
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
- ixgbe_xsk_umem_disable(adapter, qid);
+ return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
+ ixgbe_xsk_pool_disable(adapter, qid);
}
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
@@ -149,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
i -= rx_ring->count;
do {
- bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!bi->xdp) {
ok = false;
break;
@@ -286,7 +287,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp);
+ xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (xdp_res) {
@@ -344,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -373,6 +374,7 @@ void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
+ struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_bi;
bool work_done = true;
@@ -387,12 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
- desc.len);
+ dma = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
@@ -418,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(pool);
}
return !!budget && work_done;
@@ -439,7 +440,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
unsigned int total_packets = 0, total_bytes = 0;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -484,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
q_vector->tx.total_packets += total_packets;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
@@ -511,7 +512,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
return -ENETDOWN;
- if (!ring->xsk_umem)
+ if (!ring->xsk_pool)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
@@ -526,7 +527,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -546,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a428113e6d54..50afec43e001 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -866,10 +866,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -947,10 +945,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
* have a consumer accessing first few bytes of meta data,
* and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 32753cc771bf..ecb5f4616a36 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -967,6 +967,7 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6e140d1b8967..ee8b6a9037ce 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -57,13 +57,7 @@ static struct {
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
+static void mvpp2_acpi_start(struct mvpp2_port *port);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -1485,8 +1479,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port,
else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ if (phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
val |= MVPP2_GMAC_PCS_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -4007,17 +4001,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
if (port->phylink) {
phylink_start(port->phylink);
} else {
- /* Phylink isn't used as of now for ACPI, so the MAC has to be
- * configured manually when the interface is started. This will
- * be removed as soon as the phylink ACPI support lands in.
- */
- struct phylink_link_state state = {
- .interface = port->phy_interface,
- };
- mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(&port->phylink_config, NULL,
- MLO_AN_INBAND, port->phy_interface,
- SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+ mvpp2_acpi_start(port);
}
netif_tx_start_all_queues(port->dev);
@@ -5392,6 +5376,155 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
+static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, phylink_pcs);
+}
+
+static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
+
+ state->pause = 0;
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_TX;
+ if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_get_state = mvpp2_xlg_pcs_get_state,
+ .pcs_config = mvpp2_xlg_pcs_config,
+};
+
+static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+
+ state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
+ state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
+ state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ state->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ state->speed = SPEED_2500;
+ break;
+ default:
+ if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
+ state->speed = SPEED_1000;
+ else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+ }
+
+ state->pause = 0;
+ if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+ if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
+ MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+
+ if (phylink_autoneg_inband(mode)) {
+ mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVPP2_GMAC_IN_BAND_AUTONEG;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+ /* The FLOW_CTRL_AUTONEG bit selects whether the hardware
+ * automatically negotiates the GMAC pause modes or whether
+ * the bits in MVPP22_GMAC_CTRL_4_REG control them manually.
+ */
+ if (permit_pause_to_mac)
+ val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+
+ /* Configure advertisement bits */
+ mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
+ if (phylink_test(advertising, Pause))
+ val |= MVPP2_GMAC_FC_ADV_EN;
+ if (phylink_test(advertising, Asym_Pause))
+ val |= MVPP2_GMAC_FC_ADV_ASM_EN;
+ }
+ } else {
+ val = 0;
+ }
+
+ old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = an ^ old_an;
+ if (changed)
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* We are only interested in the advertisement bits changing */
+ return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
+}
+
+static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_get_state = mvpp2_gmac_pcs_get_state,
+ .pcs_config = mvpp2_gmac_pcs_config,
+ .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
+};
+
static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
@@ -5480,89 +5613,6 @@ empty_set:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- state->speed = SPEED_10000;
- state->duplex = 1;
- state->an_complete = 1;
-
- val = readl(port->base + MVPP22_XLG_STATUS);
- state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
-
- state->pause = 0;
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_TX;
- if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_RX;
-}
-
-static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_STATUS0);
-
- state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
- state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
- state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_1000BASEX:
- state->speed = SPEED_1000;
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- state->speed = SPEED_2500;
- break;
- default:
- if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
- state->speed = SPEED_1000;
- else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
- }
-
- state->pause = 0;
- if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
- state->pause |= MLO_PAUSE_RX;
- if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
- state->pause |= MLO_PAUSE_TX;
-}
-
-static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
-
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
- u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
- mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
-
- if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
- mvpp22_xlg_pcs_get_state(port, state);
- return;
- }
- }
-
- mvpp2_gmac_pcs_get_state(port, state);
-}
-
-static void mvpp2_mac_an_restart(struct phylink_config *config)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -5586,23 +5636,16 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
- u32 old_an, an;
u32 old_ctrl0, ctrl0;
u32 old_ctrl2, ctrl2;
u32 old_ctrl4, ctrl4;
- old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
- MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
- MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -5624,12 +5667,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
}
- /* Configure advertisement bits */
- if (phylink_test(state->advertising, Pause))
- an |= MVPP2_GMAC_FC_ADV_EN;
- if (phylink_test(state->advertising, Asym_Pause))
- an |= MVPP2_GMAC_FC_ADV_ASM_EN;
-
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - no in-band AN, nothing to do, leave the
@@ -5638,14 +5675,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII in-band mode receives the speed and duplex from
* the PHY. Flow control information is not received. */
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_AN_SPEED_EN |
- MVPP2_GMAC_AN_DUPLEX_EN;
} else if (phy_interface_mode_is_8023z(state->interface)) {
/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
* they negotiate duplex: they are always operating with a fixed
@@ -5653,42 +5682,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
* speed and full duplex here.
*/
ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->pause & MLO_PAUSE_AN && state->an_enabled)
- an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
- }
-
-/* Some fields of the auto-negotiation register require the port to be down when
- * their value is updated.
- */
-#define MVPP2_GMAC_AN_PORT_DOWN_MASK \
- (MVPP2_GMAC_IN_BAND_AUTONEG | \
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
- MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
- MVPP2_GMAC_AN_DUPLEX_EN)
-
- if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
- (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
- (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
- /* Force link down */
- old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- /* Set the GMAC in a reset state - do this in a way that
- * ensures we clear it below.
- */
- old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
- writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
}
if (old_ctrl0 != ctrl0)
@@ -5697,41 +5690,85 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
if (old_ctrl4 != ctrl4)
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
- if (old_an != an)
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- MVPP2_GMAC_PORT_RESET_MASK)
- continue;
- }
}
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- bool change_interface = port->phy_interface != state->interface;
/* Check for invalid configuration */
- if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
+ if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
- return;
+ return -EINVAL;
+ }
+
+ if (port->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode to ensure we do not change the configuration
+ * while the hardware is indicating link is up. We force both
+ * XLG and GMAC down to ensure that they're both in a known
+ * state.
+ */
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN,
+ MVPP2_GMAC_FORCE_LINK_DOWN);
+
+ if (mvpp2_port_supports_xlg(port))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
}
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
- if (port->priv->hw_version == MVPP22 && change_interface) {
- mvpp22_gop_mask_irq(port);
+ if (port->phy_interface != interface) {
+ /* Place GMAC into reset */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK,
+ MVPP2_GMAC_PORT_RESET_MASK);
- port->phy_interface = state->interface;
+ if (port->priv->hw_version == MVPP22) {
+ mvpp22_gop_mask_irq(port);
- /* Reconfigure the serdes lanes */
- phy_power_off(port->comphy);
- mvpp22_mode_reconfigure(port);
+ phy_power_off(port->comphy);
+ }
}
+ /* Select the appropriate PCS operations depending on the
+ * configured interface mode. We will only switch to a mode
+ * that the validate() checks have already passed.
+ */
+ if (mvpp2_is_xlg(interface))
+ port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
+ else
+ port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
+
+ return 0;
+}
+
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ int ret;
+
+ ret = mvpp2__mac_prepare(config, mode, interface);
+ if (ret == 0)
+ phylink_set_pcs(port->phylink, &port->phylink_pcs);
+
+ return ret;
+}
+
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
/* mac (re)configuration */
if (mvpp2_is_xlg(state->interface))
mvpp2_xlg_config(port, mode, state);
@@ -5742,11 +5779,51 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
+}
+
+static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ if (port->priv->hw_version == MVPP22 &&
+ port->phy_interface != interface) {
+ port->phy_interface = interface;
+
+ /* Reconfigure the serdes lanes */
+ mvpp22_mode_reconfigure(port);
- if (port->priv->hw_version == MVPP22 && change_interface)
+ /* Unmask interrupts */
mvpp22_gop_unmask_irq(port);
+ }
+
+ if (!mvpp2_is_xlg(interface)) {
+ /* Release GMAC reset and wait */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK, 0);
+
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ MVPP2_GMAC_PORT_RESET_MASK)
+ continue;
+ }
mvpp2_port_enable(port);
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
+ else
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN, 0);
+ }
+
+ return 0;
}
static void mvpp2_mac_link_up(struct phylink_config *config,
@@ -5843,13 +5920,36 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.validate = mvpp2_phylink_validate,
- .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
- .mac_an_restart = mvpp2_mac_an_restart,
+ .mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
+ .mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
};
+/* Work-around for ACPI */
+static void mvpp2_acpi_start(struct mvpp2_port *port)
+{
+ /* Phylink isn't used as of now for ACPI, so the MAC has to be
+ * configured manually when the interface is started. This will
+ * be removed as soon as the phylink ACPI support lands in.
+ */
+ struct phylink_link_state state = {
+ .interface = port->phy_interface,
+ };
+ mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+ port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
+ port->phy_interface,
+ state.advertising, false);
+ mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
+ MLO_AN_INBAND, port->phy_interface,
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+}
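mvpp2_acpi_start() above chains the new callbacks in the same order phylink is expected to use them for a major reconfiguration; roughly (a sketch of the sequence, not code from this driver):

	mac_prepare(config, mode, interface);	/* force link down, select PCS ops */
	mac_config(config, mode, &state);	/* program XLG/GMAC for the new mode */
	pcs_config(pcs, mode, interface, advertising, permit_pause_to_mac);
	mac_finish(config, mode, interface);	/* release resets, re-enable the port */
	/* then mac_link_up()/mac_link_down(), or pcs_get_state() for in-band modes */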
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1b25948c662b..0bc2410c8949 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index a4e65da8d95b..8f17e26dca53 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -468,6 +468,35 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
}
}
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ /* Disable inbound PTP stamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 394f96591feb..27ca3291682b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -58,8 +58,10 @@
#define CGXX_SMUX_RX_FRM_CTL 0x20020
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
@@ -139,4 +141,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 6dfd0f90cd70..4aaef0a2b51c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -127,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -143,6 +144,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
@@ -213,6 +216,8 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
@@ -858,4 +863,20 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+enum ptp_op {
+ PTP_OP_ADJFINE = 0,
+ PTP_OP_GET_CLOCK = 1,
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ u8 op;
+ s64 scaled_ppm;
+};
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ u64 clk;
+};
+
#endif /* MBOX_H */
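On the consumer side, a netdev driver fills a ptp_req and sends it through the mailbox. A rough sketch of an adjfine request follows; the otx2_mbox_alloc_msg_ptp_op()/otx2_sync_mbox_msg() wrappers and the pfvf context are assumed here and are not part of this header:

	struct ptp_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->op = PTP_OP_ADJFINE;
	req->scaled_ppm = scaled_ppm;

	err = otx2_sync_mbox_msg(&pfvf->mbox);	/* serviced by rvu_mbox_handler_ptp_op() */
	mutex_unlock(&pfvf->mbox.lock);
	return err;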
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
new file mode 100644
index 000000000000..f69f4f35ae48
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptp.h"
+#include "mbox.h"
+#include "rvu.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define RST_MUL_BITS GENMASK_ULL(38, 33)
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 get_clock_rate(void)
+{
+ u64 cfg, ret = CLOCK_BASE_RATE * 16;
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ /* To get the input clock frequency with which the PTP co-processor
+ * block is running, the base frequency (50 MHz) needs to be multiplied
+ * by the multiplier bits present in the RST_BOOT register of the RESET
+ * block. Hence the code below reads the multiplier bits from the RESET
+ * PCI device present in the system.
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ cfg = readq(base + RST_BOOT);
+ ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
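For example (the multiplier value is illustrative, not read from any particular board): RST_MUL_BITS = 30 in RST_BOOT would give a PTP co-processor clock of 50 MHz * 30 = 1.5 GHz, while the error paths fall back to the default initialised at the top of the function, 50 MHz * 16 = 800 MHz.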
+
+struct ptp *ptp_get(void)
+{
+ struct pci_dev *pdev;
+ struct ptp *ptp;
+
+ /* If the PTP PCI device is found on the system and the ptp
+ * driver is bound to it, then the PTP PCI device is returned
+ * to the caller (the rvu driver).
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u64 comp;
+ u64 adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. Typical convention is that it
+ * represents the number of nanoseconds between each cycle. In this
+ * convention the compensation value is a 64-bit fixed-point number,
+ * where the upper 32 bits are nanoseconds and the lower 32 bits are
+ * fractions of a nanosecond.
+ * scaled_ppm represents the ratio, in "parts per million", by which
+ * the compensation value should be corrected.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated
+ * initially in the probe function.
+ */
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
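
The conversion above first turns scaled_ppm into parts per billion (multiplying by 125 and shifting right by 13 approximates * 1000 / 2^16) and then scales the base compensation value by that ratio. A standalone sketch of the same arithmetic, assuming a hypothetical 1 GHz coprocessor clock and a +10 ppm request:

/* Sketch only: reproduce the fixed-point math from ptp_adjfine() for
 * clock_rate = 1 GHz (assumed) and scaled_ppm = +10 ppm * 2^16.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock_rate = 1000000000ULL;                /* assumed 1 GHz */
	int64_t scaled_ppm = 10 * 65536;                    /* +10 ppm in ppm << 16 */
	uint64_t comp = (1000000000ULL << 32) / clock_rate; /* 1 ns in 32.32 format */
	int64_t ppb = ((1 + scaled_ppm) * 125) >> 13;       /* ~10000 ppb */
	uint64_t adj = comp * ppb / 1000000000ULL;          /* fractional-ns delta */

	/* prints comp=4294967296 adj=42949 new=4295010245 */
	printf("comp=%llu adj=%llu new=%llu\n",
	       (unsigned long long)comp, (unsigned long long)adj,
	       (unsigned long long)(comp + adj));
	return 0;
}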
+
+static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+{
+ /* Return the current PTP clock */
+ *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ptp *ptp;
+ u64 clock_comp;
+ u64 clock_cfg;
+ int err;
+
+ ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ ptp->clock_rate = get_clock_rate();
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ pci_set_drvdata(pdev, ptp);
+
+ return 0;
+
+error_free:
+ devm_kfree(dev, ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+ /* This function is the PTP mailbox handler invoked by AF
+ * consumers/netdev drivers via the mailbox mechanism. It is used
+ * by the netdev driver to read the PTP clock and to set frequency
+ * adjustments. Since the mailbox can be invoked without knowing
+ * whether the AF is bound to a ptp device, the validation below
+ * is needed as a first step.
+ */
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ switch (req->op) {
+ case PTP_OP_ADJFINE:
+ err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
+ break;
+ case PTP_OP_GET_CLOCK:
+ err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
new file mode 100644
index 000000000000..878bc395d28f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef PTP_H
+#define PTP_H
+
+#include <linux/timecounter.h>
+#include <linux/time64.h>
+#include <linux/spinlock.h>
+
+struct ptp {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ u32 clock_rate;
+};
+
+struct ptp *ptp_get(void);
+void ptp_put(struct ptp *ptp);
+
+extern struct pci_driver ptp_driver;
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 557e4292c846..c3ef73ae782c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -18,6 +18,7 @@
#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
+#include "ptp.h"
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -2565,13 +2566,21 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+ if (err == -EPROBE_DEFER)
+ goto err_release_regions;
+ rvu->ptp = NULL;
+ }
+
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
if (!rvu->afreg_base || !rvu->pfreg_base) {
dev_err(dev, "Unable to map admin function CSRs, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_put_ptp;
}
/* Store module params in rvu structure */
@@ -2586,7 +2595,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_setup_hw_resources(rvu);
if (err)
- goto err_release_regions;
+ goto err_put_ptp;
/* Init mailbox btw AF and PFs */
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
@@ -2626,6 +2635,8 @@ err_hwsetup:
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+err_put_ptp:
+ ptp_put(rvu->ptp);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2651,6 +2662,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+ ptp_put(rvu->ptp);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -2676,9 +2688,19 @@ static int __init rvu_init_module(void)
if (err < 0)
return err;
+ err = pci_register_driver(&ptp_driver);
+ if (err < 0)
+ goto ptp_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
- pci_unregister_driver(&cgx_driver);
+ goto rvu_err;
+
+ return 0;
+rvu_err:
+ pci_unregister_driver(&ptp_driver);
+ptp_err:
+ pci_unregister_driver(&cgx_driver);
return err;
}
@@ -2686,6 +2708,7 @@ static int __init rvu_init_module(void)
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index dcf25a092008..05da7a91944a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -289,6 +289,8 @@ struct rvu_fwdata {
u64 reserved[FWDATA_RESERVED_MEM];
};
+struct ptp;
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -337,6 +339,8 @@ struct rvu {
/* Firmware data */
struct rvu_fwdata *fwdata;
+ struct ptp *ptp;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -469,6 +473,7 @@ int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f3c82e489897..fe3389c144b5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -509,6 +509,45 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ /* If PTP is enabled then inform NPC that packets to be
+ * parsed by this PF will have their data shifted by 8 bytes;
+ * if PTP is disabled then no shift is required.
+ */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 01a793105599..08181fc5f5d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -3319,6 +3319,49 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_ctx_free(rvu, pfvf);
}
+#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
+
+static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int nixlf;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (enable)
+ cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
+ else
+ cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0a214084406a..e2e585d4de9b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -27,6 +27,7 @@
#define NIXLF_PROMISC_ENTRY 2
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+#define NPC_HW_TSTAMP_OFFSET 8
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc);
@@ -61,6 +62,36 @@ int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
return -1;
}
+#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20)
+
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
+{
+ int pkind, blkaddr;
+ u64 val;
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
+ return -EINVAL;
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
+ /* If timestamp is enabled then configure NPC to shift 8 bytes */
+ if (enable)
+ val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
+ NPC_HW_TSTAMP_OFFSET);
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+
+ return 0;
+}
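
The pointer-advance value programmed above tells the parser to skip the 8-byte timestamp before parsing the packet. A tiny sketch of the bitfield manipulation, using the mask and offset defined in this file on an otherwise-zero (hypothetical) ACTION0 value:

/* Sketch only: set the ACTION0 pointer-advance field to the 8-byte
 * timestamp length, mirroring the FIELD_PREP() call above.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0xffULL << 20;  /* NPC_AF_ACTION0_PTR_ADVANCE: bits 27:20 */
	uint64_t val = 0;               /* hypothetical current register value */

	val &= ~mask;                   /* clear any previous advance */
	val |= (8ULL << 20) & mask;     /* FIELD_PREP(mask, NPC_HW_TSTAMP_OFFSET) */

	printf("ACTION0 = 0x%llx\n", (unsigned long long)val);  /* 0x800000 */
	return 0;
}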
+
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 778df331c8ac..b2c6385707c9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -6,7 +6,8 @@
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_ptp.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 93c4cf7fedbf..820fc660de66 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -365,6 +365,95 @@ int otx2_rss_init(struct otx2_nic *pfvf)
return 0;
}
+/* Setup UDP segmentation algorithm in HW */
+static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
+{
+ struct nix_lso_format *field;
+
+ field = (struct nix_lso_format *)&lso->fields[0];
+ lso->field_mask = GENMASK(18, 0);
+
+ /* IP's Length field */
+ field->layer = NIX_TXLAYER_OL3;
+ /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* i.e 2 bytes */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* No ID field in IPv6 header */
+ if (v4) {
+ /* Increment IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1; /* i.e 2 bytes */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Update length in UDP header */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+}
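
Given those three fields, the hardware rewrites the IPv4 total length, increments the IPv4 ID, and rewrites the UDP length for every segment it emits. A rough sketch of the per-segment result, assuming a hypothetical 3000-byte UDP payload split at gso_size = 1000 (the driver pre-sets the length fields to header sizes in otx2_sqe_add_ext() further below):

/* Sketch only: per-segment header values produced by the LSO fields
 * configured above; payload size, gso_size and starting IPID are
 * hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int ip_hdr = 20, udp_hdr = 8, gso_size = 1000, payload = 3000;
	int id = 0x1234;  /* hypothetical starting IPv4 ID */
	int seg;

	for (seg = 0; seg < payload / gso_size; seg++)
		printf("seg %d: ip.tot_len=%d udp.len=%d ip.id=0x%x\n",
		       seg,
		       ip_hdr + udp_hdr + gso_size, /* ADD_PAYLEN at OL3 offset 2 */
		       udp_hdr + gso_size,          /* ADD_PAYLEN at OL4 offset 4 */
		       id + seg);                   /* ADD_SEGNUM at OL3 offset 4 */
	return 0;
}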
+
+/* Setup segmentation algorithms in HW and retrieve algorithm index */
+void otx2_setup_segmentation(struct otx2_nic *pfvf)
+{
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *lso;
+ struct otx2_hw *hw = &pfvf->hw;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* UDPv4 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, true);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv4_idx = rsp->lso_format_idx;
+
+ /* UDPv6 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, false);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv6_idx = rsp->lso_format_idx;
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+fail:
+ mutex_unlock(&pfvf->mbox.lock);
+ netdev_info(pfvf->netdev,
+ "Failed to get LSO index for UDP GSO offload, disabling\n");
+ pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
+}
+
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
/* Configure CQE interrupt coalescing parameters
@@ -671,6 +760,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (!sq->sg)
return -ENOMEM;
+ if (pfvf->ptp) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+ if (err)
+ return err;
+ }
+
sq->head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 2fa29889522e..ac47762cce9b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -13,6 +13,9 @@
#include <linux/pci.h>
#include <linux/iommu.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <mbox.h>
#include "otx2_reg.h"
@@ -174,9 +177,11 @@ struct otx2_hw {
u16 rq_skid;
u8 cq_time_wait;
- /* For TSO segmentation */
+ /* Segmentation */
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
+ u8 lso_udpv4_idx;
+ u8 lso_udpv6_idx;
u8 hw_tso;
/* MSI-X */
@@ -209,6 +214,17 @@ struct refill_work {
struct otx2_nic *pf;
};
+struct otx2_ptp {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct otx2_nic *nic;
+
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+};
+
+#define OTX2_HW_TIMESTAMP_LEN 8
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -216,6 +232,8 @@ struct otx2_nic {
u16 max_frs;
u16 rbsize; /* Receive buffer size */
+#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
+#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
@@ -251,6 +269,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+
+ struct otx2_ptp *ptp;
+ struct hwtstamp_config tstamp;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -561,6 +582,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
+void otx2_setup_segmentation(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d59f5a9c7273..0341d9694e8b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -13,8 +13,10 @@
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
+#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_VF_NAME "octeontx2-nicvf"
@@ -663,6 +665,31 @@ static u32 otx2_get_link(struct net_device *netdev)
return pfvf->linfo.link_up;
}
+static int otx2_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (!pfvf->ptp)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = otx2_ptp_clock_index(pfvf);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -687,6 +714,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 75a8c407e815..aac2845c1fb1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -21,6 +21,7 @@
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
+#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
@@ -41,6 +42,9 @@ enum {
TYPE_PFVF,
};
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
+
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -1281,7 +1285,8 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
/* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+ pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
+ OTX2_ETH_HLEN);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1496,6 +1501,9 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_disable_napi;
+ /* Setup segmentation algorithms, if failed, clear offload capability */
+ otx2_setup_segmentation(pf);
+
/* Initialize RSS */
err = otx2_rss_init(pf);
if (err)
@@ -1547,6 +1555,16 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
+ /* When reinitializing, enable timestamping if it was enabled before */
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ otx2_config_hw_tx_tstamp(pf, true);
+ }
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ otx2_config_hw_rx_tstamp(pf, true);
+ }
+
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1738,6 +1756,143 @@ static void otx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ otx2_config_hw_tx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ otx2_config_hw_tx_tstamp(pfvf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ otx2_config_hw_rx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ otx2_config_hw_rx_tstamp(pfvf, true);
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&pfvf->tstamp, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
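
The handler above implements the standard SIOCSHWTSTAMP interface, so any timestamping-aware application can enable hardware timestamps on this interface. A minimal userspace sketch (the interface name "eth0" is a placeholder; error handling trimmed):

/* Sketch only: request TX and RX hardware timestamping from userspace
 * via the generic SIOCSHWTSTAMP ioctl handled by otx2_config_hwtstamp().
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder ifname */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else  /* driver may adjust rx_filter, e.g. to HWTSTAMP_FILTER_ALL */
		printf("granted rx_filter=%d tx_type=%d\n", cfg.rx_filter, cfg.tx_type);

	close(fd);
	return 0;
}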
+
+static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config *cfg = &pfvf->tstamp;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_config_hwtstamp(netdev, req);
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, cfg,
+ sizeof(*cfg)) ? -EFAULT : 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -1748,6 +1903,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -1920,6 +2076,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(pf);
+
/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
* HW allocates buffer pointer from stack and uses it for DMA'ing
* ingress packet. In some scenarios HW can free back allocated buffer
@@ -1935,7 +2094,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
@@ -1952,7 +2112,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(pf);
@@ -1972,6 +2132,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(pf);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -2113,6 +2275,11 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
+ otx2_config_hw_tx_tstamp(pf, false);
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
+ otx2_config_hw_rx_tstamp(pf, false);
+
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
@@ -2122,6 +2289,7 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
+ otx2_ptp_destroy(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
new file mode 100644
index 000000000000..7bcf5246350f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PTP support for ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "otx2_common.h"
+#include "otx2_ptp.h"
+
+static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct ptp_req *req;
+ int err;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_ADJFINE;
+ req->scaled_ppm = scaled_ppm;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
+static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_adjtime(&ptp->time_counter, delta);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ mutex_lock(&pfvf->mbox.lock);
+ nsec = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+int otx2_ptp_init(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp_ptr;
+ struct cyclecounter *cc;
+ struct ptp_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* check if PTP block is available */
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL);
+ if (!ptp_ptr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp_ptr->nic = pfvf;
+
+ cc = &ptp_ptr->cycle_counter;
+ cc->read = ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ ptp_ptr->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "OcteonTX2 PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = otx2_ptp_adjfine,
+ .adjtime = otx2_ptp_adjtime,
+ .gettime64 = otx2_ptp_gettime,
+ .settime64 = otx2_ptp_settime,
+ .enable = otx2_ptp_enable,
+ };
+
+ ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
+ if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
+ err = ptp_ptr->ptp_clock ?
+ PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV;
+ kfree(ptp_ptr);
+ goto error;
+ }
+
+ pfvf->ptp = ptp_ptr;
+
+error:
+ return err;
+}
+
+void otx2_ptp_destroy(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp = pfvf->ptp;
+
+ if (!ptp)
+ return;
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+ pfvf->ptp = NULL;
+}
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ return ptp_clock_index(pfvf->ptp->ptp_clock);
+}
+
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+
+ return 0;
+}
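
Because the cycle counter is registered with mult = 1 and shift = 0, the raw PTP_CLOCK_HI value already ticks in nanoseconds, and the timecounter simply offsets it against the wall-clock time captured at init. A rough sketch of that conversion, with hypothetical counter values:

/* Sketch only: what otx2_ptp_tstamp2time() effectively computes when
 * cc->mult = 1 and cc->shift = 0 (ignoring the timecounter's internal
 * fractional-ns accumulation). All values below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base_ns = 1600000000000000000ULL; /* wall clock at timecounter_init() */
	uint64_t cyc_at_init = 5000;               /* counter read at init time */
	uint64_t tstamp = 5250;                    /* raw timestamp from PTP_CLOCK_HI */

	/* (delta cycles * mult) >> shift == delta nanoseconds here */
	uint64_t ns = base_ns + ((tstamp - cyc_at_init) * 1 >> 0);

	printf("packet time: %llu ns\n", (unsigned long long)ns);
	return 0;
}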
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
new file mode 100644
index 000000000000..706d63a43ae1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 PTP support for ethernet driver */
+
+#ifndef OTX2_PTP_H
+#define OTX2_PTP_H
+
+int otx2_ptp_init(struct otx2_nic *pfvf);
+void otx2_ptp_destroy(struct otx2_nic *pfvf);
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf);
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 3a5b34a2a7a6..faaa322265a0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -16,6 +16,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
+#include "otx2_ptp.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@@ -81,8 +82,11 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
int budget, int *tx_pkts, int *tx_bytes)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct skb_shared_hwtstamps ts;
struct sk_buff *skb = NULL;
+ u64 timestamp, tsns;
struct sg_list *sg;
+ int err;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
@@ -94,6 +98,18 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
+ if (timestamp != 1) {
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
+ if (!err) {
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(tsns);
+ skb_tstamp_tx(skb, &ts);
+ }
+ }
+ }
+
*tx_bytes += skb->len;
(*tx_pkts)++;
otx2_dma_unmap_skb_frags(pfvf, sg);
@@ -101,16 +117,47 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg->skb = (u64)NULL;
}
+static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
+ struct sk_buff *skb, void *data)
+{
+ u64 tsns;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
+ return;
+
+ /* The first 8 bytes are the timestamp */
+ err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ if (err)
+ return;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+}
+
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len)
+ u64 iova, int len, struct nix_rx_parse_s *parse)
{
struct page *page;
+ int off = 0;
void *va;
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
+ /* Check if data starts at some nonzero offset
+ * from the start of the buffer. For now the
+ * only possible offset is 8 bytes in the case
+ * where the packet is prepended with a timestamp.
+ */
+ if (parse->laptr) {
+ otx2_set_rxtstamp(pfvf, skb, va);
+ off = OTX2_HW_TIMESTAMP_LEN;
+ }
+ }
+
page = virt_to_page(va);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page), len, pfvf->rbsize);
+ va - page_address(page) + off, len - off, pfvf->rbsize);
otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
pfvf->rbsize, DMA_FROM_DEVICE);
@@ -239,7 +286,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
cq->pool_ptrs++;
otx2_set_rxhash(pfvf, cqe, skb);
@@ -477,15 +524,55 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
*/
ip_hdr(skb)->tot_len =
htons(ext->lso_sb - skb_network_offset(skb));
- } else {
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
ext->lso_format = pfvf->hw.lso_tsov6_idx;
+
ipv6_hdr(skb)->payload_len =
htons(ext->lso_sb - skb_network_offset(skb));
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ __be16 l3_proto = vlan_get_protocol(skb);
+ struct udphdr *udph = udp_hdr(skb);
+ u16 iplen;
+
+ ext->lso_sb = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+
+ /* HW adds the payload size to the length fields in the
+ * IP and UDP headers during segmentation, hence adjust
+ * the lengths to just the header sizes.
+ */
+ iplen = htons(ext->lso_sb - skb_network_offset(skb));
+ if (l3_proto == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv4_idx;
+ } else {
+ ipv6_hdr(skb)->payload_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv6_idx;
+ }
+
+ udph->len = htons(sizeof(struct udphdr));
}
+ } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ext->tstmp = 1;
}
+
*offset += sizeof(*ext);
}
+static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ int alg, u64 iova)
+{
+ struct nix_sqe_mem_s *mem;
+
+ mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
+ mem->subdc = NIX_SUBDC_MEM;
+ mem->alg = alg;
+ mem->wmem = 1; /* wait for the memory operation */
+ mem->addr = iova;
+
+ *offset += sizeof(*mem);
+}
+
/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct nix_sqe_hdr_s *sqe_hdr,
@@ -736,6 +823,21 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ struct otx2_snd_queue *sq, int *offset)
+{
+ u64 iova;
+
+ if (!skb_shinfo(skb)->gso_size &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ } else {
+ skb_tx_timestamp(skb);
+ }
+}
+
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
@@ -789,6 +891,8 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return false;
}
+ otx2_set_txtstamp(pfvf, skb, sq, &offset);
+
sqe_hdr->sizem1 = (offset / 16) - 1;
netdev_tx_sent_queue(txq, skb->len);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index da97f2d4416f..73af15685657 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -91,6 +91,7 @@ struct otx2_snd_queue {
struct qmem *sqe;
struct qmem *tso_hdrs;
struct sg_list *sg;
+ struct qmem *timestamps;
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 92a3db69a6cd..70e0d4ca6688 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -553,7 +553,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b50c567ef508..99d7737e8ad6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -705,7 +705,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
frags = ring->rx_info + (index << priv->log_rx_info);
va = page_address(frags[0].page) + frags[0].page_offset;
- prefetchw(va);
+ net_prefetchw(va);
/*
* make sure we read the CQE after we read the ownership bit
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 10e6886c96ba..0b3eaa102751 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -24,7 +24,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
- en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
+ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0cc2080fd847..4f33658da25a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -442,7 +442,7 @@ struct mlx5e_xdpsq {
struct mlx5e_cq cq;
/* read only */
- struct xdp_umem *umem;
+ struct xsk_buff_pool *xsk_pool;
struct mlx5_wq_cyc wq;
struct mlx5e_xdpsq_stats *stats;
mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
@@ -606,7 +606,7 @@ struct mlx5e_rq {
struct page_pool *page_pool;
/* AF_XDP zero-copy */
- struct xdp_umem *umem;
+ struct xsk_buff_pool *xsk_pool;
struct work_struct recover_work;
@@ -729,12 +729,13 @@ struct mlx5e_hv_vhca_stats_agent {
#endif
struct mlx5e_xsk {
- /* UMEMs are stored separately from channels, because we don't want to
- * lose them when channels are recreated. The kernel also stores UMEMs,
- * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
- * so rely on our mechanism.
+ /* XSK buffer pools are stored separately from channels,
+ * because we don't want to lose them when channels are
+ * recreated. The kernel also stores buffer pools, but it doesn't
+ * distinguish between zero-copy and non-zero-copy UMEMs, so
+ * we rely on our own mechanism.
*/
- struct xdp_umem **umems;
+ struct xsk_buff_pool **pools;
u16 refcnt;
bool ever_used;
};
@@ -893,7 +894,7 @@ struct mlx5e_xsk_param;
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem, struct mlx5e_rq *rq);
+ struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
@@ -903,7 +904,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 0e6946fc121f..145592788de5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -201,7 +201,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
pi = mlx5e_xdpsq_get_next_pi(sq, MLX5_SEND_WQE_MAX_WQEBBS);
session->wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- prefetchw(session->wqe->data);
+ net_prefetchw(session->wqe->data);
session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
session->pkt_count = 0;
@@ -322,7 +322,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdpsq_stats *stats = sq->stats;
- prefetchw(wqe);
+ net_prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
stats->err++;
@@ -445,7 +445,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
if (xsk_frames)
- xsk_umem_complete_tx(sq->umem, xsk_frames);
+ xsk_tx_completed(sq->xsk_pool, xsk_frames);
sq->stats->cqes += i;
@@ -475,7 +475,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
}
if (xsk_frames)
- xsk_umem_complete_tx(sq->umem, xsk_frames);
+ xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -563,4 +563,3 @@ void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
sq->xmit_xdp_frame = is_mpw ?
mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 331ca2b0f8a4..3503e7711178 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -1,31 +1,31 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
#include <net/xdp_sock_drv.h>
-#include "umem.h"
+#include "pool.h"
#include "setup.h"
#include "en/params.h"
-static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
- struct xdp_umem *umem)
+static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+ struct xsk_buff_pool *pool)
{
struct device *dev = priv->mdev->device;
- return xsk_buff_dma_map(umem, dev, 0);
+ return xsk_pool_dma_map(pool, dev, 0);
}
-static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
- struct xdp_umem *umem)
+static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
+ struct xsk_buff_pool *pool)
{
- return xsk_buff_dma_unmap(umem, 0);
+ return xsk_pool_dma_unmap(pool, 0);
}
-static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
+static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
{
- if (!xsk->umems) {
- xsk->umems = kcalloc(MLX5E_MAX_NUM_CHANNELS,
- sizeof(*xsk->umems), GFP_KERNEL);
- if (unlikely(!xsk->umems))
+ if (!xsk->pools) {
+ xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
+ sizeof(*xsk->pools), GFP_KERNEL);
+ if (unlikely(!xsk->pools))
return -ENOMEM;
}
@@ -35,68 +35,68 @@ static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
return 0;
}
-static void mlx5e_xsk_put_umems(struct mlx5e_xsk *xsk)
+static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
{
if (!--xsk->refcnt) {
- kfree(xsk->umems);
- xsk->umems = NULL;
+ kfree(xsk->pools);
+ xsk->pools = NULL;
}
}
-static int mlx5e_xsk_add_umem(struct mlx5e_xsk *xsk, struct xdp_umem *umem, u16 ix)
+static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)
{
int err;
- err = mlx5e_xsk_get_umems(xsk);
+ err = mlx5e_xsk_get_pools(xsk);
if (unlikely(err))
return err;
- xsk->umems[ix] = umem;
+ xsk->pools[ix] = pool;
return 0;
}
-static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
+static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
{
- xsk->umems[ix] = NULL;
+ xsk->pools[ix] = NULL;
- mlx5e_xsk_put_umems(xsk);
+ mlx5e_xsk_put_pools(xsk);
}
-static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
+static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
{
- return xsk_umem_get_headroom(umem) <= 0xffff &&
- xsk_umem_get_chunk_size(umem) <= 0xffff;
+ return xsk_pool_get_headroom(pool) <= 0xffff &&
+ xsk_pool_get_chunk_size(pool) <= 0xffff;
}
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
{
- xsk->headroom = xsk_umem_get_headroom(umem);
- xsk->chunk_size = xsk_umem_get_chunk_size(umem);
+ xsk->headroom = xsk_pool_get_headroom(pool);
+ xsk->chunk_size = xsk_pool_get_chunk_size(pool);
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
- struct xdp_umem *umem, u16 ix)
+ struct xsk_buff_pool *pool, u16 ix)
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
int err;
- if (unlikely(mlx5e_xsk_get_umem(&priv->channels.params, &priv->xsk, ix)))
+ if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))
return -EBUSY;
- if (unlikely(!mlx5e_xsk_is_umem_sane(umem)))
+ if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
- err = mlx5e_xsk_map_umem(priv, umem);
+ err = mlx5e_xsk_map_pool(priv, pool);
if (unlikely(err))
return err;
- err = mlx5e_xsk_add_umem(&priv->xsk, umem, ix);
+ err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
if (unlikely(err))
- goto err_unmap_umem;
+ goto err_unmap_pool;
- mlx5e_build_xsk_param(umem, &xsk);
+ mlx5e_build_xsk_param(pool, &xsk);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
/* XSK objects will be created on open. */
@@ -112,9 +112,9 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
c = priv->channels.c[ix];
- err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
if (unlikely(err))
- goto err_remove_umem;
+ goto err_remove_pool;
mlx5e_activate_xsk(c);
@@ -132,11 +132,11 @@ err_deactivate:
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
-err_remove_umem:
- mlx5e_xsk_remove_umem(&priv->xsk, ix);
+err_remove_pool:
+ mlx5e_xsk_remove_pool(&priv->xsk, ix);
-err_unmap_umem:
- mlx5e_xsk_unmap_umem(priv, umem);
+err_unmap_pool:
+ mlx5e_xsk_unmap_pool(priv, pool);
return err;
@@ -146,7 +146,7 @@ validate_closed:
*/
if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
err = -EINVAL;
- goto err_remove_umem;
+ goto err_remove_pool;
}
return 0;
@@ -154,45 +154,45 @@ validate_closed:
static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
{
- struct xdp_umem *umem = mlx5e_xsk_get_umem(&priv->channels.params,
+ struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(&priv->channels.params,
&priv->xsk, ix);
struct mlx5e_channel *c;
- if (unlikely(!umem))
+ if (unlikely(!pool))
return -EINVAL;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto remove_umem;
+ goto remove_pool;
/* XSK RQ and SQ are only created if XDP program is set. */
if (!priv->channels.params.xdp_prog)
- goto remove_umem;
+ goto remove_pool;
c = priv->channels.c[ix];
mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
-remove_umem:
- mlx5e_xsk_remove_umem(&priv->xsk, ix);
- mlx5e_xsk_unmap_umem(priv, umem);
+remove_pool:
+ mlx5e_xsk_remove_pool(&priv->xsk, ix);
+ mlx5e_xsk_unmap_pool(priv, pool);
return 0;
}
-static int mlx5e_xsk_enable_umem(struct mlx5e_priv *priv, struct xdp_umem *umem,
+static int mlx5e_xsk_enable_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool,
u16 ix)
{
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_xsk_enable_locked(priv, umem, ix);
+ err = mlx5e_xsk_enable_locked(priv, pool, ix);
mutex_unlock(&priv->state_lock);
return err;
}
-static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
+static int mlx5e_xsk_disable_pool(struct mlx5e_priv *priv, u16 ix)
{
int err;
@@ -203,7 +203,7 @@ static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
return err;
}
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
@@ -212,6 +212,6 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
return -EINVAL;
- return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
- mlx5e_xsk_disable_umem(priv, ix);
+ return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) :
+ mlx5e_xsk_disable_pool(priv, ix);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
new file mode 100644
index 000000000000..dca0010a0866
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_XSK_POOL_H__
+#define __MLX5_EN_XSK_POOL_H__
+
+#include "en.h"
+
+static inline struct xsk_buff_pool *mlx5e_xsk_get_pool(struct mlx5e_params *params,
+ struct mlx5e_xsk *xsk, u16 ix)
+{
+ if (!xsk || !xsk->pools)
+ return NULL;
+
+ if (unlikely(ix >= params->num_channels))
+ return NULL;
+
+ return xsk->pools[ix];
+}
+
+struct mlx5e_xsk_param;
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk);
+
+/* .ndo_bpf callback. */
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+#endif /* __MLX5_EN_XSK_POOL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index a33a1f762c70..bb6669d2a916 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -48,8 +48,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
xdp->data_end = xdp->data + cqe_bcnt32;
xdp_set_data_meta_invalid(xdp);
- xsk_buff_dma_sync_for_cpu(xdp);
- prefetch(xdp->data);
+ xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
+ net_prefetch(xdp->data);
rcu_read_lock();
consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
@@ -99,8 +99,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
xdp->data_end = xdp->data + cqe_bcnt;
xdp_set_data_meta_invalid(xdp);
- xsk_buff_dma_sync_for_cpu(xdp);
- prefetch(xdp->data);
+ xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
+ net_prefetch(xdp->data);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index d147b2f13b54..7f88ccf67fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -19,10 +19,10 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
-static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- dma_info->xsk = xsk_buff_alloc(rq->umem);
+ dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk)
return -ENOMEM;
@@ -38,13 +38,13 @@ static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
- if (!xsk_umem_uses_need_wakeup(rq->umem))
+ if (!xsk_uses_need_wakeup(rq->xsk_pool))
return alloc_err;
if (unlikely(alloc_err))
- xsk_set_rx_need_wakeup(rq->umem);
+ xsk_set_rx_need_wakeup(rq->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rq->umem);
+ xsk_clear_rx_need_wakeup(rq->xsk_pool);
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index dd9df519d383..662a1dafeaad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -45,7 +45,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
}
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel *c)
{
struct mlx5e_channel_param *cparam;
@@ -64,7 +64,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
if (unlikely(err))
goto err_close_rx_cq;
@@ -72,13 +72,13 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_rq;
- /* Create a separate SQ, so that when the UMEM is disabled, we could
+ /* Create a separate SQ, so that when the buff pool is disabled, we could
* close this SQ safely and stop receiving CQEs. In other case, e.g., if
- * the XDPSQ was used instead, we might run into trouble when the UMEM
+ * the XDPSQ was used instead, we might run into trouble when the buff pool
* is disabled and then reenabled, but the SQ continues receiving CQEs
- * from the old UMEM.
+ * from the old buff pool.
*/
- err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
if (unlikely(err))
goto err_close_tx_cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
index 0dd11b81c046..ca20f1ff5e39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
@@ -12,7 +12,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev);
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel *c);
void mlx5e_close_xsk(struct mlx5e_channel *c);
void mlx5e_activate_xsk(struct mlx5e_channel *c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 4d892f6cecb3..aa91cbdfe969 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "tx.h"
-#include "umem.h"
+#include "pool.h"
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock_drv.h>
@@ -66,7 +66,7 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
- struct xdp_umem *umem = sq->umem;
+ struct xsk_buff_pool *pool = sq->xsk_pool;
struct mlx5e_xdp_info xdpi;
struct mlx5e_xdp_xmit_data xdptxd;
bool work_done = true;
@@ -87,7 +87,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(umem, &desc)) {
+ if (!xsk_tx_peek_desc(pool, &desc)) {
/* TX will get stuck until something wakes it up by
* triggering NAPI. Currently it's expected that the
* application calls sendto() if there are consumed, but
@@ -96,11 +96,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- xdptxd.dma_addr = xsk_buff_raw_get_dma(umem, desc.addr);
- xdptxd.data = xsk_buff_raw_get_data(umem, desc.addr);
+ xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
xdptxd.len = desc.len;
- xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
+ xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
@@ -119,7 +119,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xmit_xdp_doorbell(sq);
- xsk_umem_consume_tx_done(umem);
+ xsk_tx_release(pool);
}
return !(budget && work_done);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index 39fa0a705856..a05085035f23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -15,13 +15,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
{
- if (!xsk_umem_uses_need_wakeup(sq->umem))
+ if (!xsk_uses_need_wakeup(sq->xsk_pool))
return;
if (sq->pc != sq->cc)
- xsk_clear_tx_need_wakeup(sq->umem);
+ xsk_clear_tx_need_wakeup(sq->xsk_pool);
else
- xsk_set_tx_need_wakeup(sq->umem);
+ xsk_set_tx_need_wakeup(sq->xsk_pool);
}
#endif /* __MLX5_EN_XSK_TX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
deleted file mode 100644
index bada94973586..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#ifndef __MLX5_EN_XSK_UMEM_H__
-#define __MLX5_EN_XSK_UMEM_H__
-
-#include "en.h"
-
-static inline struct xdp_umem *mlx5e_xsk_get_umem(struct mlx5e_params *params,
- struct mlx5e_xsk *xsk, u16 ix)
-{
- if (!xsk || !xsk->umems)
- return NULL;
-
- if (unlikely(ix >= params->num_channels))
- return NULL;
-
- return xsk->umems[ix];
-}
-
-struct mlx5e_xsk_param;
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk);
-
-/* .ndo_bpf callback. */
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid);
-
-int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
-
-#endif /* __MLX5_EN_XSK_UMEM_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 08270987c506..5cb1e4839eb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,7 +32,7 @@
#include "en.h"
#include "en/port.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 83c9b2bbc4af..b416a8ee2eed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -33,7 +33,7 @@
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
struct mlx5e_ethtool_rule {
struct list_head list;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index aebcf73f8546..26834625556d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -57,7 +57,7 @@
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
@@ -363,7 +363,7 @@ static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq)
{
@@ -389,9 +389,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
- rq->umem = umem;
+ rq->xsk_pool = xsk_pool;
- if (rq->umem)
+ if (rq->xsk_pool)
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
else
rq->stats = &c->priv->channel_stats[c->ix].rq;
@@ -477,7 +477,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
- xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq);
+ xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
pp_params.order = 0;
@@ -816,11 +816,11 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem, struct mlx5e_rq *rq)
+ struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
{
int err;
- err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
+ err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
if (err)
return err;
@@ -925,7 +925,7 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_sq_param *param,
struct mlx5e_xdpsq *sq,
bool is_redirect)
@@ -941,9 +941,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->umem = umem;
+ sq->xsk_pool = xsk_pool;
- sq->stats = sq->umem ?
+ sq->stats = sq->xsk_pool ?
&c->priv->channel_stats[c->ix].xsksq :
is_redirect ?
&c->priv->channel_stats[c->ix].xdpsq :
@@ -1408,13 +1408,13 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
}
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect)
{
struct mlx5e_create_sq_param csp = {};
int err;
- err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
+ err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
if (err)
return err;
@@ -1907,7 +1907,7 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
@@ -1946,9 +1946,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (unlikely(err))
goto err_napi_del;
- if (umem) {
- mlx5e_build_xsk_param(umem, &xsk);
- err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ if (xsk_pool) {
+ mlx5e_build_xsk_param(xsk_pool, &xsk);
+ err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
if (unlikely(err))
goto err_close_queues;
}
@@ -2309,12 +2309,12 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
mlx5e_build_channel_param(priv, &chs->params, cparam);
for (i = 0; i < chs->num; i++) {
- struct xdp_umem *umem = NULL;
+ struct xsk_buff_pool *xsk_pool = NULL;
if (chs->params.xdp_prog)
- umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);
+ xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -3892,13 +3892,14 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
u16 ix;
for (ix = 0; ix < chs->params.num_channels; ix++) {
- struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
+ struct xsk_buff_pool *xsk_pool =
+ mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
struct mlx5e_xsk_param xsk;
- if (!umem)
+ if (!xsk_pool)
continue;
- mlx5e_build_xsk_param(umem, &xsk);
+ mlx5e_build_xsk_param(xsk_pool, &xsk);
if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
@@ -4423,8 +4424,8 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return mlx5e_xdp_set(dev, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 65828af120b7..7aab69e991a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
@@ -281,8 +280,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- if (rq->umem)
- return mlx5e_xsk_page_alloc_umem(rq, dma_info);
+ if (rq->xsk_pool)
+ return mlx5e_xsk_page_alloc_pool(rq, dma_info);
else
return mlx5e_page_alloc_pool(rq, dma_info);
}
@@ -313,7 +312,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
bool recycle)
{
- if (rq->umem)
+ if (rq->xsk_pool)
/* The `recycle` parameter is ignored, and the page is always
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
@@ -400,14 +399,14 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
int err;
int i;
- if (rq->umem) {
+ if (rq->xsk_pool) {
int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
/* Check in advance that we have enough frames, instead of
* allocating one-by-one, failing and moving frames to the
* Reuse Ring.
*/
- if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
+ if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
return -ENOMEM;
}
@@ -505,8 +504,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
/* Check in advance that we have enough frames, instead of allocating
* one-by-one, failing and moving frames to the Reuse Ring.
*/
- if (rq->umem &&
- unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
+ if (rq->xsk_pool &&
+ unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
err = -ENOMEM;
goto err;
}
@@ -754,7 +753,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
* the driver when it refills the Fill Ring.
* 2. Otherwise, busy poll by rescheduling the NAPI poll.
*/
- if (unlikely(alloc_err == -ENOMEM && rq->umem))
+ if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
return true;
return false;
@@ -1141,8 +1140,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
- prefetchw(va); /* xdp_frame data area */
- prefetch(data);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(data);
rcu_read_lock();
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
@@ -1184,7 +1183,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
while (byte_cnt) {
u16 frag_consumed_bytes =
@@ -1399,7 +1398,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
if (unlikely(frag_offset >= PAGE_SIZE)) {
di++;
@@ -1452,8 +1451,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
- prefetchw(va); /* xdp_frame data area */
- prefetch(data);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(data);
rcu_read_lock();
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 46790216ce86..ce8ab1f01876 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/udp.h>
@@ -115,7 +114,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
skb_reserve(skb, NET_IP_ALIGN);
/* Reserve for ethernet and IP header */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 61719ec89808..252e91072c5a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -12,8 +12,17 @@
#include "core.h"
#include "core_env.h"
-#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127
-#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \
+#define MLXSW_HWMON_SENSORS_MAX_COUNT 64
+#define MLXSW_HWMON_MODULES_MAX_COUNT 64
+#define MLXSW_HWMON_GEARBOXES_MAX_COUNT 32
+
+#define MLXSW_HWMON_ATTR_PER_SENSOR 3
+#define MLXSW_HWMON_ATTR_PER_MODULE 7
+#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+
+#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
+ MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
+ MLXSW_HWMON_GEARBOXES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_GEARBOX + \
MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX)
struct mlxsw_hwmon_attr {
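Note on the sizing above: the attribute budget is a straight sum of per-object maxima. A standalone sketch of the same arithmetic follows; the tacho and PWM maxima used here are placeholders for illustration, not the actual values of MLXSW_MFCR_TACHOS_MAX and MLXSW_MFCR_PWMS_MAX.

#include <stdio.h>

/* Standalone sketch of the hwmon attribute budget computed above.
 * The tacho/PWM maxima below are placeholders, not the driver's values.
 */
#define SENSORS_MAX_COUNT    64
#define MODULES_MAX_COUNT    64
#define GEARBOXES_MAX_COUNT  32

#define ATTR_PER_SENSOR   3
#define ATTR_PER_MODULE   7	/* now includes the two new alarm attributes */
#define ATTR_PER_GEARBOX  4

#define TACHOS_MAX  10		/* placeholder */
#define PWMS_MAX    5		/* placeholder */

int main(void)
{
	int count = SENSORS_MAX_COUNT * ATTR_PER_SENSOR +
		    MODULES_MAX_COUNT * ATTR_PER_MODULE +
		    GEARBOXES_MAX_COUNT * ATTR_PER_GEARBOX +
		    TACHOS_MAX + PWMS_MAX;

	/* 64*3 + 64*7 + 32*4 + 10 + 5 = 783 reserved attribute slots */
	printf("attribute slots reserved: %d\n", count);
	return 0;
}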
@@ -205,25 +214,39 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
return len;
}
-static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int mlxsw_hwmon_module_temp_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
- int temp;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL);
+
+ return 0;
+}
+
+static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
if (err)
return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
return sprintf(buf, "%d\n", temp);
}
@@ -270,48 +293,72 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
return sprintf(buf, "%u\n", fault);
}
-static ssize_t
-mlxsw_hwmon_module_temp_critical_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- int temp;
u8 module;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_WARN, &temp);
+ SFP_TEMP_HIGH_WARN, p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
}
- return sprintf(buf, "%u\n", temp);
+ return 0;
}
static ssize_t
-mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+mlxsw_hwmon_module_temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
u8 module;
- int temp;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_ALARM, &temp);
+ SFP_TEMP_HIGH_ALARM, p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
}
+ return 0;
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &temp);
+ if (err)
+ return err;
+
return sprintf(buf, "%u\n", temp);
}
@@ -341,6 +388,53 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
return sprintf(buf, "gearbox %03u\n", index);
}
+static ssize_t mlxsw_hwmon_temp_critical_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp, critic_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ if (temp >= emergency_temp)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &critic_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= critic_temp);
+}
+
+static ssize_t mlxsw_hwmon_temp_emergency_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= emergency_temp);
+}
+
enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
@@ -354,6 +448,8 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
};
static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
@@ -444,6 +540,20 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_label", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_critical_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_crit_alarm", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_emergency_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_emergency_alarm", num + 1);
+ break;
default:
WARN_ON(1);
}
@@ -566,6 +676,12 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
+ i, i);
}
return 0;
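The two new *_alarm_show() callbacks order the thresholds as follows: an invalid (non-positive) reading never raises an alarm, the critical alarm covers temperatures from the critical threshold up to but not including the emergency threshold, and the emergency alarm takes over from there. A minimal standalone model of that decision logic, with made-up threshold values:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the decision logic of the new *_alarm_show() callbacks:
 * temp <= 0 never alarms, the critical alarm covers [crit, emergency),
 * the emergency alarm covers [emergency, inf).
 */
static bool crit_alarm(int temp, int crit, int emergency)
{
	if (temp <= 0)
		return false;
	if (temp >= emergency)
		return false;	/* escalated to the emergency alarm */
	return temp >= crit;
}

static bool emergency_alarm(int temp, int emergency)
{
	if (temp <= 0)
		return false;
	return temp >= emergency;
}

int main(void)
{
	/* Example thresholds in millidegrees, as hwmon reports them. */
	int crit = 70000, emergency = 80000;
	int samples[] = { -1, 65000, 73000, 81000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("temp=%6d crit_alarm=%d emergency_alarm=%d\n",
		       samples[i],
		       crit_alarm(samples[i], crit, emergency),
		       emergency_alarm(samples[i], emergency));
	return 0;
}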
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 079b080de7f7..485e3e02eb70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4174,7 +4174,6 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M BIT(0)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII BIT(1)
-#define MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII BIT(2)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R BIT(3)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G BIT(4)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G BIT(5)
@@ -4197,7 +4196,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
-#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
@@ -4210,10 +4208,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
-#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
-#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
-#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
-#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 14c78f73bb65..f08cad5b5657 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -993,22 +993,12 @@ struct mlxsw_sp1_port_link_mode {
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- .speed = SPEED_100,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = SPEED_1000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- .speed = SPEED_10000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
@@ -1023,11 +1013,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.speed = SPEED_10000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
- .speed = SPEED_20000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
.speed = SPEED_40000,
@@ -1092,11 +1077,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
.speed = SPEED_100000,
},
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- .speed = SPEED_100000,
- },
};
#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
@@ -1237,14 +1217,6 @@ mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
- ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
- ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
-
-static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};
@@ -1408,16 +1380,6 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.speed = SPEED_1000,
},
{
- .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
- .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
- .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
- MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X |
- MLXSW_SP_PORT_MASK_WIDTH_8X,
- .speed = SPEED_2500,
- },
- {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 9650562fc0ef..ca8090a28dec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -314,11 +314,9 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
u8 *p_message_type,
u16 *p_sequence_id)
{
- unsigned int offset = 0;
unsigned int ptp_class;
- u8 *data;
+ struct ptp_header *hdr;
- data = skb_mac_header(skb);
ptp_class = ptp_classify_raw(skb);
switch (ptp_class & PTP_CLASS_VMASK) {
@@ -329,30 +327,14 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
return -ERANGE;
}
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return -ERANGE;
- }
-
- /* PTP header is 34 bytes. */
- if (skb->len < offset + 34)
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return -EINVAL;
- *p_message_type = data[offset] & 0x0f;
- *p_domain_number = data[offset + 4];
- *p_sequence_id = (u16)(data[offset + 30]) << 8 | data[offset + 31];
+ *p_message_type = ptp_get_msgtype(hdr, ptp_class);
+ *p_domain_number = hdr->domain_number;
+ *p_sequence_id = be16_to_cpu(hdr->sequence_id);
+
return 0;
}
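The rewritten parser delegates the offset handling to ptp_parse_header()/ptp_get_msgtype(); the fields it returns are the same ones the removed code read by hand from the 34-byte PTPv2 header: the message type in the low nibble of byte 0, the domain number at byte 4, and the big-endian sequence ID at bytes 30-31. A standalone sketch of that raw layout (illustrative only, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* Field offsets inside a PTPv2 header (34 bytes), matching what the
 * removed open-coded parser extracted before ptp_parse_header() took over.
 */
#define PTP_HDR_LEN	34
#define PTP_MSGTYPE_OFF	0	/* low nibble */
#define PTP_DOMAIN_OFF	4
#define PTP_SEQID_OFF	30	/* big endian, 2 bytes */

static int ptp_fields(const uint8_t *hdr, unsigned int len,
		      uint8_t *msgtype, uint8_t *domain, uint16_t *seqid)
{
	if (len < PTP_HDR_LEN)
		return -1;

	*msgtype = hdr[PTP_MSGTYPE_OFF] & 0x0f;
	*domain = hdr[PTP_DOMAIN_OFF];
	*seqid = (uint16_t)hdr[PTP_SEQID_OFF] << 8 | hdr[PTP_SEQID_OFF + 1];
	return 0;
}

int main(void)
{
	uint8_t hdr[PTP_HDR_LEN] = { 0 };
	uint8_t msgtype, domain;
	uint16_t seqid;

	hdr[0] = 0x12;		/* messageType in the low nibble */
	hdr[4] = 3;		/* domainNumber */
	hdr[30] = 0x01;		/* sequenceId = 0x0102 */
	hdr[31] = 0x02;

	if (!ptp_fields(hdr, sizeof(hdr), &msgtype, &domain, &seqid))
		printf("msgtype=%u domain=%u seqid=%u\n", msgtype, domain, seqid);
	return 0;
}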
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 2e41c5519c1b..433f14ade464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -291,7 +291,7 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
static const struct mlxsw_sp_trap_policer_item
mlxsw_sp_trap_policer_items_arr[] = {
{
- .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 4096),
},
{
.policer = MLXSW_SP_TRAP_POLICER(2, 128, 128),
@@ -303,25 +303,25 @@ mlxsw_sp_trap_policer_items_arr[] = {
.policer = MLXSW_SP_TRAP_POLICER(4, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 8192),
},
{
.policer = MLXSW_SP_TRAP_POLICER(6, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 1024),
+ .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 8192),
},
{
.policer = MLXSW_SP_TRAP_POLICER(9, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(11, 360, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(11, 256, 128),
},
{
.policer = MLXSW_SP_TRAP_POLICER(12, 128, 128),
@@ -330,19 +330,19 @@ mlxsw_sp_trap_policer_items_arr[] = {
.policer = MLXSW_SP_TRAP_POLICER(13, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 4096),
+ .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 16384),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 4096),
+ .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 8192),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 512),
},
{
.policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 6f9a725662fb..5023d91269f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -551,16 +551,6 @@ struct mlxsw_sx_port_link_mode {
static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
- .speed = 100,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.supported = SUPPORTED_1000baseKX_Full,
@@ -568,12 +558,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 1000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 10000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.supported = SUPPORTED_10000baseKX4_Full,
@@ -590,12 +574,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 10000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
.supported = SUPPORTED_40000baseCR4_Full,
.advertised = ADVERTISED_40000baseCR4_Full,
@@ -634,8 +612,7 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
.speed = 100000,
},
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index bf516285510f..a2926b1b3cff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -24,6 +24,7 @@
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GRE BIT(0)
+#define NFP_FLOWER_LAYER2_QINQ BIT(4)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7)
@@ -319,6 +320,22 @@ struct nfp_flower_mac_mpls {
__be32 mpls_lse;
};
+/* VLAN details (2W/8B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | outer_tpid | outer_tci |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | inner_tpid | inner_tci |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_vlan {
+ __be16 outer_tpid;
+ __be16 outer_tci;
+ __be16 inner_tpid;
+ __be16 inner_tci;
+};
+
/* L4 ports (for UDP, TCP, SCTP) (1W/4B)
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 3bf9c1afa45e..caf12eec9945 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -30,6 +30,8 @@ struct nfp_app;
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
+#define NFP_FLOWER_KEY_MAX_LW 32
+
#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
#define NFP_FL_MASK_REUSE_TIME_NS 40000
@@ -44,6 +46,7 @@ struct nfp_app;
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
+#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -57,7 +60,8 @@ struct nfp_app;
NFP_FL_FEATS_VF_RLIM | \
NFP_FL_FEATS_FLOW_MOD | \
NFP_FL_FEATS_PRE_TUN_RULES | \
- NFP_FL_FEATS_IPV6_TUN)
+ NFP_FL_FEATS_IPV6_TUN | \
+ NFP_FL_FEATS_VLAN_QINQ)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index f7f01e2e3dce..255a4dff6288 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,7 +10,7 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct flow_rule *rule, u8 key_type)
+ struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
u16 tmp_tci;
@@ -24,7 +24,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
msk->nfp_flow_key_layer = key_type;
msk->mask_id = ~0;
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
@@ -231,6 +231,50 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
}
static void
+nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
+ struct nfp_flower_vlan *frame,
+ bool outer_vlan)
+{
+ u16 tci;
+
+ tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ key->vlan_id);
+
+ if (outer_vlan) {
+ frame->outer_tci = cpu_to_be16(tci);
+ frame->outer_tpid = key->vlan_tpid;
+ } else {
+ frame->inner_tci = cpu_to_be16(tci);
+ frame->inner_tpid = key->vlan_tpid;
+ }
+}
+
+static void
+nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
+ struct nfp_flower_vlan *msk,
+ struct flow_rule *rule)
+{
+ struct flow_match_vlan match;
+
+ memset(ext, 0, sizeof(struct nfp_flower_vlan));
+ memset(msk, 0, sizeof(struct nfp_flower_vlan));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ flow_rule_match_vlan(rule, &match);
+ nfp_flower_fill_vlan(match.key, ext, true);
+ nfp_flower_fill_vlan(match.mask, msk, true);
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ flow_rule_match_cvlan(rule, &match);
+ nfp_flower_fill_vlan(match.key, ext, false);
+ nfp_flower_fill_vlan(match.mask, msk, false);
+ }
+}
+
+static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
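nfp_flower_fill_vlan() above composes a TCI word from the dissector's priority and VID plus a present flag, and stores it (byte-swapped) in either the outer or inner half of struct nfp_flower_vlan. A minimal userspace sketch of the same packing follows, using generic 802.1Q bit positions; the present-bit position and the NFP_FLOWER_MASK_VLAN_* values are assumptions here, the real constants belong to the firmware ABI.

#include <stdint.h>
#include <stdio.h>

/* Illustrative TCI packing: PCP in bits 15:13, an assumed "present"
 * flag in bit 12, VID in bits 11:0. The driver stores the result as
 * __be16 on the wire; byte swapping is omitted here for brevity.
 */
#define VLAN_PRIO_SHIFT	13
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRESENT	0x1000	/* assumed position, for illustration */
#define VLAN_VID_MASK	0x0fff

struct vlan_half {
	uint16_t tpid;
	uint16_t tci;
};

static uint16_t pack_tci(uint8_t prio, uint16_t vid)
{
	return VLAN_PRESENT |
	       ((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK) |
	       (vid & VLAN_VID_MASK);
}

int main(void)
{
	struct vlan_half outer = {
		.tpid = 0x88a8,			/* 802.1ad S-TAG */
		.tci = pack_tci(5, 100),	/* prio 5, VID 100 */
	};

	printf("outer tpid=0x%04x tci=0x%04x\n", outer.tpid, outer.tci);
	return 0;
}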
@@ -433,7 +477,10 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct nfp_flower_priv *priv = app->priv;
+ bool qinq_sup;
u32 port_id;
+ int ext_len;
int err;
u8 *ext;
u8 *msk;
@@ -446,9 +493,11 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
+ qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
+
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
(struct nfp_flower_meta_tci *)msk,
- rule, key_ls->key_layer);
+ rule, key_ls->key_layer, qinq_sup);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -547,6 +596,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+ (struct nfp_flower_vlan *)msk,
+ rule);
+ ext += sizeof(struct nfp_flower_vlan);
+ msk += sizeof(struct nfp_flower_vlan);
+ }
+
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
@@ -589,5 +646,15 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
+ /* Check that the flow key does not exceed the maximum limit.
+	 * All structures in the key are multiples of 4 bytes, so use u32.
+ */
+ ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
+ if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: flow key too long");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
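The new length check counts the assembled key in 32-bit longwords by pointer difference and rejects anything above NFP_FLOWER_KEY_MAX_LW (32 longwords, i.e. 128 bytes). A small standalone sketch of the same pointer arithmetic, with arbitrary example structure sizes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define KEY_MAX_LW	32	/* 32 longwords == 128 bytes */

int main(void)
{
	uint8_t unmasked_data[KEY_MAX_LW * 4];
	uint8_t *ext = unmasked_data;

	/* Pretend three fixed-size match structures were appended; each
	 * is a multiple of 4 bytes, so counting in longwords is exact.
	 */
	ext += 8;
	ext += 4;
	ext += 24;

	ptrdiff_t ext_len = (uint32_t *)ext - (uint32_t *)unmasked_data;

	printf("key length: %td longwords (%s)\n", ext_len,
	       ext_len > KEY_MAX_LW ? "too long" : "ok");
	return 0;
}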
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 36356f96661d..1c59aff2163c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -31,6 +31,7 @@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
@@ -66,7 +67,8 @@
NFP_FLOWER_LAYER_IPV6)
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
- (NFP_FLOWER_LAYER_PORT | \
+ (NFP_FLOWER_LAYER_EXT_META | \
+ NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
@@ -285,6 +287,30 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
return -EOPNOTSUPP;
}
+ if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
+ !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan cvlan;
+
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_vlan(rule, &cvlan);
+ if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -1066,6 +1092,7 @@ err_destroy_merge_flow:
* nfp_flower_validate_pre_tun_rule()
* @app: Pointer to the APP handle
* @flow: Pointer to NFP flow representation of rule
+ * @key_ls: Pointer to NFP key layers structure
* @extack: Netlink extended ACK report
*
* Verifies the flow as a pre-tunnel rule.
@@ -1075,10 +1102,13 @@ err_destroy_merge_flow:
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
struct nfp_fl_payload *flow,
+ struct nfp_fl_key_ls *key_ls,
struct netlink_ext_ack *extack)
{
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_meta_tci *meta_tci;
struct nfp_flower_mac_mpls *mac;
+ u8 *ext = flow->unmasked_data;
struct nfp_fl_act_head *act;
u8 *mask = flow->mask_data;
bool vlan = false;
@@ -1086,20 +1116,25 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
u8 key_layer;
meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
- if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
- u16 vlan_tci = be16_to_cpu(meta_tci->tci);
-
- vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
- vlan = true;
- } else {
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ key_layer = key_ls->key_layer;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
- key_layer = meta_tci->nfp_flow_key_layer;
if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
return -EOPNOTSUPP;
+ } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
+ return -EOPNOTSUPP;
}
if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
@@ -1109,7 +1144,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
/* Skip fields known to exist. */
mask += sizeof(struct nfp_flower_meta_tci);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ if (key_ls->key_layer_two) {
+ mask += sizeof(struct nfp_flower_ext_meta);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ }
mask += sizeof(struct nfp_flower_in_port);
+ ext += sizeof(struct nfp_flower_in_port);
/* Ensure destination MAC address is fully matched. */
mac = (struct nfp_flower_mac_mpls *)mask;
@@ -1118,6 +1159,8 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
+ mask += sizeof(struct nfp_flower_mac_mpls);
+ ext += sizeof(struct nfp_flower_mac_mpls);
if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
key_layer & NFP_FLOWER_LAYER_IPV6) {
/* Flags and proto fields have same offset in IPv4 and IPv6. */
@@ -1130,7 +1173,6 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
sizeof(struct nfp_flower_ipv4) :
sizeof(struct nfp_flower_ipv6);
- mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
for (i = 0; i < size; i++)
@@ -1138,6 +1180,25 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
}
+ ext += size;
+ mask += size;
+ }
+
+ if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ struct nfp_flower_vlan *vlan_tags;
+ u16 vlan_tci;
+
+ vlan_tags = (struct nfp_flower_vlan *)ext;
+
+ vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
/* Action must be a single egress or pop_vlan and egress. */
@@ -1220,7 +1281,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_destroy_flow;
if (flow_pay->pre_tun_rule.dev) {
- err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
if (err)
goto err_destroy_flow;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index f5a910c458ba..084a924431d5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -42,13 +42,11 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
- struct list_head lifs;
- struct ionic_lif *master_lif;
+ struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
unsigned int ntxqs_per_lif;
unsigned int nrxqs_per_lif;
- DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX);
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
@@ -66,9 +64,6 @@ struct ionic_admin_ctx {
union ionic_adminq_comp comp;
};
-int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
- ionic_cq_done_cb done_cb, void *done_arg);
-
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 85c686c16741..d1d6fb6669e5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -294,21 +294,21 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_port_reset;
}
- err = ionic_lifs_size(ionic);
+ err = ionic_lif_size(ionic);
if (err) {
- dev_err(dev, "Cannot size LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
goto err_out_port_reset;
}
- err = ionic_lifs_alloc(ionic);
+ err = ionic_lif_alloc(ionic);
if (err) {
- dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err);
goto err_out_free_irqs;
}
- err = ionic_lifs_init(ionic);
+ err = ionic_lif_init(ionic->lif);
if (err) {
- dev_err(dev, "Cannot init LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot init LIF: %d, aborting\n", err);
goto err_out_free_lifs;
}
@@ -321,9 +321,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
- err = ionic_lifs_register(ionic);
+ err = ionic_lif_register(ionic->lif);
if (err) {
- dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
goto err_out_deinit_lifs;
}
@@ -336,12 +336,13 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_deregister_lifs:
- ionic_lifs_unregister(ionic);
+ ionic_lif_unregister(ionic->lif);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(ionic->lif);
err_out_free_lifs:
- ionic_lifs_free(ionic);
+ ionic_lif_free(ionic->lif);
+ ionic->lif = NULL;
err_out_free_irqs:
ionic_bus_free_irq_vectors(ionic);
err_out_port_reset:
@@ -377,11 +378,12 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
- if (ionic->master_lif) {
+ if (ionic->lif) {
ionic_devlink_unregister(ionic);
- ionic_lifs_unregister(ionic);
- ionic_lifs_deinit(ionic);
- ionic_lifs_free(ionic);
+ ionic_lif_unregister(ionic->lif);
+ ionic_lif_deinit(ionic->lif);
+ ionic_lif_free(ionic->lif);
+ ionic->lif = NULL;
ionic_bus_free_irq_vectors(ionic);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 11621ccc1faf..683bbbf75115 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -76,7 +76,7 @@ static int q_tail_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
- seq_printf(seq, "%d\n", q->tail->index);
+ seq_printf(seq, "%d\n", q->tail_idx);
return 0;
}
@@ -86,7 +86,7 @@ static int q_head_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
- seq_printf(seq, "%d\n", q->head->index);
+ seq_printf(seq, "%d\n", q->head_idx);
return 0;
}
@@ -96,7 +96,7 @@ static int cq_tail_show(struct seq_file *seq, void *v)
{
struct ionic_cq *cq = seq->private;
- seq_printf(seq, "%d\n", cq->tail->index);
+ seq_printf(seq, "%d\n", cq->tail_idx);
return 0;
}
@@ -112,7 +112,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
- struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry;
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
+ struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
@@ -121,21 +122,21 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_queue *q = &qcq->q;
struct ionic_cq *cq = &qcq->cq;
- qcq->dentry = debugfs_create_dir(q->name, lif->dentry);
+ qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
+ if (IS_ERR_OR_NULL(qcq_dentry))
+ return;
+ qcq->dentry = qcq_dentry;
- debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size);
- debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa);
+ debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
+ debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
+ debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
+ debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
+ debugfs_create_x64("sg_base_pa", 0400, qcq_dentry, &qcq->sg_base_pa);
+ debugfs_create_x32("sg_size", 0400, qcq_dentry, &qcq->sg_size);
q_dentry = debugfs_create_dir("q", qcq->dentry);
debugfs_create_u32("index", 0400, q_dentry, &q->index);
- debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa);
- if (qcq->flags & IONIC_QCQ_F_SG) {
- debugfs_create_x64("sg_base_pa", 0400, q_dentry,
- &q->sg_base_pa);
- debugfs_create_u32("sg_desc_size", 0400, q_dentry,
- &q->sg_desc_size);
- }
debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d83eff0ae0ac..6068f51a11d9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -21,8 +21,8 @@ static void ionic_watchdog_cb(struct timer_list *t)
hb = ionic_heartbeat_check(ionic);
- if (hb >= 0 && ionic->master_lif)
- ionic_link_status_check_request(ionic->master_lif);
+ if (hb >= 0 && ionic->lif)
+ ionic_link_status_check_request(ionic->lif);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -126,7 +126,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
/* is this a transition? */
if (fw_status != idev->last_fw_status &&
idev->last_fw_status != 0xff) {
- struct ionic_lif *lif = ionic->master_lif;
+ struct ionic_lif *lif = ionic->lif;
bool trigger = false;
if (!fw_status || fw_status == 0xff) {
@@ -467,9 +467,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size)
{
- struct ionic_cq_info *cur;
unsigned int ring_size;
- unsigned int i;
if (desc_size == 0 || !is_power_of_2(num_descs))
return -EINVAL;
@@ -482,22 +480,9 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->bound_intr = intr;
cq->num_descs = num_descs;
cq->desc_size = desc_size;
- cq->tail = cq->info;
+ cq->tail_idx = 0;
cq->done_color = 1;
- cur = cq->info;
-
- for (i = 0; i < num_descs; i++) {
- if (i + 1 == num_descs) {
- cur->next = cq->info;
- cur->last = true;
- } else {
- cur->next = cur + 1;
- }
- cur->index = i;
- cur++;
- }
-
return 0;
}
@@ -522,15 +507,18 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
+ struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
- while (cb(cq, cq->tail)) {
- if (cq->tail->last)
+ cq_info = &cq->info[cq->tail_idx];
+ while (cb(cq, cq_info)) {
+ if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
- cq->tail = cq->tail->next;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ cq_info = &cq->info[cq->tail_idx];
DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
@@ -548,9 +536,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid)
{
- struct ionic_desc_info *cur;
unsigned int ring_size;
- unsigned int i;
if (desc_size == 0 || !is_power_of_2(num_descs))
return -EINVAL;
@@ -565,24 +551,12 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
q->num_descs = num_descs;
q->desc_size = desc_size;
q->sg_desc_size = sg_desc_size;
- q->tail = q->info;
- q->head = q->tail;
+ q->tail_idx = 0;
+ q->head_idx = 0;
q->pid = pid;
snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
- cur = q->info;
-
- for (i = 0; i < num_descs; i++) {
- if (i + 1 == num_descs)
- cur->next = q->info;
- else
- cur->next = cur + 1;
- cur->index = i;
- cur->left = num_descs - i;
- cur++;
- }
-
return 0;
}
@@ -614,19 +588,22 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg)
{
struct device *dev = q->lif->ionic->dev;
+ struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
- q->head->cb = cb;
- q->head->cb_arg = cb_arg;
- q->head = q->head->next;
+ desc_info = &q->info[q->head_idx];
+ desc_info->cb = cb;
+ desc_info->cb_arg = cb_arg;
+
+ q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
q->lif->index, q->name, q->hw_type, q->hw_index,
- q->head->index, ring_doorbell);
+ q->head_idx, ring_doorbell);
if (ring_doorbell)
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
- q->dbval | q->head->index);
+ q->dbval | q->head_idx);
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
@@ -634,8 +611,8 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
unsigned int mask, tail, head;
mask = q->num_descs - 1;
- tail = q->tail->index;
- head = q->head->index;
+ tail = q->tail_idx;
+ head = q->head_idx;
return ((pos - tail) & mask) < ((head - tail) & mask);
}
@@ -646,20 +623,22 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
struct ionic_desc_info *desc_info;
ionic_desc_cb cb;
void *cb_arg;
+ u16 index;
/* check for empty queue */
- if (q->tail->index == q->head->index)
+ if (q->tail_idx == q->head_idx)
return;
/* stop index must be for a descriptor that is not yet completed */
if (unlikely(!ionic_q_is_posted(q, stop_index)))
dev_err(q->lif->ionic->dev,
"ionic stop is not posted %s stop %u tail %u head %u\n",
- q->name, stop_index, q->tail->index, q->head->index);
+ q->name, stop_index, q->tail_idx, q->head_idx);
do {
- desc_info = q->tail;
- q->tail = desc_info->next;
+ desc_info = &q->info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
cb = desc_info->cb;
cb_arg = desc_info->cb_arg;
@@ -669,5 +648,5 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
if (cb)
cb(q, desc_info, cq_info, cb_arg);
- } while (desc_info->index != stop_index);
+ } while (index != stop_index);
}
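The queue and completion rings here move from per-descriptor next/index/last links to plain head/tail indices. Since ionic_q_init()/ionic_cq_init() already require num_descs to be a power of two, advancing an index is a single mask, and the completion color flips whenever the tail wraps. A small runnable model of that scheme, under those assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal model of the index-based ring: num_descs must be a power of
 * two so wrapping is a single AND, and the completion "color" flips on
 * every wrap so stale entries can be told apart from fresh ones.
 */
struct ring {
	uint16_t head_idx;
	uint16_t tail_idx;
	uint16_t num_descs;	/* power of two */
	bool done_color;
};

static void ring_post(struct ring *q)
{
	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
}

static void ring_complete(struct ring *q)
{
	if (q->tail_idx == q->num_descs - 1)
		q->done_color = !q->done_color;	/* wrapped: flip color */
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}

int main(void)
{
	struct ring q = { .num_descs = 4, .done_color = true };

	for (int i = 0; i < 6; i++) {
		ring_post(&q);
		ring_complete(&q);
		printf("i=%d head=%u tail=%u color=%d\n",
		       i, q.head_idx, q.tail_idx, q.done_color);
	}
	return 0;
}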
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index d5cba502abca..4a35174e3ff1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -149,10 +149,13 @@ struct ionic_dev {
};
struct ionic_cq_info {
- void *cq_desc;
- struct ionic_cq_info *next;
- unsigned int index;
- bool last;
+ union {
+ void *cq_desc;
+ struct ionic_txq_comp *txcq;
+ struct ionic_rxq_comp *rxcq;
+ struct ionic_admin_comp *admincq;
+ struct ionic_notifyq_event *notifyq;
+ };
};
struct ionic_queue;
@@ -169,11 +172,17 @@ struct ionic_page_info {
};
struct ionic_desc_info {
- void *desc;
- void *sg_desc;
- struct ionic_desc_info *next;
- unsigned int index;
- unsigned int left;
+ union {
+ void *desc;
+ struct ionic_txq_desc *txq_desc;
+ struct ionic_rxq_desc *rxq_desc;
+ struct ionic_admin_cmd *adminq_desc;
+ };
+ union {
+ void *sg_desc;
+ struct ionic_txq_sg_desc *txq_sg_desc;
+ struct ionic_rxq_sg_desc *rxq_sgl_desc;
+ };
unsigned int npages;
struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
ionic_desc_cb cb;
@@ -183,25 +192,35 @@ struct ionic_desc_info {
#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
+ struct device *dev;
+ struct ionic_lif *lif;
+ struct ionic_desc_info *info;
+ u16 head_idx;
+ u16 tail_idx;
+ unsigned int index;
+ unsigned int num_descs;
u64 dbell_count;
- u64 drop;
u64 stop;
u64 wake;
- struct ionic_lif *lif;
- struct ionic_desc_info *info;
- struct ionic_desc_info *tail;
- struct ionic_desc_info *head;
+ u64 drop;
struct ionic_dev *idev;
- unsigned int index;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
u64 dbval;
- void *base;
- void *sg_base;
+ union {
+ void *base;
+ struct ionic_txq_desc *txq;
+ struct ionic_rxq_desc *rxq;
+ struct ionic_admin_cmd *adminq;
+ };
+ union {
+ void *sg_base;
+ struct ionic_txq_sg_desc *txq_sgl;
+ struct ionic_rxq_sg_desc *rxq_sgl;
+ };
dma_addr_t base_pa;
dma_addr_t sg_base_pa;
- unsigned int num_descs;
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
@@ -221,17 +240,17 @@ struct ionic_intr_info {
};
struct ionic_cq {
- void *base;
- dma_addr_t base_pa;
struct ionic_lif *lif;
struct ionic_cq_info *info;
- struct ionic_cq_info *tail;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
+ u16 tail_idx;
bool done_color;
unsigned int num_descs;
- u64 compl_count;
unsigned int desc_size;
+ u64 compl_count;
+ void *base;
+ dma_addr_t base_pa;
};
struct ionic;
@@ -246,12 +265,12 @@ static inline void ionic_intr_init(struct ionic_dev *idev,
static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
{
- unsigned int avail = q->tail->index;
+ unsigned int avail = q->tail_idx;
- if (q->head->index >= avail)
- avail += q->head->left - 1;
+ if (q->head_idx >= avail)
+ avail += q->num_descs - q->head_idx - 1;
else
- avail -= q->head->index + 1;
+ avail -= q->head_idx + 1;
return avail;
}
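
[Editor's note] A side effect of the header changes above is that the generic void * descriptor and completion pointers now sit in anonymous unions next to typed views, so the hot paths can use the typed pointer directly instead of casting. A minimal, self-contained illustration of that layout (struct and field names here are simplified stand-ins, not the driver's types):

/* Sketch of the anonymous-union layout used for the typed descriptor views.
 * All union members alias the same storage, so assigning the void * view
 * makes the typed view usable without a cast.
 */
#include <stdio.h>

struct txq_desc { unsigned long long addr; unsigned int len; };

struct desc_info {
	union {
		void *desc;                 /* generic view, as before */
		struct txq_desc *txq_desc;  /* typed view added by the patch */
	};
};

int main(void)
{
	struct txq_desc d = { .addr = 0x1000, .len = 64 };
	struct desc_info info;

	info.desc = &d;                           /* store via the generic view */
	printf("len=%u\n", info.txq_desc->len);   /* read via the typed view */
	return 0;
}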
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index c4f4fd469fe3..8d9fb2e19cca 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -85,7 +85,7 @@ int ionic_devlink_register(struct ionic *ionic)
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
else
devlink_port_type_eth_set(&ionic->dl_port,
- ionic->master_lif->netdev);
+ ionic->lif->netdev);
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3c57c331729f..0d14659fbdfd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -298,8 +298,8 @@ static void ionic_get_pauseparam(struct net_device *netdev,
pause_type = lif->ionic->idev.port_info->config.pause_type;
if (pause_type) {
- pause->rx_pause = pause_type & IONIC_PAUSE_F_RX ? 1 : 0;
- pause->tx_pause = pause_type & IONIC_PAUSE_F_TX ? 1 : 0;
+ pause->rx_pause = (pause_type & IONIC_PAUSE_F_RX) ? 1 : 0;
+ pause->tx_pause = (pause_type & IONIC_PAUSE_F_TX) ? 1 : 0;
}
}
@@ -454,7 +454,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->rxqcqs[i].qcq;
+ qcq = lif->rxqcqs[i];
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
qcq->intr.index,
lif->rx_coalesce_hw);
@@ -471,7 +471,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->txqcqs[i].qcq;
+ qcq = lif->txqcqs[i];
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
qcq->intr.index,
lif->tx_coalesce_hw);
@@ -493,18 +493,14 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->rx_pending = lif->nrxq_descs;
}
-static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
-{
- struct ethtool_ringparam *ring = arg;
-
- lif->ntxq_descs = ring->tx_pending;
- lif->nrxq_descs = ring->rx_pending;
-}
-
static int ionic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue_params qparam;
+ int err;
+
+ ionic_init_queue_params(lif, &qparam);
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -522,7 +518,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- return ionic_reset_queues(lif, ionic_set_ringsize, ring);
+ if (ring->tx_pending != lif->ntxq_descs)
+ netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
+ lif->ntxq_descs, ring->tx_pending);
+
+ if (ring->rx_pending != lif->nrxq_descs)
+ netdev_info(netdev, "Changing Rx ring size from %d to %d\n",
+ lif->nrxq_descs, ring->rx_pending);
+
+ /* if we're not running, just set the values and return */
+ if (!netif_running(lif->netdev)) {
+ lif->ntxq_descs = ring->tx_pending;
+ lif->nrxq_descs = ring->rx_pending;
+ return 0;
+ }
+
+ qparam.ntxq_descs = ring->tx_pending;
+ qparam.nrxq_descs = ring->rx_pending;
+ err = ionic_reconfigure_queues(lif, &qparam);
+ if (err)
+ netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);
+
+ return err;
}
static void ionic_get_channels(struct net_device *netdev,
@@ -544,32 +561,15 @@ static void ionic_get_channels(struct net_device *netdev,
}
}
-static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
-{
- struct ethtool_channels *ch = arg;
-
- if (ch->combined_count) {
- lif->nxqs = ch->combined_count;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
- clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
- lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
- lif->tx_coalesce_hw = lif->rx_coalesce_hw;
- netdev_info(lif->netdev, "Sharing queue interrupts\n");
- }
- } else {
- lif->nxqs = ch->rx_count;
- if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
- set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
- netdev_info(lif->netdev, "Splitting queue interrupts\n");
- }
- }
-}
-
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int new_cnt;
+ struct ionic_queue_params qparam;
+ int max_cnt;
+ int err;
+
+ ionic_init_queue_params(lif, &qparam);
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
@@ -577,20 +577,63 @@ static int ionic_set_channels(struct net_device *netdev,
}
if (ch->combined_count && ch->rx_count) {
- netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n");
+ netdev_info(netdev, "Use either combined or rx and tx, not both\n");
return -EINVAL;
}
- if (ch->combined_count)
- new_cnt = ch->combined_count;
- else
- new_cnt = ch->rx_count;
+ max_cnt = lif->ionic->ntxqs_per_lif;
+ if (ch->combined_count) {
+ if (ch->combined_count > max_cnt)
+ return -EINVAL;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netdev_info(lif->netdev, "Sharing queue interrupts\n");
+ else if (ch->combined_count == lif->nxqs)
+ return 0;
+
+ if (lif->nxqs != ch->combined_count)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, ch->combined_count);
- if (lif->nxqs != new_cnt)
- netdev_info(netdev, "Changing queue count from %d to %d\n",
- lif->nxqs, new_cnt);
+ qparam.nxqs = ch->combined_count;
+ qparam.intr_split = 0;
+ } else {
+ max_cnt /= 2;
+ if (ch->rx_count > max_cnt)
+ return -EINVAL;
- return ionic_reset_queues(lif, ionic_set_queuecount, ch);
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netdev_info(lif->netdev, "Splitting queue interrupts\n");
+ else if (ch->rx_count == lif->nxqs)
+ return 0;
+
+ if (lif->nxqs != ch->rx_count)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, ch->rx_count);
+
+ qparam.nxqs = ch->rx_count;
+ qparam.intr_split = 1;
+ }
+
+ /* if we're not running, just set the values and return */
+ if (!netif_running(lif->netdev)) {
+ lif->nxqs = qparam.nxqs;
+
+ if (qparam.intr_split) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ } else {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ }
+ return 0;
+ }
+
+ err = ionic_reconfigure_queues(lif, &qparam);
+ if (err)
+ netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);
+
+ return err;
}
static u32 ionic_get_priv_flags(struct net_device *netdev)
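
[Editor's note] Both ethtool paths above now follow the same shape: snapshot the current queue parameters, edit the snapshot, and either store the values directly when the interface is down or hand the snapshot to the reconfigure helper. A compressed, standalone sketch of that control flow (the helper names below are stand-ins for the example, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

struct queue_params { unsigned int nxqs, ntxq_descs, nrxq_descs; };

struct lif {
	bool running;
	unsigned int nxqs, ntxq_descs, nrxq_descs;
};

static void init_queue_params(const struct lif *lif, struct queue_params *p)
{
	p->nxqs = lif->nxqs;
	p->ntxq_descs = lif->ntxq_descs;
	p->nrxq_descs = lif->nrxq_descs;
}

/* Placeholder for the heavyweight path that rebuilds the rings. */
static int reconfigure_queues(struct lif *lif, const struct queue_params *p)
{
	lif->ntxq_descs = p->ntxq_descs;
	lif->nrxq_descs = p->nrxq_descs;
	printf("rebuilt rings: tx=%u rx=%u\n", lif->ntxq_descs, lif->nrxq_descs);
	return 0;
}

static int set_ringparam(struct lif *lif, unsigned int tx, unsigned int rx)
{
	struct queue_params qparam;

	init_queue_params(lif, &qparam);

	if (tx == lif->ntxq_descs && rx == lif->nrxq_descs)
		return 0;                       /* nothing to do */

	if (!lif->running) {                    /* not running: just record them */
		lif->ntxq_descs = tx;
		lif->nrxq_descs = rx;
		return 0;
	}

	qparam.ntxq_descs = tx;                 /* running: rebuild with new sizes */
	qparam.nrxq_descs = rx;
	return reconfigure_queues(lif, &qparam);
}

int main(void)
{
	struct lif lif = { .running = true, .nxqs = 4,
			   .ntxq_descs = 128, .nrxq_descs = 128 };
	return set_ringparam(&lif, 256, 256);
}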
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 26988ad7ec97..ee683cb142a8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -36,6 +36,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
+static void ionic_txrx_deinit(struct ionic_lif *lif);
+static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
@@ -297,6 +299,18 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->flags &= ~IONIC_QCQ_F_INITED;
}
+static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
+ return;
+
+ irq_set_affinity_hint(qcq->intr.vector, NULL);
+ devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
+ qcq->intr.vector = 0;
+ ionic_intr_free(lif->ionic, qcq->intr.index);
+ qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
+}
+
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
@@ -306,51 +320,62 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_debugfs_del_qcq(qcq);
- dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
- qcq->base = NULL;
- qcq->base_pa = 0;
+ if (qcq->q_base) {
+ dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
+ qcq->q_base = NULL;
+ qcq->q_base_pa = 0;
+ }
- if (qcq->flags & IONIC_QCQ_F_INTR) {
- irq_set_affinity_hint(qcq->intr.vector, NULL);
- devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
- qcq->intr.vector = 0;
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ if (qcq->cq_base) {
+ dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
+ qcq->cq_base = NULL;
+ qcq->cq_base_pa = 0;
}
- devm_kfree(dev, qcq->cq.info);
- qcq->cq.info = NULL;
- devm_kfree(dev, qcq->q.info);
- qcq->q.info = NULL;
- devm_kfree(dev, qcq);
+ if (qcq->sg_base) {
+ dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
+ qcq->sg_base = NULL;
+ qcq->sg_base_pa = 0;
+ }
+
+ ionic_qcq_intr_free(lif, qcq);
+
+ if (qcq->cq.info) {
+ devm_kfree(dev, qcq->cq.info);
+ qcq->cq.info = NULL;
+ }
+ if (qcq->q.info) {
+ devm_kfree(dev, qcq->q.info);
+ qcq->q.info = NULL;
+ }
}
static void ionic_qcqs_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
- unsigned int i;
if (lif->notifyqcq) {
ionic_qcq_free(lif, lif->notifyqcq);
+ devm_kfree(dev, lif->notifyqcq);
lif->notifyqcq = NULL;
}
if (lif->adminqcq) {
ionic_qcq_free(lif, lif->adminqcq);
+ devm_kfree(dev, lif->adminqcq);
lif->adminqcq = NULL;
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
+ devm_kfree(dev, lif->rxqstats);
+ lif->rxqstats = NULL;
devm_kfree(dev, lif->rxqcqs);
lif->rxqcqs = NULL;
}
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
+ devm_kfree(dev, lif->txqstats);
+ lif->txqstats = NULL;
devm_kfree(dev, lif->txqcqs);
lif->txqcqs = NULL;
}
@@ -368,6 +393,53 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.index = src_qcq->intr.index;
}
+static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ int err;
+
+ if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
+ qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
+ return 0;
+ }
+
+ err = ionic_intr_alloc(lif, &qcq->intr);
+ if (err) {
+ netdev_warn(lif->netdev, "no intr for %s: %d\n",
+ qcq->q.name, err);
+ goto err_out;
+ }
+
+ err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
+ if (err < 0) {
+ netdev_warn(lif->netdev, "no vector for %s: %d\n",
+ qcq->q.name, err);
+ goto err_out_free_intr;
+ }
+ qcq->intr.vector = err;
+ ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_SET);
+
+ err = ionic_request_irq(lif, qcq);
+ if (err) {
+ netdev_warn(lif->netdev, "irq request failed %d\n", err);
+ goto err_out_free_intr;
+ }
+
+ /* try to get the irq on the local numa node first */
+ qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
+ dev_to_node(lif->ionic->dev));
+ if (qcq->intr.cpu != -1)
+ cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+
+ netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
+ return 0;
+
+err_out_free_intr:
+ ionic_intr_free(lif->ionic, qcq->intr.index);
+err_out:
+ return err;
+}
+
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int index,
const char *name, unsigned int flags,
@@ -377,7 +449,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int pid, struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
- u32 q_size, cq_size, sg_size, total_size;
struct device *dev = lif->ionic->dev;
void *q_base, *cq_base, *sg_base;
dma_addr_t cq_base_pa = 0;
@@ -388,21 +459,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
*qcq = NULL;
- q_size = num_descs * desc_size;
- cq_size = num_descs * cq_desc_size;
- sg_size = num_descs * sg_desc_size;
-
- total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
- /* Note: aligning q_size/cq_size is not enough due to cq_base
- * address aligning as q_base could be not aligned to the page.
- * Adding PAGE_SIZE.
- */
- total_size += PAGE_SIZE;
- if (flags & IONIC_QCQ_F_SG) {
- total_size += ALIGN(sg_size, PAGE_SIZE);
- total_size += PAGE_SIZE;
- }
-
new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
if (!new) {
netdev_err(lif->netdev, "Cannot allocate queue structure\n");
@@ -417,7 +473,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out_free_qcq;
}
new->q.type = type;
@@ -426,41 +482,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
- goto err_out;
+ goto err_out_free_q_info;
}
- if (flags & IONIC_QCQ_F_INTR) {
- err = ionic_intr_alloc(lif, &new->intr);
- if (err) {
- netdev_warn(lif->netdev, "no intr for %s: %d\n",
- name, err);
- goto err_out;
- }
-
- err = ionic_bus_get_irq(lif->ionic, new->intr.index);
- if (err < 0) {
- netdev_warn(lif->netdev, "no vector for %s: %d\n",
- name, err);
- goto err_out_free_intr;
- }
- new->intr.vector = err;
- ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
- IONIC_INTR_MASK_SET);
-
- err = ionic_request_irq(lif, new);
- if (err) {
- netdev_warn(lif->netdev, "irq request failed %d\n", err);
- goto err_out_free_intr;
- }
-
- new->intr.cpu = cpumask_local_spread(new->intr.index,
- dev_to_node(dev));
- if (new->intr.cpu != -1)
- cpumask_set_cpu(new->intr.cpu,
- &new->intr.affinity_mask);
- } else {
- new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
- }
+ err = ionic_alloc_qcq_interrupt(lif, new);
+ if (err)
+ goto err_out;
new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
GFP_KERNEL);
@@ -473,46 +500,67 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_irq;
+ goto err_out_free_cq_info;
}
- new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
- GFP_KERNEL);
- if (!new->base) {
+ new->q_size = PAGE_SIZE + (num_descs * desc_size);
+ new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
+ GFP_KERNEL);
+ if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
- goto err_out_free_irq;
+ goto err_out_free_cq_info;
}
+ q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+ ionic_q_map(&new->q, q_base, q_base_pa);
- new->total_size = total_size;
-
- q_base = new->base;
- q_base_pa = new->base_pa;
-
- cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
- cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
+ new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
+ new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
+ GFP_KERNEL);
+ if (!new->cq_base) {
+ netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+ cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
+ cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
+ ionic_cq_map(&new->cq, cq_base, cq_base_pa);
+ ionic_cq_bind(&new->cq, &new->q);
if (flags & IONIC_QCQ_F_SG) {
- sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
- PAGE_SIZE);
- sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
+ new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
+ new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
+ GFP_KERNEL);
+ if (!new->sg_base) {
+ netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_cq;
+ }
+ sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
+ sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
}
- ionic_q_map(&new->q, q_base, q_base_pa);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
-
*qcq = new;
return 0;
+err_out_free_cq:
+ dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
+err_out_free_q:
+ dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
+err_out_free_cq_info:
+ devm_kfree(dev, new->cq.info);
err_out_free_irq:
- if (flags & IONIC_QCQ_F_INTR)
+ if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
-err_out_free_intr:
- if (flags & IONIC_QCQ_F_INTR)
ionic_intr_free(lif->ionic, new->intr.index);
+ }
+err_out_free_q_info:
+ devm_kfree(dev, new->q.info);
+err_out_free_qcq:
+ devm_kfree(dev, new);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -521,10 +569,8 @@ err_out:
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
- unsigned int q_list_size;
unsigned int flags;
int err;
- int i;
flags = IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
@@ -544,63 +590,50 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0, lif->kern_pid, &lif->notifyqcq);
if (err)
- goto err_out_free_adminqcq;
+ goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
/* Let the notifyq ride on the adminq interrupt */
ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
}
- q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
err = -ENOMEM;
- lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->txqcqs)
- goto err_out_free_notifyqcq;
- for (i = 0; i < lif->nxqs; i++) {
- lif->txqcqs[i].stats = devm_kzalloc(dev,
- sizeof(struct ionic_q_stats),
- GFP_KERNEL);
- if (!lif->txqcqs[i].stats)
- goto err_out_free_tx_stats;
- }
-
- lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ goto err_out;
+ lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->rxqcqs)
- goto err_out_free_tx_stats;
- for (i = 0; i < lif->nxqs; i++) {
- lif->rxqcqs[i].stats = devm_kzalloc(dev,
- sizeof(struct ionic_q_stats),
- GFP_KERNEL);
- if (!lif->rxqcqs[i].stats)
- goto err_out_free_rx_stats;
- }
+ goto err_out;
- return 0;
+ lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_tx_stats), GFP_KERNEL);
+ if (!lif->txqstats)
+ goto err_out;
+ lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_rx_stats), GFP_KERNEL);
+ if (!lif->rxqstats)
+ goto err_out;
-err_out_free_rx_stats:
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
- devm_kfree(dev, lif->rxqcqs);
- lif->rxqcqs = NULL;
-err_out_free_tx_stats:
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
- devm_kfree(dev, lif->txqcqs);
- lif->txqcqs = NULL;
-err_out_free_notifyqcq:
- if (lif->notifyqcq) {
- ionic_qcq_free(lif, lif->notifyqcq);
- lif->notifyqcq = NULL;
- }
-err_out_free_adminqcq:
- ionic_qcq_free(lif, lif->adminqcq);
- lif->adminqcq = NULL;
+ return 0;
+err_out:
+ ionic_qcqs_free(lif);
return err;
}
+static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
+{
+ qcq->q.tail_idx = 0;
+ qcq->q.head_idx = 0;
+ qcq->cq.tail_idx = 0;
+ qcq->cq.done_color = 1;
+ memset(qcq->q_base, 0, qcq->q_size);
+ memset(qcq->cq_base, 0, qcq->cq_size);
+ memset(qcq->sg_base, 0, qcq->sg_size);
+}
+
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
@@ -626,10 +659,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
unsigned int intr_index;
int err;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ if (qcq->flags & IONIC_QCQ_F_INTR)
intr_index = qcq->intr.index;
else
- intr_index = lif->rxqcqs[q->index].qcq->intr.index;
+ intr_index = lif->rxqcqs[q->index]->intr.index;
ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
@@ -640,9 +673,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
- q->tail = q->info;
- q->head = q->tail;
- cq->tail = cq->info;
+ ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
@@ -697,9 +728,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
- q->tail = q->info;
- q->head = q->tail;
- cq->tail = cq->info;
+ ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
@@ -771,21 +800,6 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
return true;
}
-static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
-{
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_cq *cq = &lif->notifyqcq->cq;
- u32 work_done;
-
- work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
- NULL, NULL);
- if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, IONIC_INTR_CRED_RESET_COALESCE);
-
- return work_done;
-}
-
static bool ionic_adminq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
@@ -801,15 +815,36 @@ static bool ionic_adminq_service(struct ionic_cq *cq,
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
+ struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
struct ionic_lif *lif = napi_to_cq(napi)->lif;
+ struct ionic_dev *idev = &lif->ionic->idev;
+ unsigned int flags = 0;
int n_work = 0;
int a_work = 0;
+ int work_done;
+
+ if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
+ n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
+ ionic_notifyq_service, NULL, NULL);
+
+ if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
+ a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
+ ionic_adminq_service, NULL, NULL);
- if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
- n_work = ionic_notifyq_clean(lif, budget);
- a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
+ work_done = max(n_work, a_work);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ DEBUG_STATS_INTR_REARM(intr);
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl,
+ intr->index,
+ n_work + a_work, flags);
+ }
- return max(n_work, a_work);
+ return work_done;
}
void ionic_get_stats64(struct net_device *netdev,
@@ -1315,6 +1350,35 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
return ionic_addr_add(netdev, mac);
}
+static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
+{
+ /* Stop and clean the queues before reconfiguration */
+ mutex_lock(&lif->queue_lock);
+ netif_device_detach(lif->netdev);
+ ionic_stop_queues(lif);
+ ionic_txrx_deinit(lif);
+}
+
+static int ionic_start_queues_reconfig(struct ionic_lif *lif)
+{
+ int err;
+
+ /* Re-init the queues after reconfiguration */
+
+ /* The only way txrx_init can fail here is if communication
+ * with FW is suddenly broken. There's not much we can do
+ * at this point - error messages have already been printed,
+ * so we can continue on and the user can eventually do a
+ * DOWN and UP to try to reset and clear the issue.
+ */
+ err = ionic_txrx_init(lif);
+ mutex_unlock(&lif->queue_lock);
+ ionic_link_status_check_request(lif);
+ netif_device_attach(lif->netdev);
+
+ return err;
+}
+
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1334,9 +1398,12 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
netdev->mtu = new_mtu;
- err = ionic_reset_queues(lif, NULL, NULL);
+ /* if we're not running, nothing more to do */
+ if (!netif_running(netdev))
+ return 0;
- return err;
+ ionic_stop_queues_reconfig(lif);
+ return ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout_work(struct work_struct *ws)
@@ -1345,9 +1412,14 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
netdev_info(lif->netdev, "Tx Timeout recovery\n");
- rtnl_lock();
- ionic_reset_queues(lif, NULL, NULL);
- rtnl_unlock();
+ /* if we were stopped before this scheduled job was launched,
+ * don't bother the queues as they are already stopped.
+ */
+ if (!netif_running(lif->netdev))
+ return;
+
+ ionic_stop_queues_reconfig(lif);
+ ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
@@ -1482,7 +1554,7 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->txqcqs[i].qcq);
+ err = ionic_qcq_disable(lif->txqcqs[i]);
if (err == -ETIMEDOUT)
break;
}
@@ -1490,7 +1562,7 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ err = ionic_qcq_disable(lif->rxqcqs[i]);
if (err == -ETIMEDOUT)
break;
}
@@ -1502,18 +1574,18 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
- ionic_tx_empty(&lif->txqcqs[i].qcq->q);
+ for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
+ ionic_tx_flush(&lif->txqcqs[i]->cq);
+ ionic_tx_empty(&lif->txqcqs[i]->q);
}
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
- ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
- ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
+ ionic_rx_flush(&lif->rxqcqs[i]->cq);
+ ionic_rx_empty(&lif->rxqcqs[i]->q);
}
}
lif->rx_mode = 0;
@@ -1524,16 +1596,18 @@ static void ionic_txrx_free(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->txqcqs[i].qcq);
- lif->txqcqs[i].qcq = NULL;
+ for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+ devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
+ lif->txqcqs[i] = NULL;
}
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
- lif->rxqcqs[i].qcq = NULL;
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
+ lif->rxqcqs[i] = NULL;
}
}
}
@@ -1561,17 +1635,16 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
sg_desc_sz,
- lif->kern_pid, &lif->txqcqs[i].qcq);
+ lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
if (flags & IONIC_QCQ_F_INTR)
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->txqcqs[i].qcq->intr.index,
+ lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
- lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
- ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
}
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
@@ -1581,20 +1654,19 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
sizeof(struct ionic_rxq_sg_desc),
- lif->kern_pid, &lif->rxqcqs[i].qcq);
+ lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[i].qcq->intr.index,
+ lif->rxqcqs[i]->intr.index,
lif->rx_coalesce_hw);
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
- lif->txqcqs[i].qcq);
+ ionic_link_qcq_interrupts(lif->rxqcqs[i],
+ lif->txqcqs[i]);
- lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
- ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
}
return 0;
@@ -1611,13 +1683,13 @@ static int ionic_txrx_init(struct ionic_lif *lif)
int err;
for (i = 0; i < lif->nxqs; i++) {
- err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
+ err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
if (err)
goto err_out;
- err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
+ err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
if (err) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
goto err_out;
}
}
@@ -1631,8 +1703,8 @@ static int ionic_txrx_init(struct ionic_lif *lif)
err_out:
while (i--) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
}
return err;
@@ -1643,15 +1715,15 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int i, err;
for (i = 0; i < lif->nxqs; i++) {
- ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
- err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
+ ionic_rx_fill(&lif->rxqcqs[i]->q);
+ err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
- err = ionic_qcq_enable(lif->txqcqs[i].qcq);
+ err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
if (err != -ETIMEDOUT)
- ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ ionic_qcq_disable(lif->rxqcqs[i]);
goto err_out;
}
}
@@ -1660,10 +1732,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out:
while (i--) {
- err = ionic_qcq_disable(lif->txqcqs[i].qcq);
+ err = ionic_qcq_disable(lif->txqcqs[i]);
if (err == -ETIMEDOUT)
break;
- err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ err = ionic_qcq_disable(lif->rxqcqs[i]);
if (err == -ETIMEDOUT)
break;
}
@@ -1688,7 +1760,7 @@ static int ionic_start_queues(struct ionic_lif *lif)
return 0;
}
-int ionic_open(struct net_device *netdev)
+static int ionic_open(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
int err;
@@ -1734,7 +1806,7 @@ static void ionic_stop_queues(struct ionic_lif *lif)
ionic_txrx_disable(lif);
}
-int ionic_stop(struct net_device *netdev)
+static int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -2016,35 +2088,210 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
-int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
-{
- bool running;
- int err = 0;
+static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
+{
+ /* only swapping the queues, not the napi, flags, or other stuff */
+ swap(a->q.num_descs, b->q.num_descs);
+ swap(a->q.base, b->q.base);
+ swap(a->q.base_pa, b->q.base_pa);
+ swap(a->q.info, b->q.info);
+ swap(a->q_base, b->q_base);
+ swap(a->q_base_pa, b->q_base_pa);
+ swap(a->q_size, b->q_size);
+
+ swap(a->q.sg_base, b->q.sg_base);
+ swap(a->q.sg_base_pa, b->q.sg_base_pa);
+ swap(a->sg_base, b->sg_base);
+ swap(a->sg_base_pa, b->sg_base_pa);
+ swap(a->sg_size, b->sg_size);
+
+ swap(a->cq.num_descs, b->cq.num_descs);
+ swap(a->cq.base, b->cq.base);
+ swap(a->cq.base_pa, b->cq.base_pa);
+ swap(a->cq.info, b->cq.info);
+ swap(a->cq_base, b->cq_base);
+ swap(a->cq_base_pa, b->cq_base_pa);
+ swap(a->cq_size, b->cq_size);
+}
+
+int ionic_reconfigure_queues(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ struct ionic_qcq **tx_qcqs = NULL;
+ struct ionic_qcq **rx_qcqs = NULL;
+ unsigned int sg_desc_sz;
+ unsigned int flags;
+ int err = -ENOMEM;
+ unsigned int i;
- mutex_lock(&lif->queue_lock);
- running = netif_running(lif->netdev);
- if (running) {
- netif_device_detach(lif->netdev);
- err = ionic_stop(lif->netdev);
+ /* allocate temporary qcq arrays to hold new queue structs */
+ if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
+ tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
+ if (!tx_qcqs)
+ goto err_out;
+ }
+ if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
+ rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
+ if (!rx_qcqs)
+ goto err_out;
+ }
+
+ /* allocate new desc_info and rings, but leave the interrupt setup
+ * until later so as to not mess with the still-running queues
+ */
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
+ if (tx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
+ qparam->ntxq_descs,
+ sizeof(struct ionic_txq_desc),
+ sizeof(struct ionic_txq_comp),
+ sg_desc_sz,
+ lif->kern_pid, &tx_qcqs[i]);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ if (rx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
+ qparam->nrxq_descs,
+ sizeof(struct ionic_rxq_desc),
+ sizeof(struct ionic_rxq_comp),
+ sizeof(struct ionic_rxq_sg_desc),
+ lif->kern_pid, &rx_qcqs[i]);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ /* stop and clean the queues */
+ ionic_stop_queues_reconfig(lif);
+
+ if (qparam->nxqs != lif->nxqs) {
+ err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
if (err)
- goto reset_out;
+ goto err_out_reinit_unlock;
+ err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
+ if (err) {
+ netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
+ goto err_out_reinit_unlock;
+ }
}
- if (cb)
- cb(lif, arg);
+ /* swap new desc_info and rings, keeping existing interrupt config */
+ if (tx_qcqs) {
+ lif->ntxq_descs = qparam->ntxq_descs;
+ for (i = 0; i < qparam->nxqs; i++)
+ ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
+ }
- if (running) {
- err = ionic_open(lif->netdev);
- netif_device_attach(lif->netdev);
+ if (rx_qcqs) {
+ lif->nrxq_descs = qparam->nrxq_descs;
+ for (i = 0; i < qparam->nxqs; i++)
+ ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
}
-reset_out:
- mutex_unlock(&lif->queue_lock);
+ /* if we need to change the interrupt layout, this is the time */
+ if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
+ qparam->nxqs != lif->nxqs) {
+ if (qparam->intr_split) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ } else {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ }
+
+ /* clear existing interrupt assignments */
+ for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
+ ionic_qcq_intr_free(lif, lif->txqcqs[i]);
+ ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
+ }
+
+ /* re-assign the interrupts */
+ for (i = 0; i < qparam->nxqs; i++) {
+ lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
+ err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[i]->intr.index,
+ lif->rx_coalesce_hw);
+
+ if (qparam->intr_split) {
+ lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
+ err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->txqcqs[i]->intr.index,
+ lif->tx_coalesce_hw);
+ } else {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
+ }
+ }
+ }
+
+ swap(lif->nxqs, qparam->nxqs);
+
+err_out_reinit_unlock:
+	/* re-init the queues, but don't lose an error code */
+ if (err)
+ ionic_start_queues_reconfig(lif);
+ else
+ err = ionic_start_queues_reconfig(lif);
+
+err_out:
+ /* free old allocs without cleaning intr */
+ for (i = 0; i < qparam->nxqs; i++) {
+ if (tx_qcqs && tx_qcqs[i]) {
+ tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, tx_qcqs[i]);
+ devm_kfree(lif->ionic->dev, tx_qcqs[i]);
+ tx_qcqs[i] = NULL;
+ }
+ if (rx_qcqs && rx_qcqs[i]) {
+ rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, rx_qcqs[i]);
+ devm_kfree(lif->ionic->dev, rx_qcqs[i]);
+ rx_qcqs[i] = NULL;
+ }
+ }
+
+ /* free q array */
+ if (rx_qcqs) {
+ devm_kfree(lif->ionic->dev, rx_qcqs);
+ rx_qcqs = NULL;
+ }
+ if (tx_qcqs) {
+ devm_kfree(lif->ionic->dev, tx_qcqs);
+ tx_qcqs = NULL;
+ }
+
+	/* clean the unused dma and info allocations when the new set is smaller
+ * than the full array, but leave the qcq shells in place
+ */
+ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+
+ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ }
return err;
}
-static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
+int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
union ionic_lif_identity *lid;
@@ -2055,7 +2302,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lid = kzalloc(sizeof(*lid), GFP_KERNEL);
if (!lid)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
netdev = alloc_etherdev_mqs(sizeof(*lif),
ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
@@ -2069,7 +2316,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif = netdev_priv(netdev);
lif->netdev = netdev;
- ionic->master_lif = lif;
+ ionic->lif = lif;
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
@@ -2079,7 +2326,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->identity = lid;
lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
ionic_lif_identify(ionic, lif->lif_type, lif->identity);
- lif->netdev->min_mtu = le32_to_cpu(lif->identity->eth.min_frame_size);
+ lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
+ le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
@@ -2087,7 +2335,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->nxqs = ionic->ntxqs_per_lif;
lif->ionic = ionic;
- lif->index = index;
+ lif->index = 0;
lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
@@ -2099,7 +2347,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
- snprintf(lif->name, sizeof(lif->name), "lif%u", index);
+ snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
spin_lock_init(&lif->adminq_lock);
@@ -2119,7 +2367,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
ionic_debugfs_add_lif(lif);
- /* allocate queues */
+ /* allocate control queues and txrx queue arrays */
+ ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out_free_lif_info;
@@ -2138,9 +2387,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
}
netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
- list_add_tail(&lif->list, &ionic->lifs);
-
- return lif;
+ return 0;
err_out_free_qcqs:
ionic_qcqs_free(lif);
@@ -2154,27 +2401,7 @@ err_out_free_netdev:
err_out_free_lid:
kfree(lid);
- return ERR_PTR(err);
-}
-
-int ionic_lifs_alloc(struct ionic *ionic)
-{
- struct ionic_lif *lif;
-
- INIT_LIST_HEAD(&ionic->lifs);
-
- /* only build the first lif, others are for later features */
- set_bit(0, ionic->lifbits);
-
- lif = ionic_lif_alloc(ionic, 0);
- if (IS_ERR_OR_NULL(lif)) {
- clear_bit(0, ionic->lifbits);
- return -ENOMEM;
- }
-
- ionic_lif_queue_identify(lif);
-
- return 0;
+ return err;
}
static void ionic_lif_reset(struct ionic_lif *lif)
@@ -2209,7 +2436,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
}
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(lif);
ionic_reset(ionic);
ionic_qcqs_free(lif);
@@ -2232,7 +2459,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
if (err)
goto err_out;
- err = ionic_lifs_init(ionic);
+ err = ionic_lif_init(lif);
if (err)
goto err_qcqs_free;
@@ -2261,14 +2488,14 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err_txrx_free:
ionic_txrx_free(lif);
err_lifs_deinit:
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
-static void ionic_lif_free(struct ionic_lif *lif)
+void ionic_lif_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2297,23 +2524,10 @@ static void ionic_lif_free(struct ionic_lif *lif)
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
- list_del(&lif->list);
free_netdev(lif->netdev);
}
-void ionic_lifs_free(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
-
- ionic_lif_free(lif);
- }
-}
-
-static void ionic_lif_deinit(struct ionic_lif *lif)
+void ionic_lif_deinit(struct ionic_lif *lif)
{
if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
return;
@@ -2334,17 +2548,6 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_reset(lif);
}
-void ionic_lifs_deinit(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
- ionic_lif_deinit(lif);
- }
-}
-
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2490,7 +2693,7 @@ static int ionic_station_set(struct ionic_lif *lif)
return 0;
}
-static int ionic_lif_init(struct ionic_lif *lif)
+int ionic_lif_init(struct ionic_lif *lif)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
@@ -2580,22 +2783,6 @@ err_out_free_dbid:
return err;
}
-int ionic_lifs_init(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
- int err;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
- err = ionic_lif_init(lif);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
@@ -2644,45 +2831,41 @@ static int ionic_lif_notify(struct notifier_block *nb,
return NOTIFY_DONE;
}
-int ionic_lifs_register(struct ionic *ionic)
+int ionic_lif_register(struct ionic_lif *lif)
{
int err;
- INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
+ INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
- ionic->nb.notifier_call = ionic_lif_notify;
+ lif->ionic->nb.notifier_call = ionic_lif_notify;
- err = register_netdevice_notifier(&ionic->nb);
+ err = register_netdevice_notifier(&lif->ionic->nb);
if (err)
- ionic->nb.notifier_call = NULL;
+ lif->ionic->nb.notifier_call = NULL;
/* only register LIF0 for now */
- err = register_netdev(ionic->master_lif->netdev);
+ err = register_netdev(lif->netdev);
if (err) {
- dev_err(ionic->dev, "Cannot register net device, aborting\n");
+ dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
return err;
}
- ionic->master_lif->registered = true;
- ionic_lif_set_netdev_info(ionic->master_lif);
+ lif->registered = true;
+ ionic_lif_set_netdev_info(lif);
return 0;
}
-void ionic_lifs_unregister(struct ionic *ionic)
+void ionic_lif_unregister(struct ionic_lif *lif)
{
- if (ionic->nb.notifier_call) {
- unregister_netdevice_notifier(&ionic->nb);
- cancel_work_sync(&ionic->nb_work);
- ionic->nb.notifier_call = NULL;
+ if (lif->ionic->nb.notifier_call) {
+ unregister_netdevice_notifier(&lif->ionic->nb);
+ cancel_work_sync(&lif->ionic->nb_work);
+ lif->ionic->nb.notifier_call = NULL;
}
- /* There is only one lif ever registered in the
- * current model, so don't bother searching the
- * ionic->lif for candidates to unregister
- */
- if (ionic->master_lif &&
- ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(ionic->master_lif->netdev);
+ if (lif->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(lif->netdev);
+ lif->registered = false;
}
static void ionic_lif_queue_identify(struct ionic_lif *lif)
@@ -2801,7 +2984,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
return 0;
}
-int ionic_lifs_size(struct ionic *ionic)
+int ionic_lif_size(struct ionic *ionic)
{
struct ionic_identity *ident = &ionic->ident;
unsigned int nintrs, dev_nintrs;
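
[Editor's note] The core of ionic_reconfigure_queues() above is a swap-based strategy: allocate the replacement rings while the old ones are still running, stop traffic only after the allocations have succeeded, swap the ring contents in place (ionic_swap_queues), restart, and free whatever ends up in the temporary slots. A stripped-down sketch of that ordering (all names invented for the example; error handling reduced to the essentials):

#include <stdio.h>
#include <stdlib.h>

struct ring { unsigned int num_descs; int *descs; };

static int ring_alloc(struct ring *r, unsigned int num_descs)
{
	r->descs = calloc(num_descs, sizeof(*r->descs));
	if (!r->descs)
		return -1;
	r->num_descs = num_descs;
	return 0;
}

static void ring_free(struct ring *r)
{
	free(r->descs);
	r->descs = NULL;
	r->num_descs = 0;
}

static void swap_rings(struct ring *a, struct ring *b)
{
	struct ring tmp = *a;   /* only the ring contents move; the owning     */
	*a = *b;                /* structure (and its interrupt, napi, etc.)   */
	*b = tmp;               /* stays where it is, as in ionic_swap_queues  */
}

static int resize_ring(struct ring *live, unsigned int new_descs)
{
	struct ring staged = { 0 };

	/* 1. Allocate the replacement while the live ring keeps running. */
	if (ring_alloc(&staged, new_descs))
		return -1;       /* failure here leaves the live ring untouched */

	/* 2. Stop traffic (elided), 3. swap contents, 4. restart (elided). */
	swap_rings(live, &staged);

	/* 5. The old ring now sits in the temporary slot; free it. */
	ring_free(&staged);
	return 0;
}

int main(void)
{
	struct ring live = { 0 };

	if (ring_alloc(&live, 128))
		return 1;
	if (resize_ring(&live, 256))
		return 1;
	printf("live ring now has %u descriptors\n", live.num_descs);
	ring_free(&live);
	return 0;
}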
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 1ee3b14c8d50..11ea9e0c6a4a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -16,32 +16,32 @@
#define IONIC_TX_BUDGET_DEFAULT 256
struct ionic_tx_stats {
- u64 dma_map_err;
u64 pkts;
u64 bytes;
- u64 clean;
- u64 linearize;
u64 csum_none;
u64 csum;
- u64 crc32_csum;
u64 tso;
u64 tso_bytes;
u64 frags;
u64 vlan_inserted;
+ u64 clean;
+ u64 linearize;
+ u64 crc32_csum;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
+ u64 dma_map_err;
};
struct ionic_rx_stats {
- u64 dma_map_err;
- u64 alloc_err;
u64 pkts;
u64 bytes;
u64 csum_none;
u64 csum_complete;
- u64 csum_error;
u64 buffers_posted;
u64 dropped;
u64 vlan_stripped;
+ u64 csum_error;
+ u64 dma_map_err;
+ u64 alloc_err;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -56,35 +56,28 @@ struct ionic_napi_stats {
u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
};
-struct ionic_q_stats {
- union {
- struct ionic_tx_stats tx;
- struct ionic_rx_stats rx;
- };
-};
-
struct ionic_qcq {
- void *base;
- dma_addr_t base_pa;
- unsigned int total_size;
+ void *q_base;
+ dma_addr_t q_base_pa;
+ u32 q_size;
+ void *cq_base;
+ dma_addr_t cq_base_pa;
+ u32 cq_size;
+ void *sg_base;
+ dma_addr_t sg_base_pa;
+ u32 sg_size;
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
struct ionic_napi_stats napi_stats;
- struct ionic_q_stats *stats;
unsigned int flags;
struct dentry *dentry;
};
-struct ionic_qcqst {
- struct ionic_qcq *qcq;
- struct ionic_q_stats *stats;
-};
-
#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
-#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx)
-#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx)
+#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])
+#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index])
#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi)
#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq)
@@ -170,8 +163,10 @@ struct ionic_lif {
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
- struct ionic_qcqst *txqcqs;
- struct ionic_qcqst *rxqcqs;
+ struct ionic_qcq **txqcqs;
+ struct ionic_tx_stats *txqstats;
+ struct ionic_qcq **rxqcqs;
+ struct ionic_rx_stats *rxqstats;
u64 last_eid;
unsigned int neqs;
unsigned int nxqs;
@@ -212,12 +207,21 @@ struct ionic_lif {
struct work_struct tx_timeout_work;
};
-#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
-#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
-#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
-#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
-#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
-#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
+struct ionic_queue_params {
+ unsigned int nxqs;
+ unsigned int ntxq_descs;
+ unsigned int nrxq_descs;
+ unsigned int intr_split;
+};
+
+static inline void ionic_init_queue_params(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ qparam->nxqs = lif->nxqs;
+ qparam->ntxq_descs = lif->ntxq_descs;
+ qparam->nrxq_descs = lif->nrxq_descs;
+ qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
@@ -242,34 +246,33 @@ void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
-int ionic_lifs_alloc(struct ionic *ionic);
-void ionic_lifs_free(struct ionic *ionic);
-void ionic_lifs_deinit(struct ionic *ionic);
-int ionic_lifs_init(struct ionic *ionic);
-int ionic_lifs_register(struct ionic *ionic);
-void ionic_lifs_unregister(struct ionic *ionic);
+int ionic_lif_alloc(struct ionic *ionic);
+int ionic_lif_init(struct ionic_lif *lif);
+void ionic_lif_free(struct ionic_lif *lif);
+void ionic_lif_deinit(struct ionic_lif *lif);
+int ionic_lif_register(struct ionic_lif *lif);
+void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lif_ident);
-int ionic_lifs_size(struct ionic *ionic);
+int ionic_lif_size(struct ionic *ionic);
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
const u8 *key, const u32 *indir);
+int ionic_reconfigure_queues(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam);
-int ionic_open(struct net_device *netdev);
-int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
-
-static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
- struct ionic_txq_desc *desc, bool dbell)
+static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
{
- u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
- & IONIC_TXQ_DESC_NSGE_MASK);
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
+ u8 num_sg_elems;
- qcq->q.dbell_count += dbell;
+ q->dbell_count += dbell;
+ num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
+ & IONIC_TXQ_DESC_NSGE_MASK);
if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
- qcq->stats->tx.sg_cntr[num_sg_elems]++;
+ q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
}
static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
@@ -284,10 +287,9 @@ static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
}
#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
-#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++)
+#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++)
-#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \
- debug_stats_txq_post(qcq, txdesc, dbell)
+#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
debug_stats_napi_poll(qcq, work_done)
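
[Editor's note] With the stats arrays now owned by the lif, the q_to_tx_stats()/q_to_rx_stats() helpers above reduce to an array lookup keyed by the queue's index, which also lets the qcqs be freed and reallocated during reconfiguration without disturbing the counters. A small stand-alone model of that indexing (types simplified for the example):

#include <stdio.h>

#define MAX_QUEUES 4

struct tx_stats { unsigned long long pkts, bytes; };

struct lif { struct tx_stats txqstats[MAX_QUEUES]; };

struct queue { struct lif *lif; unsigned int index; };

/* Equivalent of the reworked q_to_tx_stats(): stats live in the lif's array,
 * keyed by the queue's index, instead of hanging off each qcq.
 */
#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])

int main(void)
{
	struct lif lif = { 0 };
	struct queue q = { .lif = &lif, .index = 2 };

	q_to_tx_stats(&q)->pkts++;
	q_to_tx_stats(&q)->bytes += 64;
	printf("q2: pkts=%llu bytes=%llu\n",
	       lif.txqstats[2].pkts, lif.txqstats[2].bytes);
	return 0;
}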
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index df5b9bcc3aba..cfb90bf605fe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -181,15 +181,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
- struct ionic_queue *adminq = &lif->adminqcq->q;
+ struct ionic_queue *q = &lif->adminqcq->q;
+ struct ionic_desc_info *desc_info;
spin_lock(&lif->adminq_lock);
- while (adminq->tail != adminq->head) {
- memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd));
- adminq->tail->cb = NULL;
- adminq->tail->cb_arg = NULL;
- adminq->tail = adminq->tail->next;
+ while (q->tail_idx != q->head_idx) {
+ desc_info = &q->info[q->tail_idx];
+ memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock(&lif->adminq_lock);
}
@@ -245,7 +247,8 @@ static void ionic_adminq_cb(struct ionic_queue *q,
static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_queue *adminq;
+ struct ionic_desc_info *desc_info;
+ struct ionic_queue *q;
int err = 0;
WARN_ON(in_interrupt());
@@ -253,10 +256,10 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (!lif->adminqcq)
return -EIO;
- adminq = &lif->adminqcq->q;
+ q = &lif->adminqcq->q;
spin_lock(&lif->adminq_lock);
- if (!ionic_q_has_space(adminq, 1)) {
+ if (!ionic_q_has_space(q, 1)) {
err = -ENOSPC;
goto err_out;
}
@@ -265,13 +268,14 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
- memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
+ desc_info = &q->info[q->head_idx];
+ memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
- ionic_q_post(adminq, true, ionic_adminq_cb, ctx);
+ ionic_q_post(q, true, ionic_adminq_cb, ctx);
err_out:
spin_unlock(&lif->adminq_lock);
@@ -301,32 +305,6 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
return ionic_adminq_check_err(lif, ctx, (remaining == 0));
}
-int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
- ionic_cq_done_cb done_cb, void *done_arg)
-{
- struct ionic_qcq *qcq = napi_to_qcq(napi);
- struct ionic_cq *cq = &qcq->cq;
- u32 work_done, flags = 0;
-
- work_done = ionic_cq_service(cq, budget, cb, done_cb, done_arg);
-
- if (work_done < budget && napi_complete_done(napi, work_done)) {
- flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
- }
-
- if (work_done || flags) {
- flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(cq->lif->ionic->idev.intr_ctrl,
- cq->bound_intr->index,
- work_done, flags);
- }
-
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
- return work_done;
-}
-
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
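
[Editor's note] The adminq flush above is the same tail-to-head walk as before, just expressed with indices: every descriptor still outstanding between tail_idx and head_idx is cleared and its callback dropped. A self-contained sketch of that loop (simplified types, not the driver's structures):

#include <stdio.h>
#include <string.h>

#define NUM_DESCS 8                 /* power of two, as ionic_q_init enforces */

struct desc_info {
	unsigned char cmd[16];          /* stand-in for the admin command body */
	void (*cb)(void *);
	void *cb_arg;
};

struct queue {
	struct desc_info info[NUM_DESCS];
	unsigned int head_idx, tail_idx, num_descs;
};

/* Drop every command that was posted but not yet completed. */
static void flush(struct queue *q)
{
	struct desc_info *desc_info;

	while (q->tail_idx != q->head_idx) {
		desc_info = &q->info[q->tail_idx];
		memset(desc_info->cmd, 0, sizeof(desc_info->cmd));
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
	}
}

int main(void)
{
	struct queue q = { .num_descs = NUM_DESCS, .head_idx = 3, .tail_idx = 0 };

	flush(&q);
	printf("tail caught up to head: %u == %u\n", q.tail_idx, q.head_idx);
	return 0;
}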
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 2a1885da58a6..ff20a2ac4c2f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -179,36 +179,28 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
static void ionic_get_lif_stats(struct ionic_lif *lif,
struct ionic_lif_sw_stats *stats)
{
- struct ionic_tx_stats *tstats;
- struct ionic_rx_stats *rstats;
+ struct ionic_tx_stats *txstats;
+ struct ionic_rx_stats *rxstats;
struct rtnl_link_stats64 ns;
- struct ionic_qcq *txqcq;
- struct ionic_qcq *rxqcq;
int q_num;
memset(stats, 0, sizeof(*stats));
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txqcq = lif_to_txqcq(lif, q_num);
- if (txqcq && txqcq->stats) {
- tstats = &txqcq->stats->tx;
- stats->tx_packets += tstats->pkts;
- stats->tx_bytes += tstats->bytes;
- stats->tx_tso += tstats->tso;
- stats->tx_tso_bytes += tstats->tso_bytes;
- stats->tx_csum_none += tstats->csum_none;
- stats->tx_csum += tstats->csum;
- }
-
- rxqcq = lif_to_rxqcq(lif, q_num);
- if (rxqcq && rxqcq->stats) {
- rstats = &rxqcq->stats->rx;
- stats->rx_packets += rstats->pkts;
- stats->rx_bytes += rstats->bytes;
- stats->rx_csum_none += rstats->csum_none;
- stats->rx_csum_complete += rstats->csum_complete;
- stats->rx_csum_error += rstats->csum_error;
- }
+ txstats = &lif->txqstats[q_num];
+ stats->tx_packets += txstats->pkts;
+ stats->tx_bytes += txstats->bytes;
+ stats->tx_tso += txstats->tso;
+ stats->tx_tso_bytes += txstats->tso_bytes;
+ stats->tx_csum_none += txstats->csum_none;
+ stats->tx_csum += txstats->csum;
+
+ rxstats = &lif->rxqstats[q_num];
+ stats->rx_packets += rxstats->pkts;
+ stats->rx_bytes += rxstats->bytes;
+ stats->rx_csum_none += rxstats->csum_none;
+ stats->rx_csum_complete += rxstats->csum_complete;
+ stats->rx_csum_error += rxstats->csum_error;
}
ionic_get_stats64(lif->netdev, &ns);
@@ -371,7 +363,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txstats = &lif_to_txstats(lif, q_num);
+ txstats = &lif->txqstats[q_num];
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
**buf = IONIC_READ_STAT64(txstats,
@@ -381,7 +373,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- txqcq = lif_to_txqcq(lif, q_num);
+ txqcq = lif->txqcqs[q_num];
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
@@ -405,7 +397,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- rxstats = &lif_to_rxstats(lif, q_num);
+ rxstats = &lif->rxqstats[q_num];
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
**buf = IONIC_READ_STAT64(rxstats,
@@ -415,7 +407,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- rxqcq = lif_to_rxqcq(lif, q_num);
+ rxqcq = lif->rxqcqs[q_num];
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index def65fee27b5..7225251c5563 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -22,7 +22,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
- DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
+ DEBUG_STATS_TXQ_POST(q, ring_dbell);
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
@@ -32,7 +32,7 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
- DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
+ DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -49,7 +49,7 @@ static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
struct sk_buff *skb;
netdev = lif->netdev;
- stats = q_to_rx_stats(q);
+ stats = &q->lif->rxqstats[q->index];
if (frags)
skb = napi_get_frags(&q_to_qcq(q)->napi);
@@ -235,14 +235,14 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return false;
/* check for empty queue */
- if (q->tail->index == q->head->index)
+ if (q->tail_idx == q->head_idx)
return false;
- desc_info = q->tail;
- if (desc_info->index != le16_to_cpu(comp->comp_index))
+ if (q->tail_idx != le16_to_cpu(comp->comp_index))
return false;
- q->tail = desc_info->next;
+ desc_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
@@ -266,40 +266,49 @@ void ionic_rx_flush(struct ionic_cq *cq)
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
-static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
- dma_addr_t *dma_addr)
+static int ionic_rx_page_alloc(struct ionic_queue *q,
+ struct ionic_page_info *page_info)
{
struct ionic_lif *lif = q->lif;
struct ionic_rx_stats *stats;
struct net_device *netdev;
struct device *dev;
- struct page *page;
netdev = lif->netdev;
dev = lif->ionic->dev;
stats = q_to_rx_stats(q);
- page = alloc_page(GFP_ATOMIC);
- if (unlikely(!page)) {
- net_err_ratelimited("%s: Page alloc failed on %s!\n",
+
+ if (unlikely(!page_info)) {
+ net_err_ratelimited("%s: %s invalid page_info in alloc\n",
+ netdev->name, q->name);
+ return -EINVAL;
+ }
+
+ page_info->page = dev_alloc_page();
+ if (unlikely(!page_info->page)) {
+ net_err_ratelimited("%s: %s page alloc failed\n",
netdev->name, q->name);
stats->alloc_err++;
- return NULL;
+ return -ENOMEM;
}
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, *dma_addr))) {
- __free_page(page);
- net_err_ratelimited("%s: DMA single map failed on %s!\n",
+ page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
+ put_page(page_info->page);
+ page_info->dma_addr = 0;
+ page_info->page = NULL;
+ net_err_ratelimited("%s: %s dma map failed\n",
netdev->name, q->name);
stats->dma_map_err++;
- return NULL;
+ return -EIO;
}
- return page;
+ return 0;
}
-static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
- dma_addr_t dma_addr)
+static void ionic_rx_page_free(struct ionic_queue *q,
+ struct ionic_page_info *page_info)
{
struct ionic_lif *lif = q->lif;
struct net_device *netdev;
@@ -308,15 +317,23 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
netdev = lif->netdev;
dev = lif->ionic->dev;
- if (unlikely(!page)) {
- net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+ if (unlikely(!page_info)) {
+ net_err_ratelimited("%s: %s invalid page_info in free\n",
netdev->name, q->name);
return;
}
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(!page_info->page)) {
+ net_err_ratelimited("%s: %s invalid page in free\n",
+ netdev->name, q->name);
+ return;
+ }
+
+ dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(page);
+ put_page(page_info->page);
+ page_info->dma_addr = 0;
+ page_info->page = NULL;
}
void ionic_rx_fill(struct ionic_queue *q)
@@ -338,7 +355,7 @@ void ionic_rx_fill(struct ionic_queue *q)
for (i = ionic_q_space_avail(q); i; i--) {
remain_len = len;
- desc_info = q->head;
+ desc_info = &q->info[q->head_idx];
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
page_info = &desc_info->pages[0];
@@ -352,8 +369,7 @@ void ionic_rx_fill(struct ionic_queue *q)
desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->npages = nfrags;
- page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
- if (unlikely(!page_info->page)) {
+ if (unlikely(ionic_rx_page_alloc(q, page_info))) {
desc->addr = 0;
desc->len = 0;
return;
@@ -370,8 +386,7 @@ void ionic_rx_fill(struct ionic_queue *q)
continue;
sg_elem = &sg_desc->elems[j];
- page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
- if (unlikely(!page_info->page)) {
+ if (unlikely(ionic_rx_page_alloc(q, page_info))) {
sg_elem->addr = 0;
sg_elem->len = 0;
return;
@@ -387,7 +402,7 @@ void ionic_rx_fill(struct ionic_queue *q)
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
- q->dbval | q->head->index);
+ q->dbval | q->head_idx);
}
static void ionic_rx_fill_cb(void *arg)
@@ -397,25 +412,23 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *cur;
+ struct ionic_desc_info *desc_info;
struct ionic_rxq_desc *desc;
unsigned int i;
+ u16 idx;
- for (cur = q->tail; cur != q->head; cur = cur->next) {
- desc = cur->desc;
+ idx = q->tail_idx;
+ while (idx != q->head_idx) {
+ desc_info = &q->info[idx];
+ desc = desc_info->desc;
desc->addr = 0;
desc->len = 0;
- for (i = 0; i < cur->npages; i++) {
- if (likely(cur->pages[i].page)) {
- ionic_rx_page_free(q, cur->pages[i].page,
- cur->pages[i].dma_addr);
- cur->pages[i].page = NULL;
- cur->pages[i].dma_addr = 0;
- }
- }
+ for (i = 0; i < desc_info->npages; i++)
+ ionic_rx_page_free(q, &desc_info->pages[i]);
- cur->cb_arg = NULL;
+ desc_info->cb_arg = NULL;
+ idx = (idx + 1) & (q->num_descs - 1);
}
}
@@ -500,7 +513,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
- txcq = &lif->txqcqs[qi].qcq->cq;
+ txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
ionic_tx_service, NULL, NULL);
@@ -615,6 +628,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
struct ionic_txq_comp *comp = cq_info->cq_desc;
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
+ u16 index;
if (!color_match(comp->color, cq->done_color))
return false;
@@ -623,12 +637,13 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
- desc_info = q->tail;
- q->tail = desc_info->next;
- ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
+ desc_info = &q->info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+ ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- } while (desc_info->index != le16_to_cpu(comp->comp_index));
+ } while (index != le16_to_cpu(comp->comp_index));
return true;
}
@@ -648,16 +663,14 @@ void ionic_tx_flush(struct ionic_cq *cq)
void ionic_tx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
- int done = 0;
/* walk the not completed tx entries, if any */
- while (q->head != q->tail) {
- desc_info = q->tail;
- q->tail = desc_info->next;
+ while (q->head_idx != q->tail_idx) {
+ desc_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- done++;
}
}
@@ -741,8 +754,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
struct ionic_txq_sg_elem **elem)
{
- struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
- struct ionic_txq_desc *desc = q->head->desc;
+ struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
*elem = sg_desc->elems;
return desc;
@@ -751,13 +764,13 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *abort = q->head;
+ struct ionic_desc_info *rewind_desc_info;
struct device *dev = q->lif->ionic->dev;
- struct ionic_desc_info *rewind = abort;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
unsigned int frag_left = 0;
unsigned int offset = 0;
+ u16 abort = q->head_idx;
unsigned int len_left;
dma_addr_t desc_addr;
unsigned int hdrlen;
@@ -765,6 +778,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
unsigned int seglen;
u64 total_bytes = 0;
u64 total_pkts = 0;
+ u16 rewind = abort;
unsigned int left;
unsigned int len;
unsigned int mss;
@@ -909,19 +923,20 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
err_out_abort:
- while (rewind->desc != q->head->desc) {
- ionic_tx_clean(q, rewind, NULL, NULL);
- rewind = rewind->next;
+ while (rewind != q->head_idx) {
+ rewind_desc_info = &q->info[rewind];
+ ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
+ rewind = (rewind + 1) & (q->num_descs - 1);
}
- q->head = abort;
+ q->head_idx = abort;
return -ENOMEM;
}
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
@@ -960,8 +975,8 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
@@ -995,7 +1010,7 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
- struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
+ struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
unsigned int len_left = skb->len - skb_headlen(skb);
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -1104,9 +1119,9 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (unlikely(!lif_to_txqcq(lif, queue_index)))
+ if (unlikely(queue_index >= lif->nxqs))
queue_index = 0;
- q = lif_to_txq(lif, queue_index);
+ q = &lif->txqcqs[queue_index]->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (ndescs < 0)
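Editor's aside, not part of the patch: in the ionic_tx_tso() hunk above, the error path now records the starting head index in a u16 "abort" and, under err_out_abort, walks from "rewind" up to the current head_idx cleaning each descriptor before restoring q->head_idx. The standalone sketch below (hypothetical, heavily simplified) mimics that record-then-rewind rollback on a small power-of-two ring.

/*
 * Illustrative sketch only: post several descriptors, and on a mid-way
 * failure roll back to the remembered head index, as ionic_tx_tso() does.
 */
#include <stdio.h>
#include <stdbool.h>

#define NUM_DESCS 8

struct queue {
	unsigned int head_idx;
	bool in_use[NUM_DESCS];
};

static unsigned int q_next(unsigned int idx)
{
	return (idx + 1) & (NUM_DESCS - 1);
}

/* post "count" descriptors; fail part-way through when asked to */
static int post_descs(struct queue *q, int count, int fail_at)
{
	unsigned int abort = q->head_idx;	/* remembered rewind point */
	unsigned int rewind;

	for (int i = 0; i < count; i++) {
		if (i == fail_at)
			goto err_out_abort;
		q->in_use[q->head_idx] = true;
		q->head_idx = q_next(q->head_idx);
	}
	return 0;

err_out_abort:
	for (rewind = abort; rewind != q->head_idx; rewind = q_next(rewind))
		q->in_use[rewind] = false;	/* clean what was posted */
	q->head_idx = abort;			/* restore the queue head */
	return -1;
}

int main(void)
{
	struct queue q = { 0 };

	post_descs(&q, 4, -1);		/* succeeds, head advances by 4 */
	if (post_descs(&q, 3, 2))	/* fails after 2 posts, rolls back */
		printf("rolled back, head_idx=%u\n", q.head_idx);
	return 0;
}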
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 8f743d80760b..4366c7a8de95 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -80,7 +80,7 @@ config QED
select CRC8
select NET_DEVLINK
help
- This enables the support for ...
+ This enables support for the Marvell FastLinQ family of adapters.
config QED_LL2
bool
@@ -100,7 +100,8 @@ config QEDE
depends on QED
imply PTP_1588_CLOCK
help
- This enables the support for ...
+ This enables the Ethernet driver for the Marvell FastLinQ
+ family of adapters.
config QED_RDMA
bool
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index f947b105cf14..8251755ec18c 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -9,6 +9,7 @@ qed-y := \
qed_dcbx.o \
qed_debug.o \
qed_dev.o \
+ qed_devlink.o \
qed_hw.o \
qed_init_fw_funcs.o \
qed_init_ops.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b2a7b53ee760..f34b25a79449 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -807,6 +807,7 @@ struct qed_dev {
struct qed_llh_info *p_llh_info;
/* Linux specific here */
+ struct qed_dev_info common_dev_info;
struct qede_dev *edev;
struct pci_dev *pdev;
u32 flags;
@@ -849,7 +850,6 @@ struct qed_dev {
u32 rdma_max_srq_sge;
u16 tunn_feature_mask;
- struct devlink *dl;
bool iwarp_cmt;
};
@@ -981,6 +981,7 @@ void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
+int qed_recovery_process(struct qed_dev *cdev);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
enum qed_hw_err_type err_type);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b8f076e4e6b8..f7f08e6a3acf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3973,6 +3973,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
struct qed_mcp_link_speed_params *ext_speed;
struct qed_mcp_link_capabilities *p_caps;
struct qed_mcp_link_params *link;
+ int i;
/* Read global nvm_cfg address */
nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@ -4290,6 +4291,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
__set_bit(QED_DEV_CAP_ROCE,
&p_hwfn->hw_info.device_capabilities);
+ /* Read device serial number information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, serial_number);
+
+ for (i = 0; i < 4; i++)
+ p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);
+
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
new file mode 100644
index 000000000000..cf7f4da68e69
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/qed/qed_if.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_devlink.h"
+
+enum qed_devlink_param_id {
+ QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+};
+
+struct qed_fw_fatal_ctx {
+ enum qed_hw_err_type err_type;
+};
+
+int qed_report_fatal_error(struct devlink *devlink, enum qed_hw_err_type err_type)
+{
+ struct qed_devlink *qdl = devlink_priv(devlink);
+ struct qed_fw_fatal_ctx fw_fatal_ctx = {
+ .err_type = err_type,
+ };
+
+ if (qdl->fw_reporter)
+ devlink_health_report(qdl->fw_reporter,
+ "Fatal error occurred", &fw_fatal_ctx);
+
+ return 0;
+}
+
+static int
+qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_fw_fatal_ctx *fw_fatal_ctx = priv_ctx;
+ struct qed_dev *cdev = qdl->cdev;
+ u32 dbg_data_buf_size;
+ u8 *p_dbg_data_buf;
+ int err;
+
+ /* Having a context means this was a dump request after a fatal
+ * error, so we enable extra debugging while gathering the dump,
+ * just in case.
+ */
+ cdev->print_dbg_data = fw_fatal_ctx ? true : false;
+
+ dbg_data_buf_size = qed_dbg_all_data_size(cdev);
+ p_dbg_data_buf = vzalloc(dbg_data_buf_size);
+ if (!p_dbg_data_buf) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for a debug data buffer\n");
+ return -ENOMEM;
+ }
+
+ err = qed_dbg_all_data(cdev, p_dbg_data_buf);
+ if (err) {
+ DP_NOTICE(cdev, "Failed to obtain debug data\n");
+ vfree(p_dbg_data_buf);
+ return err;
+ }
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "dump_data",
+ p_dbg_data_buf, dbg_data_buf_size);
+
+ vfree(p_dbg_data_buf);
+
+ return err;
+}
+
+static int
+qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_dev *cdev = qdl->cdev;
+
+ qed_recovery_process(cdev);
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = qed_fw_fatal_reporter_recover,
+ .dump = qed_fw_fatal_reporter_dump,
+};
+
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 1200000
+
+void qed_fw_reporters_create(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+
+ dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
+ QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ if (IS_ERR(dl->fw_reporter)) {
+ DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
+ PTR_ERR(dl->fw_reporter));
+ dl->fw_reporter = NULL;
+ }
+}
+
+void qed_fw_reporters_destroy(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+ struct devlink_health_reporter *rep;
+
+ rep = dl->fw_reporter;
+
+ if (!IS_ERR_OR_NULL(rep))
+ devlink_health_reporter_destroy(rep);
+}
+
+static int qed_dl_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ ctx->val.vbool = cdev->iwarp_cmt;
+
+ return 0;
+}
+
+static int qed_dl_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ cdev->iwarp_cmt = ctx->val.vbool;
+
+ return 0;
+}
+
+static const struct devlink_param qed_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ qed_dl_param_get, qed_dl_param_set, NULL),
+};
+
+static int qed_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qed_dl = devlink_priv(devlink);
+ struct qed_dev *cdev = qed_dl->cdev;
+ struct qed_dev_info *dev_info;
+ char buf[100];
+ int err;
+
+ dev_info = &cdev->common_dev_info;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ memcpy(buf, cdev->hwfns[0].hw_info.part_num, sizeof(cdev->hwfns[0].hw_info.part_num));
+ buf[sizeof(cdev->hwfns[0].hw_info.part_num)] = 0;
+
+ if (buf[0]) {
+ err = devlink_info_board_serial_number_put(req, buf);
+ if (err)
+ return err;
+ }
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_3),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_2),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_1),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_0));
+
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ dev_info->fw_major,
+ dev_info->fw_minor,
+ dev_info->fw_rev,
+ dev_info->fw_eng);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_APP, buf);
+}
+
+static const struct devlink_ops qed_dl_ops = {
+ .info_get = qed_devlink_info_get,
+};
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev)
+{
+ union devlink_param_value value;
+ struct qed_devlink *qdevlink;
+ struct devlink *dl;
+ int rc;
+
+ dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink));
+ if (!dl)
+ return ERR_PTR(-ENOMEM);
+
+ qdevlink = devlink_priv(dl);
+ qdevlink->cdev = cdev;
+
+ rc = devlink_register(dl, &cdev->pdev->dev);
+ if (rc)
+ goto err_free;
+
+ rc = devlink_params_register(dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+ if (rc)
+ goto err_unregister;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(dl,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ value);
+
+ devlink_params_publish(dl);
+ cdev->iwarp_cmt = false;
+
+ qed_fw_reporters_create(dl);
+
+ return dl;
+
+err_unregister:
+ devlink_unregister(dl);
+
+err_free:
+ devlink_free(dl);
+
+ return ERR_PTR(rc);
+}
+
+void qed_devlink_unregister(struct devlink *devlink)
+{
+ if (!devlink)
+ return;
+
+ qed_fw_reporters_destroy(devlink);
+
+ devlink_params_unregister(devlink, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.h b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
new file mode 100644
index 000000000000..ccc7d1d1bfd4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#ifndef _QED_DEVLINK_H
+#define _QED_DEVLINK_H
+
+#include <linux/qed/qed_if.h>
+#include <net/devlink.h>
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev);
+void qed_devlink_unregister(struct devlink *devlink);
+
+void qed_fw_reporters_create(struct devlink *devlink);
+void qed_fw_reporters_destroy(struct devlink *devlink);
+
+int qed_report_fatal_error(struct devlink *dl, enum qed_hw_err_type err_type);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f39f629242a1..5b149ceff6b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -39,6 +39,7 @@
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
+#include "qed_devlink.h"
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
@@ -478,6 +479,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
}
dev_info->mtu = hw_info->mtu;
+ cdev->common_dev_info = *dev_info;
return 0;
}
@@ -510,107 +512,6 @@ static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
return 0;
}
-struct qed_devlink {
- struct qed_dev *cdev;
-};
-
-enum qed_devlink_param_id {
- QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
-};
-
-static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- ctx->val.vbool = cdev->iwarp_cmt;
-
- return 0;
-}
-
-static int qed_dl_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- cdev->iwarp_cmt = ctx->val.vbool;
-
- return 0;
-}
-
-static const struct devlink_param qed_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
- "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- qed_dl_param_get, qed_dl_param_set, NULL),
-};
-
-static const struct devlink_ops qed_dl_ops;
-
-static int qed_devlink_register(struct qed_dev *cdev)
-{
- union devlink_param_value value;
- struct qed_devlink *qed_dl;
- struct devlink *dl;
- int rc;
-
- dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
- if (!dl)
- return -ENOMEM;
-
- qed_dl = devlink_priv(dl);
-
- cdev->dl = dl;
- qed_dl->cdev = cdev;
-
- rc = devlink_register(dl, &cdev->pdev->dev);
- if (rc)
- goto err_free;
-
- rc = devlink_params_register(dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
- if (rc)
- goto err_unregister;
-
- value.vbool = false;
- devlink_param_driverinit_value_set(dl,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
- value);
-
- devlink_params_publish(dl);
- cdev->iwarp_cmt = false;
-
- return 0;
-
-err_unregister:
- devlink_unregister(dl);
-
-err_free:
- cdev->dl = NULL;
- devlink_free(dl);
-
- return rc;
-}
-
-static void qed_devlink_unregister(struct qed_dev *cdev)
-{
- if (!cdev->dl)
- return;
-
- devlink_params_unregister(cdev->dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
-
- devlink_unregister(cdev->dl);
- devlink_free(cdev->dl);
-}
-
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
struct qed_probe_params *params)
@@ -639,12 +540,6 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
}
DP_INFO(cdev, "PCI init completed successfully\n");
- rc = qed_devlink_register(cdev);
- if (rc) {
- DP_INFO(cdev, "Failed to register devlink.\n");
- goto err2;
- }
-
rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
if (rc) {
DP_ERR(cdev, "hw prepare failed\n");
@@ -674,8 +569,6 @@ static void qed_remove(struct qed_dev *cdev)
qed_set_power_state(cdev, PCI_D3hot);
- qed_devlink_unregister(cdev);
-
qed_free_cdev(cdev);
}
@@ -2924,7 +2817,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-static int qed_recovery_process(struct qed_dev *cdev)
+int qed_recovery_process(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt;
@@ -3112,6 +3005,9 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .devlink_register = qed_devlink_register,
+ .devlink_unregister = qed_devlink_unregister,
+ .report_fatal_error = qed_report_fatal_error,
.dbg_all_data = &qed_dbg_all_data,
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index a4bcde522cdf..4394a4d77224 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1151,7 +1151,6 @@ qed_rdma_destroy_cq(void *rdma_cxt,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
p_ramrod_res =
- (struct rdma_destroy_cq_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct rdma_destroy_cq_output_params),
&ramrod_res_phys, GFP_KERNEL);
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 803c1fcca8ad..3efc5899f656 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -172,6 +172,7 @@ struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
struct pci_dev *pdev;
+ struct devlink *devlink;
u32 dp_module;
u8 dp_level;
@@ -263,6 +264,7 @@ struct qede_dev {
struct bpf_prog *xdp_prog;
+ enum qed_hw_err_type last_err_type;
unsigned long err_flags;
#define QEDE_ERR_IS_HANDLED 31
#define QEDE_ERR_ATTN_CLR_EN 0
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 140a392a81bb..20d2296beb79 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1170,10 +1170,23 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
rc = -ENOMEM;
goto err2;
}
+
+ edev->devlink = qed_ops->common->devlink_register(cdev);
+ if (IS_ERR(edev->devlink)) {
+ DP_NOTICE(edev, "Cannot register devlink\n");
+ edev->devlink = NULL;
+ /* Go on, we can live without devlink */
+ }
} else {
struct net_device *ndev = pci_get_drvdata(pdev);
edev = netdev_priv(ndev);
+
+ if (edev->devlink) {
+ struct qed_devlink *qdl = devlink_priv(edev->devlink);
+
+ qdl->cdev = cdev;
+ }
edev->cdev = cdev;
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
@@ -1225,7 +1238,10 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
err4:
qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
- free_netdev(edev->ndev);
+ if (mode != QEDE_PROBE_RECOVERY)
+ free_netdev(edev->ndev);
+ else
+ edev->cdev = NULL;
err2:
qed_ops->common->slowpath_stop(cdev);
err1:
@@ -1296,6 +1312,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
+
+ if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
+ qed_ops->common->devlink_unregister(edev->devlink);
+ edev->devlink = NULL;
+ }
qed_ops->common->remove(cdev);
edev->cdev = NULL;
@@ -2455,7 +2476,8 @@ static int qede_close(struct net_device *ndev)
qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
- edev->ops->common->update_drv_state(edev->cdev, false);
+ if (edev->cdev)
+ edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
@@ -2577,19 +2599,12 @@ static void qede_atomic_hw_err_handler(struct qede_dev *edev)
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
- struct qed_dev *cdev = edev->cdev;
-
DP_NOTICE(edev,
"Generic sleepable HW error handling started - err_flags 0x%lx\n",
edev->err_flags);
- /* Trigger a recovery process.
- * This is placed in the sleep requiring section just to make
- * sure it is the last one, and that all the other operations
- * were completed.
- */
- if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
- edev->ops->common->recovery_process(cdev);
+ if (edev->devlink)
+ edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
@@ -2643,6 +2658,7 @@ static void qede_schedule_hw_err_handler(void *dev,
return;
}
+ edev->last_err_type = err_type;
qede_set_hw_err_flags(edev, err_type);
qede_atomic_hw_err_handler(edev);
set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 375a844cd27c..362b4f5c162c 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -167,7 +167,7 @@ static void qca_tty_wakeup(struct serdev_device *serdev)
schedule_work(&qca->tx_work);
}
-static struct serdev_device_ops qca_serdev_ops = {
+static const struct serdev_device_ops qca_serdev_ops = {
.receive_buf = qca_tty_receive,
.write_wakeup = qca_tty_wakeup,
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index fc9e6626db55..9e4e6a883877 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -617,7 +617,6 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- unsigned irq_enabled:1;
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
dma_addr_t counters_phys_addr;
@@ -1280,12 +1279,10 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
RTL_W32(tp, IntrMask_8125, 0);
else
RTL_W16(tp, IntrMask, 0);
- tp->irq_enabled = 0;
}
static void rtl_irq_enable(struct rtl8169_private *tp)
{
- tp->irq_enabled = 1;
if (rtl_is_8125(tp))
RTL_W32(tp, IntrMask_8125, tp->irq_mask);
else
@@ -4541,8 +4538,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
struct rtl8169_private *tp = dev_instance;
u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
- !(status & tp->irq_mask))
+ if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -4596,10 +4592,8 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
rtl_tx(dev, tp, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done))
rtl_irq_enable(tp);
- }
return work_done;
}
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index df89d09b253e..f684296df871 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -162,7 +162,7 @@ static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
}
/* MDIO bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = ravb_set_mdc,
.set_mdio_dir = ravb_set_mdio_dir,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f45331ed90b0..586642c33d2b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1202,7 +1202,7 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
}
/* mdio bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = sh_mdc_ctrl,
.set_mdio_dir = sh_mmd_ctrl,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4b0b2cf026a5..0b4bcac53f18 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3955,7 +3955,6 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.start_stats = efx_port_dummy_op_void,
.pull_stats = efx_port_dummy_op_void,
.stop_stats = efx_port_dummy_op_void,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
.reconfigure_mac = efx_ef10_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
@@ -4066,7 +4065,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
.reconfigure_mac = efx_ef10_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4ffda7782f68..12a91c559aa2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -50,8 +50,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
return 1; /* cycle on/off once per second */
}
- efx->type->set_id_led(efx, mode);
- return 0;
+ return efx_mcdi_set_id_led(efx, mode);
}
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4002f9a3ae90..a48a931ad0e8 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -863,13 +863,8 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
@@ -918,6 +913,8 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
+#else
+ (void) rx_ev_other_err;
#endif
if (efx->net_dev->features & NETIF_F_RXALL)
@@ -2592,7 +2589,6 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
unsigned int filter_idx;
- struct efx_farch_filter_spec *spec;
int rc;
table_id = efx_farch_filter_id_table_id(filter_id);
@@ -2604,7 +2600,6 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
if (filter_idx >= table->size)
return -ENOENT;
down_write(&state->lock);
- spec = &table->spec[filter_idx];
rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
up_write(&state->lock);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 5467819aef6e..be6bfd6b7ec7 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1868,10 +1868,9 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
return efx_mcdi_exit_assertion(efx);
}
-void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
- int rc;
BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
@@ -1881,8 +1880,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL);
}
static int efx_mcdi_reset_func(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 658cf345420d..8aed65018964 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -348,7 +348,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_handle_assertion(struct efx_nic *efx);
-void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
int *id_out);
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 062462a13847..338ebb0402be 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1217,7 +1217,6 @@ struct efx_udp_tunnel {
* @start_stats: Start the regular fetching of statistics
* @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
- * @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
* @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
@@ -1362,7 +1361,6 @@ struct efx_nic_type {
void (*start_stats)(struct efx_nic *efx);
void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
- void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
int (*reconfigure_port)(struct efx_nic *efx);
void (*prepare_enable_fc_tx)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index e71d6d37a317..34b9c7d50c4e 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -67,7 +67,7 @@ static const char *const efx_interrupt_mode_names[] = {
STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
/**
- * efx_loopback_state - persistent state during a loopback selftest
+ * struct efx_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in efx_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a7ea630bb5e6..16347a6d0c47 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -994,7 +994,6 @@ const struct efx_nic_type siena_a0_nic_type = {
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
.reconfigure_mac = siena_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index d950b312c418..51cd7dca91cd 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -374,13 +374,15 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
- ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_iounmap;
ep->tx_ring = ring_space;
ep->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_unmap_tx;
ep->rx_ring = ring_space;
@@ -493,9 +495,11 @@ out:
return ret;
err_out_unmap_rx:
- pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
+ ep->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
+ ep->tx_ring_dma);
err_out_iounmap:
pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
@@ -918,8 +922,10 @@ static void epic_init_ring(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
- skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
ep->rx_ring[i].rxstatus = DescOwn;
}
ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -955,8 +961,9 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->cur_tx % TX_RING_SIZE;
ep->tx_skbuff[entry] = skb;
- ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data, skb->len,
+ DMA_TO_DEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = 0x100000; /* No interrupt */
} else if (free_count == TX_QUEUE_LEN/2) {
@@ -1036,8 +1043,9 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
/* Free the original skb. */
skb = ep->tx_skbuff[entry];
- pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&ep->pci_dev->dev,
+ ep->tx_ring[entry].bufaddr, skb->len,
+ DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
}
@@ -1178,20 +1186,21 @@ static int epic_rx(struct net_device *dev, int budget)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put(skb = ep->rx_skbuff[entry], pkt_len);
ep->rx_skbuff[entry] = NULL;
}
@@ -1213,8 +1222,10 @@ static int epic_rx(struct net_device *dev, int budget)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
- skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
work_done++;
}
/* AV: shouldn't we add a barrier here? */
@@ -1294,8 +1305,8 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
- pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
+ ep->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
@@ -1305,8 +1316,8 @@ static int epic_close(struct net_device *dev)
ep->tx_skbuff[i] = NULL;
if (!skb)
continue;
- pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(skb);
}
@@ -1502,8 +1513,10 @@ static void epic_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
+ ep->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
+ ep->rx_ring_dma);
unregister_netdev(dev);
pci_iounmap(pdev, ep->ioaddr);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 42bef04d65ba..c1dab009415d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -497,8 +497,9 @@ static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd)
if (skb) {
BUG_ON(!pd->tx_buffers[i].mapping);
- pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->tx_buffers[i].mapping, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
@@ -530,8 +531,9 @@ static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd)
dev_kfree_skb_any(pd->rx_buffers[i].skb);
if (pd->rx_buffers[i].mapping)
- pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->rx_buffers[i].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
pd->rx_ring[i].status = 0;
pd->rx_ring[i].length = 0;
@@ -749,8 +751,8 @@ static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index,
dev->stats.rx_packets++;
dev->stats.rx_bytes += packet_length;
- pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pd->pdev->dev, pd->rx_buffers[index].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
pd->rx_buffers[index].mapping = 0;
skb = pd->rx_buffers[index].skb;
@@ -782,9 +784,9 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
if (unlikely(!skb))
return -ENOMEM;
- mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ mapping = dma_map_single(&pd->pdev->dev, skb_tail_pointer(skb),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pd->pdev->dev, mapping)) {
dev_kfree_skb_any(skb);
netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
return -ENOMEM;
@@ -901,8 +903,10 @@ static void smsc9420_complete_tx(struct net_device *dev)
BUG_ON(!pd->tx_buffers[index].skb);
BUG_ON(!pd->tx_buffers[index].mapping);
- pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping,
- pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->tx_buffers[index].mapping,
+ pd->tx_buffers[index].skb->len,
+ DMA_TO_DEVICE);
pd->tx_buffers[index].mapping = 0;
dev_kfree_skb_any(pd->tx_buffers[index].skb);
@@ -932,9 +936,9 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
BUG_ON(pd->tx_buffers[index].skb);
BUG_ON(pd->tx_buffers[index].mapping);
- mapping = pci_map_single(pd->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ mapping = dma_map_single(&pd->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pd->pdev->dev, mapping)) {
netif_warn(pd, tx_err, pd->dev,
"pci_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
@@ -1522,7 +1526,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_netdev_2;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
netdev_err(dev, "No usable DMA configuration, aborting\n");
goto out_free_regions_3;
}
@@ -1540,10 +1544,9 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd = netdev_priv(dev);
/* pci descriptors are created in the PCI consistent area */
- pd->rx_ring = pci_alloc_consistent(pdev,
- sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE +
- sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
- &pd->rx_dma_addr);
+ pd->rx_ring = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ &pd->rx_dma_addr, GFP_KERNEL);
if (!pd->rx_ring)
goto out_free_io_4;
@@ -1599,8 +1602,9 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_free_dmadesc_5:
- pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
- (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ pd->rx_ring, pd->rx_dma_addr);
out_free_io_4:
iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET);
out_free_regions_3:
@@ -1632,8 +1636,9 @@ static void smsc9420_remove(struct pci_dev *pdev)
BUG_ON(!pd->tx_ring);
BUG_ON(!pd->rx_ring);
- pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
- (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ pd->rx_ring, pd->rx_dma_addr);
iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 9a47c5aec91a..53f14c5a9e02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,7 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
- select MDIO_XPCS
+ select PCS_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
@@ -209,6 +209,16 @@ config DWMAC_IMX8
device driver. This driver is used for i.MX8 series like
iMX8MP/iMX8DXL GMAC ethernet controller.
+config DWMAC_INTEL_PLAT
+ tristate "Intel dwmac support"
+ depends on OF && COMMON_CLK
+ depends on STMMAC_ETH
+ help
+ Support for Ethernet controllers on Intel SoCs.
+
+ This selects the Intel platform-specific glue layer support for
+ the stmmac device driver. This driver is used for the Intel
+ Keem Bay SoC.
endif
config DWMAC_INTEL
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 295615ab36a7..24e6145d4eae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
+obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
stmmac-platform-objs:= stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 127f75862962..acc5e3fc1c2f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -15,7 +15,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
-#include <linux/mdio-xpcs.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/module.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
new file mode 100644
index 000000000000..ccac7bf2a9d3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Intel DWMAC platform driver
+ *
+ * Copyright(C) 2020 Intel Corporation
+ */
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+struct intel_dwmac {
+ struct device *dev;
+ struct clk *tx_clk;
+ const struct intel_dwmac_data *data;
+};
+
+struct intel_dwmac_data {
+ void (*fix_mac_speed)(void *priv, unsigned int speed);
+ unsigned long ptp_ref_clk_rate;
+ unsigned long tx_clk_rate;
+ bool tx_clk_en;
+};
+
+static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct intel_dwmac *dwmac = priv;
+ unsigned long rate;
+ int ret;
+
+ rate = clk_get_rate(dwmac->tx_clk);
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "Invalid speed\n");
+ break;
+ }
+
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret)
+ dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
+}
+
+static const struct intel_dwmac_data kmb_data = {
+ .fix_mac_speed = kmb_eth_fix_mac_speed,
+ .ptp_ref_clk_rate = 200000000,
+ .tx_clk_rate = 125000000,
+ .tx_clk_en = true,
+};
+
+static const struct of_device_id intel_eth_plat_match[] = {
+ { .compatible = "intel,keembay-dwmac", .data = &kmb_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, intel_eth_plat_match);
+
+static int intel_eth_plat_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct of_device_id *match;
+ struct intel_dwmac *dwmac;
+ unsigned long rate;
+ int ret;
+
+ plat_dat = priv->plat;
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->dev = &pdev->dev;
+ dwmac->tx_clk = NULL;
+
+ match = of_match_device(intel_eth_plat_match, &pdev->dev);
+ if (match && match->data) {
+ dwmac->data = (const struct intel_dwmac_data *)match->data;
+
+ if (dwmac->data->fix_mac_speed)
+ plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
+
+ /* Enable TX clock */
+ if (dwmac->data->tx_clk_en) {
+ dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(dwmac->tx_clk))
+ goto err_remove_config_dt;
+
+ clk_prepare_enable(dwmac->tx_clk);
+
+ /* Check and configure TX clock rate */
+ rate = clk_get_rate(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_rate &&
+ rate != dwmac->data->tx_clk_rate) {
+ rate = dwmac->data->tx_clk_rate;
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set tx_clk\n");
+ return ret;
+ }
+ }
+ }
+
+ /* Check and configure PTP ref clock rate */
+ rate = clk_get_rate(plat_dat->clk_ptp_ref);
+ if (dwmac->data->ptp_ref_clk_rate &&
+ rate != dwmac->data->ptp_ref_clk_rate) {
+ rate = dwmac->data->ptp_ref_clk_rate;
+ ret = clk_set_rate(plat_dat->clk_ptp_ref, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set clk_ptp_ref\n");
+ return ret;
+ }
+ }
+ }
+
+ plat_dat->bsp_priv = dwmac;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret) {
+ if (dwmac->tx_clk)
+ clk_disable_unprepare(dwmac->tx_clk);
+
+ goto err_remove_config_dt;
+ }
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int intel_eth_plat_remove(struct platform_device *pdev)
+{
+ struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+
+ if (dwmac->tx_clk)
+ clk_disable_unprepare(dwmac->tx_clk);
+
+ return ret;
+}
+
+static struct platform_driver intel_eth_plat_driver = {
+ .probe = intel_eth_plat_probe,
+ .remove = intel_eth_plat_remove,
+ .driver = {
+ .name = "intel-eth-plat",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = intel_eth_plat_match,
+ },
+};
+module_platform_driver(intel_eth_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel DWMAC platform driver");
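
The glue driver above only registers hooks; the stmmac core invokes plat->fix_mac_speed() with bsp_priv whenever the link comes up at a new speed, at which point kmb_eth_fix_mac_speed() retunes the TX clock. A minimal sketch of that core-side call pattern follows; stmmac_notify_speed() is an illustrative helper name, not the exact stmmac call site:

#include <linux/stmmac.h>

/* Hedged sketch: how the stmmac core consumes the hook and bsp_priv set up
 * in intel_eth_plat_probe(). stmmac_notify_speed() is an illustrative name;
 * the real call sites live in the stmmac link-up path.
 */
static void stmmac_notify_speed(struct plat_stmmacenet_data *plat,
				unsigned int speed)
{
	if (plat->fix_mac_speed)
		plat->fix_mac_speed(plat->bsp_priv, speed);
}
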
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 8deb943ca5de..58f142ee78a3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2965,9 +2965,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* It is guaranteed that the returned buffer will be at least
* PAGE_SIZE aligned.
*/
- gp->init_block = (struct gem_init_block *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
- &gp->gblock_dvma, GFP_KERNEL);
+ gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
+ &gp->gblock_dvma, GFP_KERNEL);
if (!gp->init_block) {
pr_err("Cannot allocate init block, aborting\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c59a289e428c..75056c14b161 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -83,6 +83,8 @@ struct am65_cpts_regs {
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17)
+
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
@@ -748,42 +750,23 @@ EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
- u8 *msgtype, *data = skb->data;
- unsigned int offset = 0;
- __be16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
if (ptp_class == PTP_CLASS_NONE)
return 0;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return 0;
- if (unlikely(ptp_class & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ *mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
- *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+ *mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
return 1;
}
@@ -1005,7 +988,9 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
am65_cpts_set_add_val(cpts);
- am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
+ AM65_CPTS_CONTROL_64MODE |
+ AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
control);
am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 7c55d395de2c..d1fc7955d422 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -446,41 +446,22 @@ static const struct ptp_clock_info cpts_info = {
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
- u8 *msgtype, *data = skb->data;
- unsigned int offset = 0;
- u16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
if (ptp_class == PTP_CLASS_NONE)
return 0;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return 0;
- if (unlikely(ptp_class & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
- seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- *mtype_seqid = (*msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
- *mtype_seqid |= (ntohs(*seqid) & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
+ *mtype_seqid = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
+ *mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
return 1;
}
@@ -528,6 +509,11 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
int ret;
u64 ns;
+ /* cpts_rx_timestamp() is called before eth_type_trans(), so
+ * skb MAC Hdr properties are not configured yet. Hence need to
+ * reset skb MAC header here
+ */
+ skb_reset_mac_header(skb);
ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
if (!ret)
return;
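
Both CPTS conversions above drop the hand-rolled VLAN/IPv4/IPv6/L2 offset arithmetic in favour of the ptp_parse_header() and ptp_get_msgtype() helpers from <linux/ptp_classify.h>. A hedged sketch of the shared pattern; extract_ptp_ids() is an illustrative wrapper, not kernel code:

#include <linux/skbuff.h>
#include <linux/ptp_classify.h>

/* Illustrative wrapper around the helpers used by am65-cpts and cpts above. */
static int extract_ptp_ids(struct sk_buff *skb, u8 *msgtype, u16 *seqid)
{
	unsigned int type = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (type == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, type);	/* NULL if no valid PTP header */
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, type);	/* hides the v1 vs. v2 layout */
	*seqid = ntohs(hdr->sequence_id);
	return 0;
}
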
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8e47d0112e5d..611722eafed8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -928,8 +928,8 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
-static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
- struct genl_info *info)
+static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
@@ -956,12 +956,12 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
if (pctx && pctx_tid)
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
if (!pctx)
pctx = pctx_tid;
@@ -974,13 +974,13 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
- return 0;
+ return pctx;
}
pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
sock_hold(sk);
pctx->sk = sk;
@@ -1018,7 +1018,7 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
break;
}
- return 0;
+ return pctx;
}
static void pdp_context_free(struct rcu_head *head)
@@ -1036,9 +1036,12 @@ static void pdp_context_delete(struct pdp_ctx *pctx)
call_rcu(&pctx->rcu_head, pdp_context_free);
}
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
+
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
unsigned int version;
+ struct pdp_ctx *pctx;
struct gtp_dev *gtp;
struct sock *sk;
int err;
@@ -1068,7 +1071,6 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
}
rtnl_lock();
- rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
if (!gtp) {
@@ -1088,10 +1090,15 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
- err = gtp_pdp_add(gtp, sk, info);
+ pctx = gtp_pdp_add(gtp, sk, info);
+ if (IS_ERR(pctx)) {
+ err = PTR_ERR(pctx);
+ } else {
+ gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
+ err = 0;
+ }
out_unlock:
- rcu_read_unlock();
rtnl_unlock();
return err;
}
@@ -1159,6 +1166,7 @@ static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+ gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
pdp_context_delete(pctx);
out_unlock:
@@ -1168,6 +1176,14 @@ out_unlock:
static struct genl_family gtp_genl_family;
+enum gtp_multicast_groups {
+ GTP_GENL_MCGRP,
+};
+
+static const struct genl_multicast_group gtp_genl_mcgrps[] = {
+ [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
+};
+
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct pdp_ctx *pctx)
{
@@ -1205,6 +1221,26 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
+ 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+ return ret;
+}
+
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
struct pdp_ctx *pctx = NULL;
@@ -1335,6 +1371,8 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.module = THIS_MODULE,
.ops = gtp_genl_ops,
.n_ops = ARRAY_SIZE(gtp_genl_ops),
+ .mcgrps = gtp_genl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
};
static int __net_init gtp_net_init(struct net *net)
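
Returning the new or updated pdp_ctx through ERR_PTR() lets gtp_genl_new_pdp() both detect failure and hand the context straight to gtp_tunnel_notify() for the new multicast group. A generic, hedged sketch of the idiom; struct object and the function names are illustrative only, not GTP code:

#include <linux/err.h>
#include <linux/slab.h>

struct object { int id; };

/* Error is encoded in the returned pointer, as gtp_pdp_add() now does. */
static struct object *make_object(int id)
{
	struct object *obj;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->id = id;
	return obj;
}

static int use_object(int id)
{
	struct object *obj = make_object(id);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* recover the negative errno */

	/* ...use obj, e.g. notify listeners about it... */
	kfree(obj);
	return 0;
}
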
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 55115cfb2972..407fee841a9a 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -10,7 +10,6 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_wakeup.h>
-#include <linux/notifier.h>
#include "ipa_version.h"
#include "gsi.h"
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5bca94c99006..60b7d93bb834 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -684,6 +684,13 @@ static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
[IFLA_IPVLAN_FLAGS] = { .type = NLA_U16 },
};
+static struct net *ipvlan_get_link_net(const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return dev_net(ipvlan->phy_dev);
+}
+
static struct rtnl_link_ops ipvlan_link_ops = {
.kind = "ipvlan",
.priv_size = sizeof(struct ipvl_dev),
@@ -691,6 +698,7 @@ static struct rtnl_link_ops ipvlan_link_ops = {
.setup = ipvlan_link_setup,
.newlink = ipvlan_link_new,
.dellink = ipvlan_link_delete,
+ .get_link_net = ipvlan_get_link_net,
};
int ipvlan_link_register(struct rtnl_link_ops *ops)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 9159846b8b93..124045cbcda3 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1611,7 +1611,7 @@ static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
+ [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
.len = MACSEC_KEYID_LEN, },
[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
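
The MACsec policy change above swaps the open-coded { .type = NLA_MIN_LEN, .len = 4 } initializer for the NLA_POLICY_MIN_LEN() helper, which expresses the same constraint: the attribute payload must be at least four bytes. A small, hedged sketch of declaring such a policy entry; MY_ATTR_PN and my_policy are illustrative names:

#include <net/netlink.h>

enum { MY_ATTR_UNSPEC, MY_ATTR_PN, __MY_ATTR_MAX };
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

/* Rejects any MY_ATTR_PN attribute shorter than 4 bytes during validation. */
static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
};
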
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
new file mode 100644
index 000000000000..1299880dfe74
--- /dev/null
+++ b/drivers/net/mdio/Kconfig
@@ -0,0 +1,241 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MDIO Layer Configuration
+#
+
+menuconfig MDIO_DEVICE
+ tristate "MDIO bus device drivers"
+ help
+ MDIO devices and driver infrastructure code.
+
+if MDIO_DEVICE
+
+config MDIO_BUS
+ tristate
+ default m if PHYLIB=m
+ default MDIO_DEVICE
+ help
+ This internal symbol is used for link time dependencies and it
+ reflects whether the mdio_bus/mdio_device code is built as a
+ loadable module or built-in.
+
+if MDIO_BUS
+
+config MDIO_DEVRES
+ tristate
+
+config MDIO_SUN4I
+ tristate "Allwinner sun4i MDIO interface support"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the Allwinner SoC that have an EMAC (A10,
+ A12, A10s, etc.)
+
+config MDIO_XGENE
+ tristate "APM X-Gene SoC MDIO bus controller"
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+ This module provides a driver for the MDIO busses found in the
+ APM X-Gene SoCs.
+
+config MDIO_ASPEED
+ tristate "ASPEED MDIO bus controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM
+ help
+ This module provides a driver for the independent MDIO bus
+ controllers found in the ASPEED AST2600 SoC. This is a driver for the
+ third revision of the ASPEED MDIO register interface - the first two
+ revisions are the "old" and "new" interfaces found in the AST2400 and
+ AST2500, embedded in the MAC. For legacy reasons, FTGMAC100 driver
+ continues to drive the embedded MDIO controller for the AST2400 and
+ AST2500 SoCs, so say N if AST2600 support is not required.
+
+config MDIO_BITBANG
+ tristate "Bitbanged MDIO buses"
+ help
+ This module implements the MDIO bus protocol in software,
+ for use by low level drivers that export the ability to
+ drive the relevant pins.
+
+ If in doubt, say N.
+
+config MDIO_BCM_IPROC
+ tristate "Broadcom iProc MDIO bus controller"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM && OF_MDIO
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for the MDIO busses found in the
+ Broadcom iProc SoCs.
+
+config MDIO_BCM_UNIMAC
+ tristate "Broadcom UniMAC MDIO bus controller"
+ depends on HAS_IOMEM
+ help
+ This module provides a driver for the Broadcom UniMAC MDIO busses.
+ This hardware can be found in the Broadcom GENET Ethernet MAC
+ controllers as well as some Broadcom Ethernet switches such as the
+ Starfighter 2 switches.
+
+config MDIO_CAVIUM
+ tristate
+
+config MDIO_GPIO
+ tristate "GPIO lib-based bitbanged MDIO buses"
+ depends on MDIO_BITBANG
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Supports GPIO lib-based MDIO busses.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mdio-gpio.
+
+config MDIO_HISI_FEMAC
+ tristate "Hisilicon FEMAC MDIO bus controller"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This module provides a driver for the MDIO busses found in the
+ Hisilicon SoCs that have a Fast Ethernet MAC.
+
+config MDIO_I2C
+ tristate
+ depends on I2C
+ help
+ Support I2C based PHYs. This provides a MDIO bus bridged
+ to I2C to allow PHYs connected in I2C mode to be accessed
+ using the existing infrastructure.
+
+ This is library mode.
+
+config MDIO_MVUSB
+ tristate "Marvell USB to MDIO Adapter"
+ depends on USB
+ select MDIO_DEVRES
+ help
+ A USB to MDIO converter present on development boards for
+ Marvell's Link Street family of Ethernet switches.
+
+config MDIO_MSCC_MIIM
+ tristate "Microsemi MIIM interface support"
+ depends on HAS_IOMEM
+ select MDIO_DEVRES
+ help
+ This driver supports the MIIM (MDIO) interface found in the network
+ switches of the Microsemi SoCs; it is recommended to switch on
+ CONFIG_HIGH_RES_TIMERS
+
+config MDIO_MOXART
+ tristate "MOXA ART MDIO interface support"
+ depends on ARCH_MOXART || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the MOXA ART SoC
+
+config MDIO_OCTEON
+ tristate "Octeon and some ThunderX SOCs MDIO buses"
+ depends on (64BIT && OF_MDIO) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select MDIO_CAVIUM
+ help
+ This module provides a driver for the Octeon and ThunderX MDIO
+ buses. It is required by the Octeon and ThunderX ethernet device
+ drivers on some systems.
+
+config MDIO_IPQ4019
+ tristate "Qualcomm IPQ4019 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This driver supports the MDIO interface found in Qualcomm
+ IPQ40xx series SoCs.
+
+config MDIO_IPQ8064
+ tristate "Qualcomm IPQ8064 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ depends on MFD_SYSCON
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the IPQ8064 SoC
+
+config MDIO_THUNDER
+ tristate "ThunderX SOCs MDIO buses"
+ depends on 64BIT
+ depends on PCI
+ select MDIO_CAVIUM
+ help
+ This driver supports the MDIO interfaces found on Cavium
+ ThunderX SoCs when the MDIO bus device appears as a PCI
+ device.
+
+comment "MDIO Multiplexers"
+
+config MDIO_BUS_MUX
+ tristate
+ depends on OF_MDIO
+ help
+ This module provides a driver framework for MDIO bus
+ multiplexers which connect one of several child MDIO busses
+ to a parent bus. Switching between child busses is done by
+ device specific drivers.
+
+config MDIO_BUS_MUX_MESON_G12A
+ tristate "Amlogic G12a based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the Amlogic G12A SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
+config MDIO_BUS_MUX_BCM_IPROC
+ tristate "Broadcom iProc based MDIO bus multiplexers"
+ depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for MDIO bus multiplexers found in
+ iProc based Broadcom SoCs. This multiplexer connects one of several
+ child MDIO bus to a parent bus. Buses could be internal as well as
+ external and selection logic lies inside the same multiplexer.
+
+config MDIO_BUS_MUX_GPIO
+ tristate "GPIO controlled MDIO bus multiplexers"
+ depends on OF_GPIO && OF_MDIO
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via GPIO lines. The multiplexer connects one of
+ several child MDIO busses to a parent bus. Child bus
+ selection is under the control of GPIO lines.
+
+config MDIO_BUS_MUX_MULTIPLEXER
+ tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
+ depends on OF_MDIO
+ select MULTIPLEXER
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexer
+ that is controlled via the kernel multiplexer subsystem. The
+ bus multiplexer connects one of several child MDIO busses to
+ a parent bus. Child bus selection is under the control of
+ the kernel multiplexer subsystem.
+
+config MDIO_BUS_MUX_MMIOREG
+ tristate "MMIO device-controlled MDIO bus multiplexers"
+ depends on OF_MDIO && HAS_IOMEM
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via a simple memory-mapped device, like an FPGA.
+ The multiplexer connects one of several child MDIO busses to a
+ parent bus. Child bus selection is under the control of one of
+ the FPGA's registers.
+
+ Currently, only 8/16/32 bits registers are supported.
+
+
+endif
+endif
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
new file mode 100644
index 000000000000..14d1beb633c9
--- /dev/null
+++ b/drivers/net/mdio/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Linux MDIO bus drivers
+
+obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
+obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
+obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
+obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
+obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
+obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
+obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
+obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
+obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
+obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
+obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
+obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+
+obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
+obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
+obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index cad820568f75..cad820568f75 100644
--- a/drivers/net/phy/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/mdio/mdio-bcm-iproc.c
index 77fc970cdfde..77fc970cdfde 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-bcm-iproc.c
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index fbd36891ee64..fbd36891ee64 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275c8e73..5136275c8e73 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c
index 1afd6fc1a351..1afd6fc1a351 100644
--- a/drivers/net/phy/mdio-cavium.c
+++ b/drivers/net/mdio/mdio-cavium.c
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/mdio/mdio-cavium.h
index a2245d436f5d..a2245d436f5d 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/mdio/mdio-cavium.h
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 1b00235d7dc5..1b00235d7dc5 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/mdio/mdio-hisi-femac.c
index f231c2fbb1de..f231c2fbb1de 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/mdio/mdio-hisi-femac.c
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 0746e2cc39ae..09200a70b315 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -10,10 +10,9 @@
* of their settings.
*/
#include <linux/i2c.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
-#include "mdio-i2c.h"
-
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
* specified to be present in SFP modules. These correspond with PHY
diff --git a/drivers/net/phy/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 1ce81ff2f41d..1ce81ff2f41d 100644
--- a/drivers/net/phy/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
diff --git a/drivers/net/phy/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 1bd18857e1c5..1bd18857e1c5 100644
--- a/drivers/net/phy/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c
index b72c6d185175..b72c6d185175 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/mdio/mdio-moxart.c
diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 11f583fd4611..11f583fd4611 100644
--- a/drivers/net/phy/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 42fb5f166136..42fb5f166136 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index 10a758fdc9e6..10a758fdc9e6 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index bf86c9c7a288..bf86c9c7a288 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index d1a8780e24d8..d1a8780e24d8 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
diff --git a/drivers/net/phy/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index d6564381aa3e..d6564381aa3e 100644
--- a/drivers/net/phy/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index 6a1d3540210b..6a1d3540210b 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
diff --git a/drivers/net/phy/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index d5eabddfdf51..d5eabddfdf51 100644
--- a/drivers/net/phy/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index d1e1009d51af..d1e1009d51af 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/mdio/mdio-sun4i.c
index f798de3276dc..f798de3276dc 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/mdio/mdio-sun4i.c
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 3d7eda99d34e..3d7eda99d34e 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 34990eaa3298..461207cdf5d6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -11,6 +11,7 @@
#include <linux/efi.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
@@ -18,7 +19,6 @@
#include <linux/prefetch.h>
#include <linux/phy.h>
#include <net/ip.h>
-#include "mdio-xgene.h"
static bool xgene_mdio_status;
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
new file mode 100644
index 000000000000..074fb3f5db18
--- /dev/null
+++ b/drivers/net/pcs/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# PCS Layer Configuration
+#
+
+menu "PCS device drivers"
+
+config PCS_XPCS
+ tristate "Synopsys DesignWare XPCS controller"
+ select MDIO_BUS
+ depends on MDIO_DEVICE
+ help
+ This module provides helper functions for Synopsys DesignWare XPCS
+ controllers.
+
+config PCS_LYNX
+ tristate
+ help
+ This module provides helpers to phylink for managing the Lynx PCS
+ which is part of the Layerscape and QorIQ Ethernet SERDES.
+
+endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
new file mode 100644
index 000000000000..c23146755972
--- /dev/null
+++ b/drivers/net/pcs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Linux PCS drivers
+
+obj-$(CONFIG_PCS_XPCS) += pcs-xpcs.o
+obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
new file mode 100644
index 000000000000..c43d97682083
--- /dev/null
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP
+ * Lynx PCS MDIO helpers
+ */
+
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+#include <linux/pcs-lynx.h>
+
+#define SGMII_CLOCK_PERIOD_NS 8 /* PCS is clocked at 125 MHz */
+#define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS))
+
+#define SGMII_AN_LINK_TIMER_NS 1600000 /* defined by SGMII spec */
+
+#define LINK_TIMER_LO 0x12
+#define LINK_TIMER_HI 0x13
+#define IF_MODE 0x14
+#define IF_MODE_SGMII_EN BIT(0)
+#define IF_MODE_USE_SGMII_AN BIT(1)
+#define IF_MODE_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+#define IF_MODE_SPEED_MSK GENMASK(3, 2)
+#define IF_MODE_HALF_DUPLEX BIT(4)
+
+enum sgmii_speed {
+ SGMII_SPEED_10 = 0,
+ SGMII_SPEED_100 = 1,
+ SGMII_SPEED_1000 = 2,
+ SGMII_SPEED_2500 = 2,
+};
+
+#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
+
+static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int status, lpa;
+
+ status = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_BMSR);
+ if (status < 0)
+ return;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ state->an_complete = !!(status & MDIO_AN_STAT1_COMPLETE);
+ if (!state->link || !state->an_complete)
+ return;
+
+ lpa = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0)
+ return;
+
+ phylink_decode_usxgmii_word(state, lpa);
+}
+
+static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int bmsr, lpa;
+
+ bmsr = mdiobus_read(bus, addr, MII_BMSR);
+ lpa = mdiobus_read(bus, addr, MII_LPA);
+ if (bmsr < 0 || lpa < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(bmsr & BMSR_LSTATUS);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+ if (!state->link)
+ return;
+
+ state->speed = SPEED_2500;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->duplex = DUPLEX_FULL;
+}
+
+static void lynx_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ phylink_mii_c22_pcs_get_state(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ lynx_pcs_get_state_2500basex(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ lynx_pcs_get_state_usxgmii(lynx->mdio, state);
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&lynx->mdio->dev,
+ "mode=%s/%s/%s link=%u an_enabled=%u an_complete=%u\n",
+ phy_modes(state->interface),
+ phy_speed_to_str(state->speed),
+ phy_duplex_to_str(state->duplex),
+ state->link, state->an_enabled, state->an_complete);
+}
+
+static int lynx_pcs_config_sgmii(struct mdio_device *pcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ u16 if_mode;
+ int err;
+
+ if_mode = IF_MODE_SGMII_EN;
+ if (mode == MLO_AN_INBAND) {
+ u32 link_timer;
+
+ if_mode |= IF_MODE_USE_SGMII_AN;
+
+ /* Adjust link timer for SGMII */
+ link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
+ mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
+ }
+ err = mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
+ if_mode);
+ if (err)
+ return err;
+
+ return phylink_mii_c22_pcs_config(pcs, mode, PHY_INTERFACE_MODE_SGMII,
+ advertising);
+}
+
+static int lynx_pcs_config_usxgmii(struct mdio_device *pcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+
+ if (!phylink_autoneg_inband(mode)) {
+ dev_err(&pcs->dev, "USXGMII only supports in-band AN for now\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Configure device ability for the USXGMII Replicator */
+ return mdiobus_c45_write(bus, addr, MDIO_MMD_VEND2, MII_ADVERTISE,
+ MDIO_USXGMII_10G | MDIO_USXGMII_LINK |
+ MDIO_USXGMII_FULL_DUPLEX |
+ ADVERTISE_SGMII | ADVERTISE_LPACK);
+}
+
+static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t ifmode,
+ const unsigned long *advertising,
+ bool permit)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (ifmode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return lynx_pcs_config_sgmii(lynx->mdio, mode, advertising);
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(&lynx->mdio->dev,
+ "AN not supported on 3.125GHz SerDes lane\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return lynx_pcs_config_usxgmii(lynx->mdio, mode, advertising);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
+ int speed, int duplex)
+{
+ struct mii_bus *bus = pcs->bus;
+ u16 if_mode = 0, sgmii_speed;
+ int addr = pcs->addr;
+
+ /* The PCS needs to be configured manually only
+ * when not operating on in-band mode
+ */
+ if (mode == MLO_AN_INBAND)
+ return;
+
+ if (duplex == DUPLEX_HALF)
+ if_mode |= IF_MODE_HALF_DUPLEX;
+
+ switch (speed) {
+ case SPEED_1000:
+ sgmii_speed = SGMII_SPEED_1000;
+ break;
+ case SPEED_100:
+ sgmii_speed = SGMII_SPEED_100;
+ break;
+ case SPEED_10:
+ sgmii_speed = SGMII_SPEED_10;
+ break;
+ case SPEED_UNKNOWN:
+ /* Silently don't do anything */
+ return;
+ default:
+ dev_err(&pcs->dev, "Invalid PCS speed %d\n", speed);
+ return;
+ }
+ if_mode |= IF_MODE_SPEED(sgmii_speed);
+
+ mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
+ if_mode);
+}
+
+/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
+ * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
+ * auto-negotiation of any link parameters. Electrically it is compatible with
+ * a single lane of XAUI.
+ * The hardware reference manual wants to call this mode SGMII, but it isn't
+ * really, since the fundamental features of SGMII:
+ * - Downgrading the link speed by duplicating symbols
+ * - Auto-negotiation
+ * are not there.
+ * The speed is configured at 1000 in the IF_MODE because the clock frequency
+ * is actually given by a PLL configured in the Reset Configuration Word (RCW).
+ * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
+ * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
+ * lower link speed on line side, the system-side interface remains fixed at
+ * 2500 Mbps and we do rate adaptation through pause frames.
+ */
+static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
+ unsigned int mode,
+ int speed, int duplex)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ u16 if_mode = 0;
+
+ if (mode == MLO_AN_INBAND) {
+ dev_err(&pcs->dev, "AN not supported for 2500BaseX\n");
+ return;
+ }
+
+ if (duplex == DUPLEX_HALF)
+ if_mode |= IF_MODE_HALF_DUPLEX;
+ if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500);
+
+ mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
+ if_mode);
+}
+
+static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ lynx_pcs_link_up_sgmii(lynx->mdio, mode, speed, duplex);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ lynx_pcs_link_up_2500basex(lynx->mdio, mode, speed, duplex);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ /* At the moment, only in-band AN is supported for USXGMII
+ * so nothing to do in link_up
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
+ .pcs_get_state = lynx_pcs_get_state,
+ .pcs_config = lynx_pcs_config,
+ .pcs_link_up = lynx_pcs_link_up,
+};
+
+struct lynx_pcs *lynx_pcs_create(struct mdio_device *mdio)
+{
+ struct lynx_pcs *lynx_pcs;
+
+ lynx_pcs = kzalloc(sizeof(*lynx_pcs), GFP_KERNEL);
+ if (!lynx_pcs)
+ return NULL;
+
+ lynx_pcs->mdio = mdio;
+ lynx_pcs->pcs.ops = &lynx_pcs_phylink_ops;
+ lynx_pcs->pcs.poll = true;
+
+ return lynx_pcs;
+}
+EXPORT_SYMBOL(lynx_pcs_create);
+
+void lynx_pcs_destroy(struct lynx_pcs *pcs)
+{
+ kfree(pcs);
+}
+EXPORT_SYMBOL(lynx_pcs_destroy);
+
+MODULE_LICENSE("Dual BSD/GPL");
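
Drivers that consume this helper are expected to create the PCS around the mdio_device that addresses it and hand the embedded phylink_pcs to phylink. A hedged consumer sketch, assuming the phylink_set_pcs() helper from this generation of phylink; struct mac_priv, pcs_mdiodev and phylink are placeholder names for whatever the MAC driver actually uses:

#include <linux/phylink.h>
#include <linux/pcs-lynx.h>

struct mac_priv {
	struct mdio_device *pcs_mdiodev;	/* MDIO address of the Lynx PCS */
	struct phylink *phylink;
	struct lynx_pcs *pcs;
};

static int mac_attach_lynx_pcs(struct mac_priv *priv)
{
	priv->pcs = lynx_pcs_create(priv->pcs_mdiodev);
	if (!priv->pcs)
		return -ENOMEM;

	/* phylink will now drive the lynx_pcs_phylink_ops defined above */
	phylink_set_pcs(priv->phylink, &priv->pcs->pcs);
	return 0;
}
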
diff --git a/drivers/net/phy/mdio-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 0d66a8ba7eb6..1aa9903d602e 100644
--- a/drivers/net/phy/mdio-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -7,8 +7,8 @@
*/
#include <linux/delay.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
-#include <linux/mdio-xpcs.h>
#include <linux/phylink.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 726e4b240e7e..698bea312adc 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,246 +3,6 @@
# PHY Layer Configuration
#
-menuconfig MDIO_DEVICE
- tristate "MDIO bus device drivers"
- help
- MDIO devices and driver infrastructure code.
-
-if MDIO_DEVICE
-
-config MDIO_BUS
- tristate
- default m if PHYLIB=m
- default MDIO_DEVICE
- help
- This internal symbol is used for link time dependencies and it
- reflects whether the mdio_bus/mdio_device code is built as a
- loadable module or built-in.
-
-if MDIO_BUS
-
-config MDIO_DEVRES
- tristate
-
-config MDIO_ASPEED
- tristate "ASPEED MDIO bus controller"
- depends on ARCH_ASPEED || COMPILE_TEST
- depends on OF_MDIO && HAS_IOMEM
- help
- This module provides a driver for the independent MDIO bus
- controllers found in the ASPEED AST2600 SoC. This is a driver for the
- third revision of the ASPEED MDIO register interface - the first two
- revisions are the "old" and "new" interfaces found in the AST2400 and
- AST2500, embedded in the MAC. For legacy reasons, FTGMAC100 driver
- continues to drive the embedded MDIO controller for the AST2400 and
- AST2500 SoCs, so say N if AST2600 support is not required.
-
-config MDIO_BCM_IPROC
- tristate "Broadcom iProc MDIO bus controller"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_IOMEM && OF_MDIO
- default ARCH_BCM_IPROC
- help
- This module provides a driver for the MDIO busses found in the
- Broadcom iProc SoC's.
-
-config MDIO_BCM_UNIMAC
- tristate "Broadcom UniMAC MDIO bus controller"
- depends on HAS_IOMEM
- help
- This module provides a driver for the Broadcom UniMAC MDIO busses.
- This hardware can be found in the Broadcom GENET Ethernet MAC
- controllers as well as some Broadcom Ethernet switches such as the
- Starfighter 2 switches.
-
-config MDIO_BITBANG
- tristate "Bitbanged MDIO buses"
- help
- This module implements the MDIO bus protocol in software,
- for use by low level drivers that export the ability to
- drive the relevant pins.
-
- If in doubt, say N.
-
-config MDIO_BUS_MUX
- tristate
- depends on OF_MDIO
- help
- This module provides a driver framework for MDIO bus
- multiplexers which connect one of several child MDIO busses
- to a parent bus. Switching between child busses is done by
- device specific drivers.
-
-config MDIO_BUS_MUX_BCM_IPROC
- tristate "Broadcom iProc based MDIO bus multiplexers"
- depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
- select MDIO_BUS_MUX
- default ARCH_BCM_IPROC
- help
- This module provides a driver for MDIO bus multiplexers found in
- iProc based Broadcom SoCs. This multiplexer connects one of several
- child MDIO bus to a parent bus. Buses could be internal as well as
- external and selection logic lies inside the same multiplexer.
-
-config MDIO_BUS_MUX_GPIO
- tristate "GPIO controlled MDIO bus multiplexers"
- depends on OF_GPIO && OF_MDIO
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via GPIO lines. The multiplexer connects one of
- several child MDIO busses to a parent bus. Child bus
- selection is under the control of GPIO lines.
-
-config MDIO_BUS_MUX_MESON_G12A
- tristate "Amlogic G12a based MDIO bus multiplexer"
- depends on ARCH_MESON || COMPILE_TEST
- depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
- select MDIO_BUS_MUX
- default m if ARCH_MESON
- help
- This module provides a driver for the MDIO multiplexer/glue of
- the amlogic g12a SoC. The multiplexers connects either the external
- or the internal MDIO bus to the parent bus.
-
-config MDIO_BUS_MUX_MMIOREG
- tristate "MMIO device-controlled MDIO bus multiplexers"
- depends on OF_MDIO && HAS_IOMEM
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via a simple memory-mapped device, like an FPGA.
- The multiplexer connects one of several child MDIO busses to a
- parent bus. Child bus selection is under the control of one of
- the FPGA's registers.
-
- Currently, only 8/16/32 bits registers are supported.
-
-config MDIO_BUS_MUX_MULTIPLEXER
- tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
- depends on OF_MDIO
- select MULTIPLEXER
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexer
- that is controlled via the kernel multiplexer subsystem. The
- bus multiplexer connects one of several child MDIO busses to
- a parent bus. Child bus selection is under the control of
- the kernel multiplexer subsystem.
-
-config MDIO_CAVIUM
- tristate
-
-config MDIO_GPIO
- tristate "GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG
- depends on GPIOLIB || COMPILE_TEST
- help
- Supports GPIO lib-based MDIO busses.
-
- To compile this driver as a module, choose M here: the module
- will be called mdio-gpio.
-
-config MDIO_HISI_FEMAC
- tristate "Hisilicon FEMAC MDIO bus controller"
- depends on HAS_IOMEM && OF_MDIO
- help
- This module provides a driver for the MDIO busses found in the
- Hisilicon SoC that have an Fast Ethernet MAC.
-
-config MDIO_I2C
- tristate
- depends on I2C
- help
- Support I2C based PHYs. This provides a MDIO bus bridged
- to I2C to allow PHYs connected in I2C mode to be accessed
- using the existing infrastructure.
-
- This is library mode.
-
-config MDIO_IPQ4019
- tristate "Qualcomm IPQ4019 MDIO interface support"
- depends on HAS_IOMEM && OF_MDIO
- help
- This driver supports the MDIO interface found in Qualcomm
- IPQ40xx series Soc-s.
-
-config MDIO_IPQ8064
- tristate "Qualcomm IPQ8064 MDIO interface support"
- depends on HAS_IOMEM && OF_MDIO
- depends on MFD_SYSCON
- help
- This driver supports the MDIO interface found in the network
- interface units of the IPQ8064 SoC
-
-config MDIO_MOXART
- tristate "MOXA ART MDIO interface support"
- depends on ARCH_MOXART || COMPILE_TEST
- help
- This driver supports the MDIO interface found in the network
- interface units of the MOXA ART SoC
-
-config MDIO_MSCC_MIIM
- tristate "Microsemi MIIM interface support"
- depends on HAS_IOMEM
- select MDIO_DEVRES
- help
- This driver supports the MIIM (MDIO) interface found in the network
- switches of the Microsemi SoCs; it is recommended to switch on
- CONFIG_HIGH_RES_TIMERS
-
-config MDIO_MVUSB
- tristate "Marvell USB to MDIO Adapter"
- depends on USB
- select MDIO_DEVRES
- help
- A USB to MDIO converter present on development boards for
- Marvell's Link Street family of Ethernet switches.
-
-config MDIO_OCTEON
- tristate "Octeon and some ThunderX SOCs MDIO buses"
- depends on (64BIT && OF_MDIO) || COMPILE_TEST
- depends on HAS_IOMEM
- select MDIO_CAVIUM
- help
- This module provides a driver for the Octeon and ThunderX MDIO
- buses. It is required by the Octeon and ThunderX ethernet device
- drivers on some systems.
-
-config MDIO_SUN4I
- tristate "Allwinner sun4i MDIO interface support"
- depends on ARCH_SUNXI || COMPILE_TEST
- help
- This driver supports the MDIO interface found in the network
- interface units of the Allwinner SoC that have an EMAC (A10,
- A12, A10s, etc.)
-
-config MDIO_THUNDER
- tristate "ThunderX SOCs MDIO buses"
- depends on 64BIT
- depends on PCI
- select MDIO_CAVIUM
- help
- This driver supports the MDIO interfaces found on Cavium
- ThunderX SoCs when the MDIO bus device appears as a PCI
- device.
-
-config MDIO_XGENE
- tristate "APM X-Gene SoC MDIO bus controller"
- depends on ARCH_XGENE || COMPILE_TEST
- help
- This module provides a driver for the MDIO busses found in the
- APM X-Gene SoC's.
-
-config MDIO_XPCS
- tristate "Synopsys DesignWare XPCS controller"
- help
- This module provides helper functions for Synopsys DesignWare XPCS
- controllers.
-
-endif
-endif
-
config PHYLINK
tristate
depends on NETDEVICES
@@ -285,7 +45,15 @@ config LED_TRIGGER_PHY
for any speed known to the PHY.
-comment "MII PHY device drivers"
+config FIXED_PHY
+ tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB
+ select SWPHY
+ help
+ Adds the platform "fixed" MDIO Bus to cover the boards that use
+ PHYs that are not connected to the real MDIO bus.
+
+ Currently tested with mpc866ads and mpc8349e-mitx.
config SFP
tristate "SFP cage support"
@@ -293,6 +61,19 @@ config SFP
depends on HWMON || HWMON=n
select MDIO_I2C
+comment "MII PHY device drivers"
+
+config AMD_PHY
+ tristate "AMD PHYs"
+ help
+ Currently supports the am79c874
+
+config MESON_GXL_PHY
+ tristate "Amlogic Meson GXL Internal PHY"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ Currently has a driver for the Amlogic Meson GXL Internal PHY
+
config ADIN_PHY
tristate "Analog Devices Industrial Ethernet PHYs"
help
@@ -302,11 +83,6 @@ config ADIN_PHY
- ADIN1300 - Robust,Industrial, Low Latency 10/100/1000 Gigabit
Ethernet PHY
-config AMD_PHY
- tristate "AMD PHYs"
- help
- Currently supports the am79c874
-
config AQUANTIA_PHY
tristate "Aquantia PHYs"
help
@@ -318,6 +94,24 @@ config AX88796B_PHY
Currently supports the Asix Electronics PHY found in the X-Surf 100
AX88796B package.
+config BROADCOM_PHY
+ tristate "Broadcom 54XX PHYs"
+ select BCM_NET_PHYLIB
+ help
+ Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
+ BCM5481, BCM54810 and BCM5482 PHYs.
+
+config BCM54140_PHY
+ tristate "Broadcom BCM54140 PHY"
+ depends on PHYLIB
+ depends on HWMON || HWMON=n
+ select BCM_NET_PHYLIB
+ help
+ Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
+
+ This driver also supports the hardware monitoring of this PHY and
+ exposes voltage and temperature sensors.
+
config BCM63XX_PHY
tristate "Broadcom 63xx SOCs internal PHY"
depends on BCM63XX || COMPILE_TEST
@@ -332,6 +126,12 @@ config BCM7XXX_PHY
Currently supports the BCM7366, BCM7439, BCM7445, and
40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
+config BCM84881_PHY
+ tristate "Broadcom BCM84881 PHY"
+ depends on PHYLIB
+ help
+ Support the Broadcom BCM84881 PHY.
+
config BCM87XX_PHY
tristate "Broadcom BCM8706 and BCM8727 PHYs"
help
@@ -353,30 +153,6 @@ config BCM_CYGNUS_PHY
config BCM_NET_PHYLIB
tristate
-config BROADCOM_PHY
- tristate "Broadcom PHYs"
- select BCM_NET_PHYLIB
- help
- Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
- BCM5481, BCM54810 and BCM5482 PHYs.
-
-config BCM54140_PHY
- tristate "Broadcom BCM54140 PHY"
- depends on PHYLIB
- depends on HWMON || HWMON=n
- select BCM_NET_PHYLIB
- help
- Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
-
- This driver also supports the hardware monitoring of this PHY and
- exposes voltage and temperature sensors.
-
-config BCM84881_PHY
- tristate "Broadcom BCM84881 PHY"
- depends on PHYLIB
- help
- Support the Broadcom BCM84881 PHY.
-
config CICADA_PHY
tristate "Cicada PHYs"
help
@@ -392,48 +168,16 @@ config DAVICOM_PHY
help
Currently supports dm9161e and dm9131
-config DP83822_PHY
- tristate "Texas Instruments DP83822/825/826 PHYs"
- help
- Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
- DP83826C and DP83826NC PHYs.
-
-config DP83TC811_PHY
- tristate "Texas Instruments DP83TC811 PHY"
- help
- Supports the DP83TC811 PHY.
-
-config DP83848_PHY
- tristate "Texas Instruments DP83848 PHY"
- help
- Supports the DP83848 PHY.
-
-config DP83867_PHY
- tristate "Texas Instruments DP83867 Gigabit PHY"
- help
- Currently supports the DP83867 PHY.
-
-config DP83869_PHY
- tristate "Texas Instruments DP83869 Gigabit PHY"
- help
- Currently supports the DP83869 PHY. This PHY supports copper and
- fiber connections.
-
-config FIXED_PHY
- tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
- depends on PHYLIB
- select SWPHY
- help
- Adds the platform "fixed" MDIO Bus to cover the boards that use
- PHYs that are not connected to the real MDIO bus.
-
- Currently tested with mpc866ads and mpc8349e-mitx.
-
config ICPLUS_PHY
tristate "ICPlus PHYs"
help
Currently supports the IP175C and IP1001 PHYs.
+config LXT_PHY
+ tristate "Intel LXT PHYs"
+ help
+ Currently supports the lxt970, lxt971
+
config INTEL_XWAY_PHY
tristate "Intel XWAY PHYs"
help
@@ -447,27 +191,16 @@ config LSI_ET1011C_PHY
help
Supports the LSI ET1011C PHY.
-config LXT_PHY
- tristate "Intel LXT PHYs"
- help
- Currently supports the lxt970, lxt971
-
config MARVELL_PHY
- tristate "Marvell PHYs"
+ tristate "Marvell Alaska PHYs"
help
- Currently has a driver for the 88E1011S
+ Currently has a driver for the 88E1XXX
config MARVELL_10G_PHY
tristate "Marvell Alaska 10Gbit PHYs"
help
Support for the Marvell Alaska MV88X3310 and compatible PHYs.
-config MESON_GXL_PHY
- tristate "Amlogic Meson GXL Internal PHY"
- depends on ARCH_MESON || COMPILE_TEST
- help
- Currently has a driver for the Amlogic Meson GXL Internal PHY
-
config MICREL_PHY
tristate "Micrel PHYs"
help
@@ -518,12 +251,12 @@ config REALTEK_PHY
Supports the Realtek 821x PHY.
config RENESAS_PHY
- tristate "Driver for Renesas PHYs"
+ tristate "Renesas PHYs"
help
Supports the Renesas PHYs uPD60620 and uPD60620A.
config ROCKCHIP_PHY
- tristate "Driver for Rockchip Ethernet PHYs"
+ tristate "Rockchip Ethernet PHYs"
help
Currently supports the integrated Ethernet PHY.
@@ -542,6 +275,33 @@ config TERANETICS_PHY
help
Currently supports the Teranetics TN2020
+config DP83822_PHY
+ tristate "Texas Instruments DP83822/825/826 PHYs"
+ help
+ Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
+ DP83826C and DP83826NC PHYs.
+
+config DP83TC811_PHY
+ tristate "Texas Instruments DP83TC811 PHY"
+ help
+ Supports the DP83TC811 PHY.
+
+config DP83848_PHY
+ tristate "Texas Instruments DP83848 PHY"
+ help
+ Supports the DP83848 PHY.
+
+config DP83867_PHY
+ tristate "Texas Instruments DP83867 Gigabit PHY"
+ help
+ Currently supports the DP83867 PHY.
+
+config DP83869_PHY
+ tristate "Texas Instruments DP83869 Gigabit PHY"
+ help
+ Currently supports the DP83869 PHY. This PHY supports copper and
+ fiber connections.
+
config VITESSE_PHY
tristate "Vitesse PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index d84bab489a53..a13e402074cf 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for Linux PHY drivers and MDIO bus drivers
+# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
linkmode.o
@@ -24,31 +24,6 @@ libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
-obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
-obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
-obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
-obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
-obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
-obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
-obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
-obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
-obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
-obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
-obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
-obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
-obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
-obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
-obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
-obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
-obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
-obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
-obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
-obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
-obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
-obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
-obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
-obj-$(CONFIG_MDIO_XPCS) += mdio-xpcs.o
-
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
obj-$(CONFIG_SFP) += sfp.o
@@ -62,32 +37,32 @@ ifdef CONFIG_HWMON
aquantia-objs += aquantia_hwmon.o
endif
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
-obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
+obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
+obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
-obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
-obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_DP83822_PHY) += dp83822.o
-obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_DP83869_PHY) += dp83869.o
+obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_LXT_PHY) += lxt.o
-obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
+obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 101651b2de54..ed601a7e46a0 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -343,7 +343,7 @@ static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
}
-static struct regulator_ops vddio_regulator_ops = {
+static const struct regulator_ops vddio_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
.get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
@@ -364,7 +364,7 @@ static const struct regulator_desc vddio_desc = {
.owner = THIS_MODULE,
};
-static struct regulator_ops vddh_regulator_ops = {
+static const struct regulator_ops vddh_regulator_ops = {
};
static const struct regulator_desc vddh_desc = {
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 79e67f2fe00a..f2caccaf4408 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -798,51 +798,32 @@ static int decode_evnt(struct dp83640_private *dp83640,
return parsed;
}
-#define DP83640_PACKET_HASH_OFFSET 20
#define DP83640_PACKET_HASH_LEN 10
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
- unsigned int offset = 0;
- u8 *msgtype, *data = skb_mac_header(skb);
- __be16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
u16 hash;
/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- }
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
- return 0;
+ msgtype = ptp_get_msgtype(hdr, type);
- if (unlikely(type & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
- if (rxts->msgtype != (*msgtype & 0xf))
+ if (rxts->msgtype != (msgtype & 0xf))
return 0;
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- if (rxts->seqid != ntohs(*seqid))
+ seqid = be16_to_cpu(hdr->sequence_id);
+ if (rxts->seqid != seqid)
return 0;
hash = ether_crc(DP83640_PACKET_HASH_LEN,
- data + offset + DP83640_PACKET_HASH_OFFSET) >> 20;
+ (unsigned char *)&hdr->source_port_identity) >> 20;
if (rxts->hash != hash)
return 0;
@@ -982,35 +963,16 @@ static void decode_status_frame(struct dp83640_private *dp83640,
static int is_sync(struct sk_buff *skb, int type)
{
- u8 *data = skb->data, *msgtype;
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (type & PTP_CLASS_V1)
- offset += OFF_PTP_CONTROL;
+ struct ptp_header *hdr;
+ u8 msgtype;
- if (skb->len < offset + 1)
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, type);
- return (*msgtype & 0xf) == 0;
+ return (msgtype & 0xf) == 0;
}
static void dp83640_free_clocks(void)
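
The two dp83640 hunks above drop the driver's open-coded VLAN/IPv4/IPv6/L2 offset arithmetic in favour of the shared PTP helpers from <linux/ptp_classify.h>. As a reading aid, here is a minimal sketch of that pattern, assuming the classification type comes from ptp_classify_raw() as elsewhere in the kernel; example_ptp_ids() is purely illustrative and not part of the patch.

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

/* Illustrative only: pull the PTP messageType and sequenceId out of a
 * classified frame using the shared helpers the patch switches to.
 * Returns false if no PTP header can be parsed from the skb.
 */
static bool example_ptp_ids(struct sk_buff *skb, unsigned int type,
			    u8 *msgtype, u16 *seqid)
{
	struct ptp_header *hdr;

	/* ptp_parse_header() deals with VLAN, IPv4, IPv6 and raw L2 offsets */
	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return false;

	/* messageType for v2 frames, the control field for v1 frames */
	*msgtype = ptp_get_msgtype(hdr, type);
	*seqid = be16_to_cpu(hdr->sequence_id);
	return true;
}

This is also why match() can now hash hdr->source_port_identity (a 10-byte field) directly instead of dereferencing a hard-coded 20-byte offset into the packet.
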
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 37643c468e19..732c8bec7452 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -23,16 +23,31 @@
#define DP83822_DEVADDR 0x1f
+#define MII_DP83822_CTRL_2 0x0a
+#define MII_DP83822_PHYSTS 0x10
#define MII_DP83822_PHYSCR 0x11
#define MII_DP83822_MISR1 0x12
#define MII_DP83822_MISR2 0x13
+#define MII_DP83822_FCSCR 0x14
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
#define MII_DP83822_GENCFG 0x465
+#define MII_DP83822_SOR1 0x467
+
+/* GENCFG */
+#define DP83822_SIG_DET_LOW BIT(0)
+
+/* Control Register 2 bits */
+#define DP83822_FX_ENABLE BIT(14)
#define DP83822_HW_RESET BIT(15)
#define DP83822_SW_RESET BIT(14)
+/* PHY STS bits */
+#define DP83822_PHYSTS_DUPLEX BIT(2)
+#define DP83822_PHYSTS_10 BIT(1)
+#define DP83822_PHYSTS_LINK BIT(0)
+
/* PHYSCR Register Fields */
#define DP83822_PHYSCR_INT_OE BIT(0) /* Interrupt Output Enable */
#define DP83822_PHYSCR_INTEN BIT(1) /* Interrupt Enable */
@@ -83,6 +98,28 @@
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
+/* SOR1 mode */
+#define DP83822_STRAP_MODE1 0
+#define DP83822_STRAP_MODE2 BIT(0)
+#define DP83822_STRAP_MODE3 BIT(1)
+#define DP83822_STRAP_MODE4 GENMASK(1, 0)
+
+#define DP83822_COL_STRAP_MASK GENMASK(11, 10)
+#define DP83822_COL_SHIFT 10
+#define DP83822_RX_ER_STR_MASK GENMASK(9, 8)
+#define DP83822_RX_ER_SHIFT 8
+
+#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
+ ADVERTISED_FIBRE | ADVERTISED_BNC | \
+ ADVERTISED_Pause | ADVERTISED_Asym_Pause | \
+ ADVERTISED_100baseT_Full)
+
+struct dp83822_private {
+ bool fx_signal_det_low;
+ int fx_enabled;
+ u16 fx_sd_enable;
+};
+
static int dp83822_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -197,6 +234,7 @@ static void dp83822_get_wol(struct phy_device *phydev,
static int dp83822_config_intr(struct phy_device *phydev)
{
+ struct dp83822_private *dp83822 = phydev->priv;
int misr_status;
int physcr_status;
int err;
@@ -208,13 +246,16 @@ static int dp83822_config_intr(struct phy_device *phydev)
misr_status |= (DP83822_RX_ERR_HF_INT_EN |
DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_ANEG_COMPLETE_INT_EN |
- DP83822_DUP_MODE_CHANGE_INT_EN |
- DP83822_SPEED_CHANGED_INT_EN |
DP83822_LINK_STAT_INT_EN |
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
+ if (!dp83822->fx_enabled)
+ misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
+ DP83822_DUP_MODE_CHANGE_INT_EN |
+ DP83822_SPEED_CHANGED_INT_EN;
+
+
err = phy_write(phydev, MII_DP83822_MISR1, misr_status);
if (err < 0)
return err;
@@ -224,14 +265,16 @@ static int dp83822_config_intr(struct phy_device *phydev)
return misr_status;
misr_status |= (DP83822_JABBER_DET_INT_EN |
- DP83822_WOL_PKT_INT_EN |
DP83822_SLEEP_MODE_INT_EN |
- DP83822_MDI_XOVER_INT_EN |
DP83822_LB_FIFO_INT_EN |
DP83822_PAGE_RX_INT_EN |
- DP83822_ANEG_ERR_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
+ if (!dp83822->fx_enabled)
+ misr_status |= DP83822_MDI_XOVER_INT_EN |
+ DP83822_ANEG_ERR_INT_EN |
+ DP83822_WOL_PKT_INT_EN;
+
err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
if (err < 0)
return err;
@@ -270,13 +313,60 @@ static int dp8382x_disable_wol(struct phy_device *phydev)
MII_DP83822_WOL_CFG, value);
}
+static int dp83822_read_status(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int status = phy_read(phydev, MII_DP83822_PHYSTS);
+ int ctrl2;
+ int ret;
+
+ if (dp83822->fx_enabled) {
+ if (status & DP83822_PHYSTS_LINK) {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ } else {
+ ctrl2 = phy_read(phydev, MII_DP83822_CTRL_2);
+ if (ctrl2 < 0)
+ return ctrl2;
+
+ if (!(ctrl2 & DP83822_FX_ENABLE)) {
+ ret = phy_write(phydev, MII_DP83822_CTRL_2,
+ DP83822_FX_ENABLE | ctrl2);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (status < 0)
+ return status;
+
+ if (status & DP83822_PHYSTS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (status & DP83822_PHYSTS_10)
+ phydev->speed = SPEED_10;
+ else
+ phydev->speed = SPEED_100;
+
+ return 0;
+}
+
static int dp83822_config_init(struct phy_device *phydev)
{
+ struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
int rgmii_delay;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
+ int bmcr;
if (phy_interface_is_rgmii(phydev)) {
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
@@ -302,6 +392,53 @@ static int dp83822_config_init(struct phy_device *phydev)
}
}
+ if (dp83822->fx_enabled) {
+ err = phy_modify(phydev, MII_DP83822_CTRL_2,
+ DP83822_FX_ENABLE, 1);
+ if (err < 0)
+ return err;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->advertising);
+
+ /* Auto neg is not supported in fiber mode */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_ANENABLE) {
+ err = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (err < 0)
+ return err;
+ }
+ phydev->autoneg = AUTONEG_DISABLE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
+
+ /* Setup fiber advertisement */
+ err = phy_modify_changed(phydev, MII_ADVERTISE,
+ MII_DP83822_FIBER_ADVERTISE,
+ MII_DP83822_FIBER_ADVERTISE);
+
+ if (err < 0)
+ return err;
+
+ if (dp83822->fx_signal_det_low) {
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_GENCFG,
+ DP83822_SIG_DET_LOW);
+ if (err)
+ return err;
+ }
+ }
return dp8382x_disable_wol(phydev);
}
@@ -314,13 +451,85 @@ static int dp83822_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, MII_DP83822_RESET_CTRL, DP83822_HW_RESET);
+ err = phy_write(phydev, MII_DP83822_RESET_CTRL, DP83822_SW_RESET);
if (err < 0)
return err;
return phydev->drv->config_init(phydev);
}
+#ifdef CONFIG_OF_MDIO
+static int dp83822_of_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+
+ /* Signal detection for the PHY is only enabled if the FX_EN and the
+ * SD_EN pins are strapped. Signal detection can only be enabled if FX_EN
+ * is strapped; otherwise signal detection is disabled for the PHY.
+ */
+ if (dp83822->fx_enabled && dp83822->fx_sd_enable)
+ dp83822->fx_signal_det_low = device_property_present(dev,
+ "ti,link-loss-low");
+ if (!dp83822->fx_enabled)
+ dp83822->fx_enabled = device_property_present(dev,
+ "ti,fiber-mode");
+
+ return 0;
+}
+#else
+static int dp83822_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83822_read_straps(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int fx_enabled, fx_sd_enable;
+ int val;
+
+ val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1);
+ if (val < 0)
+ return val;
+
+ fx_enabled = (val & DP83822_COL_STRAP_MASK) >> DP83822_COL_SHIFT;
+ if (fx_enabled == DP83822_STRAP_MODE2 ||
+ fx_enabled == DP83822_STRAP_MODE3)
+ dp83822->fx_enabled = 1;
+
+ if (dp83822->fx_enabled) {
+ fx_sd_enable = (val & DP83822_RX_ER_STR_MASK) >> DP83822_RX_ER_SHIFT;
+ if (fx_sd_enable == DP83822_STRAP_MODE3 ||
+ fx_sd_enable == DP83822_STRAP_MODE4)
+ dp83822->fx_sd_enable = 1;
+ }
+
+ return 0;
+}
+
+static int dp83822_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+ int ret;
+
+ dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
+ GFP_KERNEL);
+ if (!dp83822)
+ return -ENOMEM;
+
+ phydev->priv = dp83822;
+
+ ret = dp83822_read_straps(phydev);
+ if (ret)
+ return ret;
+
+ dp83822_of_init(phydev);
+
+ return 0;
+}
+
static int dp83822_suspend(struct phy_device *phydev)
{
int value;
@@ -352,8 +561,10 @@ static int dp83822_resume(struct phy_device *phydev)
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
/* PHY_BASIC_FEATURES */ \
+ .probe = dp83822_probe, \
.soft_reset = dp83822_phy_reset, \
.config_init = dp83822_config_init, \
+ .read_status = dp83822_read_status, \
.get_wol = dp83822_get_wol, \
.set_wol = dp83822_set_wol, \
.ack_interrupt = dp83822_ack_interrupt, \
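
The dp83822 changes add 100BASE-FX (fiber) support, keyed off either the bootstrap pins or a "ti,fiber-mode" firmware property. The strap decode introduced by dp83822_read_straps() can be restated compactly; the helper below is only an illustrative rewording of the logic in the hunk above, using the SOR1 field definitions the patch adds.

/* Illustrative restatement of the SOR1 bootstrap decode above:
 *  - COL strap (bits 11:10): modes 2 and 3 select fiber (100BASE-FX) operation
 *  - RX_ER strap (bits 9:8): modes 3 and 4 enable the fiber signal-detect pin,
 *    which only matters once fiber operation is strapped in the first place
 */
static void example_decode_sor1(u16 sor1, bool *fx_enabled, bool *fx_sd_enable)
{
	u8 col = (sor1 & DP83822_COL_STRAP_MASK) >> DP83822_COL_SHIFT;
	u8 rx_er = (sor1 & DP83822_RX_ER_STR_MASK) >> DP83822_RX_ER_SHIFT;

	*fx_enabled = col == DP83822_STRAP_MODE2 || col == DP83822_STRAP_MODE3;
	*fx_sd_enable = *fx_enabled &&
			(rx_er == DP83822_STRAP_MODE3 || rx_er == DP83822_STRAP_MODE4);
}

When fiber mode is active, config_init() above disables autoneg, marks the FIBRE link mode as supported and advertised, writes the fixed fiber advertisement word, and selects the active-low signal-detect configuration when "ti,link-loss-low" is present.
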
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index cd7032628a28..69d3eacc2b96 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the Texas Instruments DP83867 PHY
+/* Driver for the Texas Instruments DP83867 PHY
*
* Copyright (C) 2015 Texas Instruments Inc.
*/
@@ -113,7 +112,6 @@
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
#define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
-
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
@@ -384,22 +382,22 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
DP83867_DOWNSHIFT_EN);
switch (cnt) {
- case DP83867_DOWNSHIFT_1_COUNT:
- count = DP83867_DOWNSHIFT_1_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_2_COUNT:
- count = DP83867_DOWNSHIFT_2_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_4_COUNT:
- count = DP83867_DOWNSHIFT_4_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_8_COUNT:
- count = DP83867_DOWNSHIFT_8_COUNT_VAL;
- break;
- default:
- phydev_err(phydev,
- "Downshift count must be 1, 2, 4 or 8\n");
- return -EINVAL;
+ case DP83867_DOWNSHIFT_1_COUNT:
+ count = DP83867_DOWNSHIFT_1_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_2_COUNT:
+ count = DP83867_DOWNSHIFT_2_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_4_COUNT:
+ count = DP83867_DOWNSHIFT_4_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_8_COUNT:
+ count = DP83867_DOWNSHIFT_8_COUNT_VAL;
+ break;
+ default:
+ phydev_err(phydev,
+ "Downshift count must be 1, 2, 4 or 8\n");
+ return -EINVAL;
}
val = DP83867_DOWNSHIFT_EN;
@@ -411,7 +409,7 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
}
static int dp83867_get_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, void *data)
+ struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
@@ -422,7 +420,7 @@ static int dp83867_get_tunable(struct phy_device *phydev,
}
static int dp83867_set_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, const void *data)
+ struct ethtool_tunable *tuna, const void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
@@ -524,11 +522,10 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->io_impedance = -1; /* leave at default */
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node,
- "ti,dp83867-rxctrl-strap-quirk");
+ "ti,dp83867-rxctrl-strap-quirk");
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
- "ti,sgmii-ref-clock-output-enable");
-
+ "ti,sgmii-ref-clock-output-enable");
dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV;
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
deleted file mode 100644
index b1d27f7cd23f..000000000000
--- a/drivers/net/phy/mdio-i2c.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * MDIO I2C bridge
- *
- * Copyright (C) 2015 Russell King
- */
-#ifndef MDIO_I2C_H
-#define MDIO_I2C_H
-
-struct device;
-struct i2c_adapter;
-struct mii_bus;
-
-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c);
-
-#endif
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
deleted file mode 100644
index 8af93ada8b64..000000000000
--- a/drivers/net/phy/mdio-xgene.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* Applied Micro X-Gene SoC MDIO Driver
- *
- * Copyright (c) 2016, Applied Micro Circuits Corporation
- * Author: Iyappan Subramanian <isubramanian@apm.com>
- */
-
-#ifndef __MDIO_XGENE_H__
-#define __MDIO_XGENE_H__
-
-#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
-#define BLOCK_DIAG_CSR_OFFSET 0xd000
-#define XGENET_CONFIG_REG_ADDR 0x20
-
-#define MAC_ADDR_REG_OFFSET 0x00
-#define MAC_COMMAND_REG_OFFSET 0x04
-#define MAC_WRITE_REG_OFFSET 0x08
-#define MAC_READ_REG_OFFSET 0x0c
-#define MAC_COMMAND_DONE_REG_OFFSET 0x10
-
-#define CLKEN_OFFSET 0x08
-#define SRST_OFFSET 0x00
-
-#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
-#define MENET_BLOCK_MEM_RDY_ADDR 0x74
-
-#define MAC_CONFIG_1_ADDR 0x00
-#define MII_MGMT_COMMAND_ADDR 0x24
-#define MII_MGMT_ADDRESS_ADDR 0x28
-#define MII_MGMT_CONTROL_ADDR 0x2c
-#define MII_MGMT_STATUS_ADDR 0x30
-#define MII_MGMT_INDICATORS_ADDR 0x34
-#define SOFT_RESET BIT(31)
-
-#define MII_MGMT_CONFIG_ADDR 0x20
-#define MII_MGMT_COMMAND_ADDR 0x24
-#define MII_MGMT_ADDRESS_ADDR 0x28
-#define MII_MGMT_CONTROL_ADDR 0x2c
-#define MII_MGMT_STATUS_ADDR 0x30
-#define MII_MGMT_INDICATORS_ADDR 0x34
-
-#define MIIM_COMMAND_ADDR 0x20
-#define MIIM_FIELD_ADDR 0x24
-#define MIIM_CONFIGURATION_ADDR 0x28
-#define MIIM_LINKFAILVECTOR_ADDR 0x2c
-#define MIIM_INDICATOR_ADDR 0x30
-#define MIIMRD_FIELD_ADDR 0x34
-
-#define MDIO_CSR_OFFSET 0x5000
-
-#define REG_ADDR_POS 0
-#define REG_ADDR_LEN 5
-#define PHY_ADDR_POS 8
-#define PHY_ADDR_LEN 5
-
-#define HSTMIIMWRDAT_POS 0
-#define HSTMIIMWRDAT_LEN 16
-#define HSTPHYADX_POS 23
-#define HSTPHYADX_LEN 5
-#define HSTREGADX_POS 18
-#define HSTREGADX_LEN 5
-#define HSTLDCMD BIT(3)
-#define HSTMIIMCMD_POS 0
-#define HSTMIIMCMD_LEN 3
-
-#define BUSY_MASK BIT(0)
-#define READ_CYCLE_MASK BIT(0)
-
-enum xgene_enet_cmd {
- XGENE_ENET_WR_CMD = BIT(31),
- XGENE_ENET_RD_CMD = BIT(30)
-};
-
-enum {
- MIIM_CMD_IDLE,
- MIIM_CMD_LEGACY_WRITE,
- MIIM_CMD_LEGACY_READ,
-};
-
-enum xgene_mdio_id {
- XGENE_MDIO_RGMII = 1,
- XGENE_MDIO_XFI
-};
-
-struct xgene_mdio_pdata {
- struct clk *clk;
- struct device *dev;
- void __iomem *mac_csr_addr;
- void __iomem *diag_csr_addr;
- void __iomem *mdio_csr_addr;
- struct mii_bus *mdio_bus;
- int mdio_id;
- spinlock_t mac_lock; /* mac lock */
-};
-
-/* Set the specified value into a bit-field defined by its starting position
- * and length within a single u64.
- */
-static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
-{
- return (val & ((1ULL << len) - 1)) << pos;
-}
-
-#define SET_VAL(field, val) \
- xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
-
-#define SET_BIT(field) \
- xgene_enet_set_field_value(field ## _POS, 1, 1)
-
-/* Get the value from a bit-field defined by its starting position
- * and length within the specified u64.
- */
-static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
-{
- return (src >> pos) & ((1ULL << len) - 1);
-}
-
-#define GET_VAL(field, src) \
- xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
-
-#define GET_BIT(field, src) \
- xgene_enet_get_field_value(field ## _POS, 1, src)
-
-u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
-void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
-int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
-int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
-struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
-
-#endif /* __MDIO_XGENE_H__ */
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index 1d4c012194e9..6cf9b798b710 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -958,7 +958,7 @@ static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
return 0;
}
-static struct macsec_ops vsc8584_macsec_ops = {
+static const struct macsec_ops vsc8584_macsec_ops = {
.mdo_dev_open = vsc8584_macsec_dev_open,
.mdo_dev_stop = vsc8584_macsec_dev_stop,
.mdo_add_secy = vsc8584_macsec_add_secy,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 32f4e8ec96cf..fe2296fdda19 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -535,8 +535,10 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
if (pl->pcs_ops)
pl->pcs_ops->pcs_get_state(pl->pcs, state);
- else
+ else if (pl->mac_ops->mac_pcs_get_state)
pl->mac_ops->mac_pcs_get_state(pl->config, state);
+ else
+ state->link = 0;
}
/* The fixed state is... fixed except for the link state,
@@ -2319,6 +2321,49 @@ static void phylink_decode_sgmii_word(struct phylink_link_state *state,
}
/**
+ * phylink_decode_usxgmii_word() - decode the USXGMII word from a MAC PCS
+ * @state: a pointer to a struct phylink_link_state.
+ * @lpa: a 16-bit value which stores the USXGMII auto-negotiation word
+ *
+ * Helper for MAC PCS supporting the USXGMII protocol and the auto-negotiation
+ * code word. Decode the USXGMII code word and populate the corresponding fields
+ * (speed, duplex) into the phylink_link_state structure.
+ */
+void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ uint16_t lpa)
+{
+ switch (lpa & MDIO_USXGMII_SPD_MASK) {
+ case MDIO_USXGMII_10:
+ state->speed = SPEED_10;
+ break;
+ case MDIO_USXGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case MDIO_USXGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ case MDIO_USXGMII_2500:
+ state->speed = SPEED_2500;
+ break;
+ case MDIO_USXGMII_5000:
+ state->speed = SPEED_5000;
+ break;
+ case MDIO_USXGMII_10G:
+ state->speed = SPEED_10000;
+ break;
+ default:
+ state->link = false;
+ return;
+ }
+
+ if (lpa & MDIO_USXGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+}
+EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
+
+/**
* phylink_mii_c22_pcs_get_state() - read the MAC PCS state
* @pcs: a pointer to a &struct mdio_device.
* @state: a pointer to a &struct phylink_link_state.
@@ -2361,6 +2406,7 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
break;
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
phylink_decode_sgmii_word(state, lpa);
break;
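
phylink_decode_usxgmii_word() is exported so MAC PCS drivers can turn the USXGMII auto-negotiation result word into speed and duplex in a phylink_link_state. A minimal sketch of a pcs_get_state() callback using it follows; my_pcs_read_lpa() is a hypothetical accessor standing in for however a given PCS exposes the link-partner ability word, and the unconditional link/an_complete assignments only keep the sketch short.

/* Minimal sketch, not from the patch: resolve USXGMII autoneg in a PCS driver. */
static void example_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	u16 lpa = my_pcs_read_lpa(pcs);	/* hypothetical register accessor */

	/* a real driver derives these from PCS status bits */
	state->link = true;
	state->an_complete = true;

	/* fills state->speed and state->duplex from the MDIO_USXGMII_* fields */
	phylink_decode_usxgmii_word(state, lpa);
}

The QSGMII case added to phylink_mii_c22_pcs_get_state() is independent of this: QSGMII control words share the SGMII layout, so the existing SGMII decoder is simply reused.
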
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index cf83314c8591..1d18c10e8f82 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -7,6 +7,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "mdio-i2c.h"
#include "sfp.h"
#include "swphy.h"
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 74568ae16125..638e8c3d1f4a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -21,6 +21,17 @@
#include <linux/netdevice.h>
#include <linux/smscphy.h>
+/* Vendor-specific PHY Definitions */
+/* EDPD NLP / crossover time configuration */
+#define PHY_EDPD_CONFIG 16
+#define PHY_EDPD_CONFIG_EXT_CROSSOVER_ 0x0001
+
+/* Control/Status Indication Register */
+#define SPECIAL_CTRL_STS 27
+#define SPECIAL_CTRL_STS_OVRRD_AMDIX_ 0x8000
+#define SPECIAL_CTRL_STS_AMDIX_ENABLE_ 0x4000
+#define SPECIAL_CTRL_STS_AMDIX_STATE_ 0x2000
+
struct smsc_hw_stat {
const char *string;
u8 reg;
@@ -96,6 +107,54 @@ static int lan911x_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt(phydev);
}
+static int lan87xx_config_aneg(struct phy_device *phydev)
+{
+ int rc;
+ int val;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_;
+ break;
+ case ETH_TP_MDI_X:
+ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = SPECIAL_CTRL_STS_AMDIX_ENABLE_;
+ break;
+ default:
+ return genphy_config_aneg(phydev);
+ }
+
+ rc = phy_read(phydev, SPECIAL_CTRL_STS);
+ if (rc < 0)
+ return rc;
+
+ rc &= ~(SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_);
+ rc |= val;
+ phy_write(phydev, SPECIAL_CTRL_STS, rc);
+
+ phydev->mdix = phydev->mdix_ctrl;
+ return genphy_config_aneg(phydev);
+}
+
+static int lan87xx_config_aneg_ext(struct phy_device *phydev)
+{
+ int rc;
+
+ /* Extend Manual AutoMDIX timer */
+ rc = phy_read(phydev, PHY_EDPD_CONFIG);
+ if (rc < 0)
+ return rc;
+
+ rc |= PHY_EDPD_CONFIG_EXT_CROSSOVER_;
+ phy_write(phydev, PHY_EDPD_CONFIG, rc);
+ return lan87xx_config_aneg(phydev);
+}
+
/*
* The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
* plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
@@ -250,6 +309,9 @@ static struct phy_driver smsc_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
+ /* This covers internal PHY (phy_id: 0x0007C0C3) for
+ * LAN9500 (PID: 0x9500), LAN9514 (PID: 0xec00), LAN9505 (PID: 0x9505)
+ */
.phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
@@ -262,6 +324,7 @@ static struct phy_driver smsc_phy_driver[] = {
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
+ .config_aneg = lan87xx_config_aneg,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
@@ -293,6 +356,9 @@ static struct phy_driver smsc_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
+ /* This covers internal PHY (phy_id: 0x0007C0F0) for
+ * LAN9500A (PID: 0x9E00), LAN9505A (PID: 0x9E01)
+ */
.phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
@@ -306,6 +372,7 @@ static struct phy_driver smsc_phy_driver[] = {
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
+ .config_aneg = lan87xx_config_aneg_ext,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
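
lan87xx_config_aneg() maps the ethtool MDI/MDI-X control onto the vendor SPECIAL_CTRL_STS register before handing over to genphy_config_aneg(); lan87xx_config_aneg_ext() additionally extends the manual AutoMDIX timer via PHY_EDPD_CONFIG, mirroring the 9500A-specific handling removed from smsc95xx.c further down in this diff. The register mapping can be summarised as a sketch (illustrative only; the driver code above is authoritative):

/* Illustrative summary of the mdix_ctrl -> SPECIAL_CTRL_STS mapping above. */
static int example_mdix_bits(u8 mdix_ctrl)
{
	switch (mdix_ctrl) {
	case ETH_TP_MDI:	/* force MDI: override autoMDIX, state bit clear */
		return SPECIAL_CTRL_STS_OVRRD_AMDIX_;
	case ETH_TP_MDI_X:	/* force MDI-X: override autoMDIX, state bit set */
		return SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
		       SPECIAL_CTRL_STS_AMDIX_STATE_;
	case ETH_TP_MDI_AUTO:	/* let the PHY resolve MDI/MDI-X itself */
		return SPECIAL_CTRL_STS_AMDIX_ENABLE_;
	default:		/* driver falls back to plain genphy_config_aneg() */
		return -EOPNOTSUPP;
	}
}
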
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7959b5c2d11f..be69d272052f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -219,24 +219,6 @@ struct veth {
__be16 h_vlan_TCI;
};
-bool tun_is_xdp_frame(void *ptr)
-{
- return (unsigned long)ptr & TUN_XDP_FLAG;
-}
-EXPORT_SYMBOL(tun_is_xdp_frame);
-
-void *tun_xdp_to_ptr(void *ptr)
-{
- return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
-}
-EXPORT_SYMBOL(tun_xdp_to_ptr);
-
-void *tun_ptr_to_xdp(void *ptr)
-{
- return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
-}
-EXPORT_SYMBOL(tun_ptr_to_xdp);
-
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c7bcfca7d70b..b46993d5f997 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -346,6 +346,8 @@ config USB_NET_SMSC75XX
config USB_NET_SMSC95XX
tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
depends on USB_USBNET
+ select PHYLIB
+ select SMSC_PHY
select BITREVERSE
select CRC16
select CRC32
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bb4ccbda031a..601fb40a2a0a 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -18,10 +18,12 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/of_net.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
-#define SMSC_DRIVER_VERSION "1.0.6"
+#define SMSC_DRIVER_VERSION "2.0.0"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -49,10 +51,7 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
-#define CARRIER_CHECK_DELAY (2 * HZ)
-
struct smsc95xx_priv {
- u32 chip_id;
u32 mac_cr;
u32 hash_hi;
u32 hash_lo;
@@ -60,10 +59,8 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
- u8 mdix_ctrl;
- bool link_ok;
- struct delayed_work carrier_check;
- struct usbnet *dev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
};
static bool turbo_mode = true;
@@ -173,10 +170,14 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
return -EIO;
}
-static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+static u32 mii_address_cmd(int phy_id, int idx, u16 op)
+{
+ return (phy_id & 0x1f) << 11 | (idx & 0x1f) << 6 | op;
+}
+
+static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
int in_pm)
{
- struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
int ret;
@@ -185,14 +186,12 @@ static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
/* confirm MII not busy */
ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
+ netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
/* set the address, index & direction (read from PHY) */
- phy_id &= dev->mii.phy_id_mask;
- idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
+ addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
netdev_warn(dev->net, "Error writing MII_ADDR\n");
@@ -218,10 +217,9 @@ done:
return ret;
}
-static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
+static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
int idx, int regval, int in_pm)
{
- struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
int ret;
@@ -230,7 +228,7 @@ static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
/* confirm MII not busy */
ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
+ netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
@@ -242,9 +240,7 @@ static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
}
/* set the address, index & direction (write to PHY) */
- phy_id &= dev->mii.phy_id_mask;
- idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
+ addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
netdev_warn(dev->net, "Error writing MII_ADDR\n");
@@ -261,27 +257,34 @@ done:
mutex_unlock(&dev->phy_mutex);
}
-static int smsc95xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
- int idx)
+static int smsc95xx_mdio_read_nopm(struct usbnet *dev, int idx)
{
- return __smsc95xx_mdio_read(netdev, phy_id, idx, 1);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ return __smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, idx, 1);
}
-static void smsc95xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
- int idx, int regval)
+static void smsc95xx_mdio_write_nopm(struct usbnet *dev, int idx, int regval)
{
- __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 1);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ __smsc95xx_mdio_write(dev, pdata->phydev->mdio.addr, idx, regval, 1);
}
-static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
- return __smsc95xx_mdio_read(netdev, phy_id, idx, 0);
+ struct usbnet *dev = bus->priv;
+
+ return __smsc95xx_mdio_read(dev, phy_id, idx, 0);
}
-static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
- int regval)
+static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
+ u16 regval)
{
- __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 0);
+ struct usbnet *dev = bus->priv;
+
+ __smsc95xx_mdio_write(dev, phy_id, idx, regval, 0);
+ return 0;
}
static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
@@ -455,7 +458,7 @@ static unsigned int smsc95xx_hash(char addr[ETH_ALEN])
static void smsc95xx_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
@@ -511,22 +514,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
netdev_warn(dev->net, "failed to initiate async write to MAC_CR\n");
}
-static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
- u16 lcladv, u16 rmtadv)
+static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev)
{
u32 flow = 0, afc_cfg;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+ bool tx_pause, rx_pause;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
if (ret < 0)
return ret;
- if (duplex == DUPLEX_FULL) {
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ if (pdata->phydev->duplex == DUPLEX_FULL) {
+ phy_get_pause(pdata->phydev, &tx_pause, &rx_pause);
- if (cap & FLOW_CTRL_RX)
+ if (rx_pause)
flow = 0xFFFF0002;
- if (cap & FLOW_CTRL_TX) {
+ if (tx_pause) {
afc_cfg |= 0xF;
flow |= 0xFFFF0000;
} else {
@@ -534,8 +538,8 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
}
netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
- cap & FLOW_CTRL_RX ? "enabled" : "disabled",
- cap & FLOW_CTRL_TX ? "enabled" : "disabled");
+ rx_pause ? "enabled" : "disabled",
+ tx_pause ? "enabled" : "disabled");
} else {
netif_dbg(dev, link, dev->net, "half duplex\n");
afc_cfg |= 0xF;
@@ -550,33 +554,16 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
static int smsc95xx_link_reset(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- struct mii_if_info *mii = &dev->mii;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
- u16 lcladv, rmtadv;
int ret;
- /* clear interrupt status */
- ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
- if (ret < 0)
- return ret;
-
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
if (ret < 0)
return ret;
- mii_check_media(mii, 1, 1);
- mii_ethtool_gset(&dev->mii, &ecmd);
- lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
- rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
-
- netif_dbg(dev, link, dev->net,
- "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
- ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
-
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
- if (ecmd.duplex != DUPLEX_FULL) {
+ if (pdata->phydev->duplex != DUPLEX_FULL) {
pdata->mac_cr &= ~MAC_CR_FDPX_;
pdata->mac_cr |= MAC_CR_RCVOWN_;
} else {
@@ -589,7 +576,7 @@ static int smsc95xx_link_reset(struct usbnet *dev)
if (ret < 0)
return ret;
- ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+ ret = smsc95xx_phy_update_flowcontrol(dev);
if (ret < 0)
netdev_warn(dev->net, "Error updating PHY flow control\n");
@@ -616,44 +603,6 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata);
}
-static void set_carrier(struct usbnet *dev, bool link)
-{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- if (pdata->link_ok == link)
- return;
-
- pdata->link_ok = link;
-
- if (link)
- usbnet_link_change(dev, 1, 0);
- else
- usbnet_link_change(dev, 0, 0);
-}
-
-static void check_carrier(struct work_struct *work)
-{
- struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
- carrier_check.work);
- struct usbnet *dev = pdata->dev;
- int ret;
-
- if (pdata->suspend_flags != 0)
- return;
-
- ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read MII_BMSR\n");
- return;
- }
- if (ret & BMSR_LSTATUS)
- set_carrier(dev, 1);
- else
- set_carrier(dev, 0);
-
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
-}
-
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -747,7 +696,7 @@ static void smsc95xx_ethtool_get_wol(struct net_device *net,
struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
wolinfo->supported = SUPPORTED_WAKE;
wolinfo->wolopts = pdata->wolopts;
@@ -757,7 +706,7 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
if (wolinfo->wolopts & ~SUPPORTED_WAKE)
@@ -772,108 +721,15 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
return ret;
}
-static int get_mdix_status(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
- u32 val;
- int buf;
-
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, SPECIAL_CTRL_STS);
- if (buf & SPECIAL_CTRL_STS_OVRRD_AMDIX_) {
- if (buf & SPECIAL_CTRL_STS_AMDIX_ENABLE_)
- return ETH_TP_MDI_AUTO;
- else if (buf & SPECIAL_CTRL_STS_AMDIX_STATE_)
- return ETH_TP_MDI_X;
- } else {
- buf = smsc95xx_read_reg(dev, STRAP_STATUS, &val);
- if (val & STRAP_STATUS_AMDIX_EN_)
- return ETH_TP_MDI_AUTO;
- }
-
- return ETH_TP_MDI;
-}
-
-static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int buf;
-
- if ((pdata->chip_id == ID_REV_CHIP_ID_9500A_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_9530_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_89530_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_9730_)) {
- /* Extend Manual AutoMDIX timer for 9500A/9500Ai */
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- PHY_EDPD_CONFIG);
- buf |= PHY_EDPD_CONFIG_EXT_CROSSOVER_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- PHY_EDPD_CONFIG, buf);
- }
-
- if (mdix_ctrl == ETH_TP_MDI) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- } else if (mdix_ctrl == ETH_TP_MDI_X) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- buf |= SPECIAL_CTRL_STS_AMDIX_STATE_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf &= ~SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- buf |= SPECIAL_CTRL_STS_AMDIX_ENABLE_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- }
- pdata->mdix_ctrl = mdix_ctrl;
-}
-
-static int smsc95xx_get_link_ksettings(struct net_device *net,
- struct ethtool_link_ksettings *cmd)
+static u32 smsc95xx_get_link(struct net_device *net)
{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int retval;
-
- retval = usbnet_get_link_ksettings(net, cmd);
-
- cmd->base.eth_tp_mdix = pdata->mdix_ctrl;
- cmd->base.eth_tp_mdix_ctrl = pdata->mdix_ctrl;
-
- return retval;
-}
-
-static int smsc95xx_set_link_ksettings(struct net_device *net,
- const struct ethtool_link_ksettings *cmd)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int retval;
-
- if (pdata->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
- set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
-
- retval = usbnet_set_link_ksettings(net, cmd);
-
- return retval;
+ phy_read_status(net->phydev);
+ return net->phydev->link;
}
static const struct ethtool_ops smsc95xx_ethtool_ops = {
- .get_link = usbnet_get_link,
- .nway_reset = usbnet_nway_reset,
+ .get_link = smsc95xx_get_link,
+ .nway_reset = phy_ethtool_nway_reset,
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
@@ -884,19 +740,17 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_regs = smsc95xx_ethtool_getregs,
.get_wol = smsc95xx_ethtool_get_wol,
.set_wol = smsc95xx_ethtool_set_wol,
- .get_link_ksettings = smsc95xx_get_link_ksettings,
- .set_link_ksettings = smsc95xx_set_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
- struct usbnet *dev = netdev_priv(netdev);
-
if (!netif_running(netdev))
return -EINVAL;
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+ return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
static void smsc95xx_init_mac_address(struct usbnet *dev)
@@ -942,7 +796,7 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
/* starts the TX path */
static int smsc95xx_start_tx_path(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
@@ -962,7 +816,7 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
/* Starts the Receive path */
static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
@@ -972,54 +826,9 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
}
-static int smsc95xx_phy_initialize(struct usbnet *dev)
-{
- int bmcr, ret, timeout = 0;
-
- /* Initialize MII structure */
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = smsc95xx_mdio_read;
- dev->mii.mdio_write = smsc95xx_mdio_write;
- dev->mii.phy_id_mask = 0x1f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = SMSC95XX_INTERNAL_PHY_ID;
-
- /* reset phy and wait for reset to complete */
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
-
- do {
- msleep(10);
- bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
- timeout++;
- } while ((bmcr & BMCR_RESET) && (timeout < 100));
-
- if (timeout >= 100) {
- netdev_warn(dev->net, "timeout on PHY Reset");
- return -EIO;
- }
-
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
- ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
-
- /* read to clear */
- ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read PHY_INT_SRC during init\n");
- return ret;
- }
-
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
- PHY_INT_MASK_DEFAULT_);
- mii_nway_restart(&dev->mii);
-
- netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
- return 0;
-}
-
static int smsc95xx_reset(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 read_buf, write_buf, burst_cap;
int ret = 0, timeout;
@@ -1198,12 +1007,6 @@ static int smsc95xx_reset(struct usbnet *dev)
smsc95xx_set_multicast(dev->net);
- ret = smsc95xx_phy_initialize(dev);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to init PHY\n");
- return ret;
- }
-
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
if (ret < 0)
return ret;
@@ -1247,7 +1050,8 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
- struct smsc95xx_priv *pdata = NULL;
+ struct smsc95xx_priv *pdata;
+ bool is_internal_phy;
u32 val;
int ret;
@@ -1259,13 +1063,12 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
}
- dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
- GFP_KERNEL);
-
- pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
+ dev->driver_priv = pdata;
+
spin_lock_init(&pdata->mac_cr_lock);
/* LAN95xx devices do not alter the computed checksum of 0 to 0xffff.
@@ -1290,15 +1093,50 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto free_pdata;
+ pdata->mdiobus = mdiobus_alloc();
+ if (!pdata->mdiobus) {
+ ret = -ENOMEM;
+ goto free_pdata;
+ }
+
+ ret = smsc95xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ goto free_mdio;
+
+ is_internal_phy = !(val & HW_CFG_PSEL_);
+ if (is_internal_phy)
+ pdata->mdiobus->phy_mask = ~(1u << SMSC95XX_INTERNAL_PHY_ID);
+
+ pdata->mdiobus->priv = dev;
+ pdata->mdiobus->read = smsc95xx_mdiobus_read;
+ pdata->mdiobus->write = smsc95xx_mdiobus_write;
+ pdata->mdiobus->name = "smsc95xx-mdiobus";
+ pdata->mdiobus->parent = &dev->udev->dev;
+
+ snprintf(pdata->mdiobus->id, ARRAY_SIZE(pdata->mdiobus->id),
+ "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum);
+
+ ret = mdiobus_register(pdata->mdiobus);
+ if (ret) {
+ netdev_err(dev->net, "Could not register MDIO bus\n");
+ goto free_mdio;
+ }
+
+ pdata->phydev = phy_find_first(pdata->mdiobus);
+ if (!pdata->phydev) {
+ netdev_err(dev->net, "no PHY found\n");
+ ret = -ENODEV;
+ goto unregister_mdio;
+ }
+
+ pdata->phydev->is_internal = is_internal_phy;
+
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
if (ret < 0)
- goto free_pdata;
+ goto unregister_mdio;
val >>= 16;
- pdata->chip_id = val;
- pdata->mdix_ctrl = get_mdix_status(dev->net);
-
if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
(val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
pdata->features = (FEATURE_8_WAKEUP_FILTERS |
@@ -1314,12 +1152,13 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->min_mtu = ETH_MIN_MTU;
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ return 0;
- pdata->dev = dev;
- INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+unregister_mdio:
+ mdiobus_unregister(pdata->mdiobus);
- return 0;
+free_mdio:
+ mdiobus_free(pdata->mdiobus);
free_pdata:
kfree(pdata);
@@ -1328,15 +1167,47 @@ free_pdata:
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- if (pdata) {
- cancel_delayed_work_sync(&pdata->carrier_check);
- netif_dbg(dev, ifdown, dev->net, "free pdata\n");
- kfree(pdata);
- pdata = NULL;
- dev->data[0] = 0;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ mdiobus_unregister(pdata->mdiobus);
+ mdiobus_free(pdata->mdiobus);
+ netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+ kfree(pdata);
+}
+
+static void smsc95xx_handle_link_change(struct net_device *net)
+{
+ phy_print_status(net->phydev);
+}
+
+static int smsc95xx_start_phy(struct usbnet *dev)
+{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+ struct net_device *net = dev->net;
+ int ret;
+
+ ret = smsc95xx_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_connect_direct(net, pdata->phydev,
+ &smsc95xx_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id);
+ return ret;
}
+
+ phy_attached_info(net->phydev);
+ phy_start(net->phydev);
+ return 0;
+}
+
+static int smsc95xx_disconnect_phy(struct usbnet *dev)
+{
+ phy_stop(dev->net->phydev);
+ phy_disconnect(dev->net->phydev);
+ return 0;
}
static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
@@ -1347,39 +1218,37 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
{
- struct mii_if_info *mii = &dev->mii;
int ret;
netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
/* read to clear */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_SRC);
if (ret < 0)
return ret;
/* enable interrupt source */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_MASK);
if (ret < 0)
return ret;
ret |= mask;
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+ smsc95xx_mdio_write_nopm(dev, PHY_INT_MASK, ret);
return 0;
}
static int smsc95xx_link_ok_nopm(struct usbnet *dev)
{
- struct mii_if_info *mii = &dev->mii;
int ret;
/* first, a dummy read, needed to latch some MII phys */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
if (ret < 0)
return ret;
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
if (ret < 0)
return ret;
@@ -1388,7 +1257,7 @@ static int smsc95xx_link_ok_nopm(struct usbnet *dev)
static int smsc95xx_enter_suspend0(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1427,8 +1296,7 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
static int smsc95xx_enter_suspend1(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- struct mii_if_info *mii = &dev->mii;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1436,17 +1304,17 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
* compatibility with non-standard link partners
*/
if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_EDPD_CONFIG,
- PHY_EDPD_CONFIG_DEFAULT);
+ smsc95xx_mdio_write_nopm(dev, PHY_EDPD_CONFIG,
+ PHY_EDPD_CONFIG_DEFAULT);
/* enable energy detect power-down mode */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_MODE_CTRL_STS);
if (ret < 0)
return ret;
ret |= MODE_CTRL_STS_EDPWRDOWN_;
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret);
+ smsc95xx_mdio_write_nopm(dev, PHY_MODE_CTRL_STS, ret);
/* enter SUSPEND1 mode */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
@@ -1475,7 +1343,7 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
static int smsc95xx_enter_suspend2(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1497,7 +1365,7 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
static int smsc95xx_enter_suspend3(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1536,7 +1404,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
if (!netif_running(dev->net)) {
@@ -1584,7 +1452,7 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val, link_up;
int ret;
@@ -1594,8 +1462,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
return ret;
}
- cancel_delayed_work_sync(&pdata->carrier_check);
-
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
@@ -1839,10 +1705,6 @@ done:
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
- if (ret)
- schedule_delayed_work(&pdata->carrier_check,
- CARRIER_CHECK_DELAY);
-
return ret;
}
@@ -1855,14 +1717,13 @@ static int smsc95xx_resume(struct usb_interface *intf)
u32 val;
BUG_ON(!dev);
- pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ pdata = dev->driver_priv;
suspend_flags = pdata->suspend_flags;
netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
@@ -1893,6 +1754,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
if (ret < 0)
netdev_warn(dev->net, "usbnet_resume error\n");
+ phy_init_hw(pdata->phydev);
return ret;
}
@@ -2075,7 +1937,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
static int smsc95xx_manage_power(struct usbnet *dev, int on)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
dev->intf->needs_remote_wakeup = on;
@@ -2098,7 +1960,8 @@ static const struct driver_info smsc95xx_info = {
.bind = smsc95xx_bind,
.unbind = smsc95xx_unbind,
.link_reset = smsc95xx_link_reset,
- .reset = smsc95xx_reset,
+ .reset = smsc95xx_start_phy,
+ .stop = smsc95xx_disconnect_phy,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
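
The smsc95xx rework drops the private carrier-check work queue and mii_if_info plumbing, registers a real MDIO bus, attaches the PHY through phylib, and lets phylib drive link reporting, MDI-X and the ethtool ksettings ops. The overall pattern, distilled from the hunks above into a minimal sketch (the MDIO accessors and link-change handler are hypothetical placeholders for the driver's own smsc95xx_mdiobus_read/write and smsc95xx_handle_link_change):

#include <linux/phy.h>
#include <linux/usb/usbnet.h>

/* Minimal sketch of the mdiobus-plus-phylib pattern used by the conversion. */
static int example_attach_phy(struct usbnet *dev)
{
	struct phy_device *phydev;
	struct mii_bus *bus;
	int ret;

	bus = mdiobus_alloc();
	if (!bus)
		return -ENOMEM;

	bus->priv = dev;
	bus->read = example_mdio_read;		/* hypothetical accessor */
	bus->write = example_mdio_write;	/* hypothetical accessor */
	bus->name = "example-mdiobus";
	bus->parent = &dev->udev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	ret = mdiobus_register(bus);
	if (ret)
		goto free_bus;

	phydev = phy_find_first(bus);
	if (!phydev) {
		ret = -ENODEV;
		goto unregister;
	}

	ret = phy_connect_direct(dev->net, phydev,
				 example_link_change,	/* hypothetical handler */
				 PHY_INTERFACE_MODE_MII);
	if (ret)
		goto unregister;

	phy_attached_info(phydev);
	phy_start(phydev);
	return 0;

unregister:
	mdiobus_unregister(bus);
free_bus:
	mdiobus_free(bus);
	return ret;
}

In the patch itself this is split across bind (bus registration and phy_find_first) and the .reset hook (phy_connect_direct and phy_start), with the inverse teardown in unbind and the new .stop hook.
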
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a475f48d43c4..7de8f0ea3f6b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -234,14 +234,14 @@ static bool veth_is_xdp_frame(void *ptr)
return (unsigned long)ptr & VETH_XDP_FLAG;
}
-static void *veth_ptr_to_xdp(void *ptr)
+static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}
-static void *veth_xdp_to_ptr(void *ptr)
+static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
- return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+ return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}
static void veth_ptr_free(void *ptr)
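
The veth hunk only tightens the prototypes to struct xdp_frame; the underlying trick is unchanged: sk_buff and xdp_frame pointers share one ptr_ring, and the low pointer bit (always zero for aligned allocations) is used as a tag to tell them apart. The tun.c hunk earlier removes its exported copies of the same three helpers; where they move to is outside the files shown here. A generic sketch of the tagged-pointer scheme, with the flag name chosen purely for illustration:

#include <linux/bits.h>
#include <net/xdp.h>

/* Illustrative tag-in-low-bit scheme, mirroring VETH_XDP_FLAG / TUN_XDP_FLAG. */
#define EXAMPLE_XDP_FLAG	BIT(0)

static bool example_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & EXAMPLE_XDP_FLAG;
}

static void *example_xdp_to_ptr(struct xdp_frame *frame)
{
	return (void *)((unsigned long)frame | EXAMPLE_XDP_FLAG);
}

static struct xdp_frame *example_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~EXAMPLE_XDP_FLAG);
}
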
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 9edd94679283..dca97cd7c4e7 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1295,3 +1295,4 @@ static struct platform_driver ucc_hdlc_driver = {
module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 40c04ea1200a..2fde439543fb 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -260,11 +260,12 @@ static int __init sbni_init(struct net_device *dev)
return sbni_isa_probe( dev );
/* otherwise we have to perform search our adapter */
- if( io[ num ] != -1 )
- dev->base_addr = io[ num ],
+ if( io[ num ] != -1 ) {
+ dev->base_addr = io[ num ];
dev->irq = irq[ num ];
- else if( scandone || io[ 0 ] != -1 )
+ } else if( scandone || io[ 0 ] != -1 ) {
return -ENODEV;
+ }
/* if io[ num ] contains non-zero address, then that is on ISA bus */
if( dev->base_addr )
@@ -399,12 +400,13 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
nl->maxframe = DEFAULT_FRAME_LEN;
nl->csr1.rate = baud[ num ];
- if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
+ if( (nl->cur_rxl_index = rxl[ num ]) == -1 ) {
/* autotune rxl */
- nl->cur_rxl_index = DEF_RXL,
+ nl->cur_rxl_index = DEF_RXL;
nl->delta_rxl = DEF_RXL_DELTA;
- else
+ } else {
nl->delta_rxl = 0;
+ }
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
if( inb( ioaddr + CSR0 ) & 0x01 )
nl->state |= FL_SLOW_MODE;
@@ -512,13 +514,15 @@ sbni_interrupt( int irq, void *dev_id )
do {
repeat = 0;
- if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
- handle_channel( dev ),
+ if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) ) {
+ handle_channel( dev );
repeat = 1;
+ }
if( nl->second && /* second channel present */
- (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
- handle_channel( nl->second ),
+ (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) ) {
+ handle_channel( nl->second );
repeat = 1;
+ }
} while( repeat );
if( nl->second )
@@ -610,11 +614,12 @@ recv_frame( struct net_device *dev )
nl->state |= FL_PREV_OK;
if( framelen > 4 )
nl->in_stats.all_rx_number++;
- } else
- nl->state &= ~FL_PREV_OK,
- change_level( dev ),
- nl->in_stats.all_rx_number++,
+ } else {
+ nl->state &= ~FL_PREV_OK;
+ change_level( dev );
+ nl->in_stats.all_rx_number++;
nl->in_stats.bad_rx_number++;
+ }
return !frame_ok || framelen > 4;
}
@@ -689,9 +694,10 @@ download_data( struct net_device *dev, u32 *crc_p )
*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
/* if packet too short we should write some more bytes to pad */
- for( len = nl->framelen - len; len--; )
- outb( 0, dev->base_addr + DAT ),
+ for( len = nl->framelen - len; len--; ) {
+ outb( 0, dev->base_addr + DAT );
*crc_p = CRC32( 0, *crc_p );
+ }
}
@@ -703,9 +709,10 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
int frame_ok;
- if( is_first )
- nl->wait_frameno = frameno,
+ if( is_first ) {
+ nl->wait_frameno = frameno;
nl->inppos = 0;
+ }
if( nl->wait_frameno == frameno ) {
@@ -717,33 +724,35 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
* error was occurred... drop entire packet
*/
else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
- != 0 )
- nl->wait_frameno = 0,
- nl->inppos = 0,
+ != 0 ) {
+ nl->wait_frameno = 0;
+ nl->inppos = 0;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++,
+ nl->master->stats.rx_errors++;
nl->master->stats.rx_missed_errors++;
#else
- dev->stats.rx_errors++,
+ dev->stats.rx_errors++;
dev->stats.rx_missed_errors++;
#endif
+ }
/* now skip all frames until is_first != 0 */
} else
frame_ok = skip_tail( dev->base_addr, framelen, crc );
- if( is_first && !frame_ok )
+ if( is_first && !frame_ok ) {
/*
* Frame has been broken, but we had already stored
* is_first... Drop entire packet.
*/
- nl->wait_frameno = 0,
+ nl->wait_frameno = 0;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++,
+ nl->master->stats.rx_errors++;
nl->master->stats.rx_crc_errors++;
#else
- dev->stats.rx_errors++,
+ dev->stats.rx_errors++;
dev->stats.rx_crc_errors++;
#endif
+ }
return frame_ok;
}
@@ -782,17 +791,18 @@ interpret_ack( struct net_device *dev, unsigned ack )
if( nl->state & FL_WAIT_ACK ) {
nl->outpos += nl->framelen;
- if( --nl->tx_frameno )
+ if( --nl->tx_frameno ) {
nl->framelen = min_t(unsigned int,
nl->maxframe,
nl->tx_buf_p->len - nl->outpos);
- else
- send_complete( dev ),
+ } else {
+ send_complete( dev );
#ifdef CONFIG_SBNI_MULTILINE
netif_wake_queue( nl->master );
#else
netif_wake_queue( dev );
#endif
+ }
}
}
@@ -872,16 +882,17 @@ drop_xmit_queue( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
- if( nl->tx_buf_p )
- dev_kfree_skb_any( nl->tx_buf_p ),
- nl->tx_buf_p = NULL,
+ if( nl->tx_buf_p ) {
+ dev_kfree_skb_any( nl->tx_buf_p );
+ nl->tx_buf_p = NULL;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.tx_errors++,
+ nl->master->stats.tx_errors++;
nl->master->stats.tx_carrier_errors++;
#else
- dev->stats.tx_errors++,
+ dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
#endif
+ }
nl->tx_frameno = 0;
nl->framelen = 0;
@@ -1327,12 +1338,13 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
spin_lock( &nl->lock );
flags = *(struct sbni_flags*) &ifr->ifr_ifru;
- if( flags.fixed_rxl )
- nl->delta_rxl = 0,
+ if( flags.fixed_rxl ) {
+ nl->delta_rxl = 0;
nl->cur_rxl_index = flags.rxl;
- else
- nl->delta_rxl = DEF_RXL_DELTA,
+ } else {
+ nl->delta_rxl = DEF_RXL_DELTA;
nl->cur_rxl_index = DEF_RXL;
+ }
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
nl->csr1.rate = flags.rate;
@@ -1526,13 +1538,16 @@ sbni_setup( char *p )
(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
if( !*p || *p == ')' )
return 1;
- if( *p == ';' )
- ++p, ++n, parm = 0;
- else if( *p++ != ',' )
+ if( *p == ';' ) {
+ ++p;
+ ++n;
+ parm = 0;
+ } else if( *p++ != ',' ) {
break;
- else
+ } else {
if( ++parm >= 5 )
break;
+ }
}
bad_param:
pr_err("Error in sbni kernel parameter!\n");
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 29053bec694e..8e3b1c717c10 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -22,8 +22,6 @@
#include <linux/io.h>
#include "slic_ds26522.h"
-#define DRV_NAME "ds26522"
-
#define SLIC_TRANS_LEN 1
#define SLIC_TWO_LEN 2
#define SLIC_THREE_LEN 3
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 20a4f3c0a0a1..d0f3b6d7f408 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -22,8 +22,8 @@ static struct genl_family genl_family;
static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
[WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
[WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
- [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
- [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
+ [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
[WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
@@ -31,12 +31,12 @@ static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
};
static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
- [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
- [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN },
+ [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
+ [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
[WGPEER_A_FLAGS] = { .type = NLA_U32 },
- [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) },
+ [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
- [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) },
+ [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
[WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
@@ -45,7 +45,7 @@ static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
- [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) },
+ [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
[WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
};
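The wireguard policy tables above move from open-coded
{ .type = NLA_EXACT_LEN, .len = ... } initializers to the
NLA_POLICY_EXACT_LEN() and NLA_POLICY_MIN_LEN() helpers from
<net/netlink.h>. A kernel-style sketch of the same pattern, using a
hypothetical attribute set that is not part of this patch:

	#include <linux/socket.h>
	#include <net/netlink.h>

	/* Hypothetical attributes, for illustration only. */
	enum {
		FOO_A_UNSPEC,
		FOO_A_KEY,	/* fixed-length binary key */
		FOO_A_ADDR,	/* at least sizeof(struct sockaddr) */
		__FOO_A_MAX,
	};
	#define FOO_A_MAX (__FOO_A_MAX - 1)
	#define FOO_KEY_LEN 32

	static const struct nla_policy foo_policy[FOO_A_MAX + 1] = {
		/* same as { .type = NLA_EXACT_LEN, .len = FOO_KEY_LEN } */
		[FOO_A_KEY]  = NLA_POLICY_EXACT_LEN(FOO_KEY_LEN),
		/* same as { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) } */
		[FOO_A_ADDR] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
	};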
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3c0c33a9f30c..b37902222517 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2019,8 +2019,8 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (!arvif->is_up)
return;
- if (!ieee80211_csa_is_complete(vif)) {
- ieee80211_csa_update_counter(vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(vif)) {
+ ieee80211_beacon_update_cntdwn(vif);
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
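This hunk, and the driver hunks that follow, track a mac80211 rename:
ieee80211_csa_is_complete(), ieee80211_csa_update_counter() and
ieee80211_csa_set_counter() become ieee80211_beacon_cntdwn_is_complete(),
ieee80211_beacon_update_cntdwn() and ieee80211_beacon_set_cntdwn(). A
minimal sketch of the calling pattern used by these drivers, with a
hypothetical helper name and not taken from this patch:

	#include <net/mac80211.h>

	/* On each beacon, either finalize the channel switch (the countdown
	 * embedded in the beacon has reached zero) or advance the countdown
	 * for the next beacon. */
	static void drv_beacon_cntdwn_step(struct ieee80211_vif *vif)
	{
		if (!vif->csa_active)
			return;

		if (ieee80211_beacon_cntdwn_is_complete(vif))
			ieee80211_csa_finish(vif);
		else
			ieee80211_beacon_update_cntdwn(vif);
	}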
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index a81a1ab2de19..b661d4ec5d52 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3878,7 +3878,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* actual channel switch is done
*/
if (arvif->vif->csa_active &&
- ieee80211_csa_is_complete(arvif->vif)) {
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
ieee80211_csa_finish(arvif->vif);
continue;
}
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index 5a7e150c621b..c96b26f39a25 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -53,7 +53,7 @@ ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
return ret;
}
-static struct thermal_cooling_device_ops ath11k_thermal_ops = {
+static const struct thermal_cooling_device_ops ath11k_thermal_ops = {
.get_max_state = ath11k_thermal_get_max_throttle_state,
.get_cur_state = ath11k_thermal_get_cur_throttle_state,
.set_cur_state = ath11k_thermal_set_cur_throttle_state,
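The cooling-device ops table above is never written after registration, so
it can be declared const and placed in read-only data. A generic sketch of
the constified-ops pattern, with hypothetical names and not part of this
patch:

	/* Hypothetical ops table, for illustration only: tables of function
	 * pointers that are never modified at run time can be declared const
	 * so the compiler places them in .rodata. */
	struct widget_ops {
		int (*get_max_state)(void *priv, unsigned long *state);
		int (*set_cur_state)(void *priv, unsigned long state);
	};

	static int widget_get_max_state(void *priv, unsigned long *state)
	{
		(void)priv;
		*state = 3;
		return 0;
	}

	static int widget_set_cur_state(void *priv, unsigned long state)
	{
		(void)priv;
		return state <= 3 ? 0 : -1;
	}

	static const struct widget_ops widget_ops = {
		.get_max_state = widget_get_max_state,
		.set_cur_state = widget_set_cur_state,
	};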
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 8e3437a65673..9fffa37f1e2e 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1593,8 +1593,8 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- cmd->csa_switch_count_offset = offs->csa_counter_offs[0];
- cmd->ext_csa_switch_count_offset = offs->csa_counter_offs[1];
+ cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
cmd->buf_len = bcn->len;
ptr = skb->data + sizeof(*cmd);
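The ath11k change above, like the mt76 hunks further down, follows the
rename of the csa_counter_offs[] field in struct ieee80211_mutable_offsets
to cntdwn_counter_offs[]. A minimal sketch of how a driver typically
obtains those offsets, with hypothetical names and not taken from this
patch (drivers treat a zero offset as "no countdown element present"):

	#include <net/mac80211.h>

	/* Fetch the beacon template and note where the countdown counter
	 * byte lives inside the frame. */
	static void drv_read_cntdwn_offset(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
	{
		struct ieee80211_mutable_offsets offs = {};
		struct sk_buff *bcn;

		bcn = ieee80211_beacon_get_template(hw, vif, &offs);
		if (!bcn)
			return;

		if (offs.cntdwn_counter_offs[0])
			pr_debug("countdown byte %u at offset %u\n",
				 bcn->data[offs.cntdwn_counter_offs[0]],
				 offs.cntdwn_counter_offs[0]);

		dev_kfree_skb_any(bcn);
	}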
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index e36f947e19fc..4daaf9b67d5f 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -365,7 +365,7 @@ bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!vif || !vif->csa_active)
return false;
- if (!ieee80211_csa_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif))
return false;
ieee80211_csa_finish(vif);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f20c839aeda2..c745897aa3d6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -514,7 +514,7 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
if (!vif || !vif->csa_active)
return false;
- if (!ieee80211_csa_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif))
return false;
ieee80211_csa_finish(vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index b78992e341d5..81bc05d70267 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1300,8 +1300,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_csa_is_complete(csa_vif)) {
- int c = ieee80211_csa_update_counter(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
if (csa_vif->p2p &&
@@ -1543,7 +1543,7 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
notif->csa_counter >= 1)
- ieee80211_csa_set_counter(vif, notif->csa_counter);
+ ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
}
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 1babc4bb5194..8abb57012240 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -172,7 +172,7 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_csa_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9dd9d73f4484..dce3bc9c9f84 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1699,7 +1699,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
rcu_dereference(vif->chanctx_conf)->def.chan);
}
- if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 3d4bf72700a5..fbfb991ebd90 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -1095,7 +1095,7 @@ EXPORT_SYMBOL_GPL(mt76_get_txpower);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
@@ -1120,7 +1120,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->csa_active)
return;
- dev->csa_complete |= ieee80211_csa_is_complete(vif);
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}
void mt76_csa_check(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index d0cbb283982f..084982eb6abd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -650,12 +650,12 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
+ if (offs.cntdwn_counter_offs[0]) {
u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
req.csa_ie_pos = cpu_to_le16(csa_offs);
- req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
+ req.csa_cnt = skb->data[offs.cntdwn_counter_offs[0]];
}
dev_kfree_skb(skb);
@@ -1713,10 +1713,10 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
+ if (offs.cntdwn_counter_offs[0]) {
u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
}
dev_kfree_skb(skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index eaed5ef05401..ac8ec257da03 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -2282,7 +2282,7 @@ mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
struct bss_info_bcn *bcn,
struct ieee80211_mutable_offsets *offs)
{
- if (offs->csa_counter_offs[0]) {
+ if (offs->cntdwn_counter_offs[0]) {
struct tlv *tlv;
struct bss_info_bcn_csa *csa;
@@ -2290,7 +2290,7 @@ mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
sizeof(*csa), &bcn->sub_ntlv,
&bcn->len);
csa = (struct bss_info_bcn_csa *)tlv;
- csa->cnt = skb->data[offs->csa_counter_offs[0]];
+ csa->cnt = skb->data[offs->cntdwn_counter_offs[0]];
}
}
@@ -2312,8 +2312,8 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
cont->tim_ofs = cpu_to_le16(offs->tim_offset);
- if (offs->csa_counter_offs[0])
- cont->csa_ofs = cpu_to_le16(offs->csa_counter_offs[0] - 4);
+ if (offs->cntdwn_counter_offs[0])
+ cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
buf = (u8 *)tlv + sizeof(*cont);
mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,